[Intel-gfx] [PATCH 1/2] drm: Add some kselftests for the DRM range manager (struct drm_mm)
Chris Wilson
chris at chris-wilson.co.uk
Sun Nov 27 14:08:34 UTC 2016
A set of test cases to capture some annoying bugs and to ensure that
correct behaviour does not regress whilst fixing those!
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
drivers/gpu/drm/Kconfig | 13 +
drivers/gpu/drm/Makefile | 2 +
drivers/gpu/drm/test-drm_mm.c | 829 ++++++++++++++++++++++++++++++++++
tools/testing/selftests/Makefile | 1 +
tools/testing/selftests/drm/drm_mm.sh | 11 +
5 files changed, 856 insertions(+)
create mode 100644 drivers/gpu/drm/test-drm_mm.c
create mode 100644 tools/testing/selftests/drm/drm_mm.sh
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 95fc0410e129..22b0f28c0137 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -48,6 +48,19 @@ config DRM_DEBUG_MM
If in doubt, say "N".
+config DRM_DEBUG_MM_SELFTEST
+ tristate "kselftests for DRM range manager (struct drm_mm)"
+ depends on DRM
+ depends on DEBUG_KERNEL
+ default n
+ help
+ This option provides a kernel module that can be used to test
+ the DRM range manager (drm_mm) and its API. This option is not
+ useful for distributions or general kernels, but only for kernel
+ developers working on DRM and associated drivers.
+
+	  Say N if you are unsure.
+
config DRM_KMS_HELPER
tristate
depends on DRM
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 883f3e75cfbc..57818abdb5b8 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -18,6 +18,8 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_plane.o drm_color_mgmt.o drm_print.o \
drm_dumb_buffers.o drm_mode_config.o
+obj-$(CONFIG_DRM_DEBUG_MM_SELFTEST) += test-drm_mm.o
+
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
drm-$(CONFIG_PCI) += ati_pcigart.o
diff --git a/drivers/gpu/drm/test-drm_mm.c b/drivers/gpu/drm/test-drm_mm.c
new file mode 100644
index 000000000000..c2ffb6a0f344
--- /dev/null
+++ b/drivers/gpu/drm/test-drm_mm.c
@@ -0,0 +1,829 @@
+/*
+ * Test cases for the drm_mm range manager
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/vmalloc.h>
+
+#include <drm/drm_mm.h>
+
+static unsigned long *primes;
+static unsigned long prime_last, prime_sz;
+
/*
 * Return the smallest prime strictly greater than @x, by trial division.
 *
 * Fallback for when the sieve cannot be (re)allocated. Bugfix versus the
 * original: starting the divisor search at int_sqrt(x) + 1 allowed the
 * divisor to equal x itself when x == 2, wrongly classifying 2 as
 * composite (so slow_next_prime_number(1) returned 3). Bounding the
 * divisor with y <= x / y avoids both that defect and y * y overflow.
 */
static unsigned long slow_next_prime_number(unsigned long x)
{
	for (;;) {
		unsigned long y;

		x++;
		/* Trial-divide by every y up to floor(sqrt(x)). */
		for (y = 2; y <= x / y; y++)
			if ((x % y) == 0)
				break;
		if (y > x / y)
			return x;
	}
}
+
/*
 * Clear the sieve bits for every multiple of @x (excluding x itself)
 * within [start, end) of the bitmap @p, and return @x so the caller can
 * track the largest prime processed so far.
 */
static unsigned long mark_multiples(unsigned long x,
				    unsigned long *p,
				    unsigned long start,
				    unsigned long end)
{
	unsigned long first = 2 * x;

	/* Resume from the first multiple of x at or beyond start. */
	if (first < start)
		first = (start / x + 1) * x;

	for (; first < end; first += x)
		__clear_bit(first, p);

	return x;
}
+
/*
 * Return the next prime after @x, growing a cached Sieve of Eratosthenes
 * on demand. Falls back to trial division if the sieve cannot be
 * enlarged. The sieve (primes/prime_sz/prime_last globals) persists
 * until module unload; this module runs tests serially so no locking is
 * needed here.
 */
static unsigned long next_prime_number(unsigned long x)
{
	if (x == 1)
		return 2;

	/* prime_last is the largest prime currently in the sieve. */
	if (x >= prime_last) {
		unsigned long sz, y;
		unsigned long *nprimes;

		/* Grow quadratically so the sieve covers well past x. */
		sz = x*x;
		if (sz < x)
			return slow_next_prime_number(x);

		sz = round_up(sz, BITS_PER_LONG);
		/*
		 * NOTE(review): sz is a bit count; sz / sizeof(long) bytes is
		 * exact on 64-bit (BITS_PER_LONG == 8 * sizeof(long) == 64)
		 * but over-allocates 2x on 32-bit — harmless, verify intent.
		 */
		nprimes = krealloc(primes, sz / sizeof(long), GFP_KERNEL);
		if (!nprimes)
			return slow_next_prime_number(x);

		/* Where memory permits, track the primes using the
		 * Sieve of Eratosthenes.
		 */
		/* Newly grown tail starts all-set; sieve out composites. */
		memset(nprimes + prime_sz / BITS_PER_LONG,
		       0xff, (sz - prime_sz) / sizeof(long));
		for (y = 2UL; y < sz; y = find_next_bit(nprimes, sz, y + 1))
			prime_last = mark_multiples(y, nprimes, prime_sz, sz);

		primes = nprimes;
		prime_sz = sz;
	}

	/* The next set bit after x is the next prime. */
	return find_next_bit(primes, prime_last, x + 1);
}

/* Iterate prime = 2, 3, 5, ... while prime < max (prime starts as 1). */
#define for_each_prime(prime, max) \
	for (prime = 1; prime < (max); prime = next_prime_number(prime))
+
/* Index of each subtest into the subtests[] table (and its module param). */
enum test {
	SUBTEST_INIT,		/* creation sanity checks */
	SUBTEST_RESERVE,	/* drm_mm_reserve_node() behaviour */
	SUBTEST_INSERT,		/* drm_mm_insert_node() behaviour */
	SUBTEST_ALIGN32,	/* power-of-two alignments, 32-bit range */
	SUBTEST_ALIGN64,	/* power-of-two alignments, 64-bit range */
	SUBTEST_EVICT,		/* eviction scan helpers */
	SUBTEST_TOPDOWN,	/* DRM_MM_CREATE_TOP allocation */
};
+
+static int subtest_init(void)
+{
+ struct drm_mm mm;
+ struct drm_mm_node *hole;
+ u64 start, end;
+ int ret = -EINVAL;
+
+ drm_mm_init(&mm, 0, 4096);
+ if (!drm_mm_clean(&mm)) {
+ pr_err("mm not empty on creation\n");
+ goto out;
+ }
+
+ drm_mm_for_each_hole(hole, &mm, start, end) {
+ if (start != 0 || end != 4096) {
+ pr_err("empty mm has incorrect hole, found (%llx, %llx), expect (%llx, %llx)\n",
+ start, end,
+ 0ull, 4096ull);
+ goto out;
+ }
+ }
+
+ ret = 0;
+out:
+ if (ret)
+ drm_mm_debug_table(&mm, __func__);
+ drm_mm_takedown(&mm);
+ return ret;
+}
+
+static int *random_order(int count)
+{
+ int *order;
+ int n;
+
+ order = kmalloc_array(count, sizeof(*order), GFP_TEMPORARY);
+ if (!order)
+ return order;
+
+ for (n = 0; n < count; n++)
+ order[n] = n;
+
+ for (n = count-1; n > 1; n--) {
+ int r = get_random_int() % (n + 1);
+ if (r != n) {
+ int tmp = order[n];
+ order[n] = order[r];
+ order[r] = tmp;
+ }
+ }
+
+ return order;
+}
+
/*
 * Fill an mm of exactly count * size with @count nodes of @size via
 * drm_mm_reserve_node() in a random order, then check that reserving any
 * already-occupied location fails. Returns 0 on success, -errno on
 * failure.
 */
static int __subtest_reserve(int count, u64 size)
{
	struct drm_mm mm;
	struct drm_mm_node *node, *next;
	int *order, n;
	int ret;

	/* Fill a range with lots of nodes, check it doesn't fail too early */

	ret = -ENOMEM;
	order = random_order(count);
	if (!order)
		goto err;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);
	if (!drm_mm_clean(&mm)) {
		pr_err("mm not empty on creation\n");
		goto out;
	}

	/* Reserve every slot exactly once, in random order. */
	for (n = 0; n < count; n++) {
		int err;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node) {
			ret = -ENOMEM;
			goto out;
		}

		node->start = order[n] * size;
		node->size = size;

		err = drm_mm_reserve_node(&mm, node);
		if (err) {
			pr_err("reserve failed, step %d, start %llu\n",
			       n, node->start);
			ret = err;
			goto out;
		}
	}

	/* Repeated use should then fail */
	for (n = 0; n < count; n++) {
		/* Even a 1-byte node must collide with the occupant. */
		struct drm_mm_node tmp = {
			.start = order[n] * size,
			.size = 1
		};

		if (!drm_mm_reserve_node(&mm, &tmp)) {
			drm_mm_remove_node(&tmp);
			pr_err("impossible reserve succeed, step %d, start %llu\n",
			       n, tmp.start);
			goto out;	/* ret is still -EINVAL here */
		}
	}

	ret = 0;
out:
	/* All remaining nodes were kzalloc'ed above; free as we remove. */
	list_for_each_entry_safe(node, next,
				 &mm.head_node.node_list, node_list) {
		drm_mm_remove_node(node);
		kfree(node);
	}
	drm_mm_takedown(&mm);
	kfree(order);
err:
	return ret;
}
+
/*
 * Exercise __subtest_reserve() with node sizes just below, exactly at,
 * and just above each power of two up to 2^49.
 */
static int subtest_reserve(void)
{
	static const int delta[] = { -1, 0, 1 };
	int n, i;

	for (n = 1; n < 50; n++) {
		for (i = 0; i < ARRAY_SIZE(delta); i++) {
			int err;

			err = __subtest_reserve(8192,
						(1ull << n) + delta[i]);
			if (err)
				return err;
		}
	}

	return 0;
}
+
/*
 * Fill an mm of exactly count * size with @count nodes of @size via
 * drm_mm_insert_node(), then verify: insertion order and packing (no
 * holes), range lookups, single remove/reinsert stability (the freed
 * hole must be refilled in place), and bulk remove/reinsert of
 * prime-sized random batches refilling the mm completely.
 */
static int __subtest_insert(int count, u64 size)
{
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	int *order, n, o = 0;
	int ret;

	/* Fill a range with lots of nodes, check it doesn't fail too early */

	ret = -ENOMEM;
	nodes = vzalloc(count * sizeof(*nodes));
	if (!nodes)
		goto err;

	order = random_order(count);
	if (!order)
		goto err_nodes;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);
	if (!drm_mm_clean(&mm)) {
		pr_err("mm not empty on creation\n");
		goto out;
	}

	/* The mm is exactly count * size large, so count inserts must fit. */
	for (n = 0; n < count; n++) {
		int err;

		err = drm_mm_insert_node(&mm, &nodes[n], size, 0,
					 DRM_MM_SEARCH_DEFAULT);
		if (err) {
			pr_err("insert failed, step %d, start %llu\n",
			       n, nodes[n].start);
			ret = err;
			goto out;
		}
	}

	/* Repeated use should then fail */
	if (1) {
		struct drm_mm_node tmp;

		memset(&tmp, 0, sizeof(tmp));
		if (!drm_mm_insert_node(&mm, &tmp, size, 0,
					DRM_MM_SEARCH_DEFAULT)) {
			drm_mm_remove_node(&tmp);
			pr_err("impossible insert succeed, step %d, start %llu\n",
			       n, tmp.start);
			goto out;
		}
	}

	/* Nodes must come out in order, tightly packed, with no holes. */
	n = 0;
	drm_mm_for_each_node(node, &mm) {
		if (node->start != n * size) {
			pr_err("node %d out of order, expected start %llx, found %llx\n",
			       n, n * size, node->start);
			goto out;
		}

		if (node->size != size) {
			pr_err("node %d has wrong size, expected size %llx, found %llx\n",
			       n, size, node->size);
			goto out;
		}

		if (node->hole_follows) {
			pr_err("node %d is followed by a hole!\n", n);
			goto out;
		}

		n++;
	}

	/* Each slot's range lookup must find only the node placed there. */
	for (n = 0; n < count; n++) {
		drm_mm_for_each_node_in_range(node, &mm, n * size, (n + 1) * size) {
			if (node->start != n * size) {
				pr_err("lookup node %d out of order, expected start %llx, found %llx\n",
				       n, n * size, node->start);
				goto out;
			}
		}
	}

	/* Remove one and reinsert, as the only hole it should refill itself */
	for (n = 0; n < count; n++) {
		int err;

		drm_mm_remove_node(&nodes[n]);
		err = drm_mm_insert_node(&mm, &nodes[n], size, 0,
					 DRM_MM_SEARCH_DEFAULT);
		if (err) {
			pr_err("reinsert failed, step %d\n", n);
			ret = err;
			goto out;
		}

		if (nodes[n].start != n * size) {
			pr_err("reinsert node moved, step %d, expected %llx, found %llx\n",
			       n, n * size, nodes[n].start);
			goto out;
		}
	}

	/* Remove several, reinsert, check full */
	for_each_prime(n, count) {
		int m;

		/* Drop a random batch of n nodes... */
		for (m = 0; m < n; m++) {
			node = &nodes[order[(o + m) % count]];
			drm_mm_remove_node(node);
		}

		/* ...and put the same batch back; all must fit again. */
		for (m = 0; m < n; m++) {
			int err;

			node = &nodes[order[(o + m) % count]];
			err = drm_mm_insert_node(&mm, node, size, 0,
						 DRM_MM_SEARCH_DEFAULT);
			if (err) {
				pr_err("insert failed, step %d, start %llu\n",
				       n, node->start);
				ret = err;
				goto out;
			}
		}

		/* Advance the window so each pass hits different nodes. */
		o += n;

		if (1) {
			struct drm_mm_node tmp;

			memset(&tmp, 0, sizeof(tmp));
			if (!drm_mm_insert_node(&mm, &tmp, size, 0,
						DRM_MM_SEARCH_DEFAULT)) {
				drm_mm_remove_node(&tmp);
				pr_err("impossible insert succeed, start %llu\n",
				       tmp.start);
				goto out;
			}
		}

		/* The mm must be packed again after the churn. */
		m = 0;
		drm_mm_for_each_node(node, &mm) {
			if (node->start != m * size) {
				pr_err("node %d out of order, expected start %llx, found %llx\n",
				       m, m * size, node->start);
				goto out;
			}

			if (node->size != size) {
				pr_err("node %d has wrong size, expected size %llx, found %llx\n",
				       m, size, node->size);
				goto out;
			}

			if (node->hole_follows) {
				pr_err("node %d is followed by a hole!\n", m);
				goto out;
			}

			m++;
		}
	}

	ret = 0;
out:
	/* nodes[] storage is the vzalloc'ed array; only remove from the mm. */
	list_for_each_entry_safe(node, next, &mm.head_node.node_list, node_list)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
+
/*
 * Exercise __subtest_insert() with node sizes just below, exactly at,
 * and just above each power of two up to 2^49.
 */
static int subtest_insert(void)
{
	static const int delta[] = { -1, 0, 1 };
	int n, i;

	for (n = 1; n < 50; n++) {
		for (i = 0; i < ARRAY_SIZE(delta); i++) {
			int err;

			err = __subtest_insert(8192,
					       (1ull << n) + delta[i]);
			if (err)
				return err;
		}
	}

	return 0;
}
+
+static int subtest_align_pot(int max)
+{
+ struct drm_mm mm;
+ struct drm_mm_node *node, *next;
+ int bit;
+ int ret = -EINVAL;
+
+ drm_mm_init(&mm, 1, -2);
+ if (!drm_mm_clean(&mm)) {
+ pr_err("mm not empty on creation\n");
+ goto out;
+ }
+
+ for (bit = max - 1; bit; bit--) {
+ int err;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ err = drm_mm_insert_node_generic(&mm, node, 1,
+ BIT_ULL(bit), bit,
+ DRM_MM_SEARCH_DEFAULT,
+ DRM_MM_CREATE_DEFAULT);
+ if (err) {
+ pr_err("insert failed with alignment=%llx [%d]",
+ BIT_ULL(bit), bit);
+ ret = err;
+ goto out;
+ }
+
+ if (node->start & (BIT_ULL(bit) - 1)) {
+ pr_err("node inserted into wrong location %llx, expected alignment to %llx [%d]\n",
+ node->start, BIT_ULL(bit), bit);
+ goto out;
+ }
+ }
+
+ ret = 0;
+out:
+ list_for_each_entry_safe(node, next,
+ &mm.head_node.node_list, node_list) {
+ drm_mm_remove_node(node);
+ kfree(node);
+ }
+ drm_mm_takedown(&mm);
+ return ret;
+}
+
/* Check all power-of-two alignments representable in 32 bits. */
static int subtest_align32(void)
{
	return subtest_align_pot(32);
}
+
/* Check all power-of-two alignments representable in 64 bits. */
static int subtest_align64(void)
{
	return subtest_align_pot(64);
}
+
+static int subtest_evict(void)
+{
+ const int size = 8192;
+ struct drm_mm mm;
+ struct evict_node {
+ struct drm_mm_node node;
+ struct list_head link;
+ } *nodes;
+ struct drm_mm_node *node, *next;
+ int *order, n, m;
+ int ret;
+
+ ret = -ENOMEM;
+ nodes = vzalloc(size * sizeof(*nodes));
+ if (!nodes)
+ goto err;
+
+ order = random_order(size);
+ if (!order)
+ goto err_nodes;
+
+ ret = -EINVAL;
+ drm_mm_init(&mm, 0, size);
+ for (n = 0; n < size; n++) {
+ int err;
+
+ err = drm_mm_insert_node(&mm, &nodes[n].node, 1, 0,
+ DRM_MM_SEARCH_DEFAULT);
+ if (err) {
+ pr_err("insert failed, step %d\n", n);
+ ret = err;
+ goto out;
+ }
+ }
+
+ for (n = 1; n < size; n <<= 1) {
+ const int nsize = size / 2;
+ LIST_HEAD(evict_list);
+ struct evict_node *e, *en;
+ struct drm_mm_node tmp;
+ bool found = false;
+ int err;
+
+ drm_mm_init_scan(&mm, nsize, n, 0);
+ for (m = 0; m < size; m++) {
+ e = &nodes[order[m]];
+ list_add(&e->link, &evict_list);
+ if (drm_mm_scan_add_block(&e->node)) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ pr_err("Failed to fail eviction: size=%d, align=%d\n",
+ nsize, n);
+ goto out;
+ }
+
+ list_for_each_entry_safe(e, en, &evict_list, link) {
+ if (!drm_mm_scan_remove_block(&e->node))
+ list_del(&e->link);
+ }
+
+ list_for_each_entry(e, &evict_list, link)
+ drm_mm_remove_node(&e->node);
+
+ memset(&tmp, 0, sizeof(tmp));
+ err = drm_mm_insert_node(&mm, &tmp, nsize, n,
+ DRM_MM_SEARCH_DEFAULT);
+ if (err) {
+ pr_err("Failed to insert into eviction hole: size=%d, alignt=%d\n",
+ nsize, n);
+ goto out;
+ }
+
+ if (tmp.start % n || tmp.size != nsize || tmp.hole_follows) {
+ pr_err("Inserted did not align to eviction hole: size=%lld [%d], align=%d, start=%llx, hole-follows?=%d\n",
+ tmp.size, nsize, n, tmp.start, tmp.hole_follows);
+
+ drm_mm_remove_node(&tmp);
+ goto out;
+ }
+
+ drm_mm_remove_node(&tmp);
+ list_for_each_entry(e, &evict_list, link) {
+ err = drm_mm_reserve_node(&mm, &e->node);
+ if (err) {
+ pr_err("Failed to reinsert node after eviction: start=%llx\n",
+ e->node.start);
+ ret = err;
+ goto out;
+ }
+ }
+ }
+
+ for_each_prime(n, size) {
+ LIST_HEAD(evict_list);
+ struct evict_node *e, *en;
+ struct drm_mm_node tmp;
+ int nsize = (size - n - 1) / 2;
+ bool found = false;
+ int err;
+
+ drm_mm_init_scan(&mm, nsize, n, 0);
+ for (m = 0; m < size; m++) {
+ e = &nodes[order[m]];
+ list_add(&e->link, &evict_list);
+ if (drm_mm_scan_add_block(&e->node)) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ pr_err("Failed to fail eviction: size=%d, align=%d\n",
+ nsize, n);
+ goto out;
+ }
+
+ list_for_each_entry_safe(e, en, &evict_list, link) {
+ if (!drm_mm_scan_remove_block(&e->node))
+ list_del(&e->link);
+ }
+
+ list_for_each_entry(e, &evict_list, link)
+ drm_mm_remove_node(&e->node);
+
+ memset(&tmp, 0, sizeof(tmp));
+ err = drm_mm_insert_node(&mm, &tmp, nsize, n,
+ DRM_MM_SEARCH_DEFAULT);
+ if (err) {
+ pr_err("Failed to insert into eviction hole: size=%d, alignt=%d\n",
+ nsize, n);
+ goto out;
+ }
+
+ if (tmp.start % n || tmp.size != nsize || tmp.hole_follows) {
+ pr_err("Inserted did not align to eviction hole: size=%lld [%d], align=%d, start=%llx, hole-follows?=%d\n",
+ tmp.size, nsize, n, tmp.start, tmp.hole_follows);
+
+ drm_mm_remove_node(&tmp);
+ goto out;
+ }
+
+ drm_mm_remove_node(&tmp);
+ list_for_each_entry(e, &evict_list, link) {
+ err = drm_mm_reserve_node(&mm, &e->node);
+ if (err) {
+ pr_err("Failed to reinsert node after eviction: start=%llx\n",
+ e->node.start);
+ ret = err;
+ goto out;
+ }
+ }
+ }
+
+ ret = 0;
+out:
+ list_for_each_entry_safe(node, next, &mm.head_node.node_list, node_list)
+ drm_mm_remove_node(node);
+ drm_mm_takedown(&mm);
+ kfree(order);
+err_nodes:
+ vfree(nodes);
+err:
+ return ret;
+}
+
/*
 * Check top-down allocation (DRM_MM_CREATE_TOP): after filling the mm,
 * remove prime-sized random batches (tracking the freed starts in a
 * bitmap) and reinsert; every reinserted node must land in the
 * uppermost free slot, and no node may be followed by a hole.
 */
static int subtest_topdown(void)
{
	const int size = 8192;
	unsigned long *bitmap;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	int *order, n, m, o = 0;
	int ret;

	ret = -ENOMEM;
	nodes = vzalloc(size * sizeof(*nodes));
	if (!nodes)
		goto err;

	/* size (8192) is a multiple of BITS_PER_LONG, so this is exact. */
	bitmap = kzalloc(size / BITS_PER_LONG * sizeof(unsigned long),
			 GFP_TEMPORARY);
	if (!bitmap)
		goto err_nodes;

	order = random_order(size);
	if (!order)
		goto err_bitmap;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, size);
	for (n = 0; n < size; n++) {
		int err;

		err = drm_mm_insert_node_generic(&mm, &nodes[n], 1, 0, 0,
						 DRM_MM_SEARCH_BELOW,
						 DRM_MM_CREATE_TOP);
		if (err) {
			pr_err("insert failed, step %d\n", n);
			ret = err;
			goto out;
		}

		/* Top-down filling must stay contiguous from the top. */
		if (nodes[n].hole_follows) {
			pr_err("hole after topdown insert %d, start=%llx\n",
			       n, nodes[n].start);
			goto out;
		}
	}

	for_each_prime(n, size) {
		/* Free n random slots, remembering each freed start. */
		for (m = 0; m < n; m++) {
			node = &nodes[order[(o + m) % size]];
			drm_mm_remove_node(node);
			__set_bit(node->start, bitmap);
		}

		for (m = 0; m < n; m++) {
			int err, last;

			node = &nodes[order[(o + m) % size]];
			err = drm_mm_insert_node_generic(&mm, node, 1, 0, 0,
							 DRM_MM_SEARCH_BELOW,
							 DRM_MM_CREATE_TOP);
			if (err) {
				pr_err("insert failed, step %d/%d\n", m, n);
				ret = err;
				goto out;
			}

			if (node->hole_follows) {
				pr_err("hole after topdown insert %d/%d, start=%llx\n",
				       m, n, node->start);
				goto out;
			}

			/* Must refill the highest free slot first. */
			last = find_last_bit(bitmap, size);
			if (node->start != last) {
				/* NOTE(review): node->start is u64; %lld is
				 * the signed specifier — consider %llu.
				 */
				pr_err("node %d/%d not inserted into upmost hole, expected %d, found %lld\n",
				       m, n, last, node->start);
				goto out;
			}
			__clear_bit(last, bitmap);
		}

		o += n;
	}

	ret = 0;
out:
	/* Node storage lives in the vzalloc'ed array; just unlink from mm. */
	list_for_each_entry_safe(node, next, &mm.head_node.node_list, node_list)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_bitmap:
	kfree(bitmap);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
+
/*
 * Table of subtests, indexed by enum test. 'enabled' is wired to a
 * module parameter below; if no parameter is set, all subtests run.
 */
struct subtest {
	const char *name;	/* human-readable name used in log output */
	int (*func)(void);	/* returns 0 on success, -errno on failure */
	bool enabled;		/* set via the subtest__<name> module param */
} subtests[] = {
	[SUBTEST_INIT] = { "init", subtest_init },
	[SUBTEST_RESERVE] = { "reserve", subtest_reserve },
	[SUBTEST_INSERT] = { "insert", subtest_insert },
	[SUBTEST_ALIGN32] = { "align32", subtest_align32 },
	[SUBTEST_ALIGN64] = { "align64", subtest_align64 },
	[SUBTEST_EVICT] = { "evict", subtest_evict },
	[SUBTEST_TOPDOWN] = { "topdown", subtest_topdown },
};
+
+/* Tests executed in reverse order (last in list goes first) */
+module_param_named(subtest__topdown, subtests[SUBTEST_TOPDOWN].enabled, bool, 0400);
+module_param_named(subtest__evict, subtests[SUBTEST_EVICT].enabled, bool, 0400);
+module_param_named(subtest__align64, subtests[SUBTEST_ALIGN64].enabled, bool, 0400);
+module_param_named(subtest__align32, subtests[SUBTEST_ALIGN32].enabled, bool, 0400);
+module_param_named(subtest__insert, subtests[SUBTEST_INSERT].enabled, bool, 0400);
+module_param_named(subtest__reserve, subtests[SUBTEST_RESERVE].enabled, bool, 0400);
+module_param_named(subtest__init, subtests[SUBTEST_INIT].enabled, bool, 0400);
+
+static void set_default_test_all(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(subtests); i++)
+ if (subtests[i].enabled)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(subtests); i++)
+ subtests[i].enabled = true;
+}
+
+static int __init test_drm_mm_init(void)
+{
+ int i;
+
+ pr_info("Testing DRM range manger (struct drm_mm)\n");
+ set_default_test_all();
+
+ for (i = 0; i < ARRAY_SIZE(subtests); i++) {
+ int err;
+
+ if (!subtests[i].enabled)
+ continue;
+
+ pr_debug("Running %s\n", subtests[i].name);
+ err = subtests[i].func();
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
/* Module unload: release the prime sieve cached by next_prime_number(). */
static void __exit test_drm_mm_exit(void)
{
	kfree(primes);
}
+
+module_init(test_drm_mm_init);
+module_exit(test_drm_mm_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index f770dba2a6f6..f48229197fa8 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -1,6 +1,7 @@
TARGETS = breakpoints
TARGETS += capabilities
TARGETS += cpu-hotplug
+TARGETS += drm
TARGETS += efivarfs
TARGETS += exec
TARGETS += firmware
diff --git a/tools/testing/selftests/drm/drm_mm.sh b/tools/testing/selftests/drm/drm_mm.sh
new file mode 100644
index 000000000000..59c20ce50b10
--- /dev/null
+++ b/tools/testing/selftests/drm/drm_mm.sh
@@ -0,0 +1,11 @@
#!/bin/sh
# Runs API tests for struct drm_mm (DRM range manager)

# Loading the module runs every enabled subtest; any failure makes module
# init (and hence modprobe) return non-zero, which we report as FAIL.
if /sbin/modprobe -q test-drm_mm; then
	/sbin/modprobe -q -r test-drm_mm
	echo "drm/drm_mm: ok"
else
	echo "drm/drm_mm: [FAIL]"
	exit 1
fi
+
--
2.10.2
More information about the Intel-gfx
mailing list