[PATCH 2/9] drm: kselftest for drm_mm_reserve_node()
Chris Wilson
chris at chris-wilson.co.uk
Fri Dec 9 13:08:18 UTC 2016
Exercise drm_mm_reserve_node(), check that we can't reserve an already
occupied range and that the lists are correct after reserving/removing.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
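(Note for readers unfamiliar with the interface under test, not part of the
patch itself: drm_mm_reserve_node() takes a node whose start/size the caller
has already filled in and claims exactly that range from the drm_mm, failing
(typically with -ENOSPC) when the range overlaps an already reserved node.
A minimal sketch of that usage pattern follows; the function name and the
SZ_* sizes are invented purely for illustration.)

#include <drm/drm_mm.h>
#include <linux/sizes.h>

static int example_reserve(void)
{
	struct drm_mm mm;
	struct drm_mm_node node = {};
	int err;

	drm_mm_init(&mm, 0, SZ_4K);		/* manage offsets [0, 4K) */

	node.start = 0;
	node.size = SZ_1K;
	err = drm_mm_reserve_node(&mm, &node);	/* fails if [0, 1K) is not free */
	if (!err)
		drm_mm_remove_node(&node);	/* release the reservation */

	drm_mm_takedown(&mm);			/* mm must be empty here */
	return err;
}

The selftest below does the same dance at scale: it reserves count nodes at
randomised offsets and then checks that every overlapping reservation is
rejected.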
drivers/gpu/drm/drm_mm_selftests.h | 1 +
drivers/gpu/drm/test-drm_mm.c | 143 +++++++++++++++++++++++++++++++++++++
2 files changed, 144 insertions(+)
diff --git a/drivers/gpu/drm/drm_mm_selftests.h b/drivers/gpu/drm/drm_mm_selftests.h
index 8a9166f4626f..204200227b3c 100644
--- a/drivers/gpu/drm/drm_mm_selftests.h
+++ b/drivers/gpu/drm/drm_mm_selftests.h
@@ -5,5 +5,6 @@
  *
  * Tests are executed in reverse order by igt/drm_mm
  */
+selftest(reserve, igt_reserve)
 selftest(init, igt_init)
 selftest(sanitycheck, igt_sanitycheck) /* keep last */
diff --git a/drivers/gpu/drm/test-drm_mm.c b/drivers/gpu/drm/test-drm_mm.c
index d89615987303..d7ab054678a8 100644
--- a/drivers/gpu/drm/test-drm_mm.c
+++ b/drivers/gpu/drm/test-drm_mm.c
@@ -61,6 +61,149 @@ static int igt_init(void *ignored)
 	return ret;
 }
 
+static int *random_order(int count)
+{
+	int *order;
+	int n;
+
+	order = kmalloc_array(count, sizeof(*order), GFP_TEMPORARY);
+	if (!order)
+		return order;
+
+	for (n = 0; n < count; n++)
+		order[n] = n;
+
+	for (n = count - 1; n > 0; n--) {
+		int r = get_random_int() % (n + 1);
+		if (r != n) {
+			int tmp = order[n];
+			order[n] = order[r];
+			order[r] = tmp;
+		}
+	}
+
+	return order;
+}
+
+static int __igt_reserve(int count, u64 size)
+{
+	struct drm_mm mm;
+	struct drm_mm_node *node, *next;
+	int *order, n;
+	int ret;
+
+	/* Fill a range with lots of nodes, check it doesn't fail too early */
+
+	ret = -ENOMEM;
+	order = random_order(count);
+	if (!order)
+		goto err;
+
+	ret = -EINVAL;
+	drm_mm_init(&mm, 0, count * size);
+	if (!drm_mm_clean(&mm)) {
+		pr_err("mm not empty on creation\n");
+		goto out;
+	}
+
+	for (n = 0; n < count; n++) {
+		int err;
+
+		node = kzalloc(sizeof(*node), GFP_KERNEL);
+		if (!node) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		node->start = order[n] * size;
+		node->size = size;
+
+		err = drm_mm_reserve_node(&mm, node);
+		if (err) {
+			pr_err("reserve failed, step %d, start %llu\n",
+			       n, node->start);
+			ret = err;
+			goto out;
+		}
+	}
+
+	/* Repeated use should then fail */
+	for (n = 0; n < count; n++) {
+		struct drm_mm_node tmp = {
+			.start = order[n] * size,
+			.size = 1
+		};
+
+		if (!drm_mm_reserve_node(&mm, &tmp)) {
+			drm_mm_remove_node(&tmp);
+			pr_err("impossible reserve succeeded, step %d, start %llu\n",
+			       n, tmp.start);
+			goto out;
+		}
+	}
+
+	/* Overlapping use should then fail */
+	for (n = 0; n < count; n++) {
+		struct drm_mm_node tmp = {
+			.start = 0,
+			.size = size * count,
+		};
+
+		if (!drm_mm_reserve_node(&mm, &tmp)) {
+			drm_mm_remove_node(&tmp);
+			pr_err("impossible reserve succeeded, step %d, start %llu\n",
+			       n, tmp.start);
+			goto out;
+		}
+	}
+	for (n = 0; n < count; n++) {
+		struct drm_mm_node tmp = {
+			.start = size * n,
+			.size = size * (count - n),
+		};
+
+		if (!drm_mm_reserve_node(&mm, &tmp)) {
+			drm_mm_remove_node(&tmp);
+			pr_err("impossible reserve succeeded, step %d, start %llu\n",
+			       n, tmp.start);
+			goto out;
+		}
+	}
+
+	ret = 0;
+out:
+	list_for_each_entry_safe(node, next,
+				 &mm.head_node.node_list, node_list) {
+		drm_mm_remove_node(node);
+		kfree(node);
+	}
+	drm_mm_takedown(&mm);
+	kfree(order);
+err:
+	return ret;
+}
+
+static int igt_reserve(void *ignored)
+{
+	int n, ret;
+
+	for (n = 1; n < 50; n++) {
+		ret = __igt_reserve(8192, (1ull << n) - 1);
+		if (ret)
+			return ret;
+
+		ret = __igt_reserve(8192, 1ull << n);
+		if (ret)
+			return ret;
+
+		ret = __igt_reserve(8192, (1ull << n) + 1);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
#include "drm_selftest.c"
static int __init test_drm_mm_init(void)
--
2.11.0
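(One more aside for readers new to this little framework, again not part of
the patch: drm_mm_selftests.h is an X-macro table, which is why adding a test
is just the one-line hunk above; the harness added in patch 1/9 re-includes
the header with selftest() redefined to build its test list. Roughly, and
with every name below invented purely for illustration, the pattern reduces
to this stand-alone sketch:)

#include <stdio.h>

/* Stand-ins for the real subtests; the real ones return 0 on success. */
static int igt_sanitycheck(void *ignored) { (void)ignored; return 0; }
static int igt_init(void *ignored)        { (void)ignored; return 0; }
static int igt_reserve(void *ignored)     { (void)ignored; return 0; }

/* In the kernel this list lives in drm_mm_selftests.h and is #included. */
#define SELFTESTS \
	selftest(reserve, igt_reserve) \
	selftest(init, igt_init) \
	selftest(sanitycheck, igt_sanitycheck) /* keep last */

/* Expand the table into an array of { name, function } entries. */
#define selftest(name, func) { #name, func },
static const struct {
	const char *name;
	int (*func)(void *);
} tests[] = { SELFTESTS };
#undef selftest

int main(void)
{
	int i;

	/* Walk backwards: "Tests are executed in reverse order". */
	for (i = (int)(sizeof(tests) / sizeof(tests[0])) - 1; i >= 0; i--)
		printf("%s: %d\n", tests[i].name, tests[i].func(NULL));

	return 0;
}

(The backwards walk mirrors the header's note that tests are executed in
reverse order, so the sanitycheck entry listed last runs first.)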