[PATCH 15/18] drm/i915/selftests: exercise mmap migration
Matthew Auld
matthew.auld at intel.com
Fri Jan 14 10:37:51 UTC 2022
Exercise the migration of objects during CPU faulting on systems where
only some of device memory is CPU visible. Cover the three interesting
cases: migrating an object into the mappable portion on first access,
spilling into system memory when the mappable portion is already full
of pinned objects, and forcing eviction when device memory is the only
allowed placement.

Signed-off-by: Matthew Auld <matthew.auld at intel.com>
---
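Reviewer note, not part of the patch: for context, below is a rough
userspace analogue of what this selftest drives from inside the
kernel. It is only a sketch against the existing create_ext and
mmap_offset uAPI; the helper name map_lmem_preferred_bo is made up for
illustration, the include path for i915_drm.h varies between setups,
and error handling is kept minimal.

/*
 * Hypothetical userspace sketch (not part of this patch): create an
 * object with DEVICE placement and SYSTEM as fallback, then map it
 * with the FIXED mmap mode, mirroring the mixed-placement case the
 * selftest exercises in the kernel.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <drm/i915_drm.h> /* include path may differ per setup */

static void *map_lmem_preferred_bo(int drm_fd, uint64_t size)
{
	/* Preferred placement first, system memory as the fallback. */
	struct drm_i915_gem_memory_class_instance regions[] = {
		{ .memory_class = I915_MEMORY_CLASS_DEVICE, .memory_instance = 0 },
		{ .memory_class = I915_MEMORY_CLASS_SYSTEM, .memory_instance = 0 },
	};
	struct drm_i915_gem_create_ext_memory_regions ext = {
		.base.name = I915_GEM_CREATE_EXT_MEMORY_REGIONS,
		.num_regions = sizeof(regions) / sizeof(regions[0]),
		.regions = (uintptr_t)regions,
	};
	struct drm_i915_gem_create_ext create = {
		.size = size,
		.extensions = (uintptr_t)&ext,
	};
	struct drm_i915_gem_mmap_offset mmo = {
		.flags = I915_MMAP_OFFSET_FIXED,
	};

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create))
		return MAP_FAILED;

	mmo.handle = create.handle;
	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &mmo))
		return MAP_FAILED;

	/*
	 * mmap() only sets up the VMA; it is the first CPU access that
	 * faults the pages in and may migrate the object to a
	 * CPU-visible placement.
	 */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    drm_fd, mmo.offset);
}

Writing through the returned pointer and reading the values back is
then the userspace-visible equivalent of the access checks that
__igt_mmap_access() performs.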
.../drm/i915/gem/selftests/i915_gem_mman.c | 156 ++++++++++++++++++
1 file changed, 156 insertions(+)
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index f61356b72b1c..ce7b9ad09bf4 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -1125,6 +1125,161 @@ static int igt_mmap_access(void *arg)
 	return 0;
 }
 
+static void igt_close_objects(struct intel_memory_region *mem,
+			      struct list_head *objects)
+{
+	struct drm_i915_private *i915 = mem->i915;
+	struct drm_i915_gem_object *obj, *on;
+
+	list_for_each_entry_safe(obj, on, objects, st_link) {
+		i915_gem_object_lock(obj, NULL);
+		if (i915_gem_object_has_pinned_pages(obj))
+			i915_gem_object_unpin_pages(obj);
+		/* Avoid polluting the memory region between tests */
+		__i915_gem_object_put_pages(obj);
+		i915_gem_object_unlock(obj);
+		list_del(&obj->st_link);
+		i915_gem_object_put(obj);
+	}
+
+	cond_resched();
+
+	i915_gem_drain_freed_objects(i915);
+}
+
+static void igt_make_evictable(struct list_head *objects)
+{
+	struct drm_i915_gem_object *obj;
+
+	list_for_each_entry(obj, objects, st_link) {
+		i915_gem_object_lock(obj, NULL);
+		if (i915_gem_object_has_pinned_pages(obj))
+			i915_gem_object_unpin_pages(obj);
+		i915_gem_object_unlock(obj);
+	}
+
+	cond_resched();
+}
+
+static int igt_fill_mappable(struct intel_memory_region *mr,
+			     struct list_head *objects)
+{
+	u64 size;
+	int err;
+
+	size = mr->io_size;
+	do {
+		struct drm_i915_gem_object *obj;
+
+		obj = i915_gem_object_create_region(mr, size, 0, 0);
+		if (IS_ERR(obj)) {
+			err = PTR_ERR(obj);
+			goto err_close;
+		}
+
+		list_add(&obj->st_link, objects);
+
+		err = i915_gem_object_pin_pages_unlocked(obj);
+		if (err) {
+			if (err != -ENXIO)
+				goto err_close;
+
+			if (size == mr->min_page_size)
+				break;
+
+			size >>= 1;
+		}
+	} while (1);
+
+	return 0;
+
+err_close:
+	igt_close_objects(mr, objects);
+	return err;
+}
+
+static int __igt_mmap_access_migrate(struct intel_memory_region **placements,
+				     int n_placements,
+				     struct intel_memory_region *expected_mr)
+{
+	struct drm_i915_private *i915 = placements[0]->i915;
+	struct drm_i915_gem_object *obj;
+	int err;
+
+	obj = __i915_gem_object_create_user(i915, PAGE_SIZE,
+					    placements,
+					    n_placements);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	obj->flags |= I915_BO_ALLOC_TOPDOWN;
+
+	err = i915_gem_object_pin_pages_unlocked(obj);
+	if (err) {
+		i915_gem_object_put(obj);
+		return err;
+	}
+
+	i915_gem_object_unpin_pages(obj);
+
+	err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_FIXED);
+	if (!err && obj->mm.region != expected_mr)
+		err = -EINVAL;
+
+	i915_gem_object_put(obj);
+
+	i915_gem_drain_freed_objects(i915);
+
+	return err;
+}
+
+static int igt_mmap_access_migrate(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct intel_memory_region *system = i915->mm.regions[INTEL_REGION_SMEM];
+	struct intel_memory_region *mr;
+	enum intel_region_id id;
+
+	for_each_memory_region(mr, i915, id) {
+		struct intel_memory_region *mixed[] = { mr, system };
+		struct intel_memory_region *single[] = { mr };
+		LIST_HEAD(objects);
+		int err;
+
+		if (mr->private)
+			continue;
+
+		if (!mr->io_size || mr->io_size == mr->total)
+			continue;
+
+		/* Migrate to the mappable portion */
+		err = __igt_mmap_access_migrate(mixed, ARRAY_SIZE(mixed), mr);
+		if (err)
+			return err;
+
+		err = igt_fill_mappable(mr, &objects);
+		if (err)
+			return err;
+
+		/* Spill into system memory */
+		err = __igt_mmap_access_migrate(mixed, ARRAY_SIZE(mixed),
+						system);
+		if (err)
+			return err;
+
+		igt_make_evictable(&objects);
+
+		/* Migrate to the mappable portion, but force eviction */
+		err = __igt_mmap_access_migrate(single, ARRAY_SIZE(single), mr);
+		if (err)
+			return err;
+
+		igt_close_objects(mr, &objects);
+	}
+
+	return 0;
+}
+
 static int __igt_mmap_gpu(struct drm_i915_private *i915,
 			  struct drm_i915_gem_object *obj,
 			  enum i915_mmap_type type)
@@ -1419,6 +1574,7 @@ int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(igt_mmap_offset_exhaustion),
 		SUBTEST(igt_mmap),
 		SUBTEST(igt_mmap_access),
+		SUBTEST(igt_mmap_access_migrate),
 		SUBTEST(igt_mmap_revoke),
 		SUBTEST(igt_mmap_gpu),
 	};
--
2.31.1