[PATCH 7/7] drm/i915/gem: Migrate to system at dma-buf attach time (v6)
Jason Ekstrand
jason at jlekstrand.net
Thu Jul 15 22:39:00 UTC 2021
From: Thomas Hellström <thomas.hellstrom at linux.intel.com>
Until we support p2p dma, or as a complement to it, migrate data to
system memory at dma-buf attach time if possible. If the object cannot
be migrated to system memory, the attach call is rejected with
-EOPNOTSUPP.
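
For context (not part of the patch): the migration runs from the exporter's
.attach() hook, so any importer that attaches to an i915-exported dma-buf
sees the object moved to system memory first, or gets -EOPNOTSUPP if it
cannot be moved. A minimal, hypothetical importer-side sketch follows;
example_import() and importer_dev are placeholders, and only
dma_buf_attach()/dma_buf_map_attachment()/dma_buf_detach() are real dma-buf
core calls:

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/* Hypothetical importer helper: dma_buf_attach() is what invokes the
 * exporter's .attach() hook (i915_gem_dmabuf_attach() in the diff below),
 * which with this patch migrates the object to system memory before any
 * mapping happens.
 */
static struct sg_table *example_import(struct dma_buf *dmabuf,
				       struct device *importer_dev)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	attach = dma_buf_attach(dmabuf, importer_dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach); /* e.g. -EOPNOTSUPP for LMEM-only objects */

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		dma_buf_detach(dmabuf, attach);

	/* The caller later undoes this with dma_buf_unmap_attachment()
	 * and dma_buf_detach().
	 */
	return sgt;
}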
v2:
- Rebase on dynamic exporter. Update the igt_dmabuf_import_same_driver
selftest to migrate if we are LMEM capable.
v3:
- Migrate also in the pin() callback.
v4:
- Migrate in attach
v5: (jason)
- Lock around the migration (see the ww locking sketch after this changelog)
v6: (jason)
- Move the can_migrate check outside the lock
- Rework the selftests to test more migration conditions. In
particular, SMEM, LMEM, and LMEM+SMEM are all checked.
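
The locking added in v5/v6 uses the driver's ww-mutex retry helper. As a
rough, open-coded sketch (not part of the patch) of what the
for_i915_gem_ww() loop in the attach hook below amounts to, assuming the
usual i915 ww helpers: the 'continue' statements in the real code jump back
to the loop header, which backs off and retries while the error is -EDEADLK
and cleans up the ww context once it is not. example_migrate_and_pin() is a
placeholder name.

/* Sketch only: roughly equivalent to the for_i915_gem_ww() loop used in
 * i915_gem_dmabuf_attach() below.
 */
static int example_migrate_and_pin(struct drm_i915_gem_object *obj)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);	/* interruptible waits */
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (err)
		goto out;

	err = i915_gem_object_migrate(obj, &ww, INTEL_REGION_SMEM);
	if (err)
		goto out;

	err = i915_gem_object_wait_migration(obj, 0);
	if (err)
		goto out;

	err = i915_gem_object_pin_pages(obj);
out:
	if (err == -EDEADLK) {
		/* Contention: drop the locks we hold, then retry. */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}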
Signed-off-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
Signed-off-by: Michael J. Ruhl <michael.j.ruhl at intel.com>
Reported-by: kernel test robot <lkp at intel.com>
Signed-off-by: Jason Ekstrand <jason at jlekstrand.net>
Reviewed-by: Jason Ekstrand <jason at jlekstrand.net>
---
 drivers/gpu/drm/i915/gem/i915_gem_create.c    |  2 +-
 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c    | 23 ++++-
 drivers/gpu/drm/i915/gem/i915_gem_object.h    |  4 +
 .../drm/i915/gem/selftests/i915_gem_dmabuf.c  | 89 ++++++++++++++++++-
 4 files changed, 112 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c
index 69bf9ec777642..ed6b0d75ff801 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_create.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c
@@ -82,7 +82,7 @@ static int i915_gem_publish(struct drm_i915_gem_object *obj,
 	return 0;
 }
 
-static struct drm_i915_gem_object *
+struct drm_i915_gem_object *
 i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
			    struct intel_memory_region **placements,
			    unsigned int n_placements)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 9a655f69a0671..5d438b95826b9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -170,8 +170,29 @@ static int i915_gem_dmabuf_attach(struct dma_buf *dmabuf,
				  struct dma_buf_attachment *attach)
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(dmabuf);
+	struct i915_gem_ww_ctx ww;
+	int err;
+
+	if (!i915_gem_object_can_migrate(obj, INTEL_REGION_SMEM))
+		return -EOPNOTSUPP;
+
+	for_i915_gem_ww(&ww, err, true) {
+		err = i915_gem_object_lock(obj, &ww);
+		if (err)
+			continue;
+
+		err = i915_gem_object_migrate(obj, &ww, INTEL_REGION_SMEM);
+		if (err)
+			continue;
 
-	return i915_gem_object_pin_pages_unlocked(obj);
+		err = i915_gem_object_wait_migration(obj, 0);
+		if (err)
+			continue;
+
+		err = i915_gem_object_pin_pages(obj);
+	}
+
+	return err;
 }
 
 static void i915_gem_dmabuf_detach(struct dma_buf *dmabuf,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 8be4fadeee487..fbae53bd46384 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -61,6 +61,10 @@ i915_gem_object_create_shmem(struct drm_i915_private *i915,
 struct drm_i915_gem_object *
 i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
				       const void *data, resource_size_t size);
+struct drm_i915_gem_object *
+i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
+			    struct intel_memory_region **placements,
+			    unsigned int n_placements);
 
 extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index 4451bbb4917e4..7b7647e7e220a 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -85,9 +85,62 @@ static int igt_dmabuf_import_self(void *arg)
 	return err;
 }
 
-static int igt_dmabuf_import_same_driver(void *arg)
+static int igt_dmabuf_import_same_driver_lmem(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
+	struct intel_memory_region *lmem = i915->mm.regions[INTEL_REGION_LMEM];
+	struct drm_i915_gem_object *obj;
+	struct drm_gem_object *import;
+	struct dma_buf *dmabuf;
+	int err;
+
+	if (!i915->mm.regions[INTEL_REGION_LMEM])
+		return 0;
+
+	force_different_devices = true;
+
+	obj = i915_gem_object_create_user(i915, PAGE_SIZE, &lmem, 1);
+	if (IS_ERR(obj)) {
+		pr_err("i915_gem_object_create_user failed with err=%d\n",
+		       (int)PTR_ERR(obj));
+		err = PTR_ERR(obj);
+		goto out_ret;
+	}
+
+	dmabuf = i915_gem_prime_export(&obj->base, 0);
+	if (IS_ERR(dmabuf)) {
+		pr_err("i915_gem_prime_export failed with err=%d\n",
+		       (int)PTR_ERR(dmabuf));
+		err = PTR_ERR(dmabuf);
+		goto out;
+	}
+
+	/* We expect an import of an LMEM-only object to fail with
+	 * -EOPNOTSUPP because it can't be migrated to SMEM.
+	 */
+	import = i915_gem_prime_import(&i915->drm, dmabuf);
+	if (!IS_ERR(import)) {
+		drm_gem_object_put(import);
+		pr_err("i915_gem_prime_import succeeded when it shouldn't have\n");
+		err = -EINVAL;
+	} else if (PTR_ERR(import) != -EOPNOTSUPP) {
+		pr_err("i915_gem_prime_import failed with the wrong err=%d\n",
+		       (int)PTR_ERR(import));
+		err = PTR_ERR(import);
+	}
+
+	dma_buf_put(dmabuf);
+out:
+	i915_gem_object_put(obj);
+out_ret:
+	force_different_devices = false;
+	return err;
+}
+
+static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
+					 struct intel_memory_region **regions,
+					 unsigned int num_regions)
+{
 	struct drm_i915_gem_object *obj, *import_obj;
 	struct drm_gem_object *import;
 	struct dma_buf *dmabuf;
@@ -97,9 +150,15 @@ static int igt_dmabuf_import_same_driver(void *arg)
 	int err;
 
 	force_different_devices = true;
-	obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
-	if (IS_ERR(obj))
+
+	obj = i915_gem_object_create_user(i915, PAGE_SIZE,
+					  regions, num_regions);
+	if (IS_ERR(obj)) {
+		pr_err("i915_gem_object_create_user failed with err=%d\n",
+		       (int)PTR_ERR(obj));
+		err = PTR_ERR(obj);
 		goto out_ret;
+	}
 
 	dmabuf = i915_gem_prime_export(&obj->base, 0);
 	if (IS_ERR(dmabuf)) {
@@ -174,6 +233,26 @@ static int igt_dmabuf_import_same_driver(void *arg)
 	return err;
 }
 
+static int igt_dmabuf_import_same_driver_smem(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct intel_memory_region *smem = i915->mm.regions[INTEL_REGION_SMEM];
+	return igt_dmabuf_import_same_driver(i915, &smem, 1);
+}
+
+static int igt_dmabuf_import_same_driver_lmem_smem(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct intel_memory_region *regions[2];
+
+	if (!i915->mm.regions[INTEL_REGION_LMEM])
+		return 0;
+
+	regions[0] = i915->mm.regions[INTEL_REGION_LMEM];
+	regions[1] = i915->mm.regions[INTEL_REGION_SMEM];
+	return igt_dmabuf_import_same_driver(i915, regions, 2);
+}
+
 static int igt_dmabuf_import(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
@@ -384,7 +463,9 @@ int i915_gem_dmabuf_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(igt_dmabuf_export),
-		SUBTEST(igt_dmabuf_import_same_driver),
+		SUBTEST(igt_dmabuf_import_same_driver_lmem),
+		SUBTEST(igt_dmabuf_import_same_driver_smem),
+		SUBTEST(igt_dmabuf_import_same_driver_lmem_smem),
 	};
 
 	return i915_subtests(tests, i915);
--
2.31.1