[PATCH 5/6] drm/i915/ttm: Implement object migration
Thomas Hellström
thomas.hellstrom at linux.intel.com
Fri Jun 11 15:41:04 UTC 2021
Implement object migration, needed primarily for dma-buf exports of
objects currently residing in LMEM, until we land p2pdma.
There are no users of this code yet.
Signed-off-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
---
drivers/gpu/drm/i915/gem/i915_gem_object.c | 100 ++++++++++++++++++
drivers/gpu/drm/i915/gem/i915_gem_object.h | 10 ++
.../gpu/drm/i915/gem/i915_gem_object_types.h | 7 ++
drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 50 ++++++---
4 files changed, 155 insertions(+), 12 deletions(-)
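As review context, here is a sketch of how a caller might use the new
API; this is illustrative only and not part of the diff. The dma-buf
export path shown is an assumption (there are no users yet), and the
surrounding helpers (i915_gem_object_lock_interruptible() and the
INTEL_REGION_SMEM lookup) follow existing i915 conventions.

/* Hypothetical caller: migrate an LMEM object to SMEM before export. */
static int example_migrate_for_export(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_memory_region *smem =
		i915->mm.regions[INTEL_REGION_SMEM];
	int ret;

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		return ret;

	/* Bail out early if migration to SMEM is not allowed at all. */
	if (!i915_gem_object_can_migrate_to_region(obj, smem, NULL)) {
		ret = -EOPNOTSUPP;
		goto out_unlock;
	}

	/* Syncs, unbinds, puts pages and triggers the migration blit. */
	ret = i915_gem_object_migrate_to_region(obj, smem);
	if (ret)
		goto out_unlock;

	/*
	 * Per the kernel-doc, the object may still have ended up
	 * elsewhere if SMEM was short of space, so verify the final
	 * region before exporting.
	 */
	if (obj->mm.region != smem)
		ret = -ENXIO;

out_unlock:
	i915_gem_object_unlock(obj);
	return ret;
}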
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 07e8ff9a8aae..1589053ea99e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -475,6 +475,106 @@ bool i915_gem_object_migratable(struct drm_i915_gem_object *obj)
return obj->mm.n_placements > 1;
}
+bool i915_gem_object_can_migrate_to_region(struct drm_i915_gem_object *obj,
+ struct intel_memory_region *mr,
+ unsigned int *placement_index)
+{
+ unsigned int i;
+ unsigned int num_allowed = obj->mm.n_placements;
+
+ if (!i915_gem_object_evictable(obj))
+ return false;
+
+ if (num_allowed == 0 && mr != obj->mm.region)
+ return false;
+
+ if (num_allowed == 1 && mr != obj->mm.placements[0])
+ return false;
+
+ for (i = 0; i < num_allowed; ++i) {
+ if (mr == obj->mm.placements[i]) {
+ if (placement_index)
+ *placement_index = i;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/**
+ * i915_gem_object_migrate_to_region_lazy - Lazily migrate an object
+ * @obj: The object to migrate.
+ * @mr: The region to migrate to.
+ *
+ * Check that @obj can migrate to @mr, and update the state needed to make
+ * that happen on the next get_pages(): GPU bindings are synced and unbound,
+ * and the object's pages are put. "Lazy" means that the actual migration
+ * blit is not triggered by this function.
+ *
+ * Return: Zero on success, negative error code on failure.
+ */
+int i915_gem_object_migrate_to_region_lazy(struct drm_i915_gem_object *obj,
+ struct intel_memory_region *mr)
+{
+ unsigned int index;
+ int ret;
+
+ if (obj->mm.region == mr)
+ return 0;
+
+ if (!i915_gem_object_can_migrate_to_region(obj, mr, &index))
+ return -EINVAL;
+
+ ret = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
+ if (ret)
+ return ret;
+
+ ret = __i915_gem_object_put_pages(obj);
+ if (ret)
+ return ret;
+
+ /*
+ * The next get_pages() will pick up the new desired placement
+ * and migrate.
+ */
+ if (obj->mm.override_region) {
+ intel_memory_region_put(obj->mm.override_region);
+ obj->mm.override_region = NULL;
+ }
+
+ if (index != 0)
+ obj->mm.override_region =
+ intel_memory_region_get(obj->mm.placements[index]);
+
+ return 0;
+}
+
+/**
+ * i915_gem_object_migrate_to_region - Migrate an object
+ * @obj: The object to migrate.
+ * @mr: The region to migrate to.
+ *
+ * Check that @obj can migrate to @mr, and migrate the object.
+ * If there are other allowed placements, the caller needs to
+ * check that the final region is the desired one, since the
+ * object may have ended up elsewhere if the desired region
+ * was short of space.
+ *
+ * Return: Zero on success, negative error code on failure.
+ */
+int i915_gem_object_migrate_to_region(struct drm_i915_gem_object *obj,
+ struct intel_memory_region *mr)
+{
+ int ret;
+
+ ret = i915_gem_object_migrate_to_region_lazy(obj, mr);
+ if (ret)
+ return ret;
+
+ return ____i915_gem_object_get_pages(obj);
+}
+
/**
* i915_gem_object_has_struct_page - Whether the object is page-backed
* @obj: The object to query.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 60c760ebde42..a75288b936b0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -592,6 +592,16 @@ bool i915_gem_object_migratable(struct drm_i915_gem_object *obj);
bool i915_gem_object_validates_to_lmem(struct drm_i915_gem_object *obj);
+bool i915_gem_object_can_migrate_to_region(struct drm_i915_gem_object *obj,
+ struct intel_memory_region *mr,
+ unsigned int *placement_index);
+
+int i915_gem_object_migrate_to_region_lazy(struct drm_i915_gem_object *obj,
+ struct intel_memory_region *mr);
+
+int i915_gem_object_migrate_to_region(struct drm_i915_gem_object *obj,
+ struct intel_memory_region *mr);
+
#ifdef CONFIG_MMU_NOTIFIER
static inline bool
i915_gem_object_is_userptr(struct drm_i915_gem_object *obj)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index fb9ccc3f50e7..d645fa6f4c37 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -264,6 +264,13 @@ struct drm_i915_gem_object {
*/
struct intel_memory_region *region;
+ /**
+ * Override memory region for this object. Used to
+ * override the order of the placement list so that the
+ * object migrates to the desired region on next get_pages().
+ */
+ struct intel_memory_region *override_region;
+
/**
* Memory manager node allocated for this object.
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 0940c1d7c5e6..7c5e5a698cde 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -103,24 +103,33 @@ i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj,
struct ttm_placement *placement)
{
unsigned int num_allowed = obj->mm.n_placements;
+ struct intel_memory_region *requested_mr;
unsigned int i;
placement->num_placement = 1;
- i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] :
- obj->mm.region, requested);
-
- /* Cache this on object? */
- placement->num_busy_placement = num_allowed;
- for (i = 0; i < placement->num_busy_placement; ++i)
- i915_ttm_place_from_region(obj->mm.placements[i], busy + i);
+ placement->placement = requested;
+ placement->busy_placement = busy;
- if (num_allowed == 0) {
- *busy = *requested;
- placement->num_busy_placement = 1;
+ /* We migrate by setting the override region to something sensible. */
+ if (obj->mm.override_region) {
+ requested_mr = obj->mm.override_region;
+ } else if (num_allowed) {
+ requested_mr = obj->mm.placements[0];
+ } else {
+ requested_mr = obj->mm.region;
}
+ i915_ttm_place_from_region(requested_mr, requested);
- placement->placement = requested;
- placement->busy_placement = busy;
+ /* In the future we might want to cache the busy list on the object? */
+ *busy++ = *requested;
+ placement->num_busy_placement = 1;
+
+ for (i = 0; i < num_allowed; ++i) {
+ if (requested_mr != obj->mm.placements[i]) {
+ i915_ttm_place_from_region(obj->mm.placements[i], busy++);
+ placement->num_busy_placement++;
+ }
+ }
}
static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
@@ -246,6 +255,23 @@ static void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
unsigned int cache_level;
+ unsigned int i;
+
+ /*
+ * If the object was moved to an allowable region, update the
+ * object region to consider it migrated. Note that if it's
+ * currently not in an allowable region, it has been evicted and
+ * we don't update the object region.
+ */
+ for (i = 0; i < obj->mm.n_placements; ++i) {
+ struct intel_memory_region *mr = obj->mm.placements[i];
+
+ if (intel_region_to_ttm_type(mr) == bo->resource->mem_type &&
+ mr != obj->mm.region) {
+ intel_memory_region_put(obj->mm.region);
+ obj->mm.region = intel_memory_region_get(mr);
+ }
+ }
obj->mem_flags &= ~(I915_BO_FLAG_STRUCT_PAGE | I915_BO_FLAG_IOMEM);
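For reviewers, a worked example of the placement construction in
i915_ttm_placement_from_obj() above; the object state is assumed for
illustration and is not part of the diff.

/*
 * With obj->mm.placements = {SMEM, LMEM} and
 * obj->mm.override_region == LMEM, the function now yields:
 *
 *	requested	   = LMEM  (the override wins over placements[0])
 *	busy[0]		   = LMEM  (requested is always the first fallback)
 *	busy[1]		   = SMEM  (the remaining allowed placement)
 *	num_busy_placement = 2
 *
 * With no override set, requested falls back to placements[0], or to
 * obj->mm.region for an object created without a placement list.
 */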
--
2.31.1