[Intel-gfx] [RFC PATCH 05/42] drm/i915/region: support basic eviction
Matthew Auld
matthew.auld at intel.com
Thu Feb 14 14:57:03 UTC 2019
Support basic eviction for regions: track each region's objects on a per-region
list, moving objects marked I915_MADV_DONTNEED onto a separate purgeable list.
When a buddy allocation fails even at the smallest order, drop the region lock,
shrink the region by unbinding and discarding enough purgeable objects to cover
the request, then retry the allocation once. Also add a mock-region selftest
which fragments half the region with purgeable objects and verifies that new
allocations can still provoke eviction and succeed.
Signed-off-by: Matthew Auld <matthew.auld at intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen at linux.intel.com>
Cc: Abdiel Janulgue <abdiel.janulgue at linux.intel.com>
---
drivers/gpu/drm/i915/i915_drv.h | 2 +
drivers/gpu/drm/i915/i915_gem.c | 16 ++++
drivers/gpu/drm/i915/i915_gem_object.h | 7 ++
drivers/gpu/drm/i915/i915_gem_shrinker.c | 59 ++++++++++++++
drivers/gpu/drm/i915/intel_memory_region.c | 40 +++++++++-
drivers/gpu/drm/i915/intel_memory_region.h | 7 ++
.../drm/i915/selftests/intel_memory_region.c | 76 +++++++++++++++++++
drivers/gpu/drm/i915/selftests/mock_region.c | 1 +
8 files changed, 204 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0bea7d889284..3df27769b978 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3196,6 +3196,8 @@ void i915_gem_shrinker_register(struct drm_i915_private *i915);
void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
struct mutex *mutex);
+int i915_gem_shrink_memory_region(struct intel_memory_region *mem,
+ resource_size_t target);
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 92768ab294a4..7f044b643a75 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4095,6 +4095,22 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
!i915_gem_object_has_pages(obj))
i915_gem_object_truncate(obj);
+ if (obj->memory_region) {
+ mutex_lock(&obj->memory_region->obj_lock);
+
+ switch (obj->mm.madv) {
+ case I915_MADV_WILLNEED:
+ list_move(&obj->region_link, &obj->memory_region->objects);
+ break;
+ default:
+ list_move(&obj->region_link,
+ &obj->memory_region->purgeable);
+ break;
+ }
+
+ mutex_unlock(&obj->memory_region->obj_lock);
+ }
+
args->retained = obj->mm.madv != __I915_MADV_PURGED;
mutex_unlock(&obj->mm.lock);
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index ac52f61e8ad1..76947a6f49f1 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -95,6 +95,13 @@ struct drm_i915_gem_object {
* List of memory region blocks allocated for this object.
*/
struct list_head blocks;
+ /**
+ * Element within memory_region->objects or memory_region->purgeable if
+ * the object is marked as DONTNEED. Access is protected by
+ * memory_region->obj_lock.
+ */
+ struct list_head region_link;
+ struct list_head tmp_link;
struct {
/**
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 6da795c7e62e..713c6c93cf30 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -308,6 +308,65 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
return freed;
}
+int i915_gem_shrink_memory_region(struct intel_memory_region *mem,
+ resource_size_t target)
+{
+ struct drm_i915_private *i915 = mem->i915;
+ struct drm_i915_gem_object *obj, *on;
+ resource_size_t found;
+ LIST_HEAD(purgeable);
+ bool unlock;
+ int err;
+
+ if (!shrinker_lock(i915, 0, &unlock))
+ return 0;
+
+ i915_retire_requests(i915);
+
+ err = 0;
+ found = 0;
+
+ mutex_lock(&mem->obj_lock);
+
+ list_for_each_entry(obj, &mem->purgeable, region_link) {
+ if (!i915_gem_object_has_pages(obj))
+ continue;
+
+ if (READ_ONCE(obj->pin_global))
+ continue;
+
+ if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
+ continue;
+
+ list_add(&obj->tmp_link, &purgeable);
+
+ found += obj->base.size;
+ if (found >= target)
+ goto found;
+ }
+
+ err = -ENOSPC;
+found:
+ mutex_unlock(&mem->obj_lock);
+
+ list_for_each_entry_safe(obj, on, &purgeable, tmp_link) {
+ if (!err)
+ err = i915_gem_object_unbind(obj);
+ if (!err) {
+ __i915_gem_object_put_pages(obj,
+ I915_MM_SHRINKER);
+ if (!i915_gem_object_has_pages(obj))
+ obj->mm.madv = __I915_MADV_PURGED;
+ }
+
+ list_del(&obj->tmp_link);
+ }
+
+ shrinker_unlock(i915, unlock);
+
+ return err;
+}
+
static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index 405d6d51194f..f7fdc3e942e6 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -89,7 +89,8 @@ i915_memory_region_get_pages_buddy(struct drm_i915_gem_object *obj)
unsigned int order;
u64 block_size;
u64 offset;
-
+ bool retry = false;
+retry:
order = fls(n_pages) - 1;
GEM_BUG_ON(order > mem->mm.max_order);
@@ -98,9 +99,25 @@ i915_memory_region_get_pages_buddy(struct drm_i915_gem_object *obj)
if (!IS_ERR(block))
break;
- /* XXX: some kind of eviction pass, local to the device */
- if (!order--)
- goto err_free_blocks;
+ if (!order--) {
+ resource_size_t target;
+ int err;
+
+ if (retry)
+ goto err_free_blocks;
+
+ target = n_pages * mem->mm.min_size;
+
+ mutex_unlock(&mem->mm_lock);
+ err = i915_gem_shrink_memory_region(mem,
+ target);
+ mutex_lock(&mem->mm_lock);
+ if (err)
+ goto err_free_blocks;
+
+ retry = true;
+ goto retry;
+ }
} while (1);
n_pages -= 1 << order;
@@ -151,6 +168,13 @@ void i915_memory_region_release_buddy(struct intel_memory_region *mem)
i915_gem_buddy_fini(&mem->mm);
}
+void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
+{
+ mutex_lock(&obj->memory_region->obj_lock);
+ list_del(&obj->region_link);
+ mutex_unlock(&obj->memory_region->obj_lock);
+}
+
struct drm_i915_gem_object *
i915_gem_object_create_region(struct intel_memory_region *mem,
resource_size_t size,
@@ -179,6 +203,10 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
INIT_LIST_HEAD(&obj->blocks);
obj->memory_region = mem;
+ mutex_lock(&mem->obj_lock);
+ list_add(&obj->region_link, &mem->objects);
+ mutex_unlock(&mem->obj_lock);
+
i915_gem_object_set_cache_coherency(obj, obj->cache_level);
return obj;
@@ -205,6 +233,10 @@ intel_memory_region_create(struct drm_i915_private *i915,
mem->min_page_size = min_page_size;
mem->ops = ops;
+ mutex_init(&mem->obj_lock);
+ INIT_LIST_HEAD(&mem->objects);
+ INIT_LIST_HEAD(&mem->purgeable);
+
mutex_init(&mem->mm_lock);
if (ops->init) {
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 6d8a954ca75e..b1546afb0b6e 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -99,6 +99,11 @@ struct intel_memory_region {
unsigned int type;
unsigned int instance;
unsigned int id;
+
+ /* Protects access to objects and purgeable */
+ struct mutex obj_lock;
+ struct list_head objects;
+ struct list_head purgeable;
};
int i915_memory_region_init_buddy(struct intel_memory_region *mem);
@@ -108,6 +113,8 @@ int i915_memory_region_get_pages_buddy(struct drm_i915_gem_object *obj);
void i915_memory_region_put_pages_buddy(struct drm_i915_gem_object *obj,
struct sg_table *pages);
+void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj);
+
struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
resource_size_t start,
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 2b8d28216d87..1cea381d2d5e 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -93,10 +93,86 @@ static int igt_mock_fill(void *arg)
return err;
}
+static void igt_mark_evictable(struct drm_i915_gem_object *obj)
+{
+ i915_gem_object_unpin_pages(obj);
+ obj->mm.madv = I915_MADV_DONTNEED;
+ list_move(&obj->region_link, &obj->memory_region->purgeable);
+}
+
+static int igt_mock_shrink(void *arg)
+{
+ struct intel_memory_region *mem = arg;
+ struct drm_i915_gem_object *obj;
+ unsigned long n_objects;
+ LIST_HEAD(objects);
+ resource_size_t target;
+ resource_size_t total;
+ int err = 0;
+
+ target = mem->mm.min_size;
+ total = resource_size(&mem->region);
+ n_objects = total / target;
+
+ while (n_objects--) {
+ obj = i915_gem_object_create_region(mem,
+ target,
+ 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_close_objects;
+ }
+
+ list_add(&obj->st_link, &objects);
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto err_close_objects;
+
+ /*
+ * Make half of the region evictable, though do so in a
+ * horribly fragmented fashion.
+ */
+ if (n_objects % 2)
+ igt_mark_evictable(obj);
+ }
+
+ while (target <= total / 2) {
+ obj = i915_gem_object_create_region(mem, target, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_close_objects;
+ }
+
+ list_add(&obj->st_link, &objects);
+
+ /* Provoke the shrinker to start violently swinging its axe! */
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ pr_err("failed to shrink for target=%pa", &target);
+ goto err_close_objects;
+ }
+
+ /* Again, half of the region should remain evictable */
+ igt_mark_evictable(obj);
+
+ target <<= 1;
+ }
+
+err_close_objects:
+ close_objects(&objects);
+
+ if (err == -ENOMEM)
+ err = 0;
+
+ return err;
+}
+
int intel_memory_region_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_mock_fill),
+ SUBTEST(igt_mock_shrink),
};
struct intel_memory_region *mem;
struct drm_i915_private *i915;
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
index 2c83711f780d..11e9f379aaca 100644
--- a/drivers/gpu/drm/i915/selftests/mock_region.c
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -27,6 +27,7 @@
static const struct drm_i915_gem_object_ops mock_region_obj_ops = {
.get_pages = i915_memory_region_get_pages_buddy,
.put_pages = i915_memory_region_put_pages_buddy,
+ .release = i915_gem_object_release_memory_region,
};
static struct drm_i915_gem_object *
--
2.20.1
More information about the Intel-gfx
mailing list