[RFC PATCH 085/162] drm/i915/region: support basic eviction
Matthew Auld
matthew.auld at intel.com
Fri Nov 27 12:06:01 UTC 2020
Support basic eviction for regions: when a buddy allocation fails, drop the
region's mm_lock, purge enough I915_MADV_DONTNEED objects from the region's
new purgeable list to cover the request, and then retry the allocation once.
The madvise ioctl now moves objects between the region's active and purgeable
lists accordingly.
Signed-off-by: Matthew Auld <matthew.auld at intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen at linux.intel.com>
Cc: Abdiel Janulgue <abdiel.janulgue at linux.intel.com>
---
.../gpu/drm/i915/gem/i915_gem_object_types.h | 1 +
drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 59 ++++++++++++++
drivers/gpu/drm/i915/gem/i915_gem_shrinker.h | 4 +
drivers/gpu/drm/i915/i915_gem.c | 17 +++++
drivers/gpu/drm/i915/intel_memory_region.c | 24 +++++-
.../drm/i915/selftests/intel_memory_region.c | 76 +++++++++++++++++++
6 files changed, 178 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index b172e8cc53ab..6d101275bc9d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -226,6 +226,8 @@ struct drm_i915_gem_object {
* region->obj_lock.
*/
struct list_head region_link;
+ /* XXX: tmp_link is unused in this patch -- the eviction walk in
+ * i915_gem_shrink_memory_region() reuses region_link for its local
+ * list instead. Either wire tmp_link up there or drop this field. */
+ struct list_head tmp_link;
struct sg_table *pages;
void *mapping;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index e42192834c88..4d346df8fd5b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -271,6 +271,79 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
return freed;
}
+/**
+ * i915_gem_shrink_memory_region - evict purgeable objects from a region
+ * @mem: the memory region to shrink
+ * @target: the minimum number of bytes we are trying to release
+ *
+ * Walk @mem's purgeable list and release the backing storage of objects
+ * marked I915_MADV_DONTNEED until at least @target bytes have been purged.
+ * Each object is first moved onto a local list so that the walk always
+ * makes forward progress, even though mem->objects.lock must be dropped
+ * around the unbind/put-pages step. Anything left unpurged is spliced
+ * back onto the region's purgeable list before returning.
+ *
+ * Returns: 0 if at least @target bytes were purged, -ENOSPC otherwise.
+ */
+int i915_gem_shrink_memory_region(struct intel_memory_region *mem,
+ resource_size_t target)
+{
+ struct drm_i915_private *i915 = mem->i915;
+ struct drm_i915_gem_object *obj;
+ resource_size_t purged;
+ LIST_HEAD(purgeable);
+ int err = -ENOSPC;
+
+ intel_gt_retire_requests(&i915->gt);
+
+ purged = 0;
+
+ mutex_lock(&mem->objects.lock);
+
+ while ((obj = list_first_entry_or_null(&mem->objects.purgeable,
+ typeof(*obj),
+ mm.region_link))) {
+ /* Move off the purgeable list first so we never revisit it */
+ list_move_tail(&obj->mm.region_link, &purgeable);
+
+ if (!i915_gem_object_has_pages(obj))
+ continue;
+
+ if (i915_gem_object_is_framebuffer(obj))
+ continue;
+
+ if (!kref_get_unless_zero(&obj->base.refcount))
+ continue;
+
+ /* Our reference keeps the object alive while unlocked */
+ mutex_unlock(&mem->objects.lock);
+
+ if (!i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE)) {
+ if (i915_gem_object_trylock(obj)) {
+ __i915_gem_object_put_pages(obj);
+ if (!i915_gem_object_has_pages(obj)) {
+ purged += obj->base.size;
+ if (!i915_gem_object_is_volatile(obj))
+ obj->mm.madv = __I915_MADV_PURGED;
+ }
+ i915_gem_object_unlock(obj);
+ }
+ }
+
+ i915_gem_object_put(obj);
+
+ mutex_lock(&mem->objects.lock);
+
+ if (purged >= target) {
+ err = 0;
+ break;
+ }
+ }
+
+ list_splice_tail(&purgeable, &mem->objects.purgeable);
+ mutex_unlock(&mem->objects.lock);
+ return err;
+}
+
+
static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.h b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.h
index 8512470f6fd6..c945f3b587d6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.h
@@ -7,10 +7,12 @@
#define __I915_GEM_SHRINKER_H__
#include <linux/bits.h>
+#include <linux/types.h>
struct drm_i915_private;
struct i915_gem_ww_ctx;
struct mutex;
+struct intel_memory_region;
/* i915_gem_shrinker.c */
unsigned long i915_gem_shrink(struct i915_gem_ww_ctx *ww,
@@ -29,5 +31,8 @@ void i915_gem_driver_register__shrinker(struct drm_i915_private *i915);
void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915);
void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
struct mutex *mutex);
+/* Purge DONTNEED objects from @mem until at least @target bytes are freed */
+int i915_gem_shrink_memory_region(struct intel_memory_region *mem,
+ resource_size_t target);
#endif /* __I915_GEM_SHRINKER_H__ */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2662d679db6e..ef2124c17a7f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1104,6 +1104,28 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
!i915_gem_object_has_pages(obj))
i915_gem_object_truncate(obj);
+ /*
+ * Keep the region bookkeeping in sync with the new advice:
+ * WILLNEED objects live on the region's main list, anything
+ * else is parked on the purgeable list, where
+ * i915_gem_shrink_memory_region() can find and evict it.
+ */
+ if (obj->mm.region && i915_gem_object_has_pages(obj)) {
+ mutex_lock(&obj->mm.region->objects.lock);
+
+ switch (obj->mm.madv) {
+ case I915_MADV_WILLNEED:
+ list_move(&obj->mm.region_link,
+ &obj->mm.region->objects.list);
+ break;
+ default:
+ list_move(&obj->mm.region_link,
+ &obj->mm.region->objects.purgeable);
+ break;
+ }
+
+ mutex_unlock(&obj->mm.region->objects.lock);
+ }
+
args->retained = obj->mm.madv != __I915_MADV_PURGED;
i915_gem_object_unlock(obj);
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index b326993a1026..308f89b87834 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -97,7 +97,8 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
do {
struct i915_buddy_block *block;
unsigned int order;
-
+ bool retry = true;
+retry:
order = fls(n_pages) - 1;
GEM_BUG_ON(order > mem->mm.max_order);
GEM_BUG_ON(order < min_order);
@@ -107,8 +108,31 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
if (!IS_ERR(block))
break;
- if (order-- == min_order)
- goto err_free_blocks;
+ if (order-- == min_order) {
+ resource_size_t target;
+ int err;
+
+ if (!retry)
+ goto err_free_blocks;
+
+ target = n_pages * mem->mm.chunk_size;
+
+ /*
+ * Out of space: drop mm_lock, purge from the region's
+ * purgeable list, then retry the allocation exactly
+ * once. NOTE(review): while unlocked another thread
+ * may allocate the space we just freed, so the retry
+ * can still fail -- confirm this race is acceptable.
+ */
+ mutex_unlock(&mem->mm_lock);
+ err = i915_gem_shrink_memory_region(mem,
+ target);
+ mutex_lock(&mem->mm_lock);
+ if (err)
+ goto err_free_blocks;
+
+ retry = false;
+ goto retry;
+ }
} while (1);
n_pages -= BIT(order);
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 9c20b7065fc5..84525ddba321 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -848,12 +848,94 @@ static int perf_memcpy(void *arg)
return 0;
}
+/* Unpin @obj and park it on the region's purgeable list for eviction */
+static void igt_mark_evictable(struct drm_i915_gem_object *obj)
+{
+ i915_gem_object_unpin_pages(obj);
+ obj->mm.madv = I915_MADV_DONTNEED;
+ list_move(&obj->mm.region_link, &obj->mm.region->objects.purgeable);
+}
+
+/*
+ * Fill the mock region with chunk-sized objects, mark every other one
+ * purgeable (a deliberately fragmented pattern), then allocate objects of
+ * doubling size to force i915_gem_shrink_memory_region() to evict the
+ * purgeable half each time.
+ */
+static int igt_mock_shrink(void *arg)
+{
+ struct intel_memory_region *mem = arg;
+ struct drm_i915_gem_object *obj;
+ unsigned long n_objects;
+ LIST_HEAD(objects);
+ resource_size_t target;
+ resource_size_t total;
+ int err = 0;
+
+ target = mem->mm.chunk_size;
+ total = resource_size(&mem->region);
+ n_objects = total / target;
+
+ while (n_objects--) {
+ obj = i915_gem_object_create_region(mem,
+ target,
+ 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_close_objects;
+ }
+
+ list_add(&obj->st_link, &objects);
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto err_close_objects;
+
+ /*
+ * Make half of the region evictable, though do so in a
+ * horribly fragmented fashion.
+ */
+ if (n_objects % 2)
+ igt_mark_evictable(obj);
+ }
+
+ while (target <= total / 2) {
+ obj = i915_gem_object_create_region(mem, target, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_close_objects;
+ }
+
+ list_add(&obj->st_link, &objects);
+
+ /* Provoke the shrinker to start violently swinging its axe! */
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ pr_err("failed to shrink for target=%pa\n", &target);
+ goto err_close_objects;
+ }
+
+ /* Again, half of the region should remain evictable */
+ igt_mark_evictable(obj);
+
+ target <<= 1;
+ }
+
+err_close_objects:
+ close_objects(mem, &objects);
+
+ if (err == -ENOMEM)
+ err = 0;
+
+ return err;
+}
+
int intel_memory_region_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_mock_fill),
SUBTEST(igt_mock_contiguous),
SUBTEST(igt_mock_splintered_region),
+ SUBTEST(igt_mock_shrink),
};
struct intel_memory_region *mem;
struct drm_i915_private *i915;
--
2.26.2
More information about the dri-devel
mailing list