[Intel-gfx] [PATCH 31/46] drm/i915: Stop tracking MRU activity on VMA
Tvrtko Ursulin
tvrtko.ursulin at linux.intel.com
Wed Jan 16 16:27:16 UTC 2019
On 07/01/2019 11:54, Chris Wilson wrote:
> Our goal is to remove struct_mutex and replace it with fine grained
> locking. One of the thorny issues is our eviction logic for reclaiming
> space for an execbuffer (or GTT mmaping, among a few other examples).
> While eviction itself is easy to move under a per-VM mutex, performing
> the activity tracking is less agreeable. One solution is not to do any
> MRU tracking and do a simple coarse evaluation during eviction of
> active/inactive, with a loose temporal ordering of last
> insertion/evaluation. That keeps all the locking constrained to when we
> are manipulating the VM itself, neatly avoiding the tricky handling of
> possible recursive locking during execbuf and elsewhere.
>
> Note that discarding the MRU is unlikely to impact upon our efficiency
> to reclaim VM space (where we think a LRU model is best) as our
> current strategy is to use random idle replacement first before doing
> a search, and over time the use of softpinned 48b per-ppGTT is growing
> (thereby eliminating any need to perform any eviction searches, in
> theory at least).
I've noticed you made some changes since I last reviewed it, but there is
no changelog so I have to find them manually. Also, I suppose the comments
you did not address are the ones you disagree with?
On the commit message, my comment was that I think you should mention the
removal of the active/inactive lists in favour of a single list.
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> ---
> drivers/gpu/drm/i915/i915_gem.c | 10 +--
> drivers/gpu/drm/i915/i915_gem_evict.c | 71 ++++++++++++-------
> drivers/gpu/drm/i915/i915_gem_gtt.c | 15 ++--
> drivers/gpu/drm/i915/i915_gem_gtt.h | 26 +------
> drivers/gpu/drm/i915/i915_gem_shrinker.c | 8 ++-
> drivers/gpu/drm/i915/i915_gem_stolen.c | 3 +-
> drivers/gpu/drm/i915/i915_gpu_error.c | 37 +++++-----
> drivers/gpu/drm/i915/i915_vma.c | 9 +--
> .../gpu/drm/i915/selftests/i915_gem_evict.c | 4 +-
> drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 2 +-
> 10 files changed, 84 insertions(+), 101 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 83fb02dab18c..6ed44aeee583 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -254,10 +254,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
>
> pinned = ggtt->vm.reserved;
> mutex_lock(&dev->struct_mutex);
> - list_for_each_entry(vma, &ggtt->vm.active_list, vm_link)
> - if (i915_vma_is_pinned(vma))
> - pinned += vma->node.size;
> - list_for_each_entry(vma, &ggtt->vm.inactive_list, vm_link)
> + list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
> if (i915_vma_is_pinned(vma))
> pinned += vma->node.size;
> mutex_unlock(&dev->struct_mutex);
> @@ -1540,13 +1537,10 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
> GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
>
> for_each_ggtt_vma(vma, obj) {
> - if (i915_vma_is_active(vma))
> - continue;
> -
> if (!drm_mm_node_allocated(&vma->node))
> continue;
>
> - list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
> + list_move_tail(&vma->vm_link, &vma->vm->bound_list);
> }
>
> i915 = to_i915(obj->base.dev);
> diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
> index 02b83a5ed96c..a76f65fe86be 100644
> --- a/drivers/gpu/drm/i915/i915_gem_evict.c
> +++ b/drivers/gpu/drm/i915/i915_gem_evict.c
> @@ -127,14 +127,10 @@ i915_gem_evict_something(struct i915_address_space *vm,
> struct drm_i915_private *dev_priv = vm->i915;
> struct drm_mm_scan scan;
> struct list_head eviction_list;
> - struct list_head *phases[] = {
> - &vm->inactive_list,
> - &vm->active_list,
> - NULL,
> - }, **phase;
> struct i915_vma *vma, *next;
> struct drm_mm_node *node;
> enum drm_mm_insert_mode mode;
> + struct i915_vma *active;
> int ret;
>
> lockdep_assert_held(&vm->i915->drm.struct_mutex);
There is a comment around here, not shown in the diff, which still talks
about the active and inactive lists. It is also now misleading about the
list ordering.
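Maybe it could be reworded along these lines (just a sketch of the idea,
wording entirely up to you), mirroring the comment you add to the scan
loop below:

        /*
         * vm->bound_list is kept in a rough least-recently scanned order:
         * new entries are added at the tail, and anything still active
         * when we scan it is moved to the tail, so the search starts from
         * the entries we have not looked at for the longest time (inactive
         * entries being cheap to reap along the way).
         */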
> @@ -170,17 +166,46 @@ i915_gem_evict_something(struct i915_address_space *vm,
> */
> if (!(flags & PIN_NONBLOCK))
> i915_retire_requests(dev_priv);
> - else
> - phases[1] = NULL;
>
> search_again:
> + active = NULL;
> INIT_LIST_HEAD(&eviction_list);
> - phase = phases;
> - do {
> - list_for_each_entry(vma, *phase, vm_link)
> - if (mark_free(&scan, vma, flags, &eviction_list))
> - goto found;
> - } while (*++phase);
> + list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) {
> + /*
> + * We keep this list in a rough least-recently scanned order
> + * of active elements (inactive elements are cheap to reap).
> + * New entries are added to the end, and we move anything we
> + * scan to the end. The assumption is that the working set
> + * of applications is either steady state (and thanks to the
> + * userspace bo cache it almost always is) or volatile and
> + * frequently replaced after a frame, which are self-evicting!
> + * Given that assumption, the MRU order of the scan list is
> + * fairly static, and keeping it in least-recently scan order
> + * is suitable.
> + *
> + * To notice when we complete one full cycle, we record the
> + * first active element seen, before moving it to the tail.
> + */
This is one change since v1 that I spotted, and it is a good one.
> + if (i915_vma_is_active(vma)) {
> + if (vma == active) {
> + if (flags & PIN_NONBLOCK)
> + break;
> +
> + active = ERR_PTR(-EAGAIN);
> + }
> +
> + if (active != ERR_PTR(-EAGAIN)) {
> + if (!active)
> + active = vma;
> +
> + list_move_tail(&vma->vm_link, &vm->bound_list);
> + continue;
> + }
> + }
> +
> + if (mark_free(&scan, vma, flags, &eviction_list))
> + goto found;
> + }
>
> /* Nothing found, clean up and bail out! */
> list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
> @@ -389,11 +414,6 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
> */
> int i915_gem_evict_vm(struct i915_address_space *vm)
> {
> - struct list_head *phases[] = {
> - &vm->inactive_list,
> - &vm->active_list,
> - NULL
> - }, **phase;
> struct list_head eviction_list;
> struct i915_vma *vma, *next;
> int ret;
> @@ -413,16 +433,13 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
> }
>
> INIT_LIST_HEAD(&eviction_list);
> - phase = phases;
> - do {
> - list_for_each_entry(vma, *phase, vm_link) {
> - if (i915_vma_is_pinned(vma))
> - continue;
> + list_for_each_entry(vma, &vm->bound_list, vm_link) {
> + if (i915_vma_is_pinned(vma))
> + continue;
>
> - __i915_vma_pin(vma);
> - list_add(&vma->evict_link, &eviction_list);
> - }
> - } while (*++phase);
> + __i915_vma_pin(vma);
> + list_add(&vma->evict_link, &eviction_list);
> + }
>
> ret = 0;
> list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 45c7c8b6c7c8..ad4ef8980b97 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -492,9 +492,8 @@ static void i915_address_space_init(struct i915_address_space *vm, int subclass)
>
> stash_init(&vm->free_pages);
>
> - INIT_LIST_HEAD(&vm->active_list);
> - INIT_LIST_HEAD(&vm->inactive_list);
> INIT_LIST_HEAD(&vm->unbound_list);
> + INIT_LIST_HEAD(&vm->bound_list);
> }
>
> static void i915_address_space_fini(struct i915_address_space *vm)
> @@ -2112,8 +2111,7 @@ void i915_ppgtt_close(struct i915_address_space *vm)
> static void ppgtt_destroy_vma(struct i915_address_space *vm)
> {
> struct list_head *phases[] = {
> - &vm->active_list,
> - &vm->inactive_list,
> + &vm->bound_list,
> &vm->unbound_list,
> NULL,
> }, **phase;
> @@ -2136,8 +2134,7 @@ void i915_ppgtt_release(struct kref *kref)
>
> ppgtt_destroy_vma(&ppgtt->vm);
>
> - GEM_BUG_ON(!list_empty(&ppgtt->vm.active_list));
> - GEM_BUG_ON(!list_empty(&ppgtt->vm.inactive_list));
> + GEM_BUG_ON(!list_empty(&ppgtt->vm.bound_list));
> GEM_BUG_ON(!list_empty(&ppgtt->vm.unbound_list));
>
> ppgtt->vm.cleanup(&ppgtt->vm);
> @@ -2802,8 +2799,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
> mutex_lock(&dev_priv->drm.struct_mutex);
> i915_gem_fini_aliasing_ppgtt(dev_priv);
>
> - GEM_BUG_ON(!list_empty(&ggtt->vm.active_list));
> - list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link)
> + list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
> WARN_ON(i915_vma_unbind(vma));
>
> if (drm_mm_node_allocated(&ggtt->error_capture))
> @@ -3514,8 +3510,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
> ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */
>
> /* clflush objects bound into the GGTT and rebind them. */
> - GEM_BUG_ON(!list_empty(&ggtt->vm.active_list));
> - list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link) {
> + list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
> struct drm_i915_gem_object *obj = vma->obj;
>
> if (!(vma->flags & I915_VMA_GLOBAL_BIND))
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
> index a0039ea97cdc..bd679c8c56dd 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.h
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
> @@ -299,32 +299,12 @@ struct i915_address_space {
> struct i915_page_directory_pointer *scratch_pdp; /* GEN8+ & 48b PPGTT */
>
> /**
> - * List of objects currently involved in rendering.
> - *
> - * Includes buffers having the contents of their GPU caches
> - * flushed, not necessarily primitives. last_read_req
> - * represents when the rendering involved will be completed.
> - *
> - * A reference is held on the buffer while on this list.
> + * List of vma currently bound.
> */
> - struct list_head active_list;
> + struct list_head bound_list;
>
> /**
> - * LRU list of objects which are not in the ringbuffer and
> - * are ready to unbind, but are still in the GTT.
> - *
> - * last_read_req is NULL while an object is in this list.
> - *
> - * A reference is not held on the buffer while on this list,
> - * as merely being GTT-bound shouldn't prevent its being
> - * freed, and we'll pull it off the list in the free path.
> - */
> - struct list_head inactive_list;
> -
> - /**
> - * List of vma that have been unbound.
> - *
> - * A reference is not held on the buffer while on this list.
> + * List of vma that are not unbound.
> */
> struct list_head unbound_list;
>
> diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
> index e9a79059bc43..1531534eea02 100644
> --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
> +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
> @@ -490,9 +490,13 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
>
> /* We also want to clear any cached iomaps as they wrap vmap */
> list_for_each_entry_safe(vma, next,
> - &i915->ggtt.vm.inactive_list, vm_link) {
> + &i915->ggtt.vm.bound_list, vm_link) {
> unsigned long count = vma->node.size >> PAGE_SHIFT;
> - if (vma->iomap && i915_vma_unbind(vma) == 0)
> +
> + if (!vma->iomap || i915_vma_is_active(vma))
> + continue;
> +
> + if (i915_vma_unbind(vma) == 0)
> freed_pages += count;
> }
>
> diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
> index 2f756a97689a..75b97d71f072 100644
> --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
> +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
> @@ -702,7 +702,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
> vma->pages = obj->mm.pages;
> vma->flags |= I915_VMA_GLOBAL_BIND;
> __i915_vma_set_map_and_fenceable(vma);
> - list_move_tail(&vma->vm_link, &ggtt->vm.inactive_list);
> +
> + list_move_tail(&vma->vm_link, &ggtt->vm.bound_list);
>
> spin_lock(&dev_priv->mm.obj_lock);
> list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
> diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
> index 5533a741abeb..6e975c43dae9 100644
> --- a/drivers/gpu/drm/i915/i915_gpu_error.c
> +++ b/drivers/gpu/drm/i915/i915_gpu_error.c
> @@ -1124,7 +1124,7 @@ static void capture_bo(struct drm_i915_error_buffer *err,
>
> static u32 capture_error_bo(struct drm_i915_error_buffer *err,
> int count, struct list_head *head,
> - bool pinned_only)
> + bool active_only, bool pinned_only)
Here I suggested that having flags instead of two booleans would be more
readable at the call sites.
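To illustrate, a rough and untested sketch of what I had in mind (the flag
names are invented and the loop body is elided, otherwise as in your patch):

#define ACTIVE_ONLY  BIT(0)
#define PINNED_ONLY  BIT(1)

static u32 capture_error_bo(struct drm_i915_error_buffer *err,
                            int count, struct list_head *head,
                            unsigned int flags)
{
        struct i915_vma *vma;
        int i = 0;

        list_for_each_entry(vma, head, vm_link) {
                if (!vma->obj)
                        continue;

                if ((flags & ACTIVE_ONLY) && !i915_vma_is_active(vma))
                        continue;

                if ((flags & PINNED_ONLY) && !i915_vma_is_pinned(vma))
                        continue;

                /* ... capture and count the bo as before ... */
        }

        return i;
}

The call sites then document themselves:

        count = capture_error_bo(active_bo, count, &vm->bound_list,
                                 ACTIVE_ONLY);
        ...
        error->pinned_bo_count =
                capture_error_bo(bo, count, &vm->bound_list, PINNED_ONLY);

Name the flags however you like; the point is just that bare true/false
pairs are hard to read at the call sites.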
> {
> struct i915_vma *vma;
> int i = 0;
> @@ -1133,6 +1133,9 @@ static u32 capture_error_bo(struct drm_i915_error_buffer *err,
> if (!vma->obj)
> continue;
>
> + if (active_only && !i915_vma_is_active(vma))
> + continue;
> +
> if (pinned_only && !i915_vma_is_pinned(vma))
> continue;
>
> @@ -1610,14 +1613,16 @@ static void gem_capture_vm(struct i915_gpu_state *error,
> int count;
>
> count = 0;
> - list_for_each_entry(vma, &vm->active_list, vm_link)
> - count++;
> + list_for_each_entry(vma, &vm->bound_list, vm_link)
> + if (i915_vma_is_active(vma))
> + count++;
>
> active_bo = NULL;
> if (count)
> active_bo = kcalloc(count, sizeof(*active_bo), GFP_ATOMIC);
> if (active_bo)
> - count = capture_error_bo(active_bo, count, &vm->active_list, false);
> + count = capture_error_bo(active_bo, count, &vm->bound_list,
> + true, false);
> else
> count = 0;
>
> @@ -1655,28 +1660,20 @@ static void capture_pinned_buffers(struct i915_gpu_state *error)
> struct i915_address_space *vm = &error->i915->ggtt.vm;
> struct drm_i915_error_buffer *bo;
> struct i915_vma *vma;
> - int count_inactive, count_active;
> -
> - count_inactive = 0;
> - list_for_each_entry(vma, &vm->inactive_list, vm_link)
> - count_inactive++;
> + int count;
>
> - count_active = 0;
> - list_for_each_entry(vma, &vm->active_list, vm_link)
> - count_active++;
> + count = 0;
> + list_for_each_entry(vma, &vm->bound_list, vm_link)
> + count++;
>
> bo = NULL;
> - if (count_inactive + count_active)
> - bo = kcalloc(count_inactive + count_active,
> - sizeof(*bo), GFP_ATOMIC);
> + if (count)
> + bo = kcalloc(count, sizeof(*bo), GFP_ATOMIC);
> if (!bo)
> return;
>
> - count_inactive = capture_error_bo(bo, count_inactive,
> - &vm->active_list, true);
> - count_active = capture_error_bo(bo + count_inactive, count_active,
> - &vm->inactive_list, true);
> - error->pinned_bo_count = count_inactive + count_active;
> + error->pinned_bo_count =
> + capture_error_bo(bo, count, &vm->bound_list, false, true);
> error->pinned_bo = bo;
> }
>
> diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
> index 5b4d78cdb4ca..7de28baffb8f 100644
> --- a/drivers/gpu/drm/i915/i915_vma.c
> +++ b/drivers/gpu/drm/i915/i915_vma.c
> @@ -79,9 +79,6 @@ __i915_vma_retire(struct i915_vma *vma, struct i915_request *rq)
> if (--vma->active_count)
> return;
>
> - GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
> - list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
> -
> GEM_BUG_ON(!i915_gem_object_is_active(obj));
> if (--obj->active_count)
> return;
> @@ -659,7 +656,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
> GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
> GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));
>
> - list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
> + list_move_tail(&vma->vm_link, &vma->vm->bound_list);
>
> if (vma->obj) {
> struct drm_i915_gem_object *obj = vma->obj;
> @@ -1003,10 +1000,8 @@ int i915_vma_move_to_active(struct i915_vma *vma,
> * add the active reference first and queue for it to be dropped
> * *last*.
> */
> - if (!i915_gem_active_isset(active) && !vma->active_count++) {
> - list_move_tail(&vma->vm_link, &vma->vm->active_list);
> + if (!i915_gem_active_isset(active) && !vma->active_count++)
> obj->active_count++;
> - }
> i915_gem_active_set(active, rq);
> GEM_BUG_ON(!i915_vma_is_active(vma));
> GEM_BUG_ON(!obj->active_count);
> diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
> index e1ff6a1c2cb0..9d0fe8aac219 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
> +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
> @@ -57,7 +57,7 @@ static int populate_ggtt(struct drm_i915_private *i915)
> return -EINVAL;
> }
>
> - if (list_empty(&i915->ggtt.vm.inactive_list)) {
> + if (list_empty(&i915->ggtt.vm.bound_list)) {
> pr_err("No objects on the GGTT inactive list!\n");
> return -EINVAL;
> }
> @@ -69,7 +69,7 @@ static void unpin_ggtt(struct drm_i915_private *i915)
> {
> struct i915_vma *vma;
>
> - list_for_each_entry(vma, &i915->ggtt.vm.inactive_list, vm_link)
> + list_for_each_entry(vma, &i915->ggtt.vm.bound_list, vm_link)
> i915_vma_unpin(vma);
> }
>
> diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
> index fea8ab14e79d..852b06cb50a0 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
> @@ -1237,7 +1237,7 @@ static void track_vma_bind(struct i915_vma *vma)
> __i915_gem_object_pin_pages(obj);
>
> vma->pages = obj->mm.pages;
> - list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
> + list_move_tail(&vma->vm_link, &vma->vm->bound_list);
> }
>
> static int exercise_mock(struct drm_i915_private *i915,
>
The rest looks okay.
Regards,
Tvrtko