[Intel-gfx] [PATCH 14/22] drm/i915: Throw away the active object retirement complexity
Matthew Auld
matthew.auld at intel.com
Mon Jun 17 13:43:34 UTC 2019
On 17/06/2019 08:19, Chris Wilson wrote:
> Remove the accumulated optimisations that we have for i915_vma_retire
> and reduce it to the bare essential of tracking the active object
> reference. This allows us to only use atomic operations, and so we will
> be able to avoid the struct_mutex requirement.
>
> The principal loss here is the shrinker MRU bumping, so now if we have
> to shrink, we will do so in a much more random order and will be more
> likely to try to shrink recently used objects. That is a nuisance, but
> shrinking active objects is a second step we try to avoid and will
> always be a system-wide performance issue.
>
> The other loss here is in the automatic pruning of the
> reservation_object when idling. This is not as large an issue as it was
> when the reservation_object was introduced, since adding new fences
> into the object now replaces already signaled fences, keeping the array
> compact. But we do lose the auto-expiration of stale fences and unused
> arrays. That may be a noticeable problem, for which we would need to
> re-implement autopruning.
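For readers less familiar with this path: if I remember correctly, the
pruning we lose is roughly the following from the old retire callback
(quoting from memory, so the details may be slightly off):

	/* Prune the shared fence arrays iff completely idle (inc. external) */
	if (reservation_object_trylock(obj->resv)) {
		if (reservation_object_test_signaled_rcu(obj->resv, true))
			reservation_object_add_excl_fence(obj->resv, NULL);
		reservation_object_unlock(obj->resv);
	}

i.e. once the object went completely idle we replaced the whole fence
array with a single NULL exclusive fence, releasing the stale
shared-fence array. That is the auto-expiration being lost here.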
>
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> ---
> drivers/gpu/drm/i915/gem/i915_gem_object.c | 1 -
> drivers/gpu/drm/i915/gem/i915_gem_object.h | 6 ---
> .../gpu/drm/i915/gem/i915_gem_object_types.h | 1 -
> drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 5 +-
> .../drm/i915/gem/selftests/i915_gem_mman.c | 9 ----
> drivers/gpu/drm/i915/gt/intel_lrc.c | 4 +-
> drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 1 -
> drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 32 +++++------
> drivers/gpu/drm/i915/i915_debugfs.c | 8 +--
> drivers/gpu/drm/i915/i915_gem_batch_pool.c | 42 ++++++---------
> drivers/gpu/drm/i915/i915_vma.c | 54 ++++---------------
> 11 files changed, 47 insertions(+), 116 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
> index bb5b6e63a2cc..252e752da211 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
> @@ -162,7 +162,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
>
> mutex_lock(&i915->drm.struct_mutex);
>
> - GEM_BUG_ON(i915_gem_object_is_active(obj));
> list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) {
> GEM_BUG_ON(i915_vma_is_active(vma));
> vma->flags &= ~I915_VMA_PIN_MASK;
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
> index 7cb1871d7128..454bfb498001 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
> @@ -158,12 +158,6 @@ i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
> return obj->ops->flags & I915_GEM_OBJECT_ASYNC_CANCEL;
> }
>
> -static inline bool
> -i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
> -{
> - return READ_ONCE(obj->active_count);
> -}
> -
> static inline bool
> i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
> {
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
> index 5b05698619ce..c299fed2c6b1 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
> @@ -156,7 +156,6 @@ struct drm_i915_gem_object {
>
> /** Count of VMA actually bound by this object */
> atomic_t bind_count;
> - unsigned int active_count;
> /** Count of how many global VMA are currently pinned for use by HW */
> unsigned int pin_global;
>
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
> index 3a926a8755c6..f4677f70cce7 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
> @@ -230,8 +230,9 @@ i915_gem_shrink(struct drm_i915_private *i915,
> continue;
>
> if (!(shrink & I915_SHRINK_ACTIVE) &&
> - (i915_gem_object_is_active(obj) ||
> - i915_gem_object_is_framebuffer(obj)))
> + (i915_gem_object_is_framebuffer(obj) ||
> + reservation_object_test_signaled_rcu(obj->resv,
> + true)))
Wait, isn't it the other way around, i.e.
!reservation_object_test_signaled_rcu(), so that we skip objects that
are still busy?
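Something like this is what I would have expected (hypothetical,
untested sketch), skipping objects whose fences have *not* all signaled
yet, i.e. the still-active ones:

	if (!(shrink & I915_SHRINK_ACTIVE) &&
	    (i915_gem_object_is_framebuffer(obj) ||
	     !reservation_object_test_signaled_rcu(obj->resv, true)))
		continue;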
> continue;
>
> if (!(shrink & I915_SHRINK_BOUND) &&
> diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
> index 5c81f4b4813a..2053194a8b70 100644
> --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
> +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
> @@ -474,15 +474,6 @@ static int igt_mmap_offset_exhaustion(void *arg)
> pr_err("[loop %d] Failed to busy the object\n", loop);
> goto err_obj;
> }
> -
> - /* NB we rely on the _active_ reference to access obj now */
> - GEM_BUG_ON(!i915_gem_object_is_active(obj));
> - err = create_mmap_offset(obj);
> - if (err) {
> - pr_err("[loop %d] create_mmap_offset failed with err=%d\n",
> - loop, err);
> - goto out;
> - }
Do we really want to drop the create_mmap_offset() call here?
> }
>
> out:
> diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
> index bbbdc63906c6..cd4cf4d0b30c 100644
> --- a/drivers/gpu/drm/i915/gt/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
> @@ -1509,9 +1509,7 @@ static void execlists_submit_request(struct i915_request *request)
> static void __execlists_context_fini(struct intel_context *ce)
> {
> intel_ring_put(ce->ring);
> -
> - GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
> - i915_gem_object_put(ce->state->obj);
> + i915_vma_put(ce->state);
I guess i915_vma_put() is still just an alias at the moment... though
this hunk seems a little misplaced in this patch? Not sure.
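For reference, i915_vma_put() is, as far as I remember, still just a
thin wrapper along these lines (no separate vma reference count yet),
so the hunk should at least be functionally equivalent:

	static inline void i915_vma_put(struct i915_vma *vma)
	{
		/*
		 * The vma does not (yet) hold a reference of its own;
		 * dropping it simply drops the backing object reference.
		 */
		i915_gem_object_put(vma->obj);
	}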
Fwiw,
Reviewed-by: Matthew Auld <matthew.auld at intel.com>