[Intel-gfx] [PATCH] drm/i915/gem: Remove shared locking on freeing objects
Matthew Auld
matthew.auld at intel.com
Wed Aug 3 08:56:20 UTC 2022
On 26/07/2022 15:48, Nirmoy Das wrote:
> From: Chris Wilson <chris at chris-wilson.co.uk>
>
> The obj->base.resv may be shared across many objects, some of which may
> still be live and locked, preventing objects from being freed
> indefinitely. We could individualise the lock during the free, or rely on
> a freed object having no contention and being able to immediately free
> the pages it owns.
>
> References: https://gitlab.freedesktop.org/drm/intel/-/issues/6469
> Fixes: be7612fd6665 ("drm/i915: Require object lock when freeing pages during destruction")
> Fixes: 6cb12fbda1c2 ("drm/i915: Use trylock instead of blocking lock for __i915_gem_free_objects.")
> Cc: <stable at vger.kernel.org> # v5.17+
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
Looks to be missing your Signed-off-by.
Reviewed-by: Matthew Auld <matthew.auld at intel.com>
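
As a side note for anyone following the thread, this is roughly the
deferred-free pattern the patch converges on: dying objects are pushed
onto a lockless llist and a plain work item (no longer a delayed_work)
drains the list and frees them without taking the shared dma-resv lock,
on the assumption that a dead object has no contention. A minimal
sketch only, not the i915 code itself; the demo_* names below are made
up for illustration:

#include <linux/llist.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_obj {
	struct llist_node freed;
	/* ... pages, vma lists, etc. ... */
};

static struct {
	struct llist_head free_list;
	struct work_struct free_work;
} demo_mm;

static void demo_free_work_fn(struct work_struct *work)
{
	struct llist_node *freed = llist_del_all(&demo_mm.free_list);
	struct demo_obj *obj, *next;

	/* No trylock/requeue dance: nothing else can contend on a dead
	 * object, so just free everything that was queued. */
	llist_for_each_entry_safe(obj, next, freed, freed)
		kfree(obj);
}

static void demo_init(void)
{
	init_llist_head(&demo_mm.free_list);
	INIT_WORK(&demo_mm.free_work, demo_free_work_fn);
}

static void demo_queue_free(struct demo_obj *obj)
{
	/* llist_add() returns true only when the list was empty, so the
	 * work is queued at most once per batch. */
	if (llist_add(&obj->freed, &demo_mm.free_list))
		queue_work(system_wq, &demo_mm.free_work);
}

The real __i915_gem_free_objects() additionally keeps the object
pointer alive for RCU-protected lookups, as the hunk below shows; the
sketch leaves that out.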
> ---
> drivers/gpu/drm/i915/gem/i915_gem_object.c | 16 ++++------------
> drivers/gpu/drm/i915/i915_drv.h | 4 ++--
> 2 files changed, 6 insertions(+), 14 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
> index ccec4055fde3..389e9f157ca5 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
> @@ -268,7 +268,7 @@ static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
> */
> void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj)
> {
> - assert_object_held(obj);
> + assert_object_held_shared(obj);
>
> if (!list_empty(&obj->vma.list)) {
> struct i915_vma *vma;
> @@ -331,15 +331,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
> continue;
> }
>
> - if (!i915_gem_object_trylock(obj, NULL)) {
> - /* busy, toss it back to the pile */
> - if (llist_add(&obj->freed, &i915->mm.free_list))
> - queue_delayed_work(i915->wq, &i915->mm.free_work, msecs_to_jiffies(10));
> - continue;
> - }
> -
> __i915_gem_object_pages_fini(obj);
> - i915_gem_object_unlock(obj);
> __i915_gem_free_object(obj);
>
> /* But keep the pointer alive for RCU-protected lookups */
> @@ -359,7 +351,7 @@ void i915_gem_flush_free_objects(struct drm_i915_private *i915)
> static void __i915_gem_free_work(struct work_struct *work)
> {
> struct drm_i915_private *i915 =
> - container_of(work, struct drm_i915_private, mm.free_work.work);
> + container_of(work, struct drm_i915_private, mm.free_work);
>
> i915_gem_flush_free_objects(i915);
> }
> @@ -391,7 +383,7 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
> */
>
> if (llist_add(&obj->freed, &i915->mm.free_list))
> - queue_delayed_work(i915->wq, &i915->mm.free_work, 0);
> + queue_work(i915->wq, &i915->mm.free_work);
> }
>
> void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
> @@ -745,7 +737,7 @@ bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)
>
> void i915_gem_init__objects(struct drm_i915_private *i915)
> {
> - INIT_DELAYED_WORK(&i915->mm.free_work, __i915_gem_free_work);
> + INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
> }
>
> void i915_objects_module_exit(void)
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index d25647be25d1..086bbe8945d6 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -247,7 +247,7 @@ struct i915_gem_mm {
> * List of objects which are pending destruction.
> */
> struct llist_head free_list;
> - struct delayed_work free_work;
> + struct work_struct free_work;
> /**
> * Count of objects pending destructions. Used to skip needlessly
> * waiting on an RCU barrier if no objects are waiting to be freed.
> @@ -1378,7 +1378,7 @@ static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
> * armed the work again.
> */
> while (atomic_read(&i915->mm.free_count)) {
> - flush_delayed_work(&i915->mm.free_work);
> + flush_work(&i915->mm.free_work);
> flush_delayed_work(&i915->bdev.wq);
> rcu_barrier();
> }