[Intel-gfx] [PATCH v4 31/61] drm/i915: Prepare for obj->mm.lock removal

Maarten Lankhorst maarten.lankhorst at linux.intel.com
Wed Nov 4 16:01:15 UTC 2020


Op 02-11-2020 om 11:13 schreef Thomas Hellström:
>
> On 10/16/20 12:44 PM, Maarten Lankhorst wrote:
>> From: Thomas Hellström <thomas.hellstrom at intel.com>
>>
>> Stolen objects need to take the object lock, and put_pages() may be
>> called when the refcount drops to 0; make sure all call sites handle
>> this correctly.
>>
>> Idea-from: Thomas Hellström <thomas.hellstrom at intel.com>
>> Signed-off-by: Maarten Lankhorst <maarten.lankhorst at linux.intel.com>
>> ---
>>   drivers/gpu/drm/i915/gem/i915_gem_object.h | 14 ++++++++++++++
>>   drivers/gpu/drm/i915/gem/i915_gem_pages.c  | 14 ++++++++++++--
>>   drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 10 +++++++++-
>>   3 files changed, 35 insertions(+), 3 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
>> index 8db84ce09d9f..a3a701d849bf 100644
>> --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
>> @@ -112,6 +112,20 @@ i915_gem_object_put(struct drm_i915_gem_object *obj)
>>  
>>  #define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)
>>  
>> +/*
>> + * If more than one potential simultaneous locker, assert held.
>> + */
>> +static inline void assert_object_held_shared(struct drm_i915_gem_object *obj)
>> +{
>> +    /*
>> +     * Note mm list lookup is protected by
>> +     * kref_get_unless_zero().
>> +     */
>> +    if (IS_ENABLED(CONFIG_LOCKDEP) &&
>> +        kref_read(&obj->base.refcount) > 0)
>> +        lockdep_assert_held(&obj->mm.lock);
>> +}
>> +
>>  static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
>>                                           struct i915_gem_ww_ctx *ww,
>>                                           bool intr)
>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
>> index ef1d5fabd077..429ec652c394 100644
>> --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
>> @@ -18,7 +18,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
>>      unsigned long supported = INTEL_INFO(i915)->page_sizes;
>>      int i;
>>  
>> -    lockdep_assert_held(&obj->mm.lock);
>> +    assert_object_held_shared(obj);
>>  
>>      if (i915_gem_object_is_volatile(obj))
>>          obj->mm.madv = I915_MADV_DONTNEED;
>> @@ -67,6 +67,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
>>          struct list_head *list;
>>          unsigned long flags;
>>  
>> +        lockdep_assert_held(&obj->mm.lock);
>>          spin_lock_irqsave(&i915->mm.obj_lock, flags);
>>  
>>          i915->mm.shrink_count++;
>> @@ -88,6 +89,8 @@ int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
>>      struct drm_i915_private *i915 = to_i915(obj->base.dev);
>>      int err;
>>  
>> +    assert_object_held_shared(obj);
>> +
>>      if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
>>          drm_dbg(&i915->drm,
>>              "Attempting to obtain a purgeable object\n");
>> @@ -115,6 +118,8 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
>>      if (err)
>>          return err;
>>  
>> +    assert_object_held_shared(obj);
>> +
>>      if (unlikely(!i915_gem_object_has_pages(obj))) {
>>          GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
>>  
>> @@ -142,7 +147,7 @@ void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
>>  /* Try to discard unwanted pages */
>>  void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
>>  {
>> -    lockdep_assert_held(&obj->mm.lock);
>> +    assert_object_held_shared(obj);
>>      GEM_BUG_ON(i915_gem_object_has_pages(obj));
>>  
>>      if (obj->ops->writeback)
>> @@ -175,6 +180,8 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
>>  {
>>      struct sg_table *pages;
>>  
>> +    assert_object_held_shared(obj);
>> +
>>      pages = fetch_and_zero(&obj->mm.pages);
>>      if (IS_ERR_OR_NULL(pages))
>>          return pages;
>> @@ -202,6 +209,9 @@ int __i915_gem_object_put_pages_locked(struct drm_i915_gem_object *obj)
>>      if (i915_gem_object_has_pinned_pages(obj))
>>          return -EBUSY;
>>  
>> +    /* May be called by shrinker from within get_pages() (on another bo) */
>> +    assert_object_held_shared(obj);
>> +
>>      i915_gem_object_release_mmap_offset(obj);
>>  
>>      /*
>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
>> index 9a9242b5a99f..1fd287ce86f4 100644
>> --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
>> @@ -593,11 +593,19 @@ __i915_gem_object_create_stolen(struct intel_memory_region *mem,
>>      cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
>>      i915_gem_object_set_cache_coherency(obj, cache_level);
>>  
>> +    if (WARN_ON(!i915_gem_object_trylock(obj))) {
>> +        err = -EBUSY;
>> +        goto cleanup;
>> +    }
>
> We should probably keep the _isolated annotation here. I think it needs to be used elsewhere anyway.
>
> Otherwise
>
> Signed-off-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
>
>
I think it's best not to reintroduce lock_isolated, but to open-code it, with a big comment at each call site explaining why it's needed. :)
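
Roughly like this for the stolen case (just a sketch, exact comment wording to be bikeshedded):

    /*
     * The object has only just been created and is not yet visible to
     * the shrinker or any other thread (we hold the sole reference and
     * it is not on any list), so the trylock cannot contend. It only
     * exists to satisfy the locking asserts further down the
     * get/put_pages paths, i.e. what lock_isolated used to spell out.
     */
    if (WARN_ON(!i915_gem_object_trylock(obj))) {
        err = -EBUSY;
        goto cleanup;
    }

That way the trick is documented right where it is used, instead of hiding behind a helper name.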


