[Intel-gfx] [PATCH 25/34] drm/i915: Track active timelines

John Harrison John.C.Harrison at Intel.com
Wed Jan 23 22:32:54 UTC 2019


On 1/22/2019 07:17, Chris Wilson wrote:
> Quoting Tvrtko Ursulin (2019-01-22 14:56:32)
>> On 21/01/2019 22:21, Chris Wilson wrote:
>>> Now that we pin timelines around use, we have a clearly defined lifetime
>>> and convenient points at which we can track only the active timelines.
>>> This allows us to reduce the list iteration to only consider those
>>> active timelines and not all.
>>>
>>> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
>>> ---
>>>    drivers/gpu/drm/i915/i915_drv.h      |  2 +-
>>>    drivers/gpu/drm/i915/i915_gem.c      |  4 +--
>>>    drivers/gpu/drm/i915/i915_reset.c    |  2 +-
>>>    drivers/gpu/drm/i915/i915_timeline.c | 39 ++++++++++++++++++----------
>>>    4 files changed, 29 insertions(+), 18 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
>>> index c00eaf2889fb..5577e0e1034f 100644
>>> --- a/drivers/gpu/drm/i915/i915_drv.h
>>> +++ b/drivers/gpu/drm/i915/i915_drv.h
>>> @@ -1977,7 +1977,7 @@ struct drm_i915_private {
>>>    
>>>                struct i915_gt_timelines {
>>>                        struct mutex mutex; /* protects list, tainted by GPU */
>>> -                     struct list_head list;
>>> +                     struct list_head active_list;
>>>    
>>>                        /* Pack multiple timelines' seqnos into the same page */
>>>                        spinlock_t hwsp_lock;
>>> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
>>> index 4e0de22f0166..9c499edb4c13 100644
>>> --- a/drivers/gpu/drm/i915/i915_gem.c
>>> +++ b/drivers/gpu/drm/i915/i915_gem.c
>>> @@ -3246,7 +3246,7 @@ wait_for_timelines(struct drm_i915_private *i915,
>>>                return timeout;
>>>    
>>>        mutex_lock(&gt->mutex);
>>> -     list_for_each_entry(tl, &gt->list, link) {
>>> +     list_for_each_entry(tl, &gt->active_list, link) {
>>>                struct i915_request *rq;
>>>    
>>>                rq = i915_gem_active_get_unlocked(&tl->last_request);
>>> @@ -3274,7 +3274,7 @@ wait_for_timelines(struct drm_i915_private *i915,
>>>    
>>>                /* restart after reacquiring the lock */
>>>                mutex_lock(&gt->mutex);
>>> -             tl = list_entry(&gt->list, typeof(*tl), link);
>>> +             tl = list_entry(&gt->active_list, typeof(*tl), link);
>>>        }
>>>        mutex_unlock(&gt->mutex);
>>>    
>>> diff --git a/drivers/gpu/drm/i915/i915_reset.c b/drivers/gpu/drm/i915/i915_reset.c
>>> index 09edf488f711..9b9169508139 100644
>>> --- a/drivers/gpu/drm/i915/i915_reset.c
>>> +++ b/drivers/gpu/drm/i915/i915_reset.c
>>> @@ -852,7 +852,7 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
>>>         * No more can be submitted until we reset the wedged bit.
>>>         */
>>>        mutex_lock(&i915->gt.timelines.mutex);
>>> -     list_for_each_entry(tl, &i915->gt.timelines.list, link) {
>>> +     list_for_each_entry(tl, &i915->gt.timelines.active_list, link) {
>>>                struct i915_request *rq;
>>>                long timeout;
>>>    
>>> diff --git a/drivers/gpu/drm/i915/i915_timeline.c b/drivers/gpu/drm/i915/i915_timeline.c
>>> index 69ee33dfa340..007348b1b469 100644
>>> --- a/drivers/gpu/drm/i915/i915_timeline.c
>>> +++ b/drivers/gpu/drm/i915/i915_timeline.c
>>> @@ -117,7 +117,6 @@ int i915_timeline_init(struct drm_i915_private *i915,
>>>                       const char *name,
>>>                       struct i915_vma *hwsp)
>>>    {
>>> -     struct i915_gt_timelines *gt = &i915->gt.timelines;
>>>        void *vaddr;
>>>    
>>>        /*
>>> @@ -161,10 +160,6 @@ int i915_timeline_init(struct drm_i915_private *i915,
>>>    
>>>        i915_syncmap_init(&timeline->sync);
>>>    
>>> -     mutex_lock(&gt->mutex);
>>> -     list_add(&timeline->link, &gt->list);
>>> -     mutex_unlock(&gt->mutex);
>>> -
>>>        return 0;
>>>    }
>>>    
>>> @@ -173,7 +168,7 @@ void i915_timelines_init(struct drm_i915_private *i915)
>>>        struct i915_gt_timelines *gt = &i915->gt.timelines;
>>>    
>>>        mutex_init(&gt->mutex);
>>> -     INIT_LIST_HEAD(&gt->list);
>>> +     INIT_LIST_HEAD(&gt->active_list);
>>>    
>>>        spin_lock_init(&gt->hwsp_lock);
>>>        INIT_LIST_HEAD(&gt->hwsp_free_list);
>>> @@ -182,6 +177,24 @@ void i915_timelines_init(struct drm_i915_private *i915)
>>>        i915_gem_shrinker_taints_mutex(i915, &gt->mutex);
>>>    }
>>>    
>>> +static void timeline_active(struct i915_timeline *tl)
>>> +{
>>> +     struct i915_gt_timelines *gt = &tl->i915->gt.timelines;
>>> +
>>> +     mutex_lock(&gt->mutex);
>>> +     list_add(&tl->link, &gt->active_list);
>>> +     mutex_unlock(&gt->mutex);
>>> +}
>>> +
>>> +static void timeline_inactive(struct i915_timeline *tl)
>>> +{
>>> +     struct i915_gt_timelines *gt = &tl->i915->gt.timelines;
>>> +
>>> +     mutex_lock(&gt->mutex);
>>> +     list_del(&tl->link);
>>> +     mutex_unlock(&gt->mutex);
>>> +}
>> Bikeshedding comments only:
>> Would it be better to use a verb suffix, even though timeline_activate
>> wouldn't sound perfect either? Since it is file-local - activate_timeline?
>> Or even just inline them into pin/unpin, unless more gets put into them later?
> I haven't got any plans for more here yet, and was thinking of this as a
> pinned_list myself. I picked active_list since I was using 'active'
> elsewhere for active_ring, active_engines, active_contexts, etc.
>
> I didn't like activate/deactivate enough to switch, and was trying to
> avoid reusing pin/unpin along this path, since
> 	i915_timeline_pin -> timeline_pin
> begged confusion.
>
> [snip]
>> Never mind the bikeshedding comments:
> There's time enough for someone to open a new pot of paint.
I agree that having a verb in there would make things clearer. Maybe 
timeline_make_(in)active? Or timeline_mark_(in)active?
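
For reference, my mental model of where these helpers slot in is the
timeline pin/unpin path, with the list add/remove keyed to the first pin
and the last unpin. A rough sketch only - the pin_count and HWSP vma
handling below is my reading of the earlier patches in the series, not
something shown in these hunks:

int i915_timeline_pin(struct i915_timeline *tl)
{
	int err;

	if (tl->pin_count++)
		return 0; /* already on gt->active_list from the first pin */

	err = i915_vma_pin(tl->hwsp_ggtt, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (err) {
		tl->pin_count = 0;
		return err;
	}

	timeline_active(tl); /* first pin: add to gt->active_list */
	return 0;
}

void i915_timeline_unpin(struct i915_timeline *tl)
{
	GEM_BUG_ON(!tl->pin_count);
	if (--tl->pin_count)
		return;

	timeline_inactive(tl); /* last unpin: drop from gt->active_list */
	i915_vma_unpin(tl->hwsp_ggtt);
}

Whatever the helpers end up being called, tying them to first-pin/last-unpin
is what lets wait_for_timelines() and i915_gem_unset_wedged() walk only the
timelines that can still have requests in flight, which is the point of the
patch.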

> -Chris


