[Intel-gfx] [RFC 3/9] drm/i915: Convert requests to use struct fence
Tvrtko Ursulin
tvrtko.ursulin at linux.intel.com
Mon Aug 3 02:18:51 PDT 2015
On 07/28/2015 11:18 AM, John Harrison wrote:
> On 22/07/2015 15:45, Tvrtko Ursulin wrote:
>>
>> On 07/17/2015 03:31 PM, John.C.Harrison at Intel.com wrote:
>>> From: John Harrison <John.C.Harrison at Intel.com>
>>>
>>> There is a construct in the linux kernel called 'struct fence' that
>>> is intended
>>> to keep track of work that is executed on hardware. I.e. it solves
>>> the basic
>>> problem that the drivers 'struct drm_i915_gem_request' is trying to
>>> address. The
>>> request structure does quite a lot more than simply track the
>>> execution progress
>>> so is very definitely still required. However, the basic completion
>>> status side
>>> could be updated to use the ready made fence implementation and gain
>>> all the
>>> advantages that provides.
>>>
>>> This patch makes the first step of integrating a struct fence into
>>> the request.
>>> It replaces the explicit reference count with that of the fence. It also
>>> replaces the 'is completed' test with the fence's equivalent.
>>> Currently, that
>>> simply chains on to the original request implementation. A future
>>> patch will
>>> improve this.
>>>
>>> For: VIZ-5190
>>> Signed-off-by: John Harrison <John.C.Harrison at Intel.com>
>>> ---
>>> drivers/gpu/drm/i915/i915_drv.h | 45 +++++++++++++------------
>>> drivers/gpu/drm/i915/i915_gem.c | 58
>>> ++++++++++++++++++++++++++++++---
>>> drivers/gpu/drm/i915/intel_lrc.c | 1 +
>>> drivers/gpu/drm/i915/intel_ringbuffer.c | 1 +
>>> drivers/gpu/drm/i915/intel_ringbuffer.h | 3 ++
>>> 5 files changed, 80 insertions(+), 28 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/i915/i915_drv.h
>>> b/drivers/gpu/drm/i915/i915_drv.h
>>> index cf6761c..79d346c 100644
>>> --- a/drivers/gpu/drm/i915/i915_drv.h
>>> +++ b/drivers/gpu/drm/i915/i915_drv.h
>>> @@ -50,6 +50,7 @@
>>> #include <linux/intel-iommu.h>
>>> #include <linux/kref.h>
>>> #include <linux/pm_qos.h>
>>> +#include <linux/fence.h>
>>>
>>> /* General customization:
>>> */
>>> @@ -2150,7 +2151,17 @@ void i915_gem_track_fb(struct
>>> drm_i915_gem_object *old,
>>> * initial reference taken using kref_init
>>> */
>>> struct drm_i915_gem_request {
>>> - struct kref ref;
>>> + /**
>>> + * Underlying object for implementing the signal/wait stuff.
>>> + * NB: Never call fence_later() or return this fence object to user
>>> + * land! Due to lazy allocation, scheduler re-ordering,
>>> pre-emption,
>>> + * etc., there is no guarantee at all about the validity or
>>> + * sequentiality of the fence's seqno! It is also unsafe to let
>>> + * anything outside of the i915 driver get hold of the fence object
>>> + * as the clean up when decrementing the reference count requires
>>> + * holding the driver mutex lock.
>>> + */
>>> + struct fence fence;
>>>
>>> /** On Which ring this request was generated */
>>> struct drm_i915_private *i915;
>>> @@ -2227,7 +2238,13 @@ int i915_gem_request_alloc(struct
>>> intel_engine_cs *ring,
>>> struct intel_context *ctx,
>>> struct drm_i915_gem_request **req_out);
>>> void i915_gem_request_cancel(struct drm_i915_gem_request *req);
>>> -void i915_gem_request_free(struct kref *req_ref);
>>> +
>>> +static inline bool i915_gem_request_completed(struct
>>> drm_i915_gem_request *req,
>>> + bool lazy_coherency)
>>> +{
>>> + return fence_is_signaled(&req->fence);
>>> +}
>>> +
>>> int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
>>> struct drm_file *file);
>>>
>>> @@ -2247,7 +2264,7 @@ static inline struct drm_i915_gem_request *
>>> i915_gem_request_reference(struct drm_i915_gem_request *req)
>>> {
>>> if (req)
>>> - kref_get(&req->ref);
>>> + fence_get(&req->fence);
>>> return req;
>>> }
>>>
>>> @@ -2255,7 +2272,7 @@ static inline void
>>> i915_gem_request_unreference(struct drm_i915_gem_request *req)
>>> {
>>> WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
>>> - kref_put(&req->ref, i915_gem_request_free);
>>> + fence_put(&req->fence);
>>> }
>>>
>>> static inline void
>>> @@ -2267,7 +2284,7 @@ i915_gem_request_unreference__unlocked(struct
>>> drm_i915_gem_request *req)
>>> return;
>>>
>>> dev = req->ring->dev;
>>> - if (kref_put_mutex(&req->ref, i915_gem_request_free,
>>> &dev->struct_mutex))
>>> + if (kref_put_mutex(&req->fence.refcount, fence_release,
>>> &dev->struct_mutex))
>>> mutex_unlock(&dev->struct_mutex);
>>> }
>>>
>>> @@ -2284,12 +2301,6 @@ static inline void
>>> i915_gem_request_assign(struct drm_i915_gem_request **pdst,
>>> }
>>>
>>> /*
>>> - * XXX: i915_gem_request_completed should be here but currently
>>> needs the
>>> - * definition of i915_seqno_passed() which is below. It will be
>>> moved in
>>> - * a later patch when the call to i915_seqno_passed() is obsoleted...
>>> - */
>>> -
>>> -/*
>>> * A command that requires special handling by the command parser.
>>> */
>>> struct drm_i915_cmd_descriptor {
>>> @@ -2851,18 +2862,6 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
>>> return (int32_t)(seq1 - seq2) >= 0;
>>> }
>>>
>>> -static inline bool i915_gem_request_completed(struct
>>> drm_i915_gem_request *req,
>>> - bool lazy_coherency)
>>> -{
>>> - u32 seqno;
>>> -
>>> - BUG_ON(req == NULL);
>>> -
>>> - seqno = req->ring->get_seqno(req->ring, lazy_coherency);
>>> -
>>> - return i915_seqno_passed(seqno, req->seqno);
>>> -}
>>> -
>>> int __must_check i915_gem_get_seqno(struct drm_device *dev, u32
>>> *seqno);
>>> int __must_check i915_gem_set_seqno(struct drm_device *dev, u32
>>> seqno);
>>> int __must_check i915_gem_object_get_fence(struct
>>> drm_i915_gem_object *obj);
>>> diff --git a/drivers/gpu/drm/i915/i915_gem.c
>>> b/drivers/gpu/drm/i915/i915_gem.c
>>> index d9f2701..888bb72 100644
>>> --- a/drivers/gpu/drm/i915/i915_gem.c
>>> +++ b/drivers/gpu/drm/i915/i915_gem.c
>>> @@ -2616,12 +2616,14 @@ static void i915_set_reset_status(struct
>>> drm_i915_private *dev_priv,
>>> }
>>> }
>>>
>>> -void i915_gem_request_free(struct kref *req_ref)
>>> +static void i915_gem_request_free(struct fence *req_fence)
>>> {
>>> - struct drm_i915_gem_request *req = container_of(req_ref,
>>> - typeof(*req), ref);
>>> + struct drm_i915_gem_request *req = container_of(req_fence,
>>> + typeof(*req), fence);
>>> struct intel_context *ctx = req->ctx;
>>>
>>> + BUG_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
>>> +
>>> if (req->file_priv)
>>> i915_gem_request_remove_from_client(req);
>>>
>>> @@ -2637,6 +2639,47 @@ void i915_gem_request_free(struct kref *req_ref)
>>> kmem_cache_free(req->i915->requests, req);
>>> }
>>>
>>> +static const char *i915_gem_request_get_driver_name(struct fence
>>> *req_fence)
>>> +{
>>> + return "i915_request";
>>> +}
>>> +
>>> +static const char *i915_gem_request_get_timeline_name(struct fence
>>> *req_fence)
>>> +{
>>> + struct drm_i915_gem_request *req = container_of(req_fence,
>>> + typeof(*req), fence);
>>> + return req->ring->name;
>>> +}
>>> +
>>> +static bool i915_gem_request_enable_signaling(struct fence *req_fence)
>>> +{
>>> + /* Interrupt driven fences are not implemented yet.*/
>>> + WARN(true, "This should not be called!");
>>> + return true;
>>> +}
>>> +
>>> +static bool i915_gem_request_is_completed(struct fence *req_fence)
>>> +{
>>> + struct drm_i915_gem_request *req = container_of(req_fence,
>>> + typeof(*req), fence);
>>> + u32 seqno;
>>> +
>>> + BUG_ON(req == NULL);
>>> +
>>> + seqno = req->ring->get_seqno(req->ring, false/*lazy_coherency*/);
>>> +
>>> + return i915_seqno_passed(seqno, req->seqno);
>>> +}
>>
>> How does this really work? I don't see any fence code calling this,
>> plus, this patch is not doing fence_signal anywhere. So is the whole
>> thing functional at this point?
>
> Do you mean fence code calling i915_gem_request_is_completed? It is a
> callback in the fence ops structure assigned a few lines lower in the
> patch:
> > + .signaled = i915_gem_request_is_completed,
>
> Whenever 'fence_is_signaled(&fence)' is called it basically chains on to
> the .signaled callback inside the fence implementation. When the
> interrupt version comes in with the later patch, this is removed as the
> fence is then tracking its completion state and does not need to do the
> hardware query every time. However, at this point it still does need to
> chain on to reading the HWS. And yes, it is certainly supposed to be
> fully functional at this point! It certainly was when I was testing it.
Yeah, looks OK. I probably missed something when initially reading this, or
confused the two variants of "is complete" / "completed".
Tvrtko
More information about the Intel-gfx
mailing list