[Intel-gfx] [PATCH v4 09/13] drm/i915/guc: Prepare to process incoming requests from CT

Michel Thierry michel.thierry at intel.com
Fri Mar 23 23:23:15 UTC 2018


On 3/23/2018 7:47 AM, Michal Wajdeczko wrote:
> Requests are read from CT in the irq handler, but actual processing
> will be done in the work thread. Processing of specific actions will
> be added in the upcoming patches.
> 
> v2: don't use GEM_BUG_ON (Chris)
>      don't kmalloc too large buffer (Michal)
> v3: rebased
> 
> Signed-off-by: Michal Wajdeczko <michal.wajdeczko at intel.com>
> Cc: Oscar Mateo <oscar.mateo at intel.com>
> Cc: Michel Thierry <michel.thierry at intel.com>
> Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio at intel.com>
> ---
>   drivers/gpu/drm/i915/intel_guc_ct.c | 72 ++++++++++++++++++++++++++++++++++++-
>   drivers/gpu/drm/i915/intel_guc_ct.h |  4 +++
>   2 files changed, 75 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c
> index 6a9aad0..90aff51 100644
> --- a/drivers/gpu/drm/i915/intel_guc_ct.c
> +++ b/drivers/gpu/drm/i915/intel_guc_ct.c
> @@ -32,10 +32,17 @@ struct ct_request {
>   	u32 *response_buf;
>   };
>   
> +struct ct_incoming_request {
> +	struct list_head link;
> +	u32 data[];
> +};
> +
>   enum { CTB_SEND = 0, CTB_RECV = 1 };
>   
>   enum { CTB_OWNER_HOST = 0 };
>   
> +static void ct_incoming_request_worker_func(struct work_struct *w);
> +
>   /**
>    * intel_guc_ct_init_early - Initialize CT state without requiring device access
>    * @ct: pointer to CT struct
> @@ -47,6 +54,8 @@ void intel_guc_ct_init_early(struct intel_guc_ct *ct)
>   
>   	spin_lock_init(&ct->lock);
>   	INIT_LIST_HEAD(&ct->pending_requests);
> +	INIT_LIST_HEAD(&ct->incoming_requests);
> +	INIT_WORK(&ct->worker, ct_incoming_request_worker_func);
>   }
>   
>   static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
> @@ -632,13 +641,74 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
>   	return 0;
>   }
>   
> +static void ct_dispatch_request(struct intel_guc_ct *ct,
> +				u32 action, u32 len, const u32 *payload)

Would ct_consume_request be a better name? It would make it clear
that this function is for an incoming request.

To me, 'dispatch' sounds like the code will send something, while
here it is handling the received g2h request.

The name ct_dispatch_request also makes it sound like it sits at the
same level as ct_handle_request, when it is really called one level
below, from the worker.
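
For illustration, a minimal sketch of how the two paths would read
with the suggested rename (only the name changes; the bodies and call
sites stay exactly as in the patch):

  /* irq context: ct_handle_request() copies the g2h message and
   * queues it on ct->incoming_requests */
  static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg);

  /* worker context: consumes one queued request at a time */
  static void ct_consume_request(struct intel_guc_ct *ct,
                                 u32 action, u32 len,
                                 const u32 *payload);

and the call in ct_process_incoming_requests() becomes:

  ct_consume_request(ct,
                     ct_header_get_action(request->data[0]),
                     ct_header_get_len(request->data[0]),
                     &request->data[1]);

Read that way, the two names line up as the two ends of the queue:
handle (produce) in the irq path, consume in the worker.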

Thanks,

> +{
> +	switch (action) {
> +	default:
> +		DRM_ERROR("CT: unexpected request %x %*phn\n",
> +			  action, 4*len, payload);
> +		break;
> +	}
> +}
> +
> +static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
> +{
> +	unsigned long flags;
> +	struct ct_incoming_request *request;
> +	bool done;
> +
> +	spin_lock_irqsave(&ct->lock, flags);
> +	request = list_first_entry_or_null(&ct->incoming_requests,
> +					   struct ct_incoming_request, link);
> +	if (request)
> +		list_del(&request->link);
> +	done = !!list_empty(&ct->incoming_requests);
> +	spin_unlock_irqrestore(&ct->lock, flags);
> +
> +	if (!request)
> +		return true;
> +
> +	ct_dispatch_request(ct,
> +			    ct_header_get_action(request->data[0]),
> +			    ct_header_get_len(request->data[0]),
> +			    &request->data[1]);
> +
> +	kfree(request);
> +	return done;
> +}
> +
> +static void ct_incoming_request_worker_func(struct work_struct *w)
> +{
> +	struct intel_guc_ct *ct = container_of(w, struct intel_guc_ct, worker);
> +	bool done;
> +
> +	done = ct_process_incoming_requests(ct);
> +	if (!done)
> +		queue_work(system_unbound_wq, &ct->worker);
> +}
> +
>   static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
>   {
>   	u32 header = msg[0];
> +	u32 len = ct_header_get_len(header) + 1; /* total len with header */
> +	struct ct_incoming_request *request;
> +	unsigned long flags;
>   
>   	GEM_BUG_ON(ct_header_is_response(header));
>   
> -	/* XXX */
> +	request = kmalloc(sizeof(*request) + 4*len, GFP_ATOMIC);
> +	if (unlikely(!request)) {
> +		DRM_ERROR("CT: dropping request %*phn\n", 4*len, msg);
> +		return 0; /* XXX: -ENOMEM ? */
> +	}
> +	memcpy(request->data, msg, 4*len);
> +
> +	spin_lock_irqsave(&ct->lock, flags);
> +	list_add_tail(&request->link, &ct->incoming_requests);
> +	spin_unlock_irqrestore(&ct->lock, flags);
> +
> +	queue_work(system_unbound_wq, &ct->worker);
>   	return 0;
>   }
>   
> diff --git a/drivers/gpu/drm/i915/intel_guc_ct.h b/drivers/gpu/drm/i915/intel_guc_ct.h
> index 905566b..0be4ce5 100644
> --- a/drivers/gpu/drm/i915/intel_guc_ct.h
> +++ b/drivers/gpu/drm/i915/intel_guc_ct.h
> @@ -73,6 +73,8 @@ struct intel_guc_ct_channel {
>    * @host_channel: main channel used by the host
>    * @lock: spin lock for pending requests list
>    * @pending_requests: list of pending requests
> + * @incoming_requests: list of incoming requests
> + * @worker: worker for handling incoming requests
>    */
>   struct intel_guc_ct {
>   	struct intel_guc_ct_channel host_channel;
> @@ -80,6 +82,8 @@ struct intel_guc_ct {
>   
>   	spinlock_t lock;
>   	struct list_head pending_requests;
> +	struct list_head incoming_requests;
> +	struct work_struct worker;
>   };
>   
>   void intel_guc_ct_init_early(struct intel_guc_ct *ct);
> 

