[PATCH v7 3/8] drm/i915/pxp: Add MTL helpers to submit Heci-Cmd-Packet to GSC

Teres Alexis, Alan Previn alan.previn.teres.alexis at intel.com
Mon Apr 17 17:56:12 UTC 2023


On Mon, 2023-04-10 at 09:10 -0700, Ceraolo Spurio, Daniele wrote:
> 
alan:snip

> > +int
> > +intel_gsc_uc_heci_cmd_submit_nonpriv(struct intel_gsc_uc *gsc,
> > +				     struct intel_context *ce,
> > +				     struct intel_gsc_heci_non_priv_pkt *pkt,
> > +				     u32 *cmd, int timeout_ms)
> > +{
> > +	struct intel_engine_cs *eng;
> 
> We always use "engine" for engine_cs variables. IMO it's better to stick 
> to that here as well for consistency across the code.
alan: will do
> 
> > +	struct i915_gem_ww_ctx ww;
> > +	struct i915_request *rq;
> > +	int err, trials = 0;
> > +
> 
> Is the assumption that the caller is holding a wakeref already? 
> Otherwise we're going to need an engine_pm_get() here (assuming it 
> doesn't interfere with any locking, otherwise it has to be in the caller)
alan: right now the only callers are intel_pxp_gsccs_create_session and
intel_pxp_gsccs_end_arb_fw_session, which are in turn called from intel_pxp_start
and intel_pxp_end respectively. intel_pxp_start calls intel_runtime_pm_get_if_in_use
indirectly from the session worker, while intel_pxp_end takes an explicit
intel_runtime_pm_get (since it is for suspend/shutdown cleanup and doesn't use the
worker). So I'm assuming the runtime-pm ref is sufficient here - does that sound right?
We have similar logic on the ADL paths, where we don't take an explicit engine_pm_get
for the termination via VDBOX because it is part of the same code paths.
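
alan: to illustrate, here is a rough sketch of the caller-side pattern I mean
(the body and struct fields are paraphrased, not the exact patch code):

	/* sketch: intel_pxp_end() runs outside the session worker, so it
	 * wraps the arb-session termination in an explicit runtime-pm ref.
	 */
	void intel_pxp_end(struct intel_pxp *pxp)
	{
		struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
		intel_wakeref_t wakeref;

		wakeref = intel_runtime_pm_get(&i915->runtime_pm);

		/* ... submit the termination heci-cmd-packet to the GSC ... */

		intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	}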

alan:snip

> > +	err = i915_vma_move_to_active(pkt->bb_vma, rq, EXEC_OBJECT_WRITE);
> 
> nit: I don't think we need EXEC_OBJECT_WRITE for the bb as we're not 
> going to write it.
alan: yes - will remove. (I had accidentally kept that flag from an offline
debugging version of the code that emitted additional store-dwords into the bb.)
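
alan: i.e. the call becomes:

	err = i915_vma_move_to_active(pkt->bb_vma, rq, 0);

(the heci_pkt_vma below keeps EXEC_OBJECT_WRITE, since the GSC writes its
response back into that buffer.)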

> 
> > +	if (err)
> > +		goto out_rq;
> > +	err = i915_vma_move_to_active(pkt->heci_pkt_vma, rq, EXEC_OBJECT_WRITE);
> > +	if (err)
> > +		goto out_rq;
> > +
> > +	eng = rq->context->engine;
> > +	if (eng->emit_init_breadcrumb) {
> > +		err = eng->emit_init_breadcrumb(rq);
> > +		if (err)
> > +			goto out_rq;
> > +	}
> > +
> > +	err = eng->emit_bb_start(rq, i915_vma_offset(pkt->bb_vma), PAGE_SIZE, 0);
> > +	if (err)
> > +		goto out_rq;
> > +
> > +	err = ce->engine->emit_flush(rq, 0);
> > +	if (err)
> > +		drm_err(&gsc_uc_to_gt(gsc)->i915->drm,
> > +			"Failed emit-flush for gsc-heci-non-priv-pkt err=%d\n", err);
> > +
> > +out_rq:
> > +	i915_request_get(rq);
> > +
> > +	if (unlikely(err))
> > +		i915_request_set_error_once(rq, err);
> > +
> > +	i915_request_add(rq);
> > +
> > +	if (!err) {
> > +		if (i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
> > +				      msecs_to_jiffies(timeout_ms)) < 0)
> > +			err = -ETIME;
> > +	}
> > +
> > +	i915_request_put(rq);
> > +
> > +out_unpin_ce:
> > +	intel_context_unpin(ce);
> > +out_ww:
> > +	if (err == -EDEADLK) {
> > +		err = i915_gem_ww_ctx_backoff(&ww);
> > +		if (!err) {
> > +			if (++trials < 10)
> > +				goto retry;
> > +			else
> > +				err = -EAGAIN;
> > +		}
> > +	}
> > +	i915_gem_ww_ctx_fini(&ww);
> > +
> > +	return err;
> > +}
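
alan: side note since the quote skips the middle of the function: the
retry/out_ww/out_unpin_ce labels above are the standard i915 ww-lock dance.
Between the declarations and the move_to_active calls, the flow is roughly
this (a sketch, not the exact patch code):

	i915_gem_ww_ctx_init(&ww, false);
retry:
	err = intel_context_pin_ww(ce, &ww);
	if (err)
		goto out_ww;

	rq = i915_request_create(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out_unpin_ce;
	}
	/* ... then the move_to_active/emit sequence quoted above ... */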
> > diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h
> > index 3d56ae501991..3addce861854 100644
> > --- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h
> > +++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h
> > @@ -8,7 +8,10 @@
> >   
> >   #include <linux/types.h>
> >   
> > +struct i915_vma;
> > +struct intel_context;
> >   struct intel_gsc_uc;
> > +
> >   struct intel_gsc_mtl_header {
> >   	u32 validity_marker;
> >   #define GSC_HECI_VALIDITY_MARKER 0xA578875A
> > @@ -47,7 +50,7 @@ struct intel_gsc_mtl_header {
> >   	 * we distinguish the flags using OUTFLAG or INFLAG
> >   	 */
> >   	u32 flags;
> > -#define GSC_OUTFLAG_MSG_PENDING	1
> > +#define GSC_OUTFLAG_MSG_PENDING 1
> 
> Nit: this change to the define is not really needed
alan: sure - will fix.
> 
> Daniele


