[Intel-gfx] [PATCH v2 09/22] drm/i915/guc: Update GuC ADS object definition

Daniele Ceraolo Spurio daniele.ceraolospurio at intel.com
Sat Apr 13 01:16:49 UTC 2019



On 4/11/19 1:44 AM, Michal Wajdeczko wrote:
> New GuC firmwares use updated definitions for the Additional Data
> Structures (ADS).
> 
> Signed-off-by: Michal Wajdeczko <michal.wajdeczko at intel.com>
> Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio at intel.com>
> Signed-off-by: Fernando Pacheco <fernando.pacheco at intel.com>
> Cc: Joonas Lahtinen <joonas.lahtinen at linux.intel.com>
> Cc: John Spotswood <john.a.spotswood at intel.com>
> Cc: Tomasz Lis <tomasz.lis at intel.com>
> ---
>   drivers/gpu/drm/i915/intel_engine_cs.c  |  5 ++
>   drivers/gpu/drm/i915/intel_guc_ads.c    | 94 +++++++++++++++----------
>   drivers/gpu/drm/i915/intel_guc_fwif.h   | 89 +++++++++++++----------
>   drivers/gpu/drm/i915/intel_ringbuffer.h |  2 +
>   4 files changed, 117 insertions(+), 73 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
> index eea9bec04f1b..2a4d1527e171 100644
> --- a/drivers/gpu/drm/i915/intel_engine_cs.c
> +++ b/drivers/gpu/drm/i915/intel_engine_cs.c
> @@ -231,6 +231,11 @@ __intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
>   	}
>   }
>   
> +u32 intel_class_context_size(struct drm_i915_private *dev_priv, u8 class)
> +{
> +	return __intel_engine_context_size(dev_priv, class);
> +}
> +

Any reason not to just rename __intel_engine_context_size to 
intel_class_context_size?
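i.e. keep a single function and just drop the underscores, something like
(untested sketch, assuming the helper is currently a static local in
intel_engine_cs.c):

-static u32 __intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
+u32 intel_class_context_size(struct drm_i915_private *dev_priv, u8 class)

and then fix up the existing callers, rather than adding a wrapper.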

>   static u32 __engine_mmio_base(struct drm_i915_private *i915,
>   			      const struct engine_mmio_base *bases)
>   {
> diff --git a/drivers/gpu/drm/i915/intel_guc_ads.c b/drivers/gpu/drm/i915/intel_guc_ads.c
> index bec62f34b15a..abab5cb6909a 100644
> --- a/drivers/gpu/drm/i915/intel_guc_ads.c
> +++ b/drivers/gpu/drm/i915/intel_guc_ads.c
> @@ -51,7 +51,7 @@ static void guc_policies_init(struct guc_policies *policies)

Possible future improvement: the interface definitions say that the policies 
page doesn't need to be kept in memory because it is copied during load, so 
we could release it after load. They also say that the default values (which 
I believe are what we're setting here) are used automatically if the page is 
not set, so we could drop this entirely until we need a customized value, 
but I'm not sure we want to go through the churn.
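
If we did want to drop it, I'd expect it to collapse to something like
(untested sketch, assuming the firmware really does fall back to its
built-in defaults when no policies page is provided):

	/* Rely on the GuC default scheduling policies for now */
	blob->ads.scheduler_policies = 0;

with guc_policies_init() and struct guc_policies going away until we need
to override a value.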

>   	policies->max_num_work_items = POLICY_MAX_NUM_WI;
>   
>   	for (p = 0; p < GUC_CLIENT_PRIORITY_NUM; p++) {
> -		for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
> +		for (i = 0; i < GUC_MAX_ENGINE_CLASSES; i++) {
>   			policy = &policies->policy[p][i];
>   
>   			guc_policy_init(policy);
> @@ -61,6 +61,11 @@ static void guc_policies_init(struct guc_policies *policies)
>   	policies->is_valid = 1;
>   }
>   
> +static void guc_ct_pool_entries_init(struct guc_ct_pool_entry *pool, u32 num)
> +{
> +	memset(pool, 0, num * sizeof(*pool));
> +}
> +
>   /*
>    * The first 80 dwords of the register state context, containing the
>    * execlists and ppgtt registers.
> @@ -75,20 +80,21 @@ static void guc_policies_init(struct guc_policies *policies)
>   int intel_guc_ads_create(struct intel_guc *guc)
>   {
>   	struct drm_i915_private *dev_priv = guc_to_i915(guc);
> -	struct i915_vma *vma, *kernel_ctx_vma;
> -	struct page *page;
> +	struct i915_vma *vma;
>   	/* The ads obj includes the struct itself and buffers passed to GuC */
>   	struct {
>   		struct guc_ads ads;
>   		struct guc_policies policies;
>   		struct guc_mmio_reg_state reg_state;
> +		struct guc_gt_system_info system_info;
> +		struct guc_clients_info clients_info;
> +		struct guc_ct_pool_entry ct_pool[GUC_CT_POOL_SIZE];
>   		u8 reg_state_buffer[GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE];
>   	} __packed *blob;
> -	struct intel_engine_cs *engine;
> -	enum intel_engine_id id;
> -	const u32 skipped_offset = LRC_HEADER_PAGES * PAGE_SIZE;
>   	const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE;
>   	u32 base;
> +	u8 engine_class;
> +	int ret;
>   
>   	GEM_BUG_ON(guc->ads_vma);
>   
> @@ -98,51 +104,67 @@ int intel_guc_ads_create(struct intel_guc *guc)
>   
>   	guc->ads_vma = vma;
>   
> -	page = i915_vma_first_page(vma);
> -	blob = kmap(page);
> +	blob = i915_gem_object_pin_map(guc->ads_vma->obj, I915_MAP_WB);
> +	if (IS_ERR(blob)) {
> +		ret = PTR_ERR(blob);
> +		goto err_vma;
> +	}
>   
>   	/* GuC scheduling policies */
>   	guc_policies_init(&blob->policies);
>   
> -	/* MMIO reg state */
> -	for_each_engine(engine, dev_priv, id) {
> -		blob->reg_state.white_list[engine->guc_id].mmio_start =
> -			engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
> -
> -		/* Nothing to be saved or restored for now. */
> -		blob->reg_state.white_list[engine->guc_id].count = 0;
> -	}
> -
>   	/*
> -	 * The GuC requires a "Golden Context" when it reinitialises
> -	 * engines after a reset. Here we use the Render ring default
> -	 * context, which must already exist and be pinned in the GGTT,
> -	 * so its address won't change after we've told the GuC where
> -	 * to find it. Note that we have to skip our header (1 page),
> -	 * because our GuC shared data is there.
> +	 * GuC expects a per-engine-class context image and size
> +	 * (minus hwsp and ring context). The context image will be
> +	 * used to reinitialize engines after a reset. It must exist
> +	 * and be pinned in the GGTT, so that the address won't change after
> +	 * we have told GuC where to find it. The context size will be used
> +	 * to validate that the LRC base + size fall within allowed GGTT.
>   	 */
> -	kernel_ctx_vma = dev_priv->engine[RCS0]->kernel_context->state;
> -	blob->ads.golden_context_lrca =
> -		intel_guc_ggtt_offset(guc, kernel_ctx_vma) + skipped_offset;
> +	for (engine_class = 0; engine_class <= MAX_ENGINE_CLASS; ++engine_class) {
> +		if (engine_class == OTHER_CLASS)
> +			continue;
> +		/*
> +		 * TODO: Set context pointer to default state to allow
> +		 * GuC to re-init guilty contexts after internal reset.
> +		 */
> +		blob->ads.golden_context_lrca[engine_class] = 0;
> +		blob->ads.eng_state_size[engine_class] =
> +			intel_class_context_size(dev_priv, engine_class) - skipped_size;
> +	}
>   
> -	/*
> -	 * The GuC expects us to exclude the portion of the context image that
> -	 * it skips from the size it is to read. It starts reading from after
> -	 * the execlist context (so skipping the first page [PPHWSP] and 80
> -	 * dwords). Weird guc is weird.
> -	 */
> -	for_each_engine(engine, dev_priv, id)
> -		blob->ads.eng_state_size[engine->guc_id] =
> -			engine->context_size - skipped_size;
> +	/* System info */
> +	blob->system_info.slice_enabled = hweight8(RUNTIME_INFO(dev_priv)->sseu.slice_mask);
> +	blob->system_info.rcs_enabled = 1;
> +	blob->system_info.bcs_enabled = 1;
> +
> +	blob->system_info.vdbox_enable_mask = VDBOX_MASK(dev_priv);
> +	blob->system_info.vebox_enable_mask = VEBOX_MASK(dev_priv);
> +	blob->system_info.vdbox_sfc_support_mask = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
>   
>   	base = intel_guc_ggtt_offset(guc, vma);
> +
> +	/* Clients info  */
> +	guc_ct_pool_entries_init(blob->ct_pool, ARRAY_SIZE(blob->ct_pool));
> +
> +	blob->clients_info.clients_num = 1;
> +	blob->clients_info.ct_pool_addr = base + ptr_offset(blob, ct_pool);
> +	blob->clients_info.ct_pool_count = ARRAY_SIZE(blob->ct_pool);
> +
> +	/* ADS */
>   	blob->ads.scheduler_policies = base + ptr_offset(blob, policies);
>   	blob->ads.reg_state_buffer = base + ptr_offset(blob, reg_state_buffer);
>   	blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state);
> +	blob->ads.gt_system_info = base + ptr_offset(blob, system_info);
> +	blob->ads.clients_info = base + ptr_offset(blob, clients_info);
>   
> -	kunmap(page);
> +	i915_gem_object_unpin_map(guc->ads_vma->obj);
>   
>   	return 0;
> +
> +err_vma:
> +	i915_vma_unpin_and_release(&guc->ads_vma, 0);
> +	return ret;
>   }
>   
>   void intel_guc_ads_destroy(struct intel_guc *guc)
> diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
> index dd9d99dc2aca..68dfeecf7b26 100644
> --- a/drivers/gpu/drm/i915/intel_guc_fwif.h
> +++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
> @@ -39,6 +39,9 @@
>   #define GUC_VIDEO_ENGINE2		4
>   #define GUC_MAX_ENGINES_NUM		(GUC_VIDEO_ENGINE2 + 1)
>   
> +#define GUC_MAX_ENGINE_CLASSES		5
> +#define GUC_MAX_INSTANCES_PER_CLASS	4

Hmm, inside the GuC firmware this is defined as:

#if GEN9
#define GUC_MAX_INSTANCES_PER_CLASS 1
#else
#define GUC_MAX_INSTANCES_PER_CLASS 4
#endif

Now, this is only used for the regset, which only matters if the GuC is 
doing engine resets. Since we're never going to enable GuC submission on 
Gen9 with the new interface we should be OK, but we should at least add a 
comment. I've mentioned it to the GuC team and they say it is on their 
list of things to fix, so the firmware should move to a unified define 
soon-ish.
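
Something along these lines would do (wording is just a suggestion):

/*
 * NOTE: current Gen9 GuC firmware is built with
 * GUC_MAX_INSTANCES_PER_CLASS == 1. The mismatch only affects the regset,
 * which is only used for GuC-based engine resets, and we never enable GuC
 * submission on Gen9 with the new interface, so 4 is safe until the
 * firmware moves to a unified define.
 */
#define GUC_MAX_INSTANCES_PER_CLASS	4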

Daniele

> +
>   #define GUC_DOORBELL_INVALID		256
>   
>   #define GUC_DB_SIZE			(PAGE_SIZE)
> @@ -397,23 +400,19 @@ struct guc_ct_buffer_desc {
>   struct guc_policy {
>   	/* Time for one workload to execute. (in micro seconds) */
>   	u32 execution_quantum;
> -	u32 reserved1;
> -
>   	/* Time to wait for a preemption request to completed before issuing a
>   	 * reset. (in micro seconds). */
>   	u32 preemption_time;
> -
>   	/* How much time to allow to run after the first fault is observed.
>   	 * Then preempt afterwards. (in micro seconds) */
>   	u32 fault_time;
> -
>   	u32 policy_flags;
> -	u32 reserved[2];
> +	u32 reserved[8];
>   } __packed;
>   
>   struct guc_policies {
> -	struct guc_policy policy[GUC_CLIENT_PRIORITY_NUM][GUC_MAX_ENGINES_NUM];
> -
> +	struct guc_policy policy[GUC_CLIENT_PRIORITY_NUM][GUC_MAX_ENGINE_CLASSES];
> +	u32 submission_queue_depth[GUC_MAX_ENGINE_CLASSES];
>   	/* In micro seconds. How much time to allow before DPC processing is
>   	 * called back via interrupt (to prevent DPC queue drain starving).
>   	 * Typically 1000s of micro seconds (example only, not granularity). */
> @@ -426,57 +425,73 @@ struct guc_policies {
>   	 * idle. */
>   	u32 max_num_work_items;
>   
> -	u32 reserved[19];
> +	u32 reserved[4];
>   } __packed;
>   
>   /* GuC MMIO reg state struct */
>   
> -#define GUC_REGSET_FLAGS_NONE		0x0
> -#define GUC_REGSET_POWERCYCLE		0x1
> -#define GUC_REGSET_MASKED		0x2
> -#define GUC_REGSET_ENGINERESET		0x4
> -#define GUC_REGSET_SAVE_DEFAULT_VALUE	0x8
> -#define GUC_REGSET_SAVE_CURRENT_VALUE	0x10
>   
> -#define GUC_REGSET_MAX_REGISTERS	25
> -#define GUC_MMIO_WHITE_LIST_START	0x24d0
> -#define GUC_MMIO_WHITE_LIST_MAX		12
> +#define GUC_REGSET_MAX_REGISTERS	64
>   #define GUC_S3_SAVE_SPACE_PAGES		10
>   
> -struct guc_mmio_regset {
> -	struct __packed {
> -		u32 offset;
> -		u32 value;
> -		u32 flags;
> -	} registers[GUC_REGSET_MAX_REGISTERS];
> +struct guc_mmio_reg {
> +	u32 offset;
> +	u32 value;
> +	u32 flags;
> +#define GUC_REGSET_MASKED		(1 << 0)
> +} __packed;
>   
> +struct guc_mmio_regset {
> +	struct guc_mmio_reg registers[GUC_REGSET_MAX_REGISTERS];
>   	u32 values_valid;
>   	u32 number_of_registers;
>   } __packed;
>   
> -/* MMIO registers that are set as non privileged */
> -struct mmio_white_list {
> -	u32 mmio_start;
> -	u32 offsets[GUC_MMIO_WHITE_LIST_MAX];
> -	u32 count;
> +/* GuC register sets */
> +struct guc_mmio_reg_state {
> +	struct guc_mmio_regset engine_reg[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
> +	u32 reserved[98];
>   } __packed;
>   
> -struct guc_mmio_reg_state {
> -	struct guc_mmio_regset global_reg;
> -	struct guc_mmio_regset engine_reg[GUC_MAX_ENGINES_NUM];
> -	struct mmio_white_list white_list[GUC_MAX_ENGINES_NUM];
> +/* HW info */
> +struct guc_gt_system_info {
> +	u32 slice_enabled;
> +	u32 rcs_enabled;
> +	u32 reserved0;
> +	u32 bcs_enabled;
> +	u32 vdbox_enable_mask;
> +	u32 vdbox_sfc_support_mask;
> +	u32 vebox_enable_mask;
> +	u32 reserved[9];
>   } __packed;
>   
> -/* GuC Additional Data Struct */
> +/* Clients info */
> +struct guc_ct_pool_entry {
> +	struct guc_ct_buffer_desc desc;
> +	u32 reserved[7];
> +} __packed;
>   
> +#define GUC_CT_POOL_SIZE	2
> +
> +struct guc_clients_info {
> +	u32 clients_num;
> +	u32 reserved0[13];
> +	u32 ct_pool_addr;
> +	u32 ct_pool_count;
> +	u32 reserved[4];
> +} __packed;
> +
> +/* GuC Additional Data Struct */
>   struct guc_ads {
>   	u32 reg_state_addr;
>   	u32 reg_state_buffer;
> -	u32 golden_context_lrca;
>   	u32 scheduler_policies;
> -	u32 reserved0[3];
> -	u32 eng_state_size[GUC_MAX_ENGINES_NUM];
> -	u32 reserved2[4];
> +	u32 gt_system_info;
> +	u32 clients_info;
> +	u32 control_data;
> +	u32 golden_context_lrca[GUC_MAX_ENGINE_CLASSES];
> +	u32 eng_state_size[GUC_MAX_ENGINE_CLASSES];
> +	u32 reserved[16];
>   } __packed;
>   
>   /* GuC logging structures */
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index 0dea6c7fd438..584eec348412 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -547,6 +547,8 @@ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);
>   struct i915_request *
>   intel_engine_find_active_request(struct intel_engine_cs *engine);
>   
> +u32 intel_class_context_size(struct drm_i915_private *dev_priv, u8 class);
> +
>   #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
>   
>   static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
> 

