[Intel-gfx] [PATCH v4 2/2] drm/i915/guc: Decouple logs and ADS from submission

Sujaritha Sundaresan sujaritha.sundaresan at intel.com
Thu Sep 21 18:39:13 UTC 2017


The Additional Data Struct (ADS) contains objects that are required by
the GuC after firmware load and are not necessarily submission-only
(although that is currently our only use case). If in the future we load
the GuC with submission disabled in order to use some other GuC feature,
we might still end up requiring something inside the ADS, so it makes
more sense for it to always be created when the GuC is loaded.

Similarly, we still want access to the GuC logs even if GuC submission is
disabled, to debug issues with GuC loading or with whatever else we are
using the GuC for.

v2: Decouple the ADS together with the logs
v3: Group initialization of GuC objects
v4: Rebased

Cc: Michal Wajdeczko <michal.wajdeczko at intel.com>
Cc: Oscar Mateo <oscar.mateo at intel.com>
Cc: Anusha Srivatsa <anusha.srivatsa at intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio at intel.com>

Signed-off-by: Sujaritha Sundaresan <sujaritha.sundaresan at intel.com>
---
 drivers/gpu/drm/i915/i915_guc_submission.c | 121 +-----------------------
 drivers/gpu/drm/i915/intel_guc_log.c       |   6 +-
 drivers/gpu/drm/i915/intel_uc.c            | 145 ++++++++++++++++++++++++++---
 drivers/gpu/drm/i915/intel_uc.h            |   6 +-
 4 files changed, 143 insertions(+), 135 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index e191d56..02fb1e6 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -71,13 +71,6 @@
  * ELSP context descriptor dword into Work Item.
  * See guc_wq_item_append()
  *
- * ADS:
- * The Additional Data Struct (ADS) has pointers for different buffers used by
- * the GuC. One single gem object contains the ADS struct itself (guc_ads), the
- * scheduling policies (guc_policies), a structure describing a collection of
- * register sets (guc_mmio_reg_state) and some extra pages for the GuC to save
- * its internal state for sleep.
- *
  */
 
 static inline bool is_high_priority(struct i915_guc_client* client)
@@ -898,7 +891,7 @@ static void guc_policy_init(struct guc_policy *policy)
 	policy->policy_flags = 0;
 }
 
-static void guc_policies_init(struct guc_policies *policies)
+void i915_guc_policies_init(struct guc_policies *policies)
 {
 	struct guc_policy *policy;
 	u32 p, i;
@@ -918,97 +911,13 @@ static void guc_policies_init(struct guc_policies *policies)
 }
 
 /*
- * The first 80 dwords of the register state context, containing the
- * execlists and ppgtt registers.
- */
-#define LR_HW_CONTEXT_SIZE	(80 * sizeof(u32))
-
-static int guc_ads_create(struct intel_guc *guc)
-{
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-	struct i915_vma *vma;
-	struct page *page;
-	/* The ads obj includes the struct itself and buffers passed to GuC */
-	struct {
-		struct guc_ads ads;
-		struct guc_policies policies;
-		struct guc_mmio_reg_state reg_state;
-		u8 reg_state_buffer[GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE];
-	} __packed *blob;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	const u32 skipped_offset = LRC_HEADER_PAGES * PAGE_SIZE;
-	const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE;
-	u32 base;
-
-	GEM_BUG_ON(guc->ads_vma);
-
-	vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(sizeof(*blob)));
-	if (IS_ERR(vma))
-		return PTR_ERR(vma);
-
-	guc->ads_vma = vma;
-
-	page = i915_vma_first_page(vma);
-	blob = kmap(page);
-
-	/* GuC scheduling policies */
-	guc_policies_init(&blob->policies);
-
-	/* MMIO reg state */
-	for_each_engine(engine, dev_priv, id) {
-		blob->reg_state.white_list[engine->guc_id].mmio_start =
-			engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
-
-		/* Nothing to be saved or restored for now. */
-		blob->reg_state.white_list[engine->guc_id].count = 0;
-	}
-
-	/*
-	 * The GuC requires a "Golden Context" when it reinitialises
-	 * engines after a reset. Here we use the Render ring default
-	 * context, which must already exist and be pinned in the GGTT,
-	 * so its address won't change after we've told the GuC where
-	 * to find it. Note that we have to skip our header (1 page),
-	 * because our GuC shared data is there.
-	 */
-	blob->ads.golden_context_lrca =
-		guc_ggtt_offset(dev_priv->kernel_context->engine[RCS].state) + skipped_offset;
-
-	/*
-	 * The GuC expects us to exclude the portion of the context image that
-	 * it skips from the size it is to read. It starts reading from after
-	 * the execlist context (so skipping the first page [PPHWSP] and 80
-	 * dwords). Weird guc is weird.
-	 */
-	for_each_engine(engine, dev_priv, id)
-		blob->ads.eng_state_size[engine->guc_id] = engine->context_size - skipped_size;
-
-	base = guc_ggtt_offset(vma);
-	blob->ads.scheduler_policies = base + ptr_offset(blob, policies);
-	blob->ads.reg_state_buffer = base + ptr_offset(blob, reg_state_buffer);
-	blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state);
-
-	kunmap(page);
-
-	return 0;
-}
-
-static void guc_ads_destroy(struct intel_guc *guc)
-{
-	i915_vma_unpin_and_release(&guc->ads_vma);
-}
-
-/*
  * Set up the memory resources to be shared with the GuC (via the GGTT)
  * at firmware loading time.
  */
-int i915_guc_submission_init(struct drm_i915_private *dev_priv)
+int i915_guc_submission_shared_objects_init(struct intel_guc *guc)
 {
-	struct intel_guc *guc = &dev_priv->guc;
 	struct i915_vma *vma;
 	void *vaddr;
-	int ret;
 
 	if (guc->stage_desc_pool)
 		return 0;
@@ -1023,40 +932,20 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv)
 
 	vaddr = i915_gem_object_pin_map(guc->stage_desc_pool->obj, I915_MAP_WB);
 	if (IS_ERR(vaddr)) {
-		ret = PTR_ERR(vaddr);
-		goto err_vma;
+		i915_vma_unpin_and_release(&guc->stage_desc_pool);
+		return PTR_ERR(vaddr);
 	}
 
 	guc->stage_desc_pool_vaddr = vaddr;
 
-	ret = intel_guc_log_create(guc);
-	if (ret < 0)
-		goto err_vaddr;
-
-	ret = guc_ads_create(guc);
-	if (ret < 0)
-		goto err_log;
-
 	ida_init(&guc->stage_ids);
 
 	return 0;
-
-err_log:
-	intel_guc_log_destroy(guc);
-err_vaddr:
-	i915_gem_object_unpin_map(guc->stage_desc_pool->obj);
-err_vma:
-	i915_vma_unpin_and_release(&guc->stage_desc_pool);
-	return ret;
 }
 
-void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
+void i915_guc_submission_shared_objects_fini(struct intel_guc *guc)
 {
-	struct intel_guc *guc = &dev_priv->guc;
-
 	ida_destroy(&guc->stage_ids);
-	guc_ads_destroy(guc);
-	intel_guc_log_destroy(guc);
 	i915_gem_object_unpin_map(guc->stage_desc_pool->obj);
 	i915_vma_unpin_and_release(&guc->stage_desc_pool);
 }
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 16d3b87..9a44d1d 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -502,7 +502,7 @@ static void guc_flush_logs(struct intel_guc *guc)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 
-	if (!i915.enable_guc_submission || (i915.guc_log_level < 0))
+	if (!NEEDS_GUC_LOADING(dev_priv) || (i915.guc_log_level < 0))
 		return;
 
 	/* First disable the interrupts, will be renabled afterwards */
@@ -641,7 +641,7 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
 
 void i915_guc_log_register(struct drm_i915_private *dev_priv)
 {
-	if (!i915.enable_guc_submission || i915.guc_log_level < 0)
+	if (!NEEDS_GUC_LOADING(dev_priv) || i915.guc_log_level < 0)
 		return;
 
 	mutex_lock(&dev_priv->drm.struct_mutex);
@@ -651,7 +651,7 @@ void i915_guc_log_register(struct drm_i915_private *dev_priv)
 
 void i915_guc_log_unregister(struct drm_i915_private *dev_priv)
 {
-	if (!i915.enable_guc_submission)
+	if (!NEEDS_GUC_LOADING(dev_priv))
 		return;
 
 	mutex_lock(&dev_priv->drm.struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index b898486..4fa55b5 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -326,6 +326,130 @@ static int guc_enable_communication(struct intel_guc *guc)
 	return 0;
 }
 
+/*
+ * The first 80 dwords of the register state context, containing the
+ * execlists and ppgtt registers.
+ */
+#define LR_HW_CONTEXT_SIZE	(80 * sizeof(u32))
+
+/*
+ * The Additional Data Struct (ADS) has pointers for different buffers used by
+ * the GuC. One single gem object contains the ADS struct itself (guc_ads), the
+ * scheduling policies (guc_policies), a structure describing a collection of
+ * register sets (guc_mmio_reg_state) and some extra pages for the GuC to save
+ * its internal state for sleep.
+ */
+static int guc_ads_create(struct intel_guc *guc)
+{
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct i915_vma *vma;
+	struct page *page;
+	/* The ads obj includes the struct itself and buffers passed to GuC */
+	struct {
+		struct guc_ads ads;
+		struct guc_policies policies;
+		struct guc_mmio_reg_state reg_state;
+		u8 reg_state_buffer[GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE];
+	} __packed *blob;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	const u32 skipped_offset = LRC_HEADER_PAGES * PAGE_SIZE;
+	const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE;
+	u32 base;
+
+	GEM_BUG_ON(guc->ads_vma);
+
+	vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(sizeof(*blob)));
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
+
+	guc->ads_vma = vma;
+
+	page = i915_vma_first_page(vma);
+	blob = kmap(page);
+
+	/* GuC scheduling policies */
+	i915_guc_policies_init(&blob->policies);
+
+	/* MMIO reg state */
+	for_each_engine(engine, dev_priv, id) {
+		blob->reg_state.white_list[engine->guc_id].mmio_start =
+			engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
+
+		/* Nothing to be saved or restored for now. */
+		blob->reg_state.white_list[engine->guc_id].count = 0;
+	}
+
+	/*
+	 * The GuC requires a "Golden Context" when it reinitialises
+	 * engines after a reset. Here we use the Render ring default
+	 * context, which must already exist and be pinned in the GGTT,
+	 * so its address won't change after we've told the GuC where
+	 * to find it. Note that we have to skip our header (1 page),
+	 * because our GuC shared data is there.
+	 */
+	blob->ads.golden_context_lrca =
+		guc_ggtt_offset(dev_priv->kernel_context->engine[RCS].state) + skipped_offset;
+
+	/*
+	 * The GuC expects us to exclude the portion of the context image that
+	 * it skips from the size it is to read. It starts reading from after
+	 * the execlist context (so skipping the first page [PPHWSP] and 80
+	 * dwords). Weird guc is weird.
+	 */
+	for_each_engine(engine, dev_priv, id)
+		blob->ads.eng_state_size[engine->guc_id] = engine->context_size - skipped_size;
+
+	base = guc_ggtt_offset(vma);
+	blob->ads.scheduler_policies = base + ptr_offset(blob, policies);
+	blob->ads.reg_state_buffer = base + ptr_offset(blob, reg_state_buffer);
+	blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state);
+
+	kunmap(page);
+
+	return 0;
+}
+
+static void guc_ads_destroy(struct intel_guc *guc)
+{
+	i915_vma_unpin_and_release(&guc->ads_vma);
+}
+
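+/*
+ * Allocate the GuC objects needed at firmware load time regardless of
+ * whether GuC submission is enabled: the log buffer and the ADS. The
+ * submission-specific shared objects are also created here when
+ * submission is going to be used.
+ */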
+static int guc_shared_objects_init(struct intel_guc *guc)
+{
+	int ret;
+
+	if (guc->ads_vma)
+		return 0;
+
+	ret = intel_guc_log_create(guc);
+	if (ret < 0)
+		return ret;
+
+	ret = guc_ads_create(guc);
+	if (ret < 0)
+		goto err_logs;
+
+	if (i915.enable_guc_submission) {
+		/*
+		 * This is stuff we need to have available at fw load time
+		 * if we are planning to enable submission later
+		 */
+		ret = i915_guc_submission_shared_objects_init(guc);
+		if (ret)
+			goto err_ads;
+	}
+
+	return 0;
+
+err_ads:
+	guc_ads_destroy(guc);
+err_logs:
+	intel_guc_log_destroy(guc);
+
+	return ret;
+}
+
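+/*
+ * Release the objects created by guc_shared_objects_init(), in reverse
+ * order of creation.
+ */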
+static void guc_shared_objects_fini(struct intel_guc *guc)
+{
+	if (i915.enable_guc_submission)
+		i915_guc_submission_shared_objects_fini(guc);
+
+	guc_ads_destroy(guc);
+	intel_guc_log_destroy(guc);
+}
+
 static void guc_disable_communication(struct intel_guc *guc)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -350,15 +474,9 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
 	/* We need to notify the guc whenever we change the GGTT */
 	i915_ggtt_enable_guc(dev_priv);
 
-	if (i915.enable_guc_submission) {
-		/*
-		 * This is stuff we need to have available at fw load time
-		 * if we are planning to enable submission later
-		 */
-		ret = i915_guc_submission_init(dev_priv);
-		if (ret)
-			goto err_guc;
-	}
+	ret = guc_shared_objects_init(guc);
+	if (ret)
+		goto err_guc;
 
 	/* init WOPCM */
 	I915_WRITE(GUC_WOPCM_SIZE, intel_guc_wopcm_size(dev_priv));
@@ -379,7 +497,7 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
 		 */
 		ret = __intel_uc_reset_hw(dev_priv);
 		if (ret)
-			goto err_submission;
+			goto err_shared;
 
 		intel_huc_init_hw(&dev_priv->huc);
 		ret = intel_guc_init_hw(&dev_priv->guc);
@@ -424,9 +542,8 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
 	gen9_disable_guc_interrupts(dev_priv);
 err_log_capture:
 	guc_capture_load_err_log(guc);
-err_submission:
-	if (i915.enable_guc_submission)
-		i915_guc_submission_fini(dev_priv);
+err_shared:
+	guc_shared_objects_fini(guc);
 err_guc:
 	i915_ggtt_disable_guc(dev_priv);
 
@@ -460,9 +577,9 @@ void intel_uc_fini_hw(struct drm_i915_private *dev_priv)
 
 	if (i915.enable_guc_submission) {
 		gen9_disable_guc_interrupts(dev_priv);
-		i915_guc_submission_fini(dev_priv);
 	}
 
+	guc_shared_objects_fini(&dev_priv->guc);
 	i915_ggtt_disable_guc(dev_priv);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index 7703c9a..16bd954 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -230,11 +230,13 @@ static inline void intel_guc_notify(struct intel_guc *guc)
 u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv);
 
 /* i915_guc_submission.c */
-int i915_guc_submission_init(struct drm_i915_private *dev_priv);
+int i915_guc_submission_shared_objects_init(struct intel_guc *guc);
 int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
 void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
-void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
+void i915_guc_submission_shared_objects_fini(struct intel_guc *guc);
 struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
+void i915_guc_policies_init(struct guc_policies *policies);
 
 /* intel_guc_log.c */
 int intel_guc_log_create(struct intel_guc *guc);
-- 
1.9.1


