[Intel-gfx] [RFC] drm/i915/guc: Smurf the GuC context

Oscar Mateo oscar.mateo at intel.com
Fri Feb 24 14:33:55 UTC 2017


While trying to get up to speed with the GuC, the thing that bothers me the
most is that we have so many things called "context" in the driver that, when
I talk with someone about it, we invariably end up sounding like The Smurfs
("update the context ID in the GuC context so that the HW context gets
submitted correctly!").

Currently, we have the following structs in our driver:

GEM contexts (i915_gem_context)
HW contexts (intel_context)
GuC execlist contexts (guc_execlist_context)
GuC contexts (guc_context_desc)

The first three are all somehow related to each other, while the fourth one is
a completely different thing (it describes GuC state, not HW state).
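
To see how easily the names collide, here is a contrived example (the struct
names are real, but the function and variable names below are made up purely
for illustration):

	/* Hypothetical: all four kinds of "context" meet in one function */
	static void submit_context(struct i915_gem_context *ctx)
	{
		/* HW context for the render engine */
		struct intel_context *ce = &ctx->engine[RCS];
		/* GuC context descriptor... */
		struct guc_context_desc desc;
		/* ...which embeds one GuC execlist context per engine */
		struct guc_execlist_context *lrc = &desc.lrc[0];
		/* ... */
	}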

This patch tries to avoid this situation by renaming the GuC context (and all
related fields) to something different. Obviously, "smurf" is just an attempt
to spark the discussion, so that we can find the right name ("submitter"?
"state"? "interface"?).

Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio at intel.com>
Signed-off-by: Oscar Mateo <oscar.mateo at intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c        |   4 +-
 drivers/gpu/drm/i915/i915_guc_submission.c | 110 ++++++++++++++++-------------
 drivers/gpu/drm/i915/intel_guc_fwif.h      |  36 +++++-----
 drivers/gpu/drm/i915/intel_guc_loader.c    |   4 +-
 drivers/gpu/drm/i915/intel_uc.h            |   6 +-
 5 files changed, 85 insertions(+), 75 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 655e60d..e6c853f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2495,8 +2495,8 @@ static void i915_guc_client_info(struct seq_file *m,
 	enum intel_engine_id id;
 	uint64_t tot = 0;
 
-	seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n",
-		client->priority, client->ctx_index, client->proc_desc_offset);
+	seq_printf(m, "\tPriority %d, GuC smurf index: %u, PD offset 0x%x\n",
+		client->priority, client->smurf_index, client->proc_desc_offset);
 	seq_printf(m, "\tDoorbell id %d, offset: 0x%x, cookie 0x%x\n",
 		client->doorbell_id, client->doorbell_offset, client->doorbell_cookie);
 	seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index beec88a..f9b5ef9 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -28,16 +28,27 @@
 /**
  * DOC: GuC-based command submission
  *
- * i915_guc_client:
- * We use the term client to avoid confusion with contexts. A i915_guc_client is
- * equivalent to GuC object guc_context_desc. This context descriptor is
- * allocated from a pool of 1024 entries. Kernel driver will allocate doorbell
- * and workqueue for it. Also the process descriptor (guc_process_desc), which
- * is mapped to client space. So the client can write Work Item then ring the
- * doorbell.
+ * GuC client:
+ * An i915_guc_client refers to a unique user of the GuC. Currently, there is
+ * only one of these (the execbuf_client) and this one is charged with all
+ * submissions to the GuC. This struct is the owner of a doorbell, a process
+ * descriptor and a workqueue (all of them inside a single gem object that
+ * contains all required pages for these elements).
  *
- * To simplify the implementation, we allocate one gem object that contains all
- * pages for doorbell, process descriptor and workqueue.
+ * Smurf descriptor:
+ * Technically, this is known as a "GuC Context" descriptor in the specs, but we
+ * use the term smurf to avoid confusion with all the other things already
+ * named "context" in the driver. During initialization, the driver allocates a
+ * static pool of 1024 such descriptors, and shares them with the GuC.
+ * Currently, there exists a 1:1 mapping between an i915_guc_client and a
+ * guc_smurf_desc (via the client's smurf_index), so effectively only
+ * one guc_smurf_desc gets used (since we only have one client at the
+ * moment). This smurf descriptor lets the GuC know about the doorbell and
+ * workqueue. Theoretically, it also lets the GuC know about the LRC, but we
+ * employ a kind of proxy submission where the LRC of the workload to be
+ * submitted is sent via the work item instead (the single guc_smurf_desc
+ * associated with the execbuf_client contains the LRCs of the default kernel context,
+ * but these are essentially unused).
  *
  * The Scratch registers:
  * There are 16 MMIO-based registers start from 0xC180. The kernel driver writes
@@ -52,7 +63,7 @@
  * Doorbells are interrupts to uKernel. A doorbell is a single cache line (QW)
  * mapped into process space.
  *
- * Work Items:
+ * Workqueue:
  * There are several types of work items that the host may place into a
  * workqueue, each with its own requirements and limitations. Currently only
  * WQ_TYPE_INORDER is needed to support legacy submission via GuC, which
@@ -71,7 +82,7 @@ static int guc_allocate_doorbell(struct intel_guc *guc,
 {
 	u32 action[] = {
 		INTEL_GUC_ACTION_ALLOCATE_DOORBELL,
-		client->ctx_index
+		client->smurf_index
 	};
 
 	return intel_guc_send(guc, action, ARRAY_SIZE(action));
@@ -82,7 +93,7 @@ static int guc_release_doorbell(struct intel_guc *guc,
 {
 	u32 action[] = {
 		INTEL_GUC_ACTION_DEALLOCATE_DOORBELL,
-		client->ctx_index
+		client->smurf_index
 	};
 
 	return intel_guc_send(guc, action, ARRAY_SIZE(action));
@@ -99,10 +110,10 @@ static int guc_update_doorbell_id(struct intel_guc *guc,
 				  struct i915_guc_client *client,
 				  u16 new_id)
 {
-	struct sg_table *sg = guc->ctx_pool_vma->pages;
+	struct sg_table *sg = guc->smurf_pool_vma->pages;
 	void *doorbell_bitmap = guc->doorbell_bitmap;
 	struct guc_doorbell_info *doorbell;
-	struct guc_context_desc desc;
+	struct guc_smurf_desc desc;
 	size_t len;
 
 	doorbell = client->vaddr + client->doorbell_offset;
@@ -117,12 +128,12 @@ static int guc_update_doorbell_id(struct intel_guc *guc,
 
 	/* Update the GuC's idea of the doorbell ID */
 	len = sg_pcopy_to_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
-			     sizeof(desc) * client->ctx_index);
+			     sizeof(desc) * client->smurf_index);
 	if (len != sizeof(desc))
 		return -EFAULT;
 	desc.db_id = new_id;
 	len = sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
-			     sizeof(desc) * client->ctx_index);
+			     sizeof(desc) * client->smurf_index);
 	if (len != sizeof(desc))
 		return -EFAULT;
 
@@ -217,7 +228,7 @@ static void guc_proc_desc_init(struct intel_guc *guc,
 	desc->wq_base_addr = 0;
 	desc->db_base_addr = 0;
 
-	desc->context_id = client->ctx_index;
+	desc->smurf_id = client->smurf_index;
 	desc->wq_size_bytes = client->wq_size;
 	desc->wq_status = WQ_STATUS_ACTIVE;
 	desc->priority = client->priority;
@@ -231,21 +242,21 @@ static void guc_proc_desc_init(struct intel_guc *guc,
  * write queue, etc).
  */
 
-static void guc_ctx_desc_init(struct intel_guc *guc,
+static void guc_smurf_desc_init(struct intel_guc *guc,
 			      struct i915_guc_client *client)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 	struct intel_engine_cs *engine;
 	struct i915_gem_context *ctx = client->owner;
-	struct guc_context_desc desc;
+	struct guc_smurf_desc desc;
 	struct sg_table *sg;
 	unsigned int tmp;
 	u32 gfx_addr;
 
 	memset(&desc, 0, sizeof(desc));
 
-	desc.attribute = GUC_CTX_DESC_ATTR_ACTIVE | GUC_CTX_DESC_ATTR_KERNEL;
-	desc.context_id = client->ctx_index;
+	desc.attribute = GUC_SMURF_DESC_ATTR_ACTIVE | GUC_SMURF_DESC_ATTR_KERNEL;
+	desc.smurf_id = client->smurf_index;
 	desc.priority = client->priority;
 	desc.db_id = client->doorbell_id;
 
@@ -267,10 +278,11 @@ static void guc_ctx_desc_init(struct intel_guc *guc,
 		lrc->context_desc = lower_32_bits(ce->lrc_desc);
 
 		/* The state page is after PPHWSP */
-		lrc->ring_lcra =
+		lrc->ring_lrca =
 			guc_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
-		lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
-				(guc_engine_id << GUC_ELC_ENGINE_OFFSET);
+		lrc->smurf_and_engine_id =
+			(client->smurf_index << GUC_ELC_SMURF_ID_OFFSET) |
+			(guc_engine_id << GUC_ELC_ENGINE_OFFSET);
 
 		lrc->ring_begin = guc_ggtt_offset(ce->ring->vma);
 		lrc->ring_end = lrc->ring_begin + ce->ring->size - 1;
@@ -305,22 +317,22 @@ static void guc_ctx_desc_init(struct intel_guc *guc,
 	desc.desc_private = (uintptr_t)client;
 
 	/* Pool context is pinned already */
-	sg = guc->ctx_pool_vma->pages;
+	sg = guc->smurf_pool_vma->pages;
 	sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
-			     sizeof(desc) * client->ctx_index);
+			     sizeof(desc) * client->smurf_index);
 }
 
-static void guc_ctx_desc_fini(struct intel_guc *guc,
+static void guc_smurf_desc_fini(struct intel_guc *guc,
 			      struct i915_guc_client *client)
 {
-	struct guc_context_desc desc;
+	struct guc_smurf_desc desc;
 	struct sg_table *sg;
 
 	memset(&desc, 0, sizeof(desc));
 
-	sg = guc->ctx_pool_vma->pages;
+	sg = guc->smurf_pool_vma->pages;
 	sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
-			     sizeof(desc) * client->ctx_index);
+			     sizeof(desc) * client->smurf_index);
 }
 
 /**
@@ -612,9 +624,9 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
 
 	i915_vma_unpin_and_release(&client->vma);
 
-	if (client->ctx_index != GUC_INVALID_CTX_ID) {
-		guc_ctx_desc_fini(guc, client);
-		ida_simple_remove(&guc->ctx_ids, client->ctx_index);
+	if (client->smurf_index != GUC_INVALID_SMURF_ID) {
+		guc_smurf_desc_fini(guc, client);
+		ida_simple_remove(&guc->smurf_ids, client->smurf_index);
 	}
 
 	kfree(client);
@@ -710,10 +722,10 @@ static void guc_init_doorbell_hw(struct intel_guc *guc)
 	client->priority = priority;
 	client->doorbell_id = GUC_INVALID_DOORBELL_ID;
 
-	client->ctx_index = (uint32_t)ida_simple_get(&guc->ctx_ids, 0,
-			GUC_MAX_GPU_CONTEXTS, GFP_KERNEL);
-	if (client->ctx_index >= GUC_MAX_GPU_CONTEXTS) {
-		client->ctx_index = GUC_INVALID_CTX_ID;
+	client->smurf_index = (uint32_t)ida_simple_get(&guc->smurf_ids, 0,
+			GUC_MAX_GPU_SMURFS, GFP_KERNEL);
+	if (client->smurf_index >= GUC_MAX_GPU_SMURFS) {
+		client->smurf_index = GUC_INVALID_SMURF_ID;
 		goto err;
 	}
 
@@ -753,7 +765,7 @@ static void guc_init_doorbell_hw(struct intel_guc *guc)
 		client->proc_desc_offset = (GUC_DB_SIZE / 2);
 
 	guc_proc_desc_init(guc, client);
-	guc_ctx_desc_init(guc, client);
+	guc_smurf_desc_init(guc, client);
 
 	/* For runtime client allocation we need to enable the doorbell. Not
 	 * required yet for the static execbuf_client as this special kernel
@@ -762,8 +774,8 @@ static void guc_init_doorbell_hw(struct intel_guc *guc)
 	 * guc_update_doorbell_id(guc, client, db_id);
 	 */
 
-	DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: ctx_index %u\n",
-		priority, client, client->engines, client->ctx_index);
+	DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: smurf_index %u\n",
+		priority, client, client->engines, client->smurf_index);
 	DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%x\n",
 		client->doorbell_id, client->doorbell_offset);
 
@@ -873,8 +885,8 @@ static void guc_addon_create(struct intel_guc *guc)
  */
 int i915_guc_submission_init(struct drm_i915_private *dev_priv)
 {
-	const size_t ctxsize = sizeof(struct guc_context_desc);
-	const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize;
+	const size_t ctxsize = sizeof(struct guc_smurf_desc);
+	const size_t poolsize = GUC_MAX_GPU_SMURFS * ctxsize;
 	const size_t gemsize = round_up(poolsize, PAGE_SIZE);
 	struct intel_guc *guc = &dev_priv->guc;
 	struct i915_vma *vma;
@@ -889,15 +901,15 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv)
 	if (!i915.enable_guc_submission)
 		return 0; /* not enabled  */
 
-	if (guc->ctx_pool_vma)
+	if (guc->smurf_pool_vma)
 		return 0; /* already allocated */
 
 	vma = intel_guc_allocate_vma(guc, gemsize);
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);
 
-	guc->ctx_pool_vma = vma;
-	ida_init(&guc->ctx_ids);
+	guc->smurf_pool_vma = vma;
+	ida_init(&guc->smurf_ids);
 	intel_guc_log_create(guc);
 	guc_addon_create(guc);
 
@@ -985,9 +997,9 @@ void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
 	i915_vma_unpin_and_release(&guc->ads_vma);
 	i915_vma_unpin_and_release(&guc->log.vma);
 
-	if (guc->ctx_pool_vma)
-		ida_destroy(&guc->ctx_ids);
-	i915_vma_unpin_and_release(&guc->ctx_pool_vma);
+	if (guc->smurf_pool_vma)
+		ida_destroy(&guc->smurf_ids);
+	i915_vma_unpin_and_release(&guc->smurf_pool_vma);
 }
 
 /**
@@ -1042,5 +1054,3 @@ int intel_guc_resume(struct drm_i915_private *dev_priv)
 
 	return intel_guc_send(guc, data, ARRAY_SIZE(data));
 }
-
-
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 25691f0..e2a8a7b 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -32,8 +32,8 @@
 #define GUC_CTX_PRIORITY_NORMAL		3
 #define GUC_CTX_PRIORITY_NUM		4
 
-#define GUC_MAX_GPU_CONTEXTS		1024
-#define	GUC_INVALID_CTX_ID		GUC_MAX_GPU_CONTEXTS
+#define GUC_MAX_GPU_SMURFS		1024
+#define GUC_INVALID_SMURF_ID		GUC_MAX_GPU_SMURFS
 
 #define GUC_RENDER_ENGINE		0
 #define GUC_VIDEO_ENGINE		1
@@ -68,14 +68,14 @@
 #define GUC_DOORBELL_ENABLED		1
 #define GUC_DOORBELL_DISABLED		0
 
-#define GUC_CTX_DESC_ATTR_ACTIVE	(1 << 0)
-#define GUC_CTX_DESC_ATTR_PENDING_DB	(1 << 1)
-#define GUC_CTX_DESC_ATTR_KERNEL	(1 << 2)
-#define GUC_CTX_DESC_ATTR_PREEMPT	(1 << 3)
-#define GUC_CTX_DESC_ATTR_RESET		(1 << 4)
-#define GUC_CTX_DESC_ATTR_WQLOCKED	(1 << 5)
-#define GUC_CTX_DESC_ATTR_PCH		(1 << 6)
-#define GUC_CTX_DESC_ATTR_TERMINATED	(1 << 7)
+#define GUC_SMURF_DESC_ATTR_ACTIVE	(1 << 0)
+#define GUC_SMURF_DESC_ATTR_PENDING_DB	(1 << 1)
+#define GUC_SMURF_DESC_ATTR_KERNEL	(1 << 2)
+#define GUC_SMURF_DESC_ATTR_PREEMPT	(1 << 3)
+#define GUC_SMURF_DESC_ATTR_RESET	(1 << 4)
+#define GUC_SMURF_DESC_ATTR_WQLOCKED	(1 << 5)
+#define GUC_SMURF_DESC_ATTR_PCH		(1 << 6)
+#define GUC_SMURF_DESC_ATTR_TERMINATED	(1 << 7)
 
 /* The guc control data is 10 DWORDs */
 #define GUC_CTL_CTXINFO			0
@@ -256,7 +256,7 @@ struct guc_wq_item {
 } __packed;
 
 struct guc_process_desc {
-	u32 context_id;
+	u32 smurf_id;
 	u64 db_base_addr;
 	u32 head;
 	u32 tail;
@@ -270,15 +270,15 @@ struct guc_process_desc {
 } __packed;
 
-/* engine id and context id is packed into guc_execlist_context.context_id*/
+/* engine id and smurf id are packed into guc_execlist_context.smurf_and_engine_id */
-#define GUC_ELC_CTXID_OFFSET		0
+#define GUC_ELC_SMURF_ID_OFFSET	0
 #define GUC_ELC_ENGINE_OFFSET		29
 
 /* The execlist context including software and HW information */
 struct guc_execlist_context {
 	u32 context_desc;
-	u32 context_id;
+	u32 smurf_and_engine_id;
 	u32 ring_status;
-	u32 ring_lcra;
+	u32 ring_lrca;
 	u32 ring_begin;
 	u32 ring_end;
 	u32 ring_next_free_location;
@@ -289,10 +289,10 @@ struct guc_execlist_context {
 	u16 engine_submit_queue_count;
 } __packed;
 
-/*Context descriptor for communicating between uKernel and Driver*/
-struct guc_context_desc {
+/* Smurf descriptor for communicating between uKernel and Driver */
+struct guc_smurf_desc {
 	u32 sched_common_area;
-	u32 context_id;
+	u32 smurf_id;
 	u32 pas_id;
 	u8 engines_used;
 	u64 db_trigger_cpu;
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 9885f76..e0eee7f 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -221,8 +221,8 @@ static void guc_params_init(struct drm_i915_private *dev_priv)
 
 	/* If GuC submission is enabled, set up additional parameters here */
 	if (i915.enable_guc_submission) {
-		u32 pgs = guc_ggtt_offset(dev_priv->guc.ctx_pool_vma);
-		u32 ctx_in_16 = GUC_MAX_GPU_CONTEXTS / 16;
+		u32 pgs = guc_ggtt_offset(dev_priv->guc.smurf_pool_vma);
+		u32 ctx_in_16 = GUC_MAX_GPU_SMURFS / 16;
 
 		pgs >>= PAGE_SHIFT;
 		params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) |
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index d74f4d3..0306b11 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -72,7 +72,7 @@ struct i915_guc_client {
 
 	uint32_t engines;		/* bitmap of (host) engine ids	*/
 	uint32_t priority;
-	uint32_t ctx_index;
+	uint32_t smurf_index;
 	uint32_t proc_desc_offset;
 
 	uint32_t doorbell_offset;
@@ -154,8 +154,8 @@ struct intel_guc {
 	bool interrupts_enabled;
 
 	struct i915_vma *ads_vma;
-	struct i915_vma *ctx_pool_vma;
-	struct ida ctx_ids;
+	struct i915_vma *smurf_pool_vma;
+	struct ida smurf_ids;
 
 	struct i915_guc_client *execbuf_client;
 
-- 
1.9.1