[Intel-gfx] [PATCH 22/30] drm/i915/guc: New GuC workqueue item submission mechanism
Michal Wajdeczko
michal.wajdeczko at intel.com
Fri Mar 29 22:11:10 UTC 2019
Work queue item definitions were updated. To simplify the
scheduling logic in the GuC firmware, only the out-of-order
scheduling mode is now supported.
Credits-to: Michel Thierry <michel.thierry at intel.com>
Signed-off-by: Michal Wajdeczko <michal.wajdeczko at intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio at intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen at linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
Cc: Michał Winiarski <michal.winiarski at intel.com>
Cc: Tomasz Lis <tomasz.lis at intel.com>
---
drivers/gpu/drm/i915/intel_guc_fwif.h | 18 +++++++++----
drivers/gpu/drm/i915/intel_guc_submission.c | 29 ++++++++++++++-------
drivers/gpu/drm/i915/selftests/intel_guc.c | 2 +-
3 files changed, 34 insertions(+), 15 deletions(-)
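
For background, a KMD work queue item is the four-dword structure the driver
writes into the GuC work queue: header, context descriptor, submit element
info and fence id (struct guc_wq_item in intel_guc_fwif.h). Below is a minimal
sketch of the new header packing, using the definitions this patch adds; the
helper name wq_kmd_item_header() is invented for illustration and is not part
of the patch:

  #include <linux/types.h>

  #define WQ_TYPE_KMD     (0x3 << 0)  /* WQ_TYPE_SHIFT == 0 */
  #define WQ_TARGET_SHIFT 8
  #define WQ_LEN_SHIFT    16

  /* The four-dword work queue item, mirroring struct guc_wq_item. */
  struct sketch_wq_item {
          u32 header;
          u32 context_desc;
          u32 submit_element_info;
          u32 fence_id;
  } __packed;

  /*
   * Pack the header dword of a KMD item: type in the low bits (bits 0-7,
   * implied by WQ_TARGET_SHIFT == 8), target engine at bit 8, and item
   * length at bit 16 (guc_wq_item_append() passes the length in dwords,
   * minus one).
   */
  static u32 wq_kmd_item_header(u32 target_engine, u32 wqi_len)
  {
          return WQ_TYPE_KMD |
                 (target_engine << WQ_TARGET_SHIFT) |
                 (wqi_len << WQ_LEN_SHIFT);
  }
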
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index a900680ff5a4..004d3d882f6f 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -49,22 +49,30 @@
#define GUC_DB_SIZE (PAGE_SIZE)
#define GUC_WQ_SIZE (PAGE_SIZE * 2)
-/* Work queue item header definitions */
+/* Work queue status */
#define WQ_STATUS_ACTIVE 1
#define WQ_STATUS_SUSPENDED 2
#define WQ_STATUS_CMD_ERROR 3
#define WQ_STATUS_ENGINE_ID_NOT_USED 4
#define WQ_STATUS_SUSPENDED_FROM_RESET 5
+#define WQ_STATUS_INVALID 6
+
+/* Work queue item header definitions */
#define WQ_TYPE_SHIFT 0
#define WQ_TYPE_BATCH_BUF (0x1 << WQ_TYPE_SHIFT)
#define WQ_TYPE_PSEUDO (0x2 << WQ_TYPE_SHIFT)
-#define WQ_TYPE_INORDER (0x3 << WQ_TYPE_SHIFT)
+#define WQ_TYPE_KMD (0x3 << WQ_TYPE_SHIFT)
#define WQ_TYPE_NOOP (0x4 << WQ_TYPE_SHIFT)
-#define WQ_TARGET_SHIFT 10
+#define WQ_TYPE_RESUME (0x5 << WQ_TYPE_SHIFT)
+#define WQ_TYPE_INVALID (0x6 << WQ_TYPE_SHIFT)
+#define WQ_TARGET_SHIFT 8
#define WQ_LEN_SHIFT 16
-#define WQ_NO_WCFLUSH_WAIT (1 << 27)
-#define WQ_PRESENT_WORKLOAD (1 << 28)
+#define WQ_LEN_MASK (0x7FF << WQ_LEN_SHIFT)
+/* Work queue item submit element info definitions */
+#define WQ_SW_CTX_INDEX_SHIFT 0
+#define WQ_SW_COUNTER_SHIFT 11
+#define WQ_RING_TAIL_INDEX_SHIFT 18
#define WQ_RING_TAIL_SHIFT 20
#define WQ_RING_TAIL_MAX 0x7FF /* 2^11 QWords */
#define WQ_RING_TAIL_MASK (WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT)
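
The submit element shifts above imply the following field widths, inferred
from the shift values (the patch defines no explicit masks for the first two
fields): bits 0-10 carry the software context index, bits 11-17 the software
counter, and bits 18-28 the ring tail index in qwords (WQ_RING_TAIL_MAX is
0x7FF). A sketch of the packing that guc_wq_item_append() performs in the
next file; the helper name is invented:

  #include <linux/types.h>

  #define WQ_SW_CTX_INDEX_SHIFT    0   /* bits 0-10:  sw context index */
  #define WQ_SW_COUNTER_SHIFT      11  /* bits 11-17: sw counter */
  #define WQ_RING_TAIL_INDEX_SHIFT 18  /* bits 18-28: ring tail, in qwords */

  /* 11 + 7 + 11 = 29 bits used; bits 29-31 stay zero. */
  static u32 wq_submit_element_info(u32 sw_context_id, u32 sw_counter,
                                    u32 ring_tail)
  {
          return (sw_context_id << WQ_SW_CTX_INDEX_SHIFT) |
                 (sw_counter << WQ_SW_COUNTER_SHIFT) |
                 (ring_tail << WQ_RING_TAIL_INDEX_SHIFT);
  }
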
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 3104f091abc5..1a27352b73a9 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -82,9 +82,14 @@
* Work Items:
* There are several types of work items that the host may place into a
* workqueue, each with its own requirements and limitations. Currently only
- * WQ_TYPE_INORDER is needed to support legacy submission via GuC, which
- * represents in-order queue. The kernel driver packs ring tail pointer and an
- * ELSP context descriptor dword into Work Item.
+ * out-of-order WQ_TYPE_KMD is supported by the firmware.
+ * In-order execution is instead guaranteed by the execlists emulation, which
+ * uses two "fake" ports when the scheduler submits to the GuC backend. If we
+ * are submitting requests for context A on the first port and we place a
+ * request for context B on the second port, we won't submit more requests
+ * for A until all the pending ones complete. Take this into account before
+ * changing the execlists emulation model (e.g. increasing the number of fake
+ * ports could cause requests to execute in the wrong global seqno order).
* See guc_add_request()
*
*/
@@ -424,6 +429,7 @@ static void guc_proxy_stage_desc_fini(struct intel_guc_client *client)
/* Construct a Work Item and append it to the GuC's Work Queue */
static void guc_wq_item_append(struct intel_guc_client *client,
+ struct intel_context *ce,
u32 target_engine, u32 context_desc,
u32 ring_tail, u32 fence_id)
{
@@ -461,12 +467,15 @@ static void guc_wq_item_append(struct intel_guc_client *client,
wqi->header = WQ_TYPE_NOOP | (wqi_len << WQ_LEN_SHIFT);
} else {
/* Now fill in the 4-word work queue item */
- wqi->header = WQ_TYPE_INORDER |
- (wqi_len << WQ_LEN_SHIFT) |
+ wqi->header = WQ_TYPE_KMD |
(target_engine << WQ_TARGET_SHIFT) |
- WQ_NO_WCFLUSH_WAIT;
+ (wqi_len << WQ_LEN_SHIFT);
wqi->context_desc = context_desc;
- wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
+
+ wqi->submit_element_info =
+ (ce->sw_context_id << WQ_SW_CTX_INDEX_SHIFT) |
+ (ce->sw_counter << WQ_SW_COUNTER_SHIFT) |
+ (ring_tail << WQ_RING_TAIL_INDEX_SHIFT);
GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
wqi->fence_id = fence_id;
}
@@ -500,12 +509,13 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
{
struct intel_guc_client *client = guc->execbuf_client;
struct intel_engine_cs *engine = rq->engine;
+ struct intel_context *ce = rq->hw_context;
u32 ctx_desc = lower_32_bits(rq->hw_context->lrc_desc);
u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
spin_lock(&client->wq_lock);
- guc_wq_item_append(client, engine->guc_id, ctx_desc,
+ guc_wq_item_append(client, ce, engine->guc_id, ctx_desc,
ring_tail, rq->fence.seqno);
guc_ring_doorbell(client);
@@ -569,7 +579,8 @@ static void inject_preempt_context(struct work_struct *work)
}
spin_lock_irq(&client->wq_lock);
- guc_wq_item_append(client, engine->guc_id, lower_32_bits(ce->lrc_desc),
+ guc_wq_item_append(client, ce, engine->guc_id,
+ lower_32_bits(ce->lrc_desc),
GUC_PREEMPT_BREADCRUMB_BYTES / sizeof(u64), 0);
spin_unlock_irq(&client->wq_lock);
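
To illustrate the in-order guarantee described in the Work Items comment
above, here is a hypothetical model of the two-"fake"-port rule. struct
fake_port and can_coalesce() are invented for this sketch and are not i915
code; the real logic is the execlists emulation in intel_guc_submission.c:

  #include <linux/types.h>

  struct intel_context;                   /* opaque for this sketch */

  struct fake_port {
          const struct intel_context *ce; /* context occupying the port */
  };

  /*
   * A request can be submitted immediately only if a port is free or
   * already running the request's context. Once port 0 runs context A
   * and port 1 runs context B, a further request for A must wait for the
   * ports to drain, so requests cannot execute out of global seqno order.
   */
  static bool can_coalesce(const struct fake_port port[2],
                           const struct intel_context *ce)
  {
          if (!port[0].ce || port[0].ce == ce)
                  return true;    /* port 0 free, or same context */
          if (!port[1].ce || port[1].ce == ce)
                  return true;    /* port 1 free, or same context */
          return false;           /* both ports busy with other contexts */
  }
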
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c
index 0102ee90355c..4f9adb2dccda 100644
--- a/drivers/gpu/drm/i915/selftests/intel_guc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_guc.c
@@ -74,7 +74,7 @@ static int ring_doorbell_nop(struct intel_guc_client *client)
spin_lock_irq(&client->wq_lock);
- guc_wq_item_append(client, 0, 0, 0, 0);
+ guc_wq_item_append(client, NULL, 0, 0, 0, 0);
guc_ring_doorbell(client);
spin_unlock_irq(&client->wq_lock);
--
2.19.2