[PATCH 5/5] ringbuffer-scheduler
Chris Wilson
chris@chris-wilson.co.uk
Sat May 30 20:00:32 UTC 2020
---
drivers/gpu/drm/i915/Makefile | 1 +
drivers/gpu/drm/i915/gt/gen6_engine_cs.c | 21 +-
drivers/gpu/drm/i915/gt/intel_engine.h | 1 +
drivers/gpu/drm/i915/gt/intel_engine_cs.c | 2 +
drivers/gpu/drm/i915/gt/intel_engine_types.h | 1 +
.../gpu/drm/i915/gt/intel_ring_scheduler.c | 1160 +++++++++++++++++
.../gpu/drm/i915/gt/intel_ring_submission.c | 10 +-
.../gpu/drm/i915/gt/intel_ring_submission.h | 14 +
drivers/gpu/drm/i915/i915_request.c | 29 +-
9 files changed, 1212 insertions(+), 27 deletions(-)
create mode 100644 drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
create mode 100644 drivers/gpu/drm/i915/gt/intel_ring_submission.h
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 41a27fd5dbc7..6d98a74da41e 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -109,6 +109,7 @@ gt-y += \
gt/intel_renderstate.o \
gt/intel_reset.o \
gt/intel_ring.o \
+ gt/intel_ring_scheduler.o \
gt/intel_ring_submission.o \
gt/intel_rps.o \
gt/intel_sseu.o \
diff --git a/drivers/gpu/drm/i915/gt/gen6_engine_cs.c b/drivers/gpu/drm/i915/gt/gen6_engine_cs.c
index 064733b4305c..bf76921bc402 100644
--- a/drivers/gpu/drm/i915/gt/gen6_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen6_engine_cs.c
@@ -368,11 +368,10 @@ u32 *gen7_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
u32 *gen6_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
{
- GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
- GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
+ u32 addr = i915_request_active_timeline(rq)->hwsp_offset;
- *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
- *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
+ *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW;
+ *cs++ = addr | MI_FLUSH_DW_USE_GTT;
*cs++ = rq->fence.seqno;
*cs++ = MI_USER_INTERRUPT;
@@ -386,19 +385,17 @@ u32 *gen6_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
#define GEN7_XCS_WA 32
u32 *gen7_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
{
+ u32 addr = i915_request_active_timeline(rq)->hwsp_offset;
int i;
- GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
- GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
-
- *cs++ = MI_FLUSH_DW | MI_INVALIDATE_TLB |
- MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
- *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
+ *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW;
+ *cs++ = addr | MI_FLUSH_DW_USE_GTT;
*cs++ = rq->fence.seqno;
for (i = 0; i < GEN7_XCS_WA; i++) {
- *cs++ = MI_STORE_DWORD_INDEX;
- *cs++ = I915_GEM_HWS_SEQNO_ADDR;
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = 0;
+ *cs++ = addr;
*cs++ = rq->fence.seqno;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 043462b6ce1f..08176117757e 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -209,6 +209,7 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine);
int intel_engine_resume(struct intel_engine_cs *engine);
int intel_ring_submission_setup(struct intel_engine_cs *engine);
+int intel_ring_scheduler_setup(struct intel_engine_cs *engine);
int intel_engine_stop_cs(struct intel_engine_cs *engine);
void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 529fd9f3e64b..5747c5a123b3 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -791,6 +791,8 @@ int intel_engines_init(struct intel_gt *gt)
if (HAS_EXECLISTS(gt->i915))
setup = intel_execlists_submission_setup;
+ else if (INTEL_GEN(gt->i915) >= 6)
+ setup = intel_ring_scheduler_setup;
else
setup = intel_ring_submission_setup;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 2b6cdf47d428..3782e27c2945 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -348,6 +348,7 @@ struct intel_engine_cs {
struct {
struct intel_ring *ring;
struct intel_timeline *timeline;
+ struct intel_context *context;
} legacy;
/*
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c b/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
new file mode 100644
index 000000000000..bb5832375ed2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
@@ -0,0 +1,1160 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/log2.h>
+
+#include <drm/i915_drm.h>
+
+#include "gen2_engine_cs.h"
+#include "gen6_engine_cs.h"
+#include "gen6_ppgtt.h"
+#include "gen7_renderclear.h"
+#include "i915_drv.h"
+#include "intel_context.h"
+#include "intel_gt.h"
+#include "intel_gt_requests.h"
+#include "intel_reset.h"
+#include "intel_ring.h"
+#include "intel_ring_submission.h"
+#include "shmem_utils.h"
+
+/* Rough estimate of the typical request size, performing a flush,
+ * set-context and then emitting the batch.
+ */
+#define LEGACY_REQUEST_SIZE 200
+
+static inline int rq_prio(const struct i915_request *rq)
+{
+ return rq->sched.attr.priority;
+}
+
+static inline struct i915_priolist *to_priolist(struct rb_node *rb)
+{
+ return rb_entry(rb, struct i915_priolist, node);
+}
+
+static inline bool reset_in_progress(const struct intel_engine_execlists *el)
+{
+ return unlikely(!__tasklet_is_enabled(&el->tasklet));
+}
+
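+/*
+ * Track the context currently loaded on the engine: take a pin and
+ * reference on the incoming context, then release the old one.
+ */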
+static void
+set_current_context(struct intel_context **ptr, struct intel_context *ce)
+{
+ if (ce) {
+ intel_context_get(ce);
+ __intel_context_pin(ce);
+ }
+
+ ce = xchg(ptr, ce);
+
+ if (ce) {
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+ }
+}
+
+static void reset_prepare(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const el = &engine->execlists;
+ struct intel_uncore *uncore = engine->uncore;
+ const u32 base = engine->mmio_base;
+ unsigned long flags;
+
+ /*
+ * We stop the engines, otherwise we might get a failed reset and a
+ * dead gpu (on elk). Even a modern gpu such as kbl can suffer a
+ * system hang if a batchbuffer is in progress when the reset is
+ * issued, regardless of the READY_TO_RESET ack. Thus assume it is
+ * best to stop the engines on all gens where we have a gpu reset.
+ *
+ * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
+ *
+ * WaMediaResetMainRingCleanup:ctg,elk (presumably)
+ *
+ * FIXME: Wa for more modern gens needs to be validated
+ */
+ GEM_TRACE("%s\n", engine->name);
+
+ __tasklet_disable_sync_once(&el->tasklet);
+ GEM_BUG_ON(!reset_in_progress(el));
+
+ /* And flush any current direct submission. */
+ spin_lock_irqsave(&engine->active.lock, flags);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+
+ if (intel_engine_stop_cs(engine))
+ GEM_TRACE("%s: timed out on STOP_RING\n", engine->name);
+
+ intel_uncore_write_fw(uncore,
+ RING_HEAD(base),
+ intel_uncore_read_fw(uncore, RING_TAIL(base)));
+ intel_uncore_posting_read_fw(uncore, RING_HEAD(base)); /* paranoia */
+
+ intel_uncore_write_fw(uncore, RING_HEAD(base), 0);
+ intel_uncore_write_fw(uncore, RING_TAIL(base), 0);
+ intel_uncore_posting_read_fw(uncore, RING_TAIL(base));
+
+ /* The ring must be empty before it is disabled */
+ intel_uncore_write_fw(uncore, RING_CTL(base), 0);
+
+ /* Check acts as a post */
+ if (intel_uncore_read_fw(uncore, RING_HEAD(base)))
+ GEM_TRACE("%s: ring head [%x] not parked\n",
+ engine->name,
+ intel_uncore_read_fw(uncore, RING_HEAD(base)));
+}
+
+static struct i915_request *
+__unwind_incomplete_requests(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq, *rn, *active = NULL;
+ struct list_head *uninitialized_var(pl);
+ int prio = I915_PRIORITY_INVALID;
+
+ lockdep_assert_held(&engine->active.lock);
+
+ list_for_each_entry_safe_reverse(rq, rn,
+ &engine->active.requests,
+ sched.link) {
+ if (i915_request_completed(rq))
+ break;
+
+ __i915_request_unsubmit(rq);
+
+ GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
+ if (rq_prio(rq) != prio) {
+ prio = rq_prio(rq);
+ pl = i915_sched_lookup_priolist(engine, prio);
+ }
+ GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+
+ list_move(&rq->sched.link, pl);
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+
+ active = rq;
+ }
+
+ return active;
+}
+
+static inline void clear_ports(struct i915_request **ports, int count)
+{
+ memset_p((void **)ports, NULL, count);
+}
+
+static void cancel_port_requests(struct intel_engine_execlists * const el)
+{
+ struct i915_request * const *port;
+
+ clear_ports(el->pending, ARRAY_SIZE(el->pending));
+ for (port = xchg(&el->active, el->pending); *port; port++)
+ i915_request_put(*port);
+ clear_ports(el->inflight, ARRAY_SIZE(el->inflight));
+
+ smp_wmb(); /* complete the seqlock for execlists_active() */
+ WRITE_ONCE(el->active, el->inflight);
+}
+
+static void __ring_rewind(struct intel_engine_cs *engine, bool stalled)
+{
+ struct i915_request *rq;
+
+ rq = __unwind_incomplete_requests(engine);
+ if (rq && i915_request_started(rq))
+ __i915_request_reset(rq, stalled);
+
+ cancel_port_requests(&engine->execlists);
+
+ /* Clear the global submission state, we will submit from scratch */
+ intel_ring_reset(engine->legacy.ring, 0);
+ set_current_context(&engine->legacy.context, NULL);
+}
+
+static void ring_reset_rewind(struct intel_engine_cs *engine, bool stalled)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->active.lock, flags);
+ __ring_rewind(engine, stalled);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+}
+
+static void ring_reset_cancel(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const el = &engine->execlists;
+ struct i915_request *rq, *rn;
+ unsigned long flags;
+ struct rb_node *rb;
+
+ spin_lock_irqsave(&engine->active.lock, flags);
+
+ __ring_rewind(engine, true);
+
+ /* Mark all submitted requests as skipped. */
+ list_for_each_entry(rq, &engine->active.requests, sched.link) {
+ i915_request_set_error_once(rq, -EIO);
+ i915_request_mark_complete(rq);
+ }
+
+ /* Flush the queued requests to the timeline list (for retiring). */
+ while ((rb = rb_first_cached(&el->queue))) {
+ struct i915_priolist *p = to_priolist(rb);
+ int i;
+
+ priolist_for_each_request_consume(rq, rn, p, i) {
+ i915_request_set_error_once(rq, -EIO);
+ i915_request_mark_complete(rq);
+ __i915_request_submit(rq);
+ }
+
+ rb_erase_cached(&p->node, &el->queue);
+ i915_priolist_free(p);
+ }
+
+ el->queue_priority_hint = INT_MIN;
+ el->queue = RB_ROOT_CACHED;
+
+ /* Remaining _unready_ requests will be nop'ed when submitted */
+
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+}
+
+static void reset_finish(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const el = &engine->execlists;
+
+ if (__tasklet_enable(&el->tasklet))
+ tasklet_hi_schedule(&el->tasklet);
+}
+
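+/*
+ * Reserve space in the global ring, padding the unused tail with zeroes
+ * (MI_NOOP) when we have to wrap back to the start.
+ */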
+static u32 *ring_map(struct intel_ring *ring, u32 len)
+{
+ u32 *va;
+
+ if (ring->tail + len > ring->effective_size) {
+ memset(ring->vaddr + ring->tail, 0, ring->size - ring->tail);
+ ring->tail = 0;
+ }
+
+ va = ring->vaddr + ring->tail;
+ ring->tail = intel_ring_wrap(ring, ring->tail + len);
+
+ return va;
+}
+
+static u32 *ring_map_dw(struct intel_ring *ring, u32 len)
+{
+ return ring_map(ring, len * sizeof(u32));
+}
+
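+/*
+ * Copy the commands recorded in the request's ring [start, end) into the
+ * global ring, taking care of a wrap in the source ring.
+ */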
+static void ring_copy(struct intel_ring *dst,
+ const struct intel_ring *src,
+ u32 start, u32 end)
+{
+ unsigned int len;
+ void *out;
+
+ len = end - start;
+ if (end < start)
+ len += src->size;
+ out = ring_map(dst, len);
+
+ if (end < start) {
+ len = src->size - start;
+ memcpy(out, src->vaddr + start, len);
+ out += len;
+ start = 0;
+ }
+
+ memcpy(out, src->vaddr + start, end - start);
+}
+
+static void mi_set_context(struct intel_ring *ring,
+ struct intel_engine_cs *engine,
+ struct intel_context *ce,
+ u32 flags)
+{
+ struct drm_i915_private *i915 = engine->i915;
+ enum intel_engine_id id;
+ const int num_engines =
+ IS_HASWELL(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0;
+ int len;
+ u32 *cs;
+
+ len = 4;
+ if (IS_GEN(i915, 7))
+ len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
+ else if (IS_GEN(i915, 5))
+ len += 2;
+
+ cs = ring_map_dw(ring, len);
+
+ /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
+ if (IS_GEN(i915, 7)) {
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ if (num_engines) {
+ struct intel_engine_cs *signaller;
+
+ *cs++ = MI_LOAD_REGISTER_IMM(num_engines);
+ for_each_engine(signaller, engine->gt, id) {
+ if (signaller == engine)
+ continue;
+
+ *cs++ = i915_mmio_reg_offset(
+ RING_PSMI_CTL(signaller->mmio_base));
+ *cs++ = _MASKED_BIT_ENABLE(
+ GEN6_PSMI_SLEEP_MSG_DISABLE);
+ }
+ }
+ } else if (IS_GEN(i915, 5)) {
+ /*
+ * This w/a is only listed for pre-production ilk a/b steppings,
+ * but is also mentioned for programming the powerctx. To be
+ * safe, just apply the workaround; we do not use SyncFlush so
+ * this should never take effect and so be a no-op!
+ */
+ *cs++ = MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN;
+ }
+
+ *cs++ = MI_NOOP;
+ *cs++ = MI_SET_CONTEXT;
+ *cs++ = i915_ggtt_offset(ce->state) | flags;
+ /*
+ * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
+ * WaMiSetContext_Hang:snb,ivb,vlv
+ */
+ *cs++ = MI_NOOP;
+
+ if (IS_GEN(i915, 7)) {
+ if (num_engines) {
+ struct intel_engine_cs *signaller;
+ i915_reg_t last_reg = {}; /* keep gcc quiet */
+
+ *cs++ = MI_LOAD_REGISTER_IMM(num_engines);
+ for_each_engine(signaller, engine->gt, id) {
+ if (signaller == engine)
+ continue;
+
+ last_reg = RING_PSMI_CTL(signaller->mmio_base);
+ *cs++ = i915_mmio_reg_offset(last_reg);
+ *cs++ = _MASKED_BIT_DISABLE(
+ GEN6_PSMI_SLEEP_MSG_DISABLE);
+ }
+
+ /* Insert a delay before the next switch! */
+ *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+ *cs++ = i915_mmio_reg_offset(last_reg);
+ *cs++ = intel_gt_scratch_offset(engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT);
+ *cs++ = MI_NOOP;
+ }
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ } else if (IS_GEN(i915, 5)) {
+ *cs++ = MI_SUSPEND_FLUSH;
+ }
+}
+
+static struct i915_address_space *vm_alias(struct i915_address_space *vm)
+{
+ if (i915_is_ggtt(vm))
+ vm = &i915_vm_to_ggtt(vm)->alias->vm;
+
+ return vm;
+}
+
+static void load_pd_dir(struct intel_ring *ring,
+ struct intel_engine_cs *engine,
+ const struct i915_ppgtt *ppgtt)
+{
+ u32 *cs = ring_map_dw(ring, 12);
+
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base));
+ *cs++ = PP_DIR_DCLV_2G;
+
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
+ *cs++ = px_base(ppgtt->pd)->ggtt_offset << 10;
+
+ /* Stall until the page table load is complete? */
+ *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+ *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
+ *cs++ = intel_gt_scratch_offset(engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT);
+
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(RING_INSTPM(engine->mmio_base));
+ *cs++ = _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE);
+}
+
+static struct i915_address_space *current_vm(struct intel_engine_cs *engine)
+{
+ return engine->legacy.context ? vm_alias(engine->legacy.context->vm) : NULL;
+}
+
+static void emit_invalidate_rcs(struct intel_ring *ring,
+ struct intel_engine_cs *engine)
+{
+ u32 addr, flags;
+ u32 *cs;
+
+ addr = intel_gt_scratch_offset(engine->gt,
+ INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
+
+ flags = PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+
+ if (INTEL_GEN(engine->i915) >= 8)
+ flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+ else
+ addr |= PIPE_CONTROL_GLOBAL_GTT;
+
+ cs = ring_map_dw(ring, 4);
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = flags;
+ *cs++ = addr;
+ *cs++ = 0;
+}
+
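+/*
+ * Switch through the kernel context and run the w/a batch to scrub the
+ * residual state left behind by the previous user context.
+ */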
+static struct i915_address_space *
+clear_residuals(struct intel_ring *ring, struct intel_engine_cs *engine)
+{
+ struct intel_context *ce = engine->kernel_context;
+ struct i915_address_space *vm = vm_alias(ce->vm);
+
+ if (vm != current_vm(engine))
+ load_pd_dir(ring, engine, i915_vm_to_ppgtt(vm));
+
+ if (ce->state)
+ mi_set_context(ring, engine, ce,
+ MI_MM_SPACE_GTT | MI_RESTORE_INHIBIT);
+
+ /* Not MI_BATCH_NON_SECURE_I965: the w/a batch is installed only on Haswell */
+ __gen6_emit_bb_start(ring_map_dw(ring, 2),
+ engine->wa_ctx.vma->node.start,
+ MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW);
+
+ emit_invalidate_rcs(ring, engine);
+
+ return vm;
+}
+
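+/*
+ * Emit everything required to switch to the new request's context: the
+ * optional residuals clear, a page-directory load if the ppGTT differs,
+ * and MI_SET_CONTEXT for the logical state.
+ */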
+static void switch_context(struct intel_ring *ring, struct i915_request *rq)
+{
+ struct intel_engine_cs *engine = rq->engine;
+ struct i915_address_space *cvm = current_vm(engine);
+ struct intel_context *ce = rq->context;
+ struct i915_address_space *vm;
+
+ if (engine->wa_ctx.vma && ce != engine->kernel_context) {
+ if (engine->wa_ctx.vma->private != ce) {
+ cvm = clear_residuals(ring, engine);
+ intel_context_put(engine->wa_ctx.vma->private);
+ engine->wa_ctx.vma->private = intel_context_get(ce);
+ }
+ }
+
+ vm = vm_alias(ce->vm);
+ if (vm != cvm)
+ load_pd_dir(ring, engine, i915_vm_to_ppgtt(vm));
+
+ if (ce->state) {
+ u32 flags;
+
+ GEM_BUG_ON(engine->id != RCS0);
+
+ /* For resource streamer on HSW+ and power context elsewhere */
+ BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN);
+ BUILD_BUG_ON(HSW_MI_RS_RESTORE_STATE_EN != MI_RESTORE_EXT_STATE_EN);
+
+ flags = MI_SAVE_EXT_STATE_EN | MI_MM_SPACE_GTT;
+ if (test_bit(CONTEXT_VALID_BIT, &ce->flags)) {
+ emit_invalidate_rcs(ring, engine);
+ flags |= MI_RESTORE_EXT_STATE_EN;
+ } else {
+ flags |= MI_RESTORE_INHIBIT;
+ }
+
+ mi_set_context(ring, engine, ce, flags);
+ }
+}
+
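+/*
+ * Submit a single request into the global ring, emitting a context switch
+ * first if the engine last ran a different context.
+ */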
+static bool ring_submit(struct i915_request *rq)
+{
+ struct intel_ring *ring = rq->engine->legacy.ring;
+
+ if (!__i915_request_submit(rq))
+ return false;
+
+ if (rq->engine->legacy.context != rq->context) {
+ switch_context(ring, rq);
+ set_current_context(&rq->engine->legacy.context, rq->context);
+ }
+
+ ring_copy(ring, rq->ring, rq->head, rq->tail);
+ return true;
+}
+
+static void bsd_write_tail(struct intel_engine_cs *engine, u32 tail)
+{
+ struct intel_uncore *uncore = engine->uncore;
+
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+
+ /*
+ * Disable notification that the ring is IDLE. The GT
+ * will then assume that it is busy and bring it out of rc6.
+ */
+ intel_uncore_write_fw(uncore,
+ GEN6_BSD_SLEEP_PSMI_CONTROL,
+ _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+
+ /* Clear the context id. Here be magic! */
+ intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0);
+
+ /* Wait for the ring not to be idle, i.e. for it to wake up. */
+ if (__intel_wait_for_register_fw(uncore,
+ GEN6_BSD_SLEEP_PSMI_CONTROL,
+ GEN6_BSD_SLEEP_INDICATOR,
+ 0,
+ 1000, 0, NULL))
+ DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
+
+ /* Now that the ring is fully powered up, update the tail */
+ ENGINE_WRITE(engine, RING_TAIL, tail);
+
+ /*
+ * Let the ring send IDLE messages to the GT again,
+ * and so let it sleep to conserve power when idle.
+ */
+ intel_uncore_write_fw(uncore,
+ GEN6_BSD_SLEEP_PSMI_CONTROL,
+ _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+}
+
+static void write_tail(struct intel_engine_cs *engine, u32 tail)
+{
+ if (engine->class == VIDEO_DECODE_CLASS && IS_GEN(engine->i915, 6))
+ bsd_write_tail(engine, tail);
+ else
+ ENGINE_WRITE(engine, RING_TAIL, tail);
+}
+
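+/*
+ * Fill the submission ports from the priority queue, copying each ready
+ * request into the global ring and kicking RING_TAIL once at the end.
+ */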
+static void __dequeue(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const el = &engine->execlists;
+ struct i915_request **port = el->pending;
+ struct i915_request ** const last_port = port + el->port_mask;
+ struct i915_request *last;
+ bool submit = false;
+ struct rb_node *rb;
+
+ lockdep_assert_held(&engine->active.lock);
+
+ while (*el->active)
+ *port++ = *el->active++;
+ if (port > last_port)
+ return;
+
+ last = NULL;
+ while ((rb = rb_first_cached(&el->queue))) {
+ struct i915_priolist *p = to_priolist(rb);
+ struct i915_request *rq, *rn;
+ int i;
+
+ priolist_for_each_request_consume(rq, rn, p, i) {
+ if (last && rq->context != last->context) {
+ if (port == last_port)
+ goto done;
+
+ *port++ = i915_request_get(last);
+ }
+
+ submit |= ring_submit(rq);
+ last = rq;
+ }
+
+ rb_erase_cached(&p->node, &el->queue);
+ i915_priolist_free(p);
+ }
+
+done:
+ el->queue_priority_hint = rb ? to_priolist(rb)->priority : INT_MIN;
+ if (submit) {
+ *port++ = i915_request_get(last);
+ *port++ = NULL;
+ WRITE_ONCE(el->active, el->pending);
+
+ wmb(); /* paranoid flush of WCB before RING_TAIL write */
+ write_tail(engine, engine->legacy.ring->tail);
+ memcpy(el->inflight, el->pending,
+ (port - el->pending) * sizeof(*port));
+
+ WRITE_ONCE(el->active, el->inflight);
+ }
+}
+
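+/* Drop completed requests from the ports, then submit more from the queue. */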
+static void __submission_tasklet(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const el = &engine->execlists;
+ struct i915_request *rq;
+
+ while ((rq = *el->active)) {
+ struct intel_context *ce = rq->context;
+
+ if (!i915_request_completed(rq))
+ break;
+
+ if (list_is_last_rcu(&rq->link, &ce->timeline->requests))
+ intel_engine_add_retire(engine, ce->timeline);
+
+ i915_request_put(rq);
+ el->active++;
+ }
+
+ __dequeue(engine);
+}
+
+static void submission_tasklet(unsigned long data)
+{
+ struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->active.lock, flags);
+ __submission_tasklet(engine);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+}
+
+static void queue_request(struct intel_engine_cs *engine,
+ struct i915_sched_node *node,
+ int prio)
+{
+ GEM_BUG_ON(!list_empty(&node->link));
+ list_add_tail(&node->link, i915_sched_lookup_priolist(engine, prio));
+}
+
+static void __submit_queue_imm(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const el = &engine->execlists;
+
+ if (reset_in_progress(el))
+ return; /* defer until we restart the engine following reset */
+
+ __submission_tasklet(engine);
+}
+
+static void submit_queue(struct intel_engine_cs *engine,
+ const struct i915_request *rq)
+{
+ struct intel_engine_execlists *el = &engine->execlists;
+
+ if (rq_prio(rq) <= el->queue_priority_hint)
+ return;
+
+ el->queue_priority_hint = rq_prio(rq);
+ __submit_queue_imm(engine);
+}
+
+static void submit_request(struct i915_request *rq)
+{
+ struct intel_engine_cs *engine = rq->engine;
+ unsigned long flags;
+
+ /* Will be called from irq-context when using foreign fences. */
+ spin_lock_irqsave(&engine->active.lock, flags);
+
+ queue_request(engine, &rq->sched, rq_prio(rq));
+
+ GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+ GEM_BUG_ON(list_empty(&rq->sched.link));
+
+ submit_queue(engine, rq);
+
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+}
+
+static void submission_park(struct intel_engine_cs *engine)
+{
+ intel_engine_unpin_breadcrumbs_irq(engine);
+ engine->flags &= ~I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
+}
+
+static void submission_unpark(struct intel_engine_cs *engine)
+{
+ engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
+ intel_engine_pin_breadcrumbs_irq(engine);
+}
+
+static int emit_init_breadcrumb(struct i915_request *rq)
+{
+ u32 *cs;
+
+ GEM_BUG_ON(i915_request_has_initial_breadcrumb(rq));
+ if (!i915_request_timeline(rq)->has_initial_breadcrumb)
+ return 0;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = 0;
+ *cs++ = i915_request_timeline(rq)->hwsp_offset;
+ *cs++ = rq->fence.seqno - 1;
+
+ intel_ring_advance(rq, cs);
+
+ /* Record the updated position of the request's payload */
+ rq->infix = intel_ring_offset(rq, cs);
+
+ __set_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
+ return 0;
+}
+
+static void ring_context_destroy(struct kref *ref)
+{
+ struct intel_context *ce = container_of(ref, typeof(*ce), ref);
+
+ GEM_BUG_ON(intel_context_is_pinned(ce));
+
+ if (ce->state)
+ i915_vma_put(ce->state);
+ if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags))
+ intel_ring_put(ce->ring);
+
+ intel_context_fini(ce);
+ intel_context_free(ce);
+}
+
+static int __context_pin_ppgtt(struct intel_context *ce)
+{
+ struct i915_address_space *vm;
+ int err = 0;
+
+ vm = vm_alias(ce->vm);
+ if (vm)
+ err = gen6_ppgtt_pin(i915_vm_to_ppgtt((vm)));
+
+ return err;
+}
+
+static void __context_unpin_ppgtt(struct intel_context *ce)
+{
+ struct i915_address_space *vm;
+
+ vm = vm_alias(ce->vm);
+ if (vm)
+ gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm));
+}
+
+static void ring_context_unpin(struct intel_context *ce)
+{
+ __context_unpin_ppgtt(ce);
+}
+
+static struct i915_vma *
+alloc_context_vma(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_shmem(i915, engine->context_size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ /*
+ * Try to make the context utilize L3 as well as LLC.
+ *
+ * On VLV we don't have L3 controls in the PTEs so we
+ * shouldn't touch the cache level, especially as that
+ * would make the object snooped which might have a
+ * negative performance impact.
+ *
+ * Snooping is required on non-llc platforms in execlist
+ * mode, but since all GGTT accesses use PAT entry 0 we
+ * get snooping anyway regardless of cache_level.
+ *
+ * This is only applicable for Ivy Bridge devices since
+ * later platforms don't have L3 control bits in the PTE.
+ */
+ if (IS_IVYBRIDGE(i915))
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC);
+
+ if (engine->default_state) {
+ void *vaddr;
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_obj;
+ }
+
+ shmem_read(engine->default_state, 0,
+ vaddr, engine->context_size);
+
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+ }
+
+ vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ return vma;
+
+err_obj:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
+static int alloc_timeline(struct intel_context *ce)
+{
+ struct intel_engine_cs *engine = ce->engine;
+ struct intel_timeline *tl;
+ struct i915_vma *hwsp;
+
+ /*
+ * Use the static global HWSP for the kernel context, and
+ * a dynamically allocated cacheline for everyone else.
+ */
+ hwsp = NULL;
+ if (unlikely(intel_context_is_barrier(ce)))
+ hwsp = engine->status_page.vma;
+
+ tl = intel_timeline_create(engine->gt, hwsp);
+ if (IS_ERR(tl))
+ return PTR_ERR(tl);
+
+ ce->timeline = tl;
+ return 0;
+}
+
+static int ring_context_alloc(struct intel_context *ce)
+{
+ struct intel_engine_cs *engine = ce->engine;
+ struct intel_ring *ring;
+ int err;
+
+ GEM_BUG_ON(ce->state);
+ if (engine->context_size) {
+ struct i915_vma *vma;
+
+ vma = alloc_context_vma(engine);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ ce->state = vma;
+ }
+
+ if (!ce->timeline) {
+ err = alloc_timeline(ce);
+ if (err)
+ goto err_vma;
+ }
+
+ ring = intel_engine_create_ring(engine,
+ (unsigned long)ce->ring |
+ INTEL_RING_CREATE_INTERNAL);
+ if (IS_ERR(ring)) {
+ err = PTR_ERR(ring);
+ goto err_timeline;
+ }
+ ce->ring = ring;
+
+ return 0;
+
+err_timeline:
+ intel_timeline_put(ce->timeline);
+err_vma:
+ if (ce->state) {
+ i915_vma_put(ce->state);
+ ce->state = NULL;
+ }
+ return err;
+}
+
+static int ring_context_pin(struct intel_context *ce)
+{
+ return __context_pin_ppgtt(ce);
+}
+
+static void ring_context_reset(struct intel_context *ce)
+{
+ intel_ring_reset(ce->ring, 0);
+}
+
+static const struct intel_context_ops ring_context_ops = {
+ .alloc = ring_context_alloc,
+
+ .pin = ring_context_pin,
+ .unpin = ring_context_unpin,
+
+ .enter = intel_context_enter_engine,
+ .exit = intel_context_exit_engine,
+
+ .reset = ring_context_reset,
+ .destroy = ring_context_destroy,
+};
+
+static int ring_request_alloc(struct i915_request *rq)
+{
+ int ret;
+
+ GEM_BUG_ON(!intel_context_is_pinned(rq->context));
+
+ /*
+ * Flush enough space to reduce the likelihood of waiting after
+ * we start building the request - in which case we will just
+ * have to repeat work.
+ */
+ rq->reserved_space += LEGACY_REQUEST_SIZE;
+
+ /* Unconditionally invalidate GPU caches and TLBs. */
+ ret = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (ret)
+ return ret;
+
+ rq->reserved_space -= LEGACY_REQUEST_SIZE;
+ return 0;
+}
+
+static void set_default_submission(struct intel_engine_cs *engine)
+{
+ engine->schedule = i915_schedule;
+ engine->execlists.tasklet.func = submission_tasklet;
+
+ engine->submit_request = submit_request;
+
+ engine->park = submission_park;
+ engine->unpark = submission_unpark;
+}
+
+static void ring_release(struct intel_engine_cs *engine)
+{
+ intel_engine_cleanup_common(engine);
+
+ set_current_context(&engine->legacy.context, NULL);
+
+ if (engine->wa_ctx.vma) {
+ intel_context_put(engine->wa_ctx.vma->private);
+ i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
+ }
+
+ intel_ring_unpin(engine->legacy.ring);
+ intel_ring_put(engine->legacy.ring);
+
+ kfree(engine);
+}
+
+static void setup_irq(struct intel_engine_cs *engine)
+{
+ engine->irq_enable = gen6_irq_enable;
+ engine->irq_disable = gen6_irq_disable;
+}
+
+static void setup_common(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+
+ /* gen8+ are only supported with execlists */
+ GEM_BUG_ON(INTEL_GEN(i915) >= 8);
+ GEM_BUG_ON(INTEL_GEN(i915) < 6);
+
+ setup_irq(engine);
+
+ engine->resume = intel_ring_submission_resume_xcs;
+ engine->reset.prepare = reset_prepare;
+ engine->reset.rewind = ring_reset_rewind;
+ engine->reset.cancel = ring_reset_cancel;
+ engine->reset.finish = reset_finish;
+
+ engine->cops = &ring_context_ops;
+ engine->request_alloc = ring_request_alloc;
+
+ engine->emit_init_breadcrumb = emit_init_breadcrumb;
+ if (INTEL_GEN(i915) >= 7)
+ engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
+ else if (INTEL_GEN(i915) >= 6)
+ engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
+ else
+ engine->emit_fini_breadcrumb = gen3_emit_breadcrumb;
+
+ engine->set_default_submission = set_default_submission;
+
+ engine->emit_bb_start = gen6_emit_bb_start;
+}
+
+static void setup_rcs(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+
+ if (HAS_L3_DPF(i915))
+ engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+
+ engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
+
+ if (INTEL_GEN(i915) >= 7) {
+ engine->emit_flush = gen7_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_rcs;
+ if (IS_HASWELL(i915))
+ engine->emit_bb_start = hsw_emit_bb_start;
+ } else {
+ engine->emit_flush = gen6_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_rcs;
+ }
+
+ engine->resume = intel_ring_submission_resume_rcs;
+}
+
+static void setup_vcs(struct intel_engine_cs *engine)
+{
+ engine->emit_flush = gen6_emit_flush_vcs;
+ engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+}
+
+static void setup_bcs(struct intel_engine_cs *engine)
+{
+ engine->emit_flush = gen6_emit_flush_xcs;
+ engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
+}
+
+static void setup_vecs(struct intel_engine_cs *engine)
+{
+ GEM_BUG_ON(!IS_HASWELL(engine->i915));
+
+ engine->emit_flush = gen6_emit_flush_xcs;
+ engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
+ engine->irq_enable = hsw_irq_enable_vecs;
+ engine->irq_disable = hsw_irq_disable_vecs;
+}
+
+static unsigned int global_ring_size(void)
+{
+ /* Enough space to hold 2 clients and the context switch */
+ return roundup_pow_of_two(EXECLIST_MAX_PORTS * SZ_16K + SZ_4K);
+}
+
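+/*
+ * Allocate and record the w/a batch used by clear_residuals() between
+ * unrelated contexts.
+ */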
+static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int size;
+ int err;
+
+ size = gen7_setup_clear_gpr_bb(engine, NULL /* probe size */);
+ if (size <= 0)
+ return size;
+
+ size = ALIGN(size, PAGE_SIZE);
+ obj = i915_gem_object_create_internal(engine->i915, size);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, engine->gt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ vma->private = intel_context_create(engine); /* dummy residuals */
+ if (IS_ERR(vma->private)) {
+ err = PTR_ERR(vma->private);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
+ if (err)
+ goto err_private;
+
+ err = i915_vma_sync(vma);
+ if (err)
+ goto err_unpin;
+
+ err = gen7_setup_clear_gpr_bb(engine, vma);
+ if (err)
+ goto err_unpin;
+
+ engine->wa_ctx.vma = vma;
+ return 0;
+
+err_unpin:
+ i915_vma_unpin(vma);
+err_private:
+ intel_context_put(vma->private);
+err_obj:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+int intel_ring_scheduler_setup(struct intel_engine_cs *engine)
+{
+ struct intel_ring *ring;
+ int err;
+
+ tasklet_init(&engine->execlists.tasklet,
+ submission_tasklet, (unsigned long)engine);
+
+ setup_common(engine);
+
+ switch (engine->class) {
+ case RENDER_CLASS:
+ setup_rcs(engine);
+ break;
+ case VIDEO_DECODE_CLASS:
+ setup_vcs(engine);
+ break;
+ case COPY_ENGINE_CLASS:
+ setup_bcs(engine);
+ break;
+ case VIDEO_ENHANCEMENT_CLASS:
+ setup_vecs(engine);
+ break;
+ default:
+ MISSING_CASE(engine->class);
+ return -ENODEV;
+ }
+
+ ring = intel_engine_create_ring(engine, global_ring_size());
+ if (IS_ERR(ring)) {
+ err = PTR_ERR(ring);
+ goto err;
+ }
+
+ err = intel_ring_pin(ring);
+ if (err)
+ goto err_ring;
+
+ GEM_BUG_ON(engine->legacy.ring);
+ engine->legacy.ring = ring;
+
+ if (IS_HASWELL(engine->i915) && engine->class == RENDER_CLASS) {
+ err = gen7_ctx_switch_bb_init(engine);
+ if (err)
+ goto err_ring_unpin;
+ }
+
+ if (INTEL_GEN(engine->i915) >= 6)
+ engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
+
+ /* Finally, take ownership and responsibility for cleanup! */
+ engine->release = ring_release;
+ return 0;
+
+err_ring_unpin:
+ intel_ring_unpin(ring);
+err_ring:
+ intel_ring_put(ring);
+err:
+ intel_engine_cleanup_common(engine);
+ return err;
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 0135752c43ff..5503b0a0dd67 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -219,7 +219,7 @@ static void set_pp_dir(struct intel_engine_cs *engine)
}
}
-static int xcs_resume(struct intel_engine_cs *engine)
+int intel_ring_submission_resume_xcs(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
struct intel_ring *ring = engine->legacy.ring;
@@ -434,7 +434,7 @@ static void reset_finish(struct intel_engine_cs *engine)
{
}
-static int rcs_resume(struct intel_engine_cs *engine)
+int intel_ring_submission_resume_rcs(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
struct intel_uncore *uncore = engine->uncore;
@@ -457,7 +457,7 @@ static int rcs_resume(struct intel_engine_cs *engine)
intel_uncore_write(uncore, INSTPM,
_MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
- return xcs_resume(engine);
+ return intel_ring_submission_resume_xcs(engine);
}
static void reset_cancel(struct intel_engine_cs *engine)
@@ -1087,7 +1087,7 @@ static void setup_common(struct intel_engine_cs *engine)
setup_irq(engine);
- engine->resume = xcs_resume;
+ engine->resume = intel_ring_submission_resume_xcs;
engine->reset.prepare = reset_prepare;
engine->reset.rewind = reset_rewind;
engine->reset.cancel = reset_cancel;
@@ -1145,7 +1145,7 @@ static void setup_rcs(struct intel_engine_cs *engine)
if (IS_HASWELL(i915))
engine->emit_bb_start = hsw_emit_bb_start;
- engine->resume = rcs_resume;
+ engine->resume = intel_ring_submission_resume_rcs;
}
static void setup_vcs(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.h b/drivers/gpu/drm/i915/gt/intel_ring_submission.h
new file mode 100644
index 000000000000..1e78af2d6a1c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_RING_SUBMISSION_H__
+#define __INTEL_RING_SUBMISSION_H__
+
+struct intel_engine_cs;
+
+int intel_ring_submission_resume_rcs(struct intel_engine_cs *engine);
+int intel_ring_submission_resume_xcs(struct intel_engine_cs *engine);
+
+#endif /* __INTEL_RING_SUBMISSION_H__ */
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index c5d7220de529..bab4c2cd4540 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1040,17 +1040,26 @@ __emit_semaphore_wait(struct i915_request *to,
* (post-wrap) values than they were expecting (and so wait
* forever).
*/
- *cs++ = (MI_SEMAPHORE_WAIT |
- MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_GTE_SDD) +
- has_token;
- *cs++ = seqno;
- *cs++ = hwsp_offset;
- *cs++ = 0;
- if (has_token) {
+ if (INTEL_GEN(to->i915) >= 8) {
+ *cs++ = (MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_GTE_SDD) +
+ has_token;
+ *cs++ = seqno;
+ *cs++ = hwsp_offset;
+ *cs++ = 0;
+ if (has_token) {
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
+ }
+ } else {
+ *cs++ = (MI_SEMAPHORE_MBOX |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_COMPARE);
+ *cs++ = seqno - 1; /* COMPARE is a strict greater-than */
+ *cs++ = hwsp_offset;
*cs++ = 0;
- *cs++ = MI_NOOP;
}
intel_ring_advance(to, cs);
--
2.20.1