[Intel-gfx] [PATCH 1/2] drm/i915/scheduler: add gvt force-single-submission for guc
Chuanxiao Dong
chuanxiao.dong at intel.com
Tue Mar 28 01:49:51 UTC 2017
GVT contexts require single submission and their requests cannot be merged.
So when the GuC is submitting a GVT request, the next request must not be
submitted to the GuC until the previous one has completed. This follows the
behaviour already enforced by execlist mode submission.
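
In rough pseudo-C, the rule that both dequeue paths apply after this change
looks like the sketch below (illustrative only, expressed with the helpers
introduced by this patch, not literal driver code):

    if (last && !i915_gem_context_can_merge(last->ctx, rq->ctx)) {
            /* a new context may only be promoted to the second port */
            if (port != engine->execlist_port)
                    break;
            /* and the ports are never shared with a force-single-submission
             * (GVT) context, in either direction
             */
            if (i915_gem_context_single_port_submit(last->ctx) ||
                i915_gem_context_single_port_submit(rq->ctx))
                    break;
            /* otherwise the previous context moves onto the second port */
    }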
Cc: chris at chris-wilson.co.uk
Signed-off-by: Chuanxiao Dong <chuanxiao.dong at intel.com>
---
drivers/gpu/drm/i915/i915_gem_context.h | 20 ++++++++++++++++++++
drivers/gpu/drm/i915/i915_guc_submission.c | 6 +++++-
drivers/gpu/drm/i915/intel_lrc.c | 25 ++++---------------------
3 files changed, 29 insertions(+), 22 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 4af2ab94..025eba2 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -246,6 +246,26 @@ static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
return !ctx->file_priv;
}
+static inline bool
+i915_gem_context_single_port_submit(const struct i915_gem_context *ctx)
+{
+ return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
+ i915_gem_context_force_single_submission(ctx));
+}
+
+static inline bool
+i915_gem_context_can_merge(const struct i915_gem_context *prev,
+ const struct i915_gem_context *next)
+{
+ if (prev != next)
+ return false;
+
+ if (i915_gem_context_single_port_submit(prev))
+ return false;
+
+ return true;
+}
+
/* i915_gem_context.c */
int __must_check i915_gem_context_init(struct drm_i915_private *dev_priv);
void i915_gem_context_lost(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 991e76e..ad90de1 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -680,10 +680,14 @@ static bool i915_guc_dequeue(struct intel_engine_cs *engine)
struct drm_i915_gem_request *rq =
rb_entry(rb, typeof(*rq), priotree.node);
- if (last && rq->ctx != last->ctx) {
+ if (last && !i915_gem_context_can_merge(last->ctx, rq->ctx)) {
if (port != engine->execlist_port)
break;
+ if (i915_gem_context_single_port_submit(last->ctx) ||
+ i915_gem_context_single_port_submit(rq->ctx))
+ break;
+
i915_gem_request_assign(&port->request, last);
nested_enable_signaling(last);
port++;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index dd0e9d587..a49801e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -377,24 +377,6 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
writel(lower_32_bits(desc[0]), elsp);
}
-static bool ctx_single_port_submission(const struct i915_gem_context *ctx)
-{
- return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
- i915_gem_context_force_single_submission(ctx));
-}
-
-static bool can_merge_ctx(const struct i915_gem_context *prev,
- const struct i915_gem_context *next)
-{
- if (prev != next)
- return false;
-
- if (ctx_single_port_submission(prev))
- return false;
-
- return true;
-}
-
static void execlists_dequeue(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *last;
@@ -462,7 +444,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* request, and so we never need to tell the hardware about
* the first.
*/
- if (last && !can_merge_ctx(cursor->ctx, last->ctx)) {
+ if (last &&
+ !i915_gem_context_can_merge(last->ctx, cursor->ctx)) {
/* If we are on the second port and cannot combine
* this request with the last, then we are done.
*/
@@ -475,8 +458,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* context (even though a different request) to
* the second port.
*/
- if (ctx_single_port_submission(last->ctx) ||
- ctx_single_port_submission(cursor->ctx))
+ if (i915_gem_context_single_port_submit(last->ctx) ||
+ i915_gem_context_single_port_submit(cursor->ctx))
break;
GEM_BUG_ON(last->ctx == cursor->ctx);
--
2.7.4