[Intel-gfx] [PATCH] drm/i915: Pass intel_context to i915_request_create()
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Thu Mar 21 21:32:29 UTC 2019
Start acquiring the logical intel_context and using that as our primary
means for request allocation. This is the initial step towards avoiding
the struct_mutex requirement for request allocation with the
perma-pinned kernel context, and it also provides a foundation for
breaking up the complex request allocation to handle different
scenarios inside execbuf.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/i915_gem_context.c       |  4 +-
 drivers/gpu/drm/i915/i915_perf.c              |  2 +-
 drivers/gpu/drm/i915/i915_request.c           | 90 +++++++++++--------
 drivers/gpu/drm/i915/i915_request.h           |  3 +
 drivers/gpu/drm/i915/i915_reset.c             |  2 +-
 drivers/gpu/drm/i915/intel_overlay.c          |  5 +-
 drivers/gpu/drm/i915/selftests/i915_active.c  |  2 +-
 .../drm/i915/selftests/i915_gem_coherency.c   |  2 +-
 .../gpu/drm/i915/selftests/i915_gem_object.c  |  2 +-
 drivers/gpu/drm/i915/selftests/i915_request.c |  6 +-
 .../gpu/drm/i915/selftests/i915_timeline.c    |  4 +-
 11 files changed, 71 insertions(+), 51 deletions(-)
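
For reviewers, a minimal sketch of the calling-convention change made
by this patch (identifiers as in the diff below; the surrounding error
handling is illustrative only and not part of the patch):

/* Before: engine and GEM context were passed separately, and
 * i915_request_alloc() pinned the intel_context internally:
 */
rq = i915_request_alloc(engine, i915->kernel_context);

/* After: a caller that already holds a pinned intel_context, such as
 * the perma-pinned engine->kernel_context, passes it directly.
 * i915_request_create() only bumps the existing pin count via
 * __intel_context_pin(), so the caller must keep its own pin:
 */
rq = i915_request_create(engine->kernel_context);
if (IS_ERR(rq))
	return PTR_ERR(rq);
i915_request_add(rq);

i915_request_alloc() is kept as a wrapper that performs the wedged and
preempt-context checks, pins the intel_context, calls
i915_request_create() and then drops its temporary pin, as seen in the
i915_request.c hunk below.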
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 00dec72f6875..a2c2476fa2e2 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -774,7 +774,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
struct intel_ring *ring;
struct i915_request *rq;
- rq = i915_request_alloc(engine, i915->kernel_context);
+ rq = i915_request_create(engine->kernel_context);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -1040,7 +1040,7 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
/* Submitting requests etc needs the hw awake. */
wakeref = intel_runtime_pm_get(i915);
- rq = i915_request_alloc(ce->engine, i915->kernel_context);
+ rq = i915_request_create(ce->engine->kernel_context);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
goto out_put;
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 85c5cb779297..c541c4605c11 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1761,7 +1761,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
* Apply the configuration by doing one context restore of the edited
* context image.
*/
- rq = i915_request_alloc(engine, dev_priv->kernel_context);
+ rq = i915_request_create(engine->kernel_context);
if (IS_ERR(rq))
return PTR_ERR(rq);
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 1529824d7c61..f6d325d1696c 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -597,44 +597,21 @@ static int add_timeline_barrier(struct i915_request *rq)
* Returns a pointer to the allocated request if successful,
* or an error code if not.
*/
-struct i915_request *
-i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
+struct i915_request *i915_request_create(struct intel_context *ce)
{
- struct drm_i915_private *i915 = engine->i915;
- struct intel_context *ce;
- struct i915_timeline *tl;
+ struct drm_i915_private *i915 = ce->engine->i915;
+ struct i915_timeline *tl = ce->ring->timeline;
struct i915_request *rq;
u32 seqno;
int ret;
lockdep_assert_held(&i915->drm.struct_mutex);
- /*
- * Preempt contexts are reserved for exclusive use to inject a
- * preemption context switch. They are never to be used for any trivial
- * request!
- */
- GEM_BUG_ON(ctx == i915->preempt_context);
-
- /*
- * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
- * EIO if the GPU is already wedged.
- */
- ret = i915_terminally_wedged(i915);
- if (ret)
- return ERR_PTR(ret);
-
- /*
- * Pinning the contexts may generate requests in order to acquire
- * GGTT space, so do this first before we reserve a seqno for
- * ourselves.
- */
- ce = intel_context_pin(ctx, engine);
- if (IS_ERR(ce))
- return ERR_CAST(ce);
+ /* Check that the caller provided an already pinned context */
+ __intel_context_pin(ce);
reserve_gt(i915);
- mutex_lock(&ce->ring->timeline->mutex);
+ mutex_lock(&tl->mutex);
/* Move our oldest request to the slab-cache (if not in use!) */
rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
@@ -684,18 +661,17 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
INIT_LIST_HEAD(&rq->active_list);
INIT_LIST_HEAD(&rq->execute_cb);
- tl = ce->ring->timeline;
ret = i915_timeline_get_seqno(tl, rq, &seqno);
if (ret)
goto err_free;
rq->i915 = i915;
- rq->engine = engine;
- rq->gem_context = ctx;
rq->hw_context = ce;
+ rq->gem_context = ce->gem_context;
+ rq->engine = ce->engine;
rq->ring = ce->ring;
rq->timeline = tl;
- GEM_BUG_ON(rq->timeline == &engine->timeline);
+ GEM_BUG_ON(rq->timeline == &ce->engine->timeline);
rq->hwsp_seqno = tl->hwsp_seqno;
rq->hwsp_cacheline = tl->hwsp_cacheline;
rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
@@ -727,7 +703,8 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
* around inside i915_request_add() there is sufficient space at
* the beginning of the ring as well.
*/
- rq->reserved_space = 2 * engine->emit_fini_breadcrumb_dw * sizeof(u32);
+ rq->reserved_space =
+ 2 * rq->engine->emit_fini_breadcrumb_dw * sizeof(u32);
/*
* Record the position of the start of the request so that
@@ -741,7 +718,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
if (ret)
goto err_unwind;
- ret = engine->request_alloc(rq);
+ ret = rq->engine->request_alloc(rq);
if (ret)
goto err_unwind;
@@ -752,6 +729,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
/* Check that we didn't interrupt ourselves with a new request */
GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno);
+ lockdep_assert_held(&tl->mutex);
return rq;
err_unwind:
@@ -765,12 +743,52 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
err_free:
kmem_cache_free(global.slab_requests, rq);
err_unreserve:
- mutex_unlock(&ce->ring->timeline->mutex);
+ mutex_unlock(&tl->mutex);
unreserve_gt(i915);
intel_context_unpin(ce);
return ERR_PTR(ret);
}
+struct i915_request *
+i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
+{
+ struct drm_i915_private *i915 = engine->i915;
+ struct intel_context *ce;
+ struct i915_request *rq;
+ int ret;
+
+ lockdep_assert_held(&i915->drm.struct_mutex);
+
+ /*
+ * Preempt contexts are reserved for exclusive use to inject a
+ * preemption context switch. They are never to be used for any trivial
+ * request!
+ */
+ GEM_BUG_ON(ctx == i915->preempt_context);
+
+ /*
+ * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
+ * EIO if the GPU is already wedged.
+ */
+ ret = i915_terminally_wedged(i915);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /*
+ * Pinning the contexts may generate requests in order to acquire
+ * GGTT space, so do this first before we reserve a seqno for
+ * ourselves.
+ */
+ ce = intel_context_pin(ctx, engine);
+ if (IS_ERR(ce))
+ return ERR_CAST(ce);
+
+ rq = i915_request_create(ce);
+ intel_context_unpin(ce);
+
+ return rq;
+}
+
static int
emit_semaphore_wait(struct i915_request *to,
struct i915_request *from,
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 8c8fa5010644..e4be1b688a14 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -225,6 +225,9 @@ static inline bool dma_fence_is_i915(const struct dma_fence *fence)
return fence->ops == &i915_fence_ops;
}
+struct i915_request * __must_check
+i915_request_create(struct intel_context *ce);
+
struct i915_request * __must_check
i915_request_alloc(struct intel_engine_cs *engine,
struct i915_gem_context *ctx);
diff --git a/drivers/gpu/drm/i915/i915_reset.c b/drivers/gpu/drm/i915/i915_reset.c
index 0aea19cefe4a..519bc07393cc 100644
--- a/drivers/gpu/drm/i915/i915_reset.c
+++ b/drivers/gpu/drm/i915/i915_reset.c
@@ -744,7 +744,7 @@ static void restart_work(struct work_struct *work)
if (!intel_engine_is_idle(engine))
continue;
- rq = i915_request_alloc(engine, i915->kernel_context);
+ rq = i915_request_create(engine->kernel_context);
if (!IS_ERR(rq))
i915_request_add(rq);
}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index a882b8d42bd9..fea8c5869da9 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -235,10 +235,9 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
static struct i915_request *alloc_request(struct intel_overlay *overlay)
{
- struct drm_i915_private *dev_priv = overlay->i915;
- struct intel_engine_cs *engine = dev_priv->engine[RCS0];
+ struct intel_engine_cs *engine = overlay->i915->engine[RCS0];
- return i915_request_alloc(engine, dev_priv->kernel_context);
+ return i915_request_create(engine->kernel_context);
}
/* overlay needs to be disable in OCMD reg */
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index 27d8f853111b..eee838dc0634 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -46,7 +46,7 @@ static int __live_active_setup(struct drm_i915_private *i915,
for_each_engine(engine, i915, id) {
struct i915_request *rq;
- rq = i915_request_alloc(engine, i915->kernel_context);
+ rq = i915_request_create(engine->kernel_context);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
break;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
index e43630b40fce..046a38743152 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
@@ -202,7 +202,7 @@ static int gpu_set(struct drm_i915_gem_object *obj,
if (IS_ERR(vma))
return PTR_ERR(vma);
- rq = i915_request_alloc(i915->engine[RCS0], i915->kernel_context);
+ rq = i915_request_create(i915->engine[RCS0]->kernel_context);
if (IS_ERR(rq)) {
i915_vma_unpin(vma);
return PTR_ERR(rq);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
index 971148fbe6f5..5a12d965c2c0 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -468,7 +468,7 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
if (err)
return err;
- rq = i915_request_alloc(i915->engine[RCS0], i915->kernel_context);
+ rq = i915_request_create(i915->engine[RCS0]->kernel_context);
if (IS_ERR(rq)) {
i915_vma_unpin(vma);
return PTR_ERR(rq);
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index e6ffe2240126..79ee983ba664 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -649,7 +649,7 @@ empty_request(struct intel_engine_cs *engine,
struct i915_request *request;
int err;
- request = i915_request_alloc(engine, engine->i915->kernel_context);
+ request = i915_request_create(engine->kernel_context);
if (IS_ERR(request))
return request;
@@ -853,7 +853,7 @@ static int live_all_engines(void *arg)
}
for_each_engine(engine, i915, id) {
- request[id] = i915_request_alloc(engine, i915->kernel_context);
+ request[id] = i915_request_create(engine->kernel_context);
if (IS_ERR(request[id])) {
err = PTR_ERR(request[id]);
pr_err("%s: Request allocation failed with err=%d\n",
@@ -962,7 +962,7 @@ static int live_sequential_engines(void *arg)
goto out_unlock;
}
- request[id] = i915_request_alloc(engine, i915->kernel_context);
+ request[id] = i915_request_create(engine->kernel_context);
if (IS_ERR(request[id])) {
err = PTR_ERR(request[id]);
pr_err("%s: Request allocation failed for %s with err=%d\n",
diff --git a/drivers/gpu/drm/i915/selftests/i915_timeline.c b/drivers/gpu/drm/i915/selftests/i915_timeline.c
index 8e7bcaa1eb66..227479349c61 100644
--- a/drivers/gpu/drm/i915/selftests/i915_timeline.c
+++ b/drivers/gpu/drm/i915/selftests/i915_timeline.c
@@ -454,7 +454,7 @@ tl_write(struct i915_timeline *tl, struct intel_engine_cs *engine, u32 value)
goto out;
}
- rq = i915_request_alloc(engine, engine->i915->kernel_context);
+ rq = i915_request_create(engine->kernel_context);
if (IS_ERR(rq))
goto out_unpin;
@@ -678,7 +678,7 @@ static int live_hwsp_wrap(void *arg)
if (!intel_engine_can_store_dword(engine))
continue;
- rq = i915_request_alloc(engine, i915->kernel_context);
+ rq = i915_request_create(engine->kernel_context);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out;
--
2.20.1