[Intel-gfx] [CI 13/25] drm/i915: Remove the identical implementations of request space reservation

Chris Wilson <chris@chris-wilson.co.uk>
Thu Apr 28 08:56:47 UTC 2016


Now that we share intel_ring_begin(), reserving space for the tail of
the request is identical between legacy and execlists, so the duplicate
implementations can be removed. In the process, we move the reserved
space tracking from the ringbuffer onto the request. This enables us to
reorder the reserved space allocation in the next patch.
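
A minimal sketch of the unified flow after this patch, for reviewers; it
only restates the hunks below (MIN_SPACE_FOR_ADD_REQUEST,
intel_ring_begin() and the request fields are the existing driver
symbols touched by the diff):

	/* Request construction (__i915_gem_request_alloc): note the
	 * reservation on the request itself, then let the shared
	 * intel_ring_begin() guarantee that much tail space exists.
	 * Both legacy and execlists now take exactly this path. */
	req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
	ret = intel_ring_begin(req, 0);
	if (ret) {
		i915_gem_request_unreference(req);
		return ret;
	}

	/* Request emission (__i915_add_request): release the reservation
	 * so the final commands may use it, then sanity check how much
	 * was actually consumed (see the WARN_ONCE below). */
	reserved_tail = request->reserved_space;
	request->reserved_space = 0;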

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h         |  3 +++
 drivers/gpu/drm/i915/i915_gem.c         | 23 ++++++++++-------
 drivers/gpu/drm/i915/intel_lrc.c        | 15 -----------
 drivers/gpu/drm/i915/intel_ringbuffer.c | 44 +++------------------------------
 drivers/gpu/drm/i915/intel_ringbuffer.h | 17 -------------
 5 files changed, 20 insertions(+), 82 deletions(-)
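
Not part of the patch, just a reviewer's note: the new sanity check in
__i915_add_request() measures how many bytes the request tail actually
consumed, correcting for the tail wrapping past the end of the ring. A
standalone illustration of that arithmetic (hypothetical helper name,
plain userspace C):

#include <stdio.h>

/* Wrap-aware "bytes used" calculation, mirroring the check added to
 * __i915_add_request(): tail - start, corrected when the tail wrapped. */
static int bytes_used(int start, int tail, int ring_size)
{
	int used = tail - start;

	if (used < 0)
		used += ring_size;
	return used;
}

int main(void)
{
	/* In a 4096-byte ring, emitting 160 bytes starting at offset 4000
	 * wraps to offset 64, yet still reports 160 bytes used. */
	printf("%d\n", bytes_used(4000, 64, 4096));	/* prints 160 */
	return 0;
}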

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 444a8ea0c5c4..831da9f43324 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2278,6 +2278,9 @@ struct drm_i915_gem_request {
 	/** Position in the ringbuffer of the end of the whole request */
 	u32 tail;
 
+	/** Preallocate space in the ringbuffer for emitting the request */
+	u32 reserved_space;
+
 	/**
 	 * Context and ring buffer related to this request
 	 * Contexts are refcounted, so when this request is associated with a
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 71135c3ce44e..dd628eae0592 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2573,6 +2573,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	struct drm_i915_private *dev_priv;
 	struct intel_ringbuffer *ringbuf;
 	u32 request_start;
+	u32 reserved_tail;
 	int ret;
 
 	if (WARN_ON(request == NULL))
@@ -2587,9 +2588,10 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	 * should already have been reserved in the ring buffer. Let the ring
 	 * know that it is time to use that space up.
 	 */
-	intel_ring_reserved_space_use(ringbuf);
-
 	request_start = intel_ring_get_tail(ringbuf);
+	reserved_tail = request->reserved_space;
+	request->reserved_space = 0;
+
 	/*
 	 * Emit any outstanding flushes - execbuf can fail to emit the flush
 	 * after having emitted the batchbuffer command. Hence we need to fix
@@ -2653,7 +2655,13 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	intel_mark_busy(dev_priv->dev);
 
 	/* Sanity check that the reserved size was large enough. */
-	intel_ring_reserved_space_end(ringbuf);
+	ret = intel_ring_get_tail(ringbuf) - request_start;
+	if (ret < 0)
+		ret += ringbuf->size;
+	WARN_ONCE(ret > reserved_tail,
+		  "Not enough space reserved (%d bytes) "
+		  "for adding the request (%d bytes)\n",
+		  reserved_tail, ret);
 }
 
 static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
@@ -2774,17 +2782,14 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
 	 * to be redone if the request is not actually submitted straight
 	 * away, e.g. because a GPU scheduler has deferred it.
 	 */
-	if (i915.enable_execlists)
-		ret = intel_logical_ring_reserve_space(req);
-	else
-		ret = intel_ring_reserve_space(req);
+	req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
+	ret = intel_ring_begin(req, 0);
 	if (ret) {
 		/*
 		 * At this point, the request is fully allocated even if not
 		 * fully prepared. Thus it can be cleaned up using the proper
-		 * free code.
+		 * free code, along with any reserved space.
 		 */
-		intel_ring_reserved_space_cancel(req->ringbuf);
 		i915_gem_request_unreference(req);
 		return ret;
 	}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index ba87c94928e7..910044cf143e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -772,21 +772,6 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 	return 0;
 }
 
-int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
-{
-	/*
-	 * The first call merely notes the reserve request and is common for
-	 * all back ends. The subsequent localised _begin() call actually
-	 * ensures that the reservation is available. Without the begin, if
-	 * the request creator immediately submitted the request without
-	 * adding any commands to it then there might not actually be
-	 * sufficient room for the submission commands.
-	 */
-	intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
-
-	return intel_ring_begin(request, 0);
-}
-
 /**
  * execlists_submission() - submit a batchbuffer for execution, Execlists style
  * @dev: DRM device.
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 4d8be0be6c4b..68e50b221f63 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2342,44 +2342,6 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 	return 0;
 }
 
-int intel_ring_reserve_space(struct drm_i915_gem_request *request)
-{
-	/*
-	 * The first call merely notes the reserve request and is common for
-	 * all back ends. The subsequent localised _begin() call actually
-	 * ensures that the reservation is available. Without the begin, if
-	 * the request creator immediately submitted the request without
-	 * adding any commands to it then there might not actually be
-	 * sufficient room for the submission commands.
-	 */
-	intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
-
-	return intel_ring_begin(request, 0);
-}
-
-void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size)
-{
-	GEM_BUG_ON(ringbuf->reserved_size);
-	ringbuf->reserved_size = size;
-}
-
-void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf)
-{
-	GEM_BUG_ON(!ringbuf->reserved_size);
-	ringbuf->reserved_size   = 0;
-}
-
-void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf)
-{
-	GEM_BUG_ON(!ringbuf->reserved_size);
-	ringbuf->reserved_size   = 0;
-}
-
-void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
-{
-	GEM_BUG_ON(ringbuf->reserved_size);
-}
-
 static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 {
 	struct intel_ringbuffer *ringbuf = req->ringbuf;
@@ -2399,7 +2361,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 	 *
 	 * See also i915_gem_request_alloc() and i915_add_request().
 	 */
-	GEM_BUG_ON(!ringbuf->reserved_size);
+	GEM_BUG_ON(!req->reserved_space);
 
 	list_for_each_entry(target, &engine->request_list, list) {
 		unsigned space;
@@ -2434,7 +2396,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 	int total_bytes, wait_bytes;
 	bool need_wrap = false;
 
-	total_bytes = bytes + ringbuf->reserved_size;
+	total_bytes = bytes + req->reserved_space;
 
 	if (unlikely(bytes > remain_usable)) {
 		/*
@@ -2450,7 +2412,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 		 * and only need to effectively wait for the reserved
 		 * size space from the start of ringbuffer.
 		 */
-		wait_bytes = remain_actual + ringbuf->reserved_size;
+		wait_bytes = remain_actual + req->reserved_space;
 	} else {
 		/* No wrapping required, just waiting. */
 		wait_bytes = total_bytes;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 201e7752d765..038914ccc6fd 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -107,7 +107,6 @@ struct intel_ringbuffer {
 	int space;
 	int size;
 	int effective_size;
-	int reserved_size;
 
 	/** We track the position of the requests in the ring buffer, and
 	 * when each is retired we increment last_retired_head as the GPU
@@ -491,20 +490,4 @@ static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
  */
 #define MIN_SPACE_FOR_ADD_REQUEST	160
 
-/*
- * Reserve space in the ring to guarantee that the i915_add_request() call
- * will always have sufficient room to do its stuff. The request creation
- * code calls this automatically.
- */
-void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
-/* Cancel the reservation, e.g. because the request is being discarded. */
-void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
-/* Use the reserved space - for use by i915_add_request() only. */
-void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
-/* Finish with the reserved space - for use by i915_add_request() only. */
-void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);
-
-/* Legacy ringbuffer specific portion of reservation code: */
-int intel_ring_reserve_space(struct drm_i915_gem_request *request);
-
 #endif /* _INTEL_RINGBUFFER_H_ */
-- 
2.8.1


