[Intel-gfx] [PATCH 24/49] drm/i915/bdw: Write a new set of context-aware ringbuffer management functions
oscar.mateo at intel.com
Thu Mar 27 18:59:53 CET 2014
From: Oscar Mateo <oscar.mateo at intel.com>
Since the ringbuffer can live in the ring (pre-GEN8) or in the context (GEN8+),
we need functions that are aware of this. After this commit and some of the
previous ones, the new set of ringbuffer functions is:
intel_ringbuffer_get
intel_ringbuffer_begin
intel_ringbuffer_cacheline_align
intel_ringbuffer_emit
intel_ringbuffer_advance
intel_ringbuffer_advance_and_submit
intel_ringbuffer_get_tail
Some of the old functions remain after the refactoring as deprecated wrappers that
simply call the above set of functions on the engine's default ringbuffer:
intel_ring_begin
intel_ring_emit
intel_ring_advance
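The key to the new interface is intel_ringbuffer_get(), which picks the right
ringbuffer for a given engine/context pair. Its definition is not shown in the
hunks of this mail, but conceptually it boils down to something like the sketch
below (the ctx->ringbuf field name is illustrative; ring->default_ringbuf is the
engine's own ringbuffer, as declared in the intel_ringbuffer.h hunk):

struct intel_ringbuffer *
intel_ringbuffer_get(struct intel_engine *ring, struct i915_hw_context *ctx)
{
	/* GEN8+: the ringbuffer lives inside the context (ctx->ringbuf is
	 * an assumed field name for the context-embedded ringbuffer) */
	if (ctx && ctx->ringbuf)
		return ctx->ringbuf;

	/* pre-GEN8, or no context given: use the engine's default ringbuffer */
	return &ring->default_ringbuf;
}

The deprecated intel_ring_* helpers simply forward to the context-aware
functions using the engine's default context/ringbuffer.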
Signed-off-by: Oscar Mateo <oscar.mateo at intel.com>
---
drivers/gpu/drm/i915/i915_gem.c | 4 +-
drivers/gpu/drm/i915/i915_gem_execbuffer.c | 19 +++++----
drivers/gpu/drm/i915/intel_display.c | 2 +-
drivers/gpu/drm/i915/intel_ringbuffer.c | 54 +++++++++++++-----------
drivers/gpu/drm/i915/intel_ringbuffer.h | 67 +++++++++++++++++++++++-------
5 files changed, 95 insertions(+), 51 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0c7ba1f..a052a80 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2170,7 +2170,7 @@ int __i915_add_request(struct intel_engine *ring,
u32 request_ring_position, request_start;
int ret;
- request_start = intel_ring_get_tail(ring);
+ request_start = intel_ringbuffer_get_tail(ring, ctx);
/*
* Emit any outstanding flushes - execbuf can fail to emit the flush
* after having emitted the batchbuffer command. Hence we need to fix
@@ -2191,7 +2191,7 @@ int __i915_add_request(struct intel_engine *ring,
* GPU processing the request, we never over-estimate the
* position of the head.
*/
- request_ring_position = intel_ring_get_tail(ring);
+ request_ring_position = intel_ringbuffer_get_tail(ring, ctx);
ret = ring->add_request(ring, ctx);
if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d2ef284..c0a1032 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -982,14 +982,15 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
struct i915_hw_context *ctx)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret, i;
+ struct intel_ringbuffer *ringbuf;
+ int i;
if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
return 0;
- ret = intel_ringbuffer_begin(ring, ctx, 4 * 3);
- if (ret)
- return ret;
+ ringbuf = intel_ringbuffer_begin(ring, ctx, 4 * 3);
+ if (IS_ERR_OR_NULL(ringbuf))
+ return PTR_ERR(ringbuf);
for (i = 0; i < 4; i++) {
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
@@ -1230,9 +1231,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ring == &dev_priv->ring[RCS] &&
mode != dev_priv->relative_constants_mode) {
- ret = intel_ringbuffer_begin(ring, ctx, 4);
- if (ret)
- goto err;
+ struct intel_ringbuffer *ringbuf;
+
+ ringbuf = intel_ringbuffer_begin(ring, ctx, 4);
+ if (IS_ERR_OR_NULL(ringbuf)) {
+ ret = (PTR_ERR(ringbuf));
+ goto err;
+ }
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 22be556..70c844f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -8832,7 +8832,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
* then do the cacheline alignment, and finally emit the
* MI_DISPLAY_FLIP.
*/
- ret = intel_ring_cacheline_align(ring);
+ ret = intel_ringbuffer_cacheline_align(ring, ring->default_context);
if (ret)
goto err_unpin;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index ac4e618..54aba64 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -376,9 +376,9 @@ gen8_render_ring_flush(struct intel_engine *ring,
struct i915_hw_context *ctx,
u32 invalidate_domains, u32 flush_domains)
{
+ struct intel_ringbuffer *ringbuf;
u32 flags = 0;
u32 scratch_addr = ring->scratch.gtt_offset + 128;
- int ret;
flags |= PIPE_CONTROL_CS_STALL;
@@ -397,9 +397,9 @@ gen8_render_ring_flush(struct intel_engine *ring,
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
}
- ret = intel_ringbuffer_begin(ring, ctx, 6);
- if (ret)
- return ret;
+ ringbuf = intel_ringbuffer_begin(ring, ctx, 6);
+ if (IS_ERR_OR_NULL(ringbuf))
+ return (PTR_ERR(ringbuf));
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
intel_ring_emit(ring, flags);
@@ -735,11 +735,11 @@ static int
gen8_add_request(struct intel_engine *ring,
struct i915_hw_context *ctx)
{
- int ret;
+ struct intel_ringbuffer *ringbuf;
- ret = intel_ringbuffer_begin(ring, ctx, 4);
- if (ret)
- return ret;
+ ringbuf = intel_ringbuffer_begin(ring, ctx, 4);
+ if (IS_ERR_OR_NULL(ringbuf))
+ return (PTR_ERR(ringbuf));
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
@@ -1744,33 +1744,37 @@ static int __intel_ring_prepare(struct intel_engine *ring,
return 0;
}
-int intel_ringbuffer_begin(struct intel_engine *ring,
- struct i915_hw_context *ctx,
- int num_dwords)
+struct intel_ringbuffer *
+intel_ringbuffer_begin(struct intel_engine *ring,
+ struct i915_hw_context *ctx,
+ int num_dwords)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ struct intel_ringbuffer *ringbuf = intel_ringbuffer_get(ring, ctx);
int ret;
ret = i915_gem_check_wedge(&dev_priv->gpu_error,
dev_priv->mm.interruptible);
if (ret)
- return ret;
+ return ERR_PTR(ret);
ret = __intel_ring_prepare(ring, ctx, num_dwords * sizeof(uint32_t));
if (ret)
- return ret;
+ return ERR_PTR(ret);
/* Preallocate the olr before touching the ring */
ret = intel_ring_alloc_seqno(ring);
if (ret)
- return ret;
+ return ERR_PTR(ret);
- __get_ringbuf(ring)->space -= num_dwords * sizeof(uint32_t);
- return 0;
+ ringbuf->space -= num_dwords * sizeof(uint32_t);
+
+ return ringbuf;
}
/* Align the ring tail to a cacheline boundary */
-int intel_ring_cacheline_align(struct intel_engine *ring)
+int intel_ringbuffer_cacheline_align(struct intel_engine *ring,
+ struct i915_hw_context *ctx)
{
int num_dwords = (64 - (__get_ringbuf(ring)->tail & 63)) / sizeof(uint32_t);
int ret;
@@ -1845,11 +1849,11 @@ static int gen8_ring_flush(struct intel_engine *ring,
u32 invalidate, u32 flush)
{
uint32_t cmd;
- int ret;
+ struct intel_ringbuffer *ringbuf;
- ret = intel_ringbuffer_begin(ring, ctx, 4);
- if (ret)
- return ret;
+ ringbuf = intel_ringbuffer_begin(ring, ctx, 4);
+ if (IS_ERR_OR_NULL(ringbuf))
+ return (PTR_ERR(ringbuf));
cmd = MI_FLUSH_DW + 1;
@@ -1905,11 +1909,11 @@ gen8_ring_dispatch_execbuffer(struct intel_engine *ring,
struct drm_i915_private *dev_priv = ring->dev->dev_private;
bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
!(flags & I915_DISPATCH_SECURE);
- int ret;
+ struct intel_ringbuffer *ringbuf;
- ret = intel_ringbuffer_begin(ring, ctx, 4);
- if (ret)
- return ret;
+ ringbuf = intel_ringbuffer_begin(ring, ctx, 4);
+ if (IS_ERR_OR_NULL(ringbuf))
+ return (PTR_ERR(ringbuf));
/* FIXME(BDW): Address space and security selectors. */
intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index cd6c52a..101d4d4 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -284,26 +284,66 @@ intel_write_status_page(struct intel_engine *ring,
void intel_cleanup_ring(struct intel_engine *ring);
-int __must_check intel_ringbuffer_begin(struct intel_engine *ring,
- struct i915_hw_context *ctx, int n);
-#define intel_ring_begin(ring, n) intel_ringbuffer_begin(ring, NULL, n)
-int __must_check intel_ring_cacheline_align(struct intel_engine *ring);
-static inline void intel_ring_emit(struct intel_engine *ring,
- u32 data)
+struct intel_ringbuffer *
+intel_ringbuffer_get(struct intel_engine *ring,
+ struct i915_hw_context *ctx);
+
+struct intel_ringbuffer *
+intel_ringbuffer_begin(struct intel_engine *ring,
+ struct i915_hw_context *ctx, int n);
+
+static inline int __must_check
+intel_ring_begin(struct intel_engine *ring, u32 data)
{
- struct intel_ringbuffer *ringbuf = __get_ringbuf(ring);
+ struct intel_ringbuffer *ringbuf;
+
+ ringbuf = intel_ringbuffer_begin(ring, ring->default_context, data);
+ if (IS_ERR(ringbuf))
+ return (PTR_ERR(ringbuf));
+
+ return 0;
+}
+
+int __must_check
+intel_ringbuffer_cacheline_align(struct intel_engine *ring,
+ struct i915_hw_context *ctx);
+static inline void
+intel_ringbuffer_emit(struct intel_ringbuffer *ringbuf, u32 data)
+{
iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
ringbuf->tail += 4;
}
-static inline void intel_ring_advance(struct intel_engine *ring)
+
+static inline void
+intel_ring_emit(struct intel_engine *ring, u32 data)
{
- struct intel_ringbuffer *ringbuf = __get_ringbuf(ring);
+ intel_ringbuffer_emit(&ring->default_ringbuf, data);
+}
+static inline void
+intel_ringbuffer_advance(struct intel_ringbuffer *ringbuf)
+{
ringbuf->tail &= ringbuf->size - 1;
}
-void intel_ringbuffer_advance_and_submit(struct intel_engine *ring,
- struct i915_hw_context *ctx);
+
+static inline void
+intel_ring_advance(struct intel_engine *ring)
+{
+ intel_ringbuffer_advance(&ring->default_ringbuf);
+}
+
+void
+intel_ringbuffer_advance_and_submit(struct intel_engine *ring,
+ struct i915_hw_context *ctx);
+
+static inline u32
+intel_ringbuffer_get_tail(struct intel_engine *ring,
+ struct i915_hw_context *ctx)
+{
+ struct intel_ringbuffer *ringbuf = intel_ringbuffer_get(ring, ctx);
+ return ringbuf->tail;
+}
int __must_check intel_ring_idle(struct intel_engine *ring);
void intel_ring_init_seqno(struct intel_engine *ring, u32 seqno);
@@ -321,11 +361,6 @@ int intel_init_vebox_ring(struct drm_device *dev);
u32 intel_ring_get_active_head(struct intel_engine *ring);
void intel_ring_setup_status_page(struct intel_engine *ring);
-static inline u32 intel_ring_get_tail(struct intel_engine *ring)
-{
- return __get_ringbuf(ring)->tail;
-}
-
static inline u32 intel_ring_get_seqno(struct intel_engine *ring)
{
BUG_ON(ring->outstanding_lazy_seqno == 0);
--
1.9.0