[Intel-gfx] [PATCH 11/50] drm/i915: Write a new set of context-aware ringbuffer management functions
oscar.mateo at intel.com
Fri May 9 14:08:41 CEST 2014
From: Oscar Mateo <oscar.mateo at intel.com>
We need functions that are aware of the fact that the ringbuffer might live
somewhere other than inside the engine. With this commit and some of the previous
ones in place, the new context-aware ringbuffer functions are (a usage sketch follows the list):
intel_ringbuffer_get
intel_ringbuffer_begin
intel_ringbuffer_cacheline_align
intel_ringbuffer_emit
intel_ringbuffer_advance
intel_ringbuffer_advance_and_submit
intel_ringbuffer_get_tail
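As a rough usage sketch of the context-aware path (the helper name, the command
emitted and the dword count below are illustrative only, not part of this patch):

static int emit_noops(struct intel_engine *ring,
		      struct i915_hw_context *ctx, int num_dwords)
{
	struct intel_ringbuffer *ringbuf;
	int i;

	/* Reserve space in whichever ringbuffer backs this context */
	ringbuf = intel_ringbuffer_begin(ring, ctx, num_dwords);
	if (IS_ERR(ringbuf))
		return PTR_ERR(ringbuf);

	for (i = 0; i < num_dwords; i++)
		intel_ringbuffer_emit(ringbuf, MI_NOOP);

	intel_ringbuffer_advance(ringbuf);

	return 0;
}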
Some of the old functions remain after the refactoring as deprecated wrappers that
simply call the new functions above on the engine's default ringbuffer (see the sketch
after this list):
intel_ring_begin
intel_ring_emit
intel_ring_advance
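For comparison, a minimal sketch of the legacy path, which after this patch simply
targets the engine's default context and ringbuffer (again, the helper is illustrative only):

static int emit_noop_legacy(struct intel_engine *ring)
{
	int ret;

	ret = intel_ring_begin(ring, 1);
	if (ret)
		return ret;

	/* Equivalent to intel_ringbuffer_emit(&ring->default_ringbuf, MI_NOOP) */
	intel_ring_emit(ring, MI_NOOP);
	/* Equivalent to intel_ringbuffer_advance(&ring->default_ringbuf) */
	intel_ring_advance(ring);

	return 0;
}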
Signed-off-by: Oscar Mateo <oscar.mateo at intel.com>
---
drivers/gpu/drm/i915/i915_gem.c | 4 +-
drivers/gpu/drm/i915/i915_gem_execbuffer.c | 19 ++++++---
drivers/gpu/drm/i915/intel_display.c | 2 +-
drivers/gpu/drm/i915/intel_ringbuffer.c | 37 ++++++++--------
drivers/gpu/drm/i915/intel_ringbuffer.h | 68 ++++++++++++++++++++++--------
5 files changed, 86 insertions(+), 44 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 774151c..26bd68f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2181,7 +2181,7 @@ int __i915_add_request(struct intel_engine *ring,
u32 request_ring_position, request_start;
int ret;
- request_start = intel_ring_get_tail(ring);
+ request_start = intel_ringbuffer_get_tail(ring, ctx);
/*
* Emit any outstanding flushes - execbuf can fail to emit the flush
* after having emitted the batchbuffer command. Hence we need to fix
@@ -2202,7 +2202,7 @@ int __i915_add_request(struct intel_engine *ring,
* GPU processing the request, we never over-estimate the
* position of the head.
*/
- request_ring_position = intel_ring_get_tail(ring);
+ request_ring_position = intel_ringbuffer_get_tail(ring, ctx);
ret = ring->add_request(ring, ctx);
if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index c93941d..e78ed94 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -988,16 +988,17 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
struct i915_hw_context *ctx)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret, i;
+ struct intel_ringbuffer *ringbuf;
+ int i;
if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
DRM_DEBUG("sol reset is gen7/rcs only\n");
return -EINVAL;
}
- ret = intel_ringbuffer_begin(ring, ctx, 4 * 3);
- if (ret)
- return ret;
+ ringbuf = intel_ringbuffer_begin(ring, ctx, 4 * 3);
+ if (IS_ERR_OR_NULL(ringbuf))
+ return PTR_ERR(ringbuf);
for (i = 0; i < 4; i++) {
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
@@ -1290,9 +1291,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ring == &dev_priv->ring[RCS] &&
mode != dev_priv->relative_constants_mode) {
- ret = intel_ringbuffer_begin(ring, ctx, 4);
- if (ret)
- goto err;
+ struct intel_ringbuffer *ringbuf;
+
+ ringbuf = intel_ringbuffer_begin(ring, ctx, 4);
+ if (IS_ERR_OR_NULL(ringbuf)) {
+ ret = PTR_ERR(ringbuf);
+ goto err;
+ }
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e0550c6..24e2e3f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -8812,7 +8812,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
* then do the cacheline alignment, and finally emit the
* MI_DISPLAY_FLIP.
*/
- ret = intel_ring_cacheline_align(ring);
+ ret = intel_ringbuffer_cacheline_align(ring, ring->default_context);
if (ret)
goto err_unpin;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 0f4d3b6..6292e75 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -388,9 +388,9 @@ gen8_render_ring_flush(struct intel_engine *ring,
struct i915_hw_context *ctx,
u32 invalidate_domains, u32 flush_domains)
{
+ struct intel_ringbuffer *ringbuf;
u32 flags = 0;
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
- int ret;
flags |= PIPE_CONTROL_CS_STALL;
@@ -409,9 +409,9 @@ gen8_render_ring_flush(struct intel_engine *ring,
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
}
- ret = intel_ringbuffer_begin(ring, ctx, 6);
- if (ret)
- return ret;
+ ringbuf = intel_ringbuffer_begin(ring, ctx, 6);
+ if (IS_ERR_OR_NULL(ringbuf))
+ return PTR_ERR(ringbuf);
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
intel_ring_emit(ring, flags);
@@ -1700,34 +1700,37 @@ static int __intel_ring_prepare(struct intel_engine *ring,
return 0;
}
-int intel_ringbuffer_begin(struct intel_engine *ring,
- struct i915_hw_context *ctx,
- int num_dwords)
+struct intel_ringbuffer *
+intel_ringbuffer_begin(struct intel_engine *ring,
+ struct i915_hw_context *ctx,
+ int num_dwords)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
- struct intel_ringbuffer *ringbuf = __get_ringbuf(ring);
+ struct intel_ringbuffer *ringbuf = intel_ringbuffer_get(ring, ctx);
int ret;
ret = i915_gem_check_wedge(&dev_priv->gpu_error,
dev_priv->mm.interruptible);
if (ret)
- return ret;
+ return ERR_PTR(ret);
ret = __intel_ring_prepare(ring, ctx, num_dwords * sizeof(uint32_t));
if (ret)
- return ret;
+ return ERR_PTR(ret);
/* Preallocate the olr before touching the ring */
ret = intel_ring_alloc_seqno(ring);
if (ret)
- return ret;
+ return ERR_PTR(ret);
ringbuf->space -= num_dwords * sizeof(uint32_t);
- return 0;
+
+ return ringbuf;
}
/* Align the ring tail to a cacheline boundary */
-int intel_ring_cacheline_align(struct intel_engine *ring)
+int intel_ringbuffer_cacheline_align(struct intel_engine *ring,
+ struct i915_hw_context *ctx)
{
struct intel_ringbuffer *ringbuf = __get_ringbuf(ring);
int num_dwords = (ringbuf->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
@@ -1844,11 +1847,11 @@ gen8_ring_dispatch_execbuffer(struct intel_engine *ring,
struct drm_i915_private *dev_priv = ring->dev->dev_private;
bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
!(flags & I915_DISPATCH_SECURE);
- int ret;
+ struct intel_ringbuffer *ringbuf;
- ret = intel_ringbuffer_begin(ring, ctx, 4);
- if (ret)
- return ret;
+ ringbuf = intel_ringbuffer_begin(ring, ctx, 4);
+ if (IS_ERR_OR_NULL(ringbuf))
+ return PTR_ERR(ringbuf);
/* FIXME(BDW): Address space and security selectors. */
intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index b2dcad4..59280b2 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -294,27 +294,66 @@ intel_write_status_page(struct intel_engine *ring,
void intel_stop_ring(struct intel_engine *ring);
void intel_cleanup_ring(struct intel_engine *ring);
-int __must_check intel_ringbuffer_begin(struct intel_engine *ring,
- struct i915_hw_context *ctx, int n);
-#define intel_ring_begin(ring, n) \
- intel_ringbuffer_begin(ring, ring->default_context, n)
-int __must_check intel_ring_cacheline_align(struct intel_engine *ring);
-static inline void intel_ring_emit(struct intel_engine *ring,
- u32 data)
+struct intel_ringbuffer *
+intel_ringbuffer_get(struct intel_engine *ring,
+ struct i915_hw_context *ctx);
+
+struct intel_ringbuffer *
+intel_ringbuffer_begin(struct intel_engine *ring,
+ struct i915_hw_context *ctx, int n);
+
+static inline int __must_check
+intel_ring_begin(struct intel_engine *ring, int num_dwords)
{
- struct intel_ringbuffer *ringbuf = __get_ringbuf(ring);
+ struct intel_ringbuffer *ringbuf;
+
+ ringbuf = intel_ringbuffer_begin(ring, ring->default_context, num_dwords);
+ if (IS_ERR(ringbuf))
+ return PTR_ERR(ringbuf);
+
+ return 0;
+}
+
+int __must_check
+intel_ringbuffer_cacheline_align(struct intel_engine *ring,
+ struct i915_hw_context *ctx);
+static inline void
+intel_ringbuffer_emit(struct intel_ringbuffer *ringbuf, u32 data)
+{
iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
ringbuf->tail += 4;
}
-static inline void intel_ring_advance(struct intel_engine *ring)
+
+static inline void
+intel_ring_emit(struct intel_engine *ring, u32 data)
{
- struct intel_ringbuffer *ringbuf = __get_ringbuf(ring);
+ intel_ringbuffer_emit(&ring->default_ringbuf, data);
+}
+static inline void
+intel_ringbuffer_advance(struct intel_ringbuffer *ringbuf)
+{
ringbuf->tail &= ringbuf->size - 1;
}
-void intel_ringbuffer_advance_and_submit(struct intel_engine *ring,
- struct i915_hw_context *ctx);
+
+static inline void
+intel_ring_advance(struct intel_engine *ring)
+{
+ intel_ringbuffer_advance(&ring->default_ringbuf);
+}
+
+void
+intel_ringbuffer_advance_and_submit(struct intel_engine *ring,
+ struct i915_hw_context *ctx);
+
+static inline u32
+intel_ringbuffer_get_tail(struct intel_engine *ring,
+ struct i915_hw_context *ctx)
+{
+ struct intel_ringbuffer *ringbuf = intel_ringbuffer_get(ring, ctx);
+ return ringbuf->tail;
+}
int __must_check intel_ring_idle(struct intel_engine *ring);
void intel_ring_init_seqno(struct intel_engine *ring, u32 seqno);
@@ -336,11 +375,6 @@ void intel_ring_setup_status_page(struct intel_engine *ring);
void intel_destroy_ring_buffer(struct intel_engine *ring);
int intel_allocate_ring_buffer(struct intel_engine *ring);
-static inline u32 intel_ring_get_tail(struct intel_engine *ring)
-{
- return __get_ringbuf(ring)->tail;
-}
-
static inline u32 intel_ring_get_seqno(struct intel_engine *ring)
{
BUG_ON(ring->outstanding_lazy_seqno == 0);
--
1.9.0