[Intel-gfx] [PATCH 22/49] drm/i915/bdw: Plumbing for user LR context switching
oscar.mateo at intel.com
Thu Mar 27 18:59:51 CET 2014
From: Oscar Mateo <oscar.mateo at intel.com>
Add a context argument to ring->write_tail, which in turn means
plumbing it through ring->add_request, ring->flush,
ring->dispatch_execbuffer and so on. The idea is that, by the time
we would normally update the tail register, we know which context
we are working with and can therefore send it to the Execlist
submit port instead.
No functional changes.
Signed-off-by: Oscar Mateo <oscar.mateo at intel.com>
---
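[Editor's note, for illustration only and not part of the patch: a
minimal, self-contained C sketch of the plumbing pattern described
above. The names hw_context, engine, ring_advance, ring_write_tail
and lrc_write_tail are hypothetical stand-ins for i915_hw_context,
intel_engine, __intel_ring_advance, the legacy write_tail and the
future Execlists write_tail; the ELSP behaviour is a placeholder
until a later patch implements it.]

/* Hypothetical sketch: every vfunc on the emission path gains a
 * context parameter so the final tail update knows which context
 * to submit. Not driver code; compiles standalone.
 */
#include <stdio.h>

struct hw_context { int id; };

struct engine {
	void (*write_tail)(struct engine *ring, struct hw_context *ctx,
			   unsigned int value);
	unsigned int tail;
};

/* Legacy ring path: ctx is ignored, the tail register is written. */
static void ring_write_tail(struct engine *ring, struct hw_context *ctx,
			    unsigned int value)
{
	(void)ring;
	(void)ctx;
	printf("MMIO tail write: %u\n", value);
}

/* Execlists path: ctx selects what goes to the submit port. */
static void lrc_write_tail(struct engine *ring, struct hw_context *ctx,
			   unsigned int value)
{
	(void)ring;
	printf("ELSP submit: ctx %d, tail %u\n", ctx ? ctx->id : -1, value);
}

/* The point where we "would normally update the tail register". */
static void ring_advance(struct engine *ring, struct hw_context *ctx)
{
	ring->write_tail(ring, ctx, ring->tail);
}

int main(void)
{
	struct hw_context ctx = { .id = 7 };
	struct engine legacy = { .write_tail = ring_write_tail, .tail = 64 };
	struct engine lrc = { .write_tail = lrc_write_tail, .tail = 64 };

	ring_advance(&legacy, NULL);	/* pre-Gen8: ctx may be NULL */
	ring_advance(&lrc, &ctx);	/* Gen8 Execlists: ctx required */
	return 0;
}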
drivers/gpu/drm/i915/i915_dma.c | 2 +-
drivers/gpu/drm/i915/i915_drv.h | 3 +-
drivers/gpu/drm/i915/i915_gem.c | 5 +-
drivers/gpu/drm/i915/i915_gem_context.c | 2 +-
drivers/gpu/drm/i915/i915_gem_execbuffer.c | 23 +++---
drivers/gpu/drm/i915/i915_gem_gtt.c | 7 +-
drivers/gpu/drm/i915/intel_display.c | 10 +--
drivers/gpu/drm/i915/intel_ringbuffer.c | 113 ++++++++++++++++++-----------
drivers/gpu/drm/i915/intel_ringbuffer.h | 20 +++--
9 files changed, 113 insertions(+), 72 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index d9d28f4..76b47a6 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -56,7 +56,7 @@
intel_ring_emit(LP_RING(dev_priv), x)
#define ADVANCE_LP_RING() \
- __intel_ring_advance(LP_RING(dev_priv))
+ __intel_ring_advance(LP_RING(dev_priv), NULL)
/**
* Lock test for when it's just for synchronization of ring access.
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 3a36e28..c0f0c3d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2168,11 +2168,12 @@ void i915_gem_cleanup_ring(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_suspend(struct drm_device *dev);
int __i915_add_request(struct intel_engine *ring,
+ struct i915_hw_context *ctx,
struct drm_file *file,
struct drm_i915_gem_object *batch_obj,
u32 *seqno);
#define i915_add_request(ring, seqno) \
- __i915_add_request(ring, NULL, NULL, seqno)
+ __i915_add_request(ring, NULL, NULL, NULL, seqno)
int __must_check i915_wait_seqno(struct intel_engine *ring,
uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 7ed56f7..0c7ba1f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2160,6 +2160,7 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
}
int __i915_add_request(struct intel_engine *ring,
+ struct i915_hw_context *ctx,
struct drm_file *file,
struct drm_i915_gem_object *obj,
u32 *out_seqno)
@@ -2177,7 +2178,7 @@ int __i915_add_request(struct intel_engine *ring,
* is that the flush _must_ happen before the next request, no matter
* what.
*/
- ret = intel_ring_flush_all_caches(ring);
+ ret = intel_ring_flush_all_caches(ring, ctx);
if (ret)
return ret;
@@ -2192,7 +2193,7 @@ int __i915_add_request(struct intel_engine *ring,
*/
request_ring_position = intel_ring_get_tail(ring);
- ret = ring->add_request(ring);
+ ret = ring->add_request(ring, ctx);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 4a6f1b0..cb43272 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -589,7 +589,7 @@ mi_set_context(struct intel_engine *ring,
* itlb_before_ctx_switch.
*/
if (IS_GEN6(ring->dev) && ring->itlb_before_ctx_switch) {
- ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
+ ret = ring->flush(ring, NULL, I915_GEM_GPU_DOMAINS, 0);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 73f8712..d2ef284 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -825,6 +825,7 @@ err:
static int
i915_gem_execbuffer_move_to_gpu(struct intel_engine *ring,
+ struct i915_hw_context *ctx,
struct list_head *vmas)
{
struct i915_vma *vma;
@@ -853,7 +854,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine *ring,
/* Unconditionally invalidate gpu caches and ensure that we do flush
* any residual writes from the previous batch.
*/
- return intel_ring_invalidate_all_caches(ring);
+ return intel_ring_invalidate_all_caches(ring, ctx);
}
static bool
@@ -965,18 +966,20 @@ static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
struct drm_file *file,
struct intel_engine *ring,
+ struct i915_hw_context *ctx,
struct drm_i915_gem_object *obj)
{
/* Unconditionally force add_request to emit a full flush. */
ring->gpu_caches_dirty = true;
/* Add a breadcrumb for the completion of the batch buffer */
- (void)__i915_add_request(ring, file, obj, NULL);
+ (void)__i915_add_request(ring, ctx, file, obj, NULL);
}
static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
- struct intel_engine *ring)
+ struct intel_engine *ring,
+ struct i915_hw_context *ctx)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret, i;
@@ -984,7 +987,7 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
return 0;
- ret = intel_ring_begin(ring, 4 * 3);
+ ret = intel_ringbuffer_begin(ring, ctx, 4 * 3);
if (ret)
return ret;
@@ -1217,7 +1220,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
else
exec_start += i915_gem_obj_offset(batch_obj, vm);
- ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
+ ret = i915_gem_execbuffer_move_to_gpu(ring, ctx, &eb->vmas);
if (ret)
goto err;
@@ -1227,7 +1230,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ring == &dev_priv->ring[RCS] &&
mode != dev_priv->relative_constants_mode) {
- ret = intel_ring_begin(ring, 4);
+ ret = intel_ringbuffer_begin(ring, ctx, 4);
if (ret)
goto err;
@@ -1241,7 +1244,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
- ret = i915_reset_gen7_sol_offsets(dev, ring);
+ ret = i915_reset_gen7_sol_offsets(dev, ring, ctx);
if (ret)
goto err;
}
@@ -1255,14 +1258,14 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
goto err;
- ret = ring->dispatch_execbuffer(ring,
+ ret = ring->dispatch_execbuffer(ring, ctx,
exec_start, exec_len,
flags);
if (ret)
goto err;
}
} else {
- ret = ring->dispatch_execbuffer(ring,
+ ret = ring->dispatch_execbuffer(ring, ctx,
exec_start, exec_len,
flags);
if (ret)
@@ -1272,7 +1275,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
- i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
+ i915_gem_execbuffer_retire_commands(dev, file, ring, ctx, batch_obj);
err:
/* the request owns the ref now */
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 5333319..e5911ec 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -711,7 +711,7 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
}
/* NB: TLBs must be flushed and invalidated before a switch */
- ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ ret = ring->flush(ring, NULL, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
@@ -755,7 +755,7 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
}
/* NB: TLBs must be flushed and invalidated before a switch */
- ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ ret = ring->flush(ring, NULL, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
@@ -773,7 +773,8 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
/* XXX: RCS is the only one to auto invalidate the TLBs? */
if (ring->id != RCS) {
- ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ ret = ring->flush(ring, NULL, I915_GEM_GPU_DOMAINS,
+ I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 30ab378..462c7ae 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -8637,7 +8637,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, 0); /* aux display base address, unused */
intel_mark_page_flip_active(intel_crtc);
- __intel_ring_advance(ring);
+ __intel_ring_advance(ring, ring->default_context);
return 0;
err_unpin:
@@ -8679,7 +8679,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_NOOP);
intel_mark_page_flip_active(intel_crtc);
- __intel_ring_advance(ring);
+ __intel_ring_advance(ring, ring->default_context);
return 0;
err_unpin:
@@ -8728,7 +8728,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, pf | pipesrc);
intel_mark_page_flip_active(intel_crtc);
- __intel_ring_advance(ring);
+ __intel_ring_advance(ring, ring->default_context);
return 0;
err_unpin:
@@ -8773,7 +8773,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, pf | pipesrc);
intel_mark_page_flip_active(intel_crtc);
- __intel_ring_advance(ring);
+ __intel_ring_advance(ring, ring->default_context);
return 0;
err_unpin:
@@ -8867,7 +8867,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, (MI_NOOP));
intel_mark_page_flip_active(intel_crtc);
- __intel_ring_advance(ring);
+ __intel_ring_advance(ring, ring->default_context);
return 0;
err_unpin:
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index d334f5a..4fbea79 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -43,7 +43,8 @@ static inline int ring_space(struct intel_engine *ring)
return space;
}
-void __intel_ring_advance(struct intel_engine *ring)
+void __intel_ring_advance(struct intel_engine *ring,
+ struct i915_hw_context *ctx)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_ringbuffer *ringbuf = __get_ringbuf(ring);
@@ -51,11 +52,13 @@ void __intel_ring_advance(struct intel_engine *ring)
ringbuf->tail &= ringbuf->size - 1;
if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
return;
- ring->write_tail(ring, ringbuf->tail);
+
+ ring->write_tail(ring, ctx, ringbuf->tail);
}
static int
gen2_render_ring_flush(struct intel_engine *ring,
+ struct i915_hw_context *ctx,
u32 invalidate_domains,
u32 flush_domains)
{
@@ -82,6 +85,7 @@ gen2_render_ring_flush(struct intel_engine *ring,
static int
gen4_render_ring_flush(struct intel_engine *ring,
+ struct i915_hw_context *ctx,
u32 invalidate_domains,
u32 flush_domains)
{
@@ -212,7 +216,8 @@ intel_emit_post_sync_nonzero_flush(struct intel_engine *ring)
static int
gen6_render_ring_flush(struct intel_engine *ring,
- u32 invalidate_domains, u32 flush_domains)
+ struct i915_hw_context *ctx,
+ u32 invalidate_domains, u32 flush_domains)
{
u32 flags = 0;
u32 scratch_addr = ring->scratch.gtt_offset + 128;
@@ -306,6 +311,7 @@ static int gen7_ring_fbc_flush(struct intel_engine *ring, u32 value)
static int
gen7_render_ring_flush(struct intel_engine *ring,
+ struct i915_hw_context *ctx,
u32 invalidate_domains, u32 flush_domains)
{
u32 flags = 0;
@@ -367,6 +373,7 @@ gen7_render_ring_flush(struct intel_engine *ring,
static int
gen8_render_ring_flush(struct intel_engine *ring,
+ struct i915_hw_context *ctx,
u32 invalidate_domains, u32 flush_domains)
{
u32 flags = 0;
@@ -390,7 +397,7 @@ gen8_render_ring_flush(struct intel_engine *ring,
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
}
- ret = intel_ring_begin(ring, 6);
+ ret = intel_ringbuffer_begin(ring, ctx, 6);
if (ret)
return ret;
@@ -407,13 +414,14 @@ gen8_render_ring_flush(struct intel_engine *ring,
}
static void ring_write_tail(struct intel_engine *ring,
- u32 value)
+ struct i915_hw_context *ctx, u32 value)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
I915_WRITE_TAIL(ring, value);
}
static void gen8_write_tail_lrc(struct intel_engine *ring,
+ struct i915_hw_context *ctx,
u32 value)
{
DRM_ERROR("Execlists still not ready!\n");
@@ -453,7 +461,7 @@ static int init_ring_common(struct intel_engine *ring)
/* Stop the ring if it's running. */
I915_WRITE_CTL(ring, 0);
I915_WRITE_HEAD(ring, 0);
- ring->write_tail(ring, 0);
+ ring->write_tail(ring, NULL, 0);
if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000))
DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
@@ -690,7 +698,8 @@ update_mboxes(struct intel_engine *ring,
* This acts like a signal in the canonical semaphore.
*/
static int
-gen6_add_request(struct intel_engine *ring)
+gen6_add_request(struct intel_engine *ring,
+ struct i915_hw_context *ctx)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -717,17 +726,18 @@ gen6_add_request(struct intel_engine *ring)
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
intel_ring_emit(ring, ring->outstanding_lazy_seqno);
intel_ring_emit(ring, MI_USER_INTERRUPT);
- __intel_ring_advance(ring);
+ __intel_ring_advance(ring, ctx);
return 0;
}
static int
-gen8_add_request(struct intel_engine *ring)
+gen8_add_request(struct intel_engine *ring,
+ struct i915_hw_context *ctx)
{
int ret;
- ret = intel_ring_begin(ring, 4);
+ ret = intel_ringbuffer_begin(ring, ctx, 4);
if (ret)
return ret;
@@ -735,7 +745,7 @@ gen8_add_request(struct intel_engine *ring)
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
intel_ring_emit(ring, ring->outstanding_lazy_seqno);
intel_ring_emit(ring, MI_USER_INTERRUPT);
- __intel_ring_advance(ring);
+ __intel_ring_advance(ring, ctx);
return 0;
}
@@ -806,7 +816,8 @@ do { \
} while (0)
static int
-pc_render_add_request(struct intel_engine *ring)
+pc_render_add_request(struct intel_engine *ring,
+ struct i915_hw_context *ctx)
{
u32 scratch_addr = ring->scratch.gtt_offset + 128;
int ret;
@@ -848,7 +859,7 @@ pc_render_add_request(struct intel_engine *ring)
intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, ring->outstanding_lazy_seqno);
intel_ring_emit(ring, 0);
- __intel_ring_advance(ring);
+ __intel_ring_advance(ring, ctx);
return 0;
}
@@ -1052,6 +1063,7 @@ void intel_ring_setup_status_page(struct intel_engine *ring)
static int
bsd_ring_flush(struct intel_engine *ring,
+ struct i915_hw_context *ctx,
u32 invalidate_domains,
u32 flush_domains)
{
@@ -1068,7 +1080,8 @@ bsd_ring_flush(struct intel_engine *ring,
}
static int
-i9xx_add_request(struct intel_engine *ring)
+i9xx_add_request(struct intel_engine *ring,
+ struct i915_hw_context *ctx)
{
int ret;
@@ -1080,7 +1093,7 @@ i9xx_add_request(struct intel_engine *ring)
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
intel_ring_emit(ring, ring->outstanding_lazy_seqno);
intel_ring_emit(ring, MI_USER_INTERRUPT);
- __intel_ring_advance(ring);
+ __intel_ring_advance(ring, ctx);
return 0;
}
@@ -1214,6 +1227,7 @@ gen8_ring_put_irq(struct intel_engine *ring)
static int
i965_dispatch_execbuffer(struct intel_engine *ring,
+ struct i915_hw_context *ctx,
u32 offset, u32 length,
unsigned flags)
{
@@ -1237,6 +1251,7 @@ i965_dispatch_execbuffer(struct intel_engine *ring,
#define I830_BATCH_LIMIT (256*1024)
static int
i830_dispatch_execbuffer(struct intel_engine *ring,
+ struct i915_hw_context *ctx,
u32 offset, u32 len,
unsigned flags)
{
@@ -1288,6 +1303,7 @@ i830_dispatch_execbuffer(struct intel_engine *ring,
static int
i915_dispatch_execbuffer(struct intel_engine *ring,
+ struct i915_hw_context *ctx,
u32 offset, u32 len,
unsigned flags)
{
@@ -1540,16 +1556,16 @@ void intel_cleanup_ring(struct intel_engine *ring)
static int intel_ring_wait_request(struct intel_engine *ring, int n)
{
struct drm_i915_gem_request *request;
- struct intel_ringbuffer *ring_buf = __get_ringbuf(ring);
+ struct intel_ringbuffer *ringbuf = __get_ringbuf(ring);
u32 seqno = 0, tail;
int ret;
- if (ring_buf->last_retired_head != -1) {
- ring_buf->head = ring_buf->last_retired_head;
- ring_buf->last_retired_head = -1;
+ if (ringbuf->last_retired_head != -1) {
+ ringbuf->head = ringbuf->last_retired_head;
+ ringbuf->last_retired_head = -1;
- ring_buf->space = ring_space(ring);
- if (ring_buf->space >= n)
+ ringbuf->space = ring_space(ring);
+ if (ringbuf->space >= n)
return 0;
}
@@ -1559,9 +1575,9 @@ static int intel_ring_wait_request(struct intel_engine *ring, int n)
if (request->tail == -1)
continue;
- space = request->tail - (ring_buf->tail + I915_RING_FREE_SPACE);
+ space = request->tail - (ringbuf->tail + I915_RING_FREE_SPACE);
if (space < 0)
- space += ring_buf->size;
+ space += ringbuf->size;
if (space >= n) {
seqno = request->seqno;
tail = request->tail;
@@ -1583,15 +1599,16 @@ static int intel_ring_wait_request(struct intel_engine *ring, int n)
if (ret)
return ret;
- ring_buf->head = tail;
- ring_buf->space = ring_space(ring);
- if (WARN_ON(ring_buf->space < n))
+ ringbuf->head = tail;
+ ringbuf->space = ring_space(ring);
+ if (WARN_ON(ringbuf->space < n))
return -ENOSPC;
return 0;
}
-static int ring_wait_for_space(struct intel_engine *ring, int n)
+static int ring_wait_for_space(struct intel_engine *ring,
+ struct i915_hw_context *ctx, int n)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1604,7 +1621,7 @@ static int ring_wait_for_space(struct intel_engine *ring, int n)
return ret;
/* force the tail write in case we have been skipping them */
- __intel_ring_advance(ring);
+ __intel_ring_advance(ring, ctx);
trace_i915_ring_wait_begin(ring);
/* With GEM the hangcheck timer should kick us out of the loop,
@@ -1640,14 +1657,15 @@ static int ring_wait_for_space(struct intel_engine *ring, int n)
return -EBUSY;
}
-static int intel_wrap_ring_buffer(struct intel_engine *ring)
+static int intel_wrap_ring_buffer(struct intel_engine *ring,
+ struct i915_hw_context *ctx)
{
uint32_t __iomem *virt;
struct intel_ringbuffer *ringbuf = __get_ringbuf(ring);
int rem = ringbuf->size - ringbuf->tail;
if (ringbuf->space < rem) {
- int ret = ring_wait_for_space(ring, rem);
+ int ret = ring_wait_for_space(ring, ctx, rem);
if (ret)
return ret;
}
@@ -1706,19 +1724,19 @@ intel_ring_alloc_seqno(struct intel_engine *ring)
}
static int __intel_ring_prepare(struct intel_engine *ring,
- int bytes)
+ struct i915_hw_context *ctx, int bytes)
{
struct intel_ringbuffer *ringbuf = __get_ringbuf(ring);
int ret;
if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
- ret = intel_wrap_ring_buffer(ring);
+ ret = intel_wrap_ring_buffer(ring, ctx);
if (unlikely(ret))
return ret;
}
if (unlikely(ringbuf->space < bytes)) {
- ret = ring_wait_for_space(ring, bytes);
+ ret = ring_wait_for_space(ring, ctx, bytes);
if (unlikely(ret))
return ret;
}
@@ -1726,8 +1744,9 @@ static int __intel_ring_prepare(struct intel_engine *ring,
return 0;
}
-int intel_ring_begin(struct intel_engine *ring,
- int num_dwords)
+int intel_ringbuffer_begin(struct intel_engine *ring,
+ struct i915_hw_context *ctx,
+ int num_dwords)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
int ret;
@@ -1737,7 +1756,7 @@ int intel_ring_begin(struct intel_engine *ring,
if (ret)
return ret;
- ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
+ ret = __intel_ring_prepare(ring, ctx, num_dwords * sizeof(uint32_t));
if (ret)
return ret;
@@ -1789,7 +1808,7 @@ void intel_ring_init_seqno(struct intel_engine *ring, u32 seqno)
}
static void gen6_bsd_ring_write_tail(struct intel_engine *ring,
- u32 value)
+ struct i915_hw_context *ctx, u32 value)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
@@ -1822,12 +1841,13 @@ static void gen6_bsd_ring_write_tail(struct intel_engine *ring,
}
static int gen8_ring_flush(struct intel_engine *ring,
+ struct i915_hw_context *ctx,
u32 invalidate, u32 flush)
{
uint32_t cmd;
int ret;
- ret = intel_ring_begin(ring, 4);
+ ret = intel_ringbuffer_begin(ring, ctx, 4);
if (ret)
return ret;
@@ -1846,6 +1866,7 @@ static int gen8_ring_flush(struct intel_engine *ring,
}
static int gen6_bsd_ring_flush(struct intel_engine *ring,
+ struct i915_hw_context *ctx,
u32 invalidate, u32 flush)
{
uint32_t cmd;
@@ -1877,6 +1898,7 @@ static int gen6_bsd_ring_flush(struct intel_engine *ring,
static int
gen8_ring_dispatch_execbuffer(struct intel_engine *ring,
+ struct i915_hw_context *ctx,
u32 offset, u32 len,
unsigned flags)
{
@@ -1885,7 +1907,7 @@ gen8_ring_dispatch_execbuffer(struct intel_engine *ring,
!(flags & I915_DISPATCH_SECURE);
int ret;
- ret = intel_ring_begin(ring, 4);
+ ret = intel_ringbuffer_begin(ring, ctx, 4);
if (ret)
return ret;
@@ -1901,6 +1923,7 @@ gen8_ring_dispatch_execbuffer(struct intel_engine *ring,
static int
hsw_ring_dispatch_execbuffer(struct intel_engine *ring,
+ struct i915_hw_context *ctx,
u32 offset, u32 len,
unsigned flags)
{
@@ -1922,6 +1945,7 @@ hsw_ring_dispatch_execbuffer(struct intel_engine *ring,
static int
gen6_ring_dispatch_execbuffer(struct intel_engine *ring,
+ struct i915_hw_context *ctx,
u32 offset, u32 len,
unsigned flags)
{
@@ -1944,6 +1968,7 @@ gen6_ring_dispatch_execbuffer(struct intel_engine *ring,
/* Blitter support (SandyBridge+) */
static int gen6_ring_flush(struct intel_engine *ring,
+ struct i915_hw_context *ctx,
u32 invalidate, u32 flush)
{
struct drm_device *dev = ring->dev;
@@ -2293,14 +2318,15 @@ int intel_init_vebox_ring(struct drm_device *dev)
}
int
-intel_ring_flush_all_caches(struct intel_engine *ring)
+intel_ring_flush_all_caches(struct intel_engine *ring,
+ struct i915_hw_context *ctx)
{
int ret;
if (!ring->gpu_caches_dirty)
return 0;
- ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
+ ret = ring->flush(ring, ctx, 0, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
@@ -2311,7 +2337,8 @@ intel_ring_flush_all_caches(struct intel_engine *ring)
}
int
-intel_ring_invalidate_all_caches(struct intel_engine *ring)
+intel_ring_invalidate_all_caches(struct intel_engine *ring,
+ struct i915_hw_context *ctx)
{
uint32_t flush_domains;
int ret;
@@ -2320,7 +2347,7 @@ intel_ring_invalidate_all_caches(struct intel_engine *ring)
if (ring->gpu_caches_dirty)
flush_domains = I915_GEM_GPU_DOMAINS;
- ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+ ret = ring->flush(ring, ctx, I915_GEM_GPU_DOMAINS, flush_domains);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index a914348..95e29e0 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -101,11 +101,13 @@ struct intel_engine {
int (*init)(struct intel_engine *ring);
void (*write_tail)(struct intel_engine *ring,
- u32 value);
+ struct i915_hw_context *ctx, u32 value);
int __must_check (*flush)(struct intel_engine *ring,
+ struct i915_hw_context *ctx,
u32 invalidate_domains,
u32 flush_domains);
- int (*add_request)(struct intel_engine *ring);
+ int (*add_request)(struct intel_engine *ring,
+ struct i915_hw_context *ctx);
/* Some chipsets are not quite as coherent as advertised and need
* an expensive kick to force a true read of the up-to-date seqno.
* However, the up-to-date seqno is not always required and the last
@@ -117,6 +119,7 @@ struct intel_engine {
void (*set_seqno)(struct intel_engine *ring,
u32 seqno);
int (*dispatch_execbuffer)(struct intel_engine *ring,
+ struct i915_hw_context *ctx,
u32 offset, u32 length,
unsigned flags);
#define I915_DISPATCH_SECURE 0x1
@@ -281,7 +284,9 @@ intel_write_status_page(struct intel_engine *ring,
void intel_cleanup_ring(struct intel_engine *ring);
-int __must_check intel_ring_begin(struct intel_engine *ring, int n);
+int __must_check intel_ringbuffer_begin(struct intel_engine *ring,
+ struct i915_hw_context *ctx, int n);
+#define intel_ring_begin(ring, n) intel_ringbuffer_begin(ring, NULL, n)
int __must_check intel_ring_cacheline_align(struct intel_engine *ring);
static inline void intel_ring_emit(struct intel_engine *ring,
u32 data)
@@ -297,12 +302,15 @@ static inline void intel_ring_advance(struct intel_engine *ring)
ringbuf->tail &= ringbuf->size - 1;
}
-void __intel_ring_advance(struct intel_engine *ring);
+void __intel_ring_advance(struct intel_engine *ring,
+ struct i915_hw_context *ctx);
int __must_check intel_ring_idle(struct intel_engine *ring);
void intel_ring_init_seqno(struct intel_engine *ring, u32 seqno);
-int intel_ring_flush_all_caches(struct intel_engine *ring);
-int intel_ring_invalidate_all_caches(struct intel_engine *ring);
+int intel_ring_flush_all_caches(struct intel_engine *ring,
+ struct i915_hw_context *ctx);
+int intel_ring_invalidate_all_caches(struct intel_engine *ring,
+ struct i915_hw_context *ctx);
void intel_init_rings_early(struct drm_device *dev);
int intel_init_render_ring(struct drm_device *dev);
--
1.9.0