[Intel-gfx] [PATCH 02/18] drm/i915: Rename request->ringbuf to request->ring
Dave Gordon
david.s.gordon at intel.com
Wed Jul 20 14:12:49 UTC 2016
On 20/07/16 14:11, Chris Wilson wrote:
> Now that we have disambiguated ring and engine, we can use the clearer
> and more consistent name for the intel_ringbuffer pointer in the
> request.
>
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
You missed a few instances of 'ring' meaning engine:
i915_gem_execbuffer.c: struct intel_engine_cs **ring)
intel_mocs.h:int intel_mocs_init_engine(struct intel_engine_cs *ring);
intel_ringbuffer.c:gen5_seqno_barrier(struct intel_engine_cs *ring)
intel_ringbuffer.h: void (*irq_enable)(struct intel_engine_cs *ring);
intel_ringbuffer.h: void (*irq_disable)(struct intel_engine_cs *ring);
intel_ringbuffer.h: int (*init_hw)(struct intel_engine_cs *ring);
intel_ringbuffer.h:     void (*irq_seqno_barrier)(struct intel_engine_cs *ring);
intel_ringbuffer.h: void (*cleanup)(struct intel_engine_cs *ring);
I think we have to purge every last trace of this usage before using
'ring' as shorthand for 'ringbuf[fer]'.
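As an illustration only (a hypothetical sketch, not part of this patch or
any actual follow-up), the intel_ringbuffer.h hooks listed above would
want their parameter renamed along these lines, so that 'ring' is free to
mean the ringbuffer:

	/* sketch: engine vtable hooks with the parameter no longer
	 * borrowing the name 'ring' for an engine */
	void (*irq_enable)(struct intel_engine_cs *engine);
	void (*irq_disable)(struct intel_engine_cs *engine);
	int (*init_hw)(struct intel_engine_cs *engine);
	void (*irq_seqno_barrier)(struct intel_engine_cs *engine);
	void (*cleanup)(struct intel_engine_cs *engine);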
.Dave.
> ---
> drivers/gpu/drm/i915/i915_gem_context.c | 4 +-
> drivers/gpu/drm/i915/i915_gem_execbuffer.c | 4 +-
> drivers/gpu/drm/i915/i915_gem_gtt.c | 6 +-
> drivers/gpu/drm/i915/i915_gem_request.c | 16 +++---
> drivers/gpu/drm/i915/i915_gem_request.h | 2 +-
> drivers/gpu/drm/i915/i915_gpu_error.c | 20 +++----
> drivers/gpu/drm/i915/intel_display.c | 10 ++--
> drivers/gpu/drm/i915/intel_lrc.c | 57 +++++++++---------
> drivers/gpu/drm/i915/intel_mocs.c | 36 ++++++------
> drivers/gpu/drm/i915/intel_overlay.c | 8 +--
> drivers/gpu/drm/i915/intel_ringbuffer.c | 92 +++++++++++++++---------------
> 11 files changed, 126 insertions(+), 129 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
> index b6d10bd763a0..16138c4ff7db 100644
> --- a/drivers/gpu/drm/i915/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/i915_gem_context.c
> @@ -552,7 +552,7 @@ static inline int
> mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
> {
> struct drm_i915_private *dev_priv = req->i915;
> - struct intel_ringbuffer *ring = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> u32 flags = hw_flags | MI_MM_SPACE_GTT;
> const int num_rings =
> /* Use an extended w/a on ivb+ if signalling from other rings */
> @@ -654,7 +654,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
> static int remap_l3(struct drm_i915_gem_request *req, int slice)
> {
> u32 *remap_info = req->i915->l3_parity.remap_info[slice];
> - struct intel_ringbuffer *ring = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> int i, ret;
>
> if (!remap_info)
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index e2c4d99a1e7f..501a1751d432 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -1173,7 +1173,7 @@ i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
> static int
> i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
> {
> - struct intel_ringbuffer *ring = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> int ret, i;
>
> if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
> @@ -1303,7 +1303,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
>
> if (params->engine->id == RCS &&
> instp_mode != dev_priv->relative_constants_mode) {
> - struct intel_ringbuffer *ring = params->request->ringbuf;
> + struct intel_ringbuffer *ring = params->request->ring;
>
> ret = intel_ring_begin(params->request, 4);
> if (ret)
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index abc439be2049..a48329baf432 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -669,7 +669,7 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
> unsigned entry,
> dma_addr_t addr)
> {
> - struct intel_ringbuffer *ring = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> int ret;
>
> BUG_ON(entry >= 4);
> @@ -1660,7 +1660,7 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
> static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
> struct drm_i915_gem_request *req)
> {
> - struct intel_ringbuffer *ring = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> int ret;
>
> /* NB: TLBs must be flushed and invalidated before a switch */
> @@ -1688,7 +1688,7 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
> static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
> struct drm_i915_gem_request *req)
> {
> - struct intel_ringbuffer *ring = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> int ret;
>
> /* NB: TLBs must be flushed and invalidated before a switch */
> diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
> index 60a3a343b3a8..0f415606a383 100644
> --- a/drivers/gpu/drm/i915/i915_gem_request.c
> +++ b/drivers/gpu/drm/i915/i915_gem_request.c
> @@ -170,7 +170,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
> * Note this requires that we are always called in request
> * completion order.
> */
> - request->ringbuf->last_retired_head = request->postfix;
> + request->ring->last_retired_head = request->postfix;
>
> i915_gem_request_remove_from_client(request);
>
> @@ -425,7 +425,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
> bool flush_caches)
> {
> struct intel_engine_cs *engine;
> - struct intel_ringbuffer *ringbuf;
> + struct intel_ringbuffer *ring;
> u32 request_start;
> u32 reserved_tail;
> int ret;
> @@ -434,14 +434,14 @@ void __i915_add_request(struct drm_i915_gem_request *request,
> return;
>
> engine = request->engine;
> - ringbuf = request->ringbuf;
> + ring = request->ring;
>
> /*
> * To ensure that this call will not fail, space for its emissions
> * should already have been reserved in the ring buffer. Let the ring
> * know that it is time to use that space up.
> */
> - request_start = intel_ring_get_tail(ringbuf);
> + request_start = intel_ring_get_tail(ring);
> reserved_tail = request->reserved_space;
> request->reserved_space = 0;
>
> @@ -488,21 +488,21 @@ void __i915_add_request(struct drm_i915_gem_request *request,
> * GPU processing the request, we never over-estimate the
> * position of the head.
> */
> - request->postfix = intel_ring_get_tail(ringbuf);
> + request->postfix = intel_ring_get_tail(ring);
>
> if (i915.enable_execlists) {
> ret = engine->emit_request(request);
> } else {
> ret = engine->add_request(request);
>
> - request->tail = intel_ring_get_tail(ringbuf);
> + request->tail = intel_ring_get_tail(ring);
> }
> /* Not allowed to fail! */
> WARN(ret, "emit|add_request failed: %d!\n", ret);
> /* Sanity check that the reserved size was large enough. */
> - ret = intel_ring_get_tail(ringbuf) - request_start;
> + ret = intel_ring_get_tail(ring) - request_start;
> if (ret < 0)
> - ret += ringbuf->size;
> + ret += ring->size;
> WARN_ONCE(ret > reserved_tail,
> "Not enough space reserved (%d bytes) "
> "for adding the request (%d bytes)\n",
> diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
> index e06e81f459df..68868d825d9d 100644
> --- a/drivers/gpu/drm/i915/i915_gem_request.h
> +++ b/drivers/gpu/drm/i915/i915_gem_request.h
> @@ -61,7 +61,7 @@ struct drm_i915_gem_request {
> */
> struct i915_gem_context *ctx;
> struct intel_engine_cs *engine;
> - struct intel_ringbuffer *ringbuf;
> + struct intel_ringbuffer *ring;
> struct intel_signal_node signaling;
>
> /** GEM sequence number associated with the previous request,
> diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
> index 4d39c7284605..09997c6adcd2 100644
> --- a/drivers/gpu/drm/i915/i915_gpu_error.c
> +++ b/drivers/gpu/drm/i915/i915_gpu_error.c
> @@ -1091,7 +1091,7 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
> request = i915_gem_find_active_request(engine);
> if (request) {
> struct i915_address_space *vm;
> - struct intel_ringbuffer *rb;
> + struct intel_ringbuffer *ring;
>
> vm = request->ctx->ppgtt ?
> &request->ctx->ppgtt->base : &ggtt->base;
> @@ -1108,7 +1108,7 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
> if (HAS_BROKEN_CS_TLB(dev_priv))
> error->ring[i].wa_batchbuffer =
> i915_error_ggtt_object_create(dev_priv,
> - engine->scratch.obj);
> + engine->scratch.obj);
>
> if (request->pid) {
> struct task_struct *task;
> @@ -1125,23 +1125,21 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
> error->simulated |=
> request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE;
>
> - rb = request->ringbuf;
> - error->ring[i].cpu_ring_head = rb->head;
> - error->ring[i].cpu_ring_tail = rb->tail;
> + ring = request->ring;
> + error->ring[i].cpu_ring_head = ring->head;
> + error->ring[i].cpu_ring_tail = ring->tail;
> error->ring[i].ringbuffer =
> i915_error_ggtt_object_create(dev_priv,
> - rb->obj);
> + ring->obj);
> }
>
> error->ring[i].hws_page =
> i915_error_ggtt_object_create(dev_priv,
> engine->status_page.obj);
>
> - if (engine->wa_ctx.obj) {
> - error->ring[i].wa_ctx =
> - i915_error_ggtt_object_create(dev_priv,
> - engine->wa_ctx.obj);
> - }
> + error->ring[i].wa_ctx =
> + i915_error_ggtt_object_create(dev_priv,
> + engine->wa_ctx.obj);
>
> i915_gem_record_active_context(engine, error, &error->ring[i]);
>
> diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
> index d18ed32e6a31..d1932840a268 100644
> --- a/drivers/gpu/drm/i915/intel_display.c
> +++ b/drivers/gpu/drm/i915/intel_display.c
> @@ -11123,7 +11123,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
> struct drm_i915_gem_request *req,
> uint32_t flags)
> {
> - struct intel_ringbuffer *ring = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
> u32 flip_mask;
> int ret;
> @@ -11157,7 +11157,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
> struct drm_i915_gem_request *req,
> uint32_t flags)
> {
> - struct intel_ringbuffer *ring = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
> u32 flip_mask;
> int ret;
> @@ -11188,7 +11188,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
> struct drm_i915_gem_request *req,
> uint32_t flags)
> {
> - struct intel_ringbuffer *ring = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> struct drm_i915_private *dev_priv = to_i915(dev);
> struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
> uint32_t pf, pipesrc;
> @@ -11226,7 +11226,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
> struct drm_i915_gem_request *req,
> uint32_t flags)
> {
> - struct intel_ringbuffer *ring = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> struct drm_i915_private *dev_priv = to_i915(dev);
> struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
> uint32_t pf, pipesrc;
> @@ -11261,7 +11261,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
> struct drm_i915_gem_request *req,
> uint32_t flags)
> {
> - struct intel_ringbuffer *ring = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
> uint32_t plane_bit = 0;
> int len, ret;
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index 8bf2ea5a2de3..c3542eb338ca 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -714,7 +714,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
> return ret;
> }
>
> - request->ringbuf = ce->ringbuf;
> + request->ring = ce->ringbuf;
>
> if (i915.enable_guc_submission) {
> /*
> @@ -770,11 +770,11 @@ err_unpin:
> static int
> intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
> {
> - struct intel_ringbuffer *ringbuf = request->ringbuf;
> + struct intel_ringbuffer *ring = request->ring;
> struct intel_engine_cs *engine = request->engine;
>
> - intel_ring_advance(ringbuf);
> - request->tail = ringbuf->tail;
> + intel_ring_advance(ring);
> + request->tail = ring->tail;
>
> /*
> * Here we add two extra NOOPs as padding to avoid
> @@ -782,9 +782,9 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
> *
> * Caller must reserve WA_TAIL_DWORDS for us!
> */
> - intel_ring_emit(ringbuf, MI_NOOP);
> - intel_ring_emit(ringbuf, MI_NOOP);
> - intel_ring_advance(ringbuf);
> + intel_ring_emit(ring, MI_NOOP);
> + intel_ring_emit(ring, MI_NOOP);
> + intel_ring_advance(ring);
>
> /* We keep the previous context alive until we retire the following
> * request. This ensures that any the context object is still pinned
> @@ -821,7 +821,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
> struct drm_device *dev = params->dev;
> struct intel_engine_cs *engine = params->engine;
> struct drm_i915_private *dev_priv = to_i915(dev);
> - struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
> + struct intel_ringbuffer *ring = params->request->ring;
> u64 exec_start;
> int instp_mode;
> u32 instp_mask;
> @@ -833,7 +833,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
> case I915_EXEC_CONSTANTS_REL_GENERAL:
> case I915_EXEC_CONSTANTS_ABSOLUTE:
> case I915_EXEC_CONSTANTS_REL_SURFACE:
> - if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
> + if (instp_mode != 0 && engine->id != RCS) {
> DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
> return -EINVAL;
> }
> @@ -862,17 +862,17 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
> if (ret)
> return ret;
>
> - if (engine == &dev_priv->engine[RCS] &&
> + if (engine->id == RCS &&
> instp_mode != dev_priv->relative_constants_mode) {
> ret = intel_ring_begin(params->request, 4);
> if (ret)
> return ret;
>
> - intel_ring_emit(ringbuf, MI_NOOP);
> - intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
> - intel_ring_emit_reg(ringbuf, INSTPM);
> - intel_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
> - intel_ring_advance(ringbuf);
> + intel_ring_emit(ring, MI_NOOP);
> + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
> + intel_ring_emit_reg(ring, INSTPM);
> + intel_ring_emit(ring, instp_mask << 16 | instp_mode);
> + intel_ring_advance(ring);
>
> dev_priv->relative_constants_mode = instp_mode;
> }
> @@ -1030,7 +1030,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
> {
> int ret, i;
> struct intel_engine_cs *engine = req->engine;
> - struct intel_ringbuffer *ringbuf = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> struct i915_workarounds *w = &req->i915->workarounds;
>
> if (w->count == 0)
> @@ -1045,14 +1045,14 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
> if (ret)
> return ret;
>
> - intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
> + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
> for (i = 0; i < w->count; i++) {
> - intel_ring_emit_reg(ringbuf, w->reg[i].addr);
> - intel_ring_emit(ringbuf, w->reg[i].value);
> + intel_ring_emit_reg(ring, w->reg[i].addr);
> + intel_ring_emit(ring, w->reg[i].value);
> }
> - intel_ring_emit(ringbuf, MI_NOOP);
> + intel_ring_emit(ring, MI_NOOP);
>
> - intel_ring_advance(ringbuf);
> + intel_ring_advance(ring);
>
> engine->gpu_caches_dirty = true;
> ret = logical_ring_flush_all_caches(req);
> @@ -1546,7 +1546,7 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
> static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
> {
> struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
> - struct intel_ringbuffer *ring = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
> int i, ret;
>
> @@ -1573,7 +1573,7 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
> static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
> u64 offset, unsigned dispatch_flags)
> {
> - struct intel_ringbuffer *ring = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
> int ret;
>
> @@ -1630,8 +1630,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
> u32 invalidate_domains,
> u32 unused)
> {
> - struct intel_ringbuffer *ring = request->ringbuf;
> - struct intel_engine_cs *engine = ring->engine;
> + struct intel_ringbuffer *ring = request->ring;
> uint32_t cmd;
> int ret;
>
> @@ -1650,7 +1649,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
>
> if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
> cmd |= MI_INVALIDATE_TLB;
> - if (engine->id == VCS)
> + if (request->engine->id == VCS)
> cmd |= MI_INVALIDATE_BSD;
> }
>
> @@ -1669,7 +1668,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
> u32 invalidate_domains,
> u32 flush_domains)
> {
> - struct intel_ringbuffer *ring = request->ringbuf;
> + struct intel_ringbuffer *ring = request->ring;
> struct intel_engine_cs *engine = request->engine;
> u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
> bool vf_flush_wa = false, dc_flush_wa = false;
> @@ -1783,7 +1782,7 @@ static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
>
> static int gen8_emit_request(struct drm_i915_gem_request *request)
> {
> - struct intel_ringbuffer *ring = request->ringbuf;
> + struct intel_ringbuffer *ring = request->ring;
> int ret;
>
> ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
> @@ -1806,7 +1805,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
>
> static int gen8_emit_request_render(struct drm_i915_gem_request *request)
> {
> - struct intel_ringbuffer *ring = request->ringbuf;
> + struct intel_ringbuffer *ring = request->ring;
> int ret;
>
> ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
> diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
> index 2743424f2746..fe63c7e79fb1 100644
> --- a/drivers/gpu/drm/i915/intel_mocs.c
> +++ b/drivers/gpu/drm/i915/intel_mocs.c
> @@ -276,7 +276,7 @@ int intel_mocs_init_engine(struct intel_engine_cs *engine)
> static int emit_mocs_control_table(struct drm_i915_gem_request *req,
> const struct drm_i915_mocs_table *table)
> {
> - struct intel_ringbuffer *ringbuf = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> enum intel_engine_id engine = req->engine->id;
> unsigned int index;
> int ret;
> @@ -288,11 +288,11 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
> if (ret)
> return ret;
>
> - intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
> + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
>
> for (index = 0; index < table->size; index++) {
> - intel_ring_emit_reg(ringbuf, mocs_register(engine, index));
> - intel_ring_emit(ringbuf, table->table[index].control_value);
> + intel_ring_emit_reg(ring, mocs_register(engine, index));
> + intel_ring_emit(ring, table->table[index].control_value);
> }
>
> /*
> @@ -304,12 +304,12 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
> * that value to all the used entries.
> */
> for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
> - intel_ring_emit_reg(ringbuf, mocs_register(engine, index));
> - intel_ring_emit(ringbuf, table->table[0].control_value);
> + intel_ring_emit_reg(ring, mocs_register(engine, index));
> + intel_ring_emit(ring, table->table[0].control_value);
> }
>
> - intel_ring_emit(ringbuf, MI_NOOP);
> - intel_ring_advance(ringbuf);
> + intel_ring_emit(ring, MI_NOOP);
> + intel_ring_advance(ring);
>
> return 0;
> }
> @@ -336,7 +336,7 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
> static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
> const struct drm_i915_mocs_table *table)
> {
> - struct intel_ringbuffer *ringbuf = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> unsigned int i;
> int ret;
>
> @@ -347,18 +347,18 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
> if (ret)
> return ret;
>
> - intel_ring_emit(ringbuf,
> + intel_ring_emit(ring,
> MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
>
> for (i = 0; i < table->size/2; i++) {
> - intel_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
> - intel_ring_emit(ringbuf, l3cc_combine(table, 2*i, 2*i+1));
> + intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
> + intel_ring_emit(ring, l3cc_combine(table, 2*i, 2*i+1));
> }
>
> if (table->size & 0x01) {
> /* Odd table size - 1 left over */
> - intel_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
> - intel_ring_emit(ringbuf, l3cc_combine(table, 2*i, 0));
> + intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
> + intel_ring_emit(ring, l3cc_combine(table, 2*i, 0));
> i++;
> }
>
> @@ -368,12 +368,12 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
> * they are reserved by the hardware.
> */
> for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
> - intel_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
> - intel_ring_emit(ringbuf, l3cc_combine(table, 0, 0));
> + intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
> + intel_ring_emit(ring, l3cc_combine(table, 0, 0));
> }
>
> - intel_ring_emit(ringbuf, MI_NOOP);
> - intel_ring_advance(ringbuf);
> + intel_ring_emit(ring, MI_NOOP);
> + intel_ring_advance(ring);
>
> return 0;
> }
> diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
> index 92722e614955..84b8f74bd13c 100644
> --- a/drivers/gpu/drm/i915/intel_overlay.c
> +++ b/drivers/gpu/drm/i915/intel_overlay.c
> @@ -253,7 +253,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
>
> overlay->active = true;
>
> - ring = req->ringbuf;
> + ring = req->ring;
> intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
> intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
> intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
> @@ -295,7 +295,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
> return ret;
> }
>
> - ring = req->ringbuf;
> + ring = req->ring;
> intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
> intel_ring_emit(ring, flip_addr);
> intel_ring_advance(ring);
> @@ -362,7 +362,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
> return ret;
> }
>
> - ring = req->ringbuf;
> + ring = req->ring;
> /* wait for overlay to go idle */
> intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
> intel_ring_emit(ring, flip_addr);
> @@ -438,7 +438,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
> return ret;
> }
>
> - ring = req->ringbuf;
> + ring = req->ring;
> intel_ring_emit(ring,
> MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
> intel_ring_emit(ring, MI_NOOP);
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index da8134d43b26..ac51e4885046 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -70,7 +70,7 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req,
> u32 invalidate_domains,
> u32 flush_domains)
> {
> - struct intel_ringbuffer *ring = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> u32 cmd;
> int ret;
>
> @@ -97,7 +97,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
> u32 invalidate_domains,
> u32 flush_domains)
> {
> - struct intel_ringbuffer *ring = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> u32 cmd;
> int ret;
>
> @@ -187,7 +187,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
> static int
> intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
> {
> - struct intel_ringbuffer *ring = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> u32 scratch_addr =
> req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
> int ret;
> @@ -224,7 +224,7 @@ static int
> gen6_render_ring_flush(struct drm_i915_gem_request *req,
> u32 invalidate_domains, u32 flush_domains)
> {
> - struct intel_ringbuffer *ring = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> u32 scratch_addr =
> req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
> u32 flags = 0;
> @@ -277,7 +277,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
> static int
> gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
> {
> - struct intel_ringbuffer *ring = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> int ret;
>
> ret = intel_ring_begin(req, 4);
> @@ -299,7 +299,7 @@ static int
> gen7_render_ring_flush(struct drm_i915_gem_request *req,
> u32 invalidate_domains, u32 flush_domains)
> {
> - struct intel_ringbuffer *ring = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> u32 scratch_addr =
> req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
> u32 flags = 0;
> @@ -364,7 +364,7 @@ static int
> gen8_emit_pipe_control(struct drm_i915_gem_request *req,
> u32 flags, u32 scratch_addr)
> {
> - struct intel_ringbuffer *ring = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> int ret;
>
> ret = intel_ring_begin(req, 6);
> @@ -680,7 +680,7 @@ err:
>
> static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
> {
> - struct intel_ringbuffer *ring = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> struct i915_workarounds *w = &req->i915->workarounds;
> int ret, i;
>
> @@ -1324,7 +1324,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
> unsigned int num_dwords)
> {
> #define MBOX_UPDATE_DWORDS 8
> - struct intel_ringbuffer *signaller = signaller_req->ringbuf;
> + struct intel_ringbuffer *signaller = signaller_req->ring;
> struct drm_i915_private *dev_priv = signaller_req->i915;
> struct intel_engine_cs *waiter;
> enum intel_engine_id id;
> @@ -1366,7 +1366,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
> unsigned int num_dwords)
> {
> #define MBOX_UPDATE_DWORDS 6
> - struct intel_ringbuffer *signaller = signaller_req->ringbuf;
> + struct intel_ringbuffer *signaller = signaller_req->ring;
> struct drm_i915_private *dev_priv = signaller_req->i915;
> struct intel_engine_cs *waiter;
> enum intel_engine_id id;
> @@ -1405,7 +1405,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
> static int gen6_signal(struct drm_i915_gem_request *signaller_req,
> unsigned int num_dwords)
> {
> - struct intel_ringbuffer *signaller = signaller_req->ringbuf;
> + struct intel_ringbuffer *signaller = signaller_req->ring;
> struct drm_i915_private *dev_priv = signaller_req->i915;
> struct intel_engine_cs *useless;
> enum intel_engine_id id;
> @@ -1449,7 +1449,7 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
> static int
> gen6_add_request(struct drm_i915_gem_request *req)
> {
> - struct intel_ringbuffer *ring = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> int ret;
>
> if (req->engine->semaphore.signal)
> @@ -1473,7 +1473,7 @@ static int
> gen8_render_add_request(struct drm_i915_gem_request *req)
> {
> struct intel_engine_cs *engine = req->engine;
> - struct intel_ringbuffer *ring = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> int ret;
>
> if (engine->semaphore.signal)
> @@ -1518,7 +1518,7 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
> struct intel_engine_cs *signaller,
> u32 seqno)
> {
> - struct intel_ringbuffer *waiter = waiter_req->ringbuf;
> + struct intel_ringbuffer *waiter = waiter_req->ring;
> struct drm_i915_private *dev_priv = waiter_req->i915;
> u64 offset = GEN8_WAIT_OFFSET(waiter_req->engine, signaller->id);
> struct i915_hw_ppgtt *ppgtt;
> @@ -1552,7 +1552,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
> struct intel_engine_cs *signaller,
> u32 seqno)
> {
> - struct intel_ringbuffer *waiter = waiter_req->ringbuf;
> + struct intel_ringbuffer *waiter = waiter_req->ring;
> u32 dw1 = MI_SEMAPHORE_MBOX |
> MI_SEMAPHORE_COMPARE |
> MI_SEMAPHORE_REGISTER;
> @@ -1686,7 +1686,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
> u32 invalidate_domains,
> u32 flush_domains)
> {
> - struct intel_ringbuffer *ring = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> int ret;
>
> ret = intel_ring_begin(req, 2);
> @@ -1702,7 +1702,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
> static int
> i9xx_add_request(struct drm_i915_gem_request *req)
> {
> - struct intel_ringbuffer *ring = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> int ret;
>
> ret = intel_ring_begin(req, 4);
> @@ -1780,7 +1780,7 @@ i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
> u64 offset, u32 length,
> unsigned dispatch_flags)
> {
> - struct intel_ringbuffer *ring = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> int ret;
>
> ret = intel_ring_begin(req, 2);
> @@ -1807,7 +1807,7 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
> u64 offset, u32 len,
> unsigned dispatch_flags)
> {
> - struct intel_ringbuffer *ring = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> u32 cs_offset = req->engine->scratch.gtt_offset;
> int ret;
>
> @@ -1869,7 +1869,7 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
> u64 offset, u32 len,
> unsigned dispatch_flags)
> {
> - struct intel_ringbuffer *ring = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> int ret;
>
> ret = intel_ring_begin(req, 2);
> @@ -2297,7 +2297,7 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
> */
> request->reserved_space += LEGACY_REQUEST_SIZE;
>
> - request->ringbuf = request->engine->buffer;
> + request->ring = request->engine->buffer;
>
> ret = intel_ring_begin(request, 0);
> if (ret)
> @@ -2309,12 +2309,12 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
>
> static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
> {
> - struct intel_ringbuffer *ringbuf = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> struct intel_engine_cs *engine = req->engine;
> struct drm_i915_gem_request *target;
>
> - intel_ring_update_space(ringbuf);
> - if (ringbuf->space >= bytes)
> + intel_ring_update_space(ring);
> + if (ring->space >= bytes)
> return 0;
>
> /*
> @@ -2336,12 +2336,12 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
> * from multiple ringbuffers. Here, we must ignore any that
> * aren't from the ringbuffer we're considering.
> */
> - if (target->ringbuf != ringbuf)
> + if (target->ring != ring)
> continue;
>
> /* Would completion of this request free enough space? */
> - space = __intel_ring_space(target->postfix, ringbuf->tail,
> - ringbuf->size);
> + space = __intel_ring_space(target->postfix, ring->tail,
> + ring->size);
> if (space >= bytes)
> break;
> }
> @@ -2354,9 +2354,9 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
>
> int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
> {
> - struct intel_ringbuffer *ringbuf = req->ringbuf;
> - int remain_actual = ringbuf->size - ringbuf->tail;
> - int remain_usable = ringbuf->effective_size - ringbuf->tail;
> + struct intel_ringbuffer *ring = req->ring;
> + int remain_actual = ring->size - ring->tail;
> + int remain_usable = ring->effective_size - ring->tail;
> int bytes = num_dwords * sizeof(u32);
> int total_bytes, wait_bytes;
> bool need_wrap = false;
> @@ -2383,35 +2383,35 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
> wait_bytes = total_bytes;
> }
>
> - if (wait_bytes > ringbuf->space) {
> + if (wait_bytes > ring->space) {
> int ret = wait_for_space(req, wait_bytes);
> if (unlikely(ret))
> return ret;
>
> - intel_ring_update_space(ringbuf);
> - if (unlikely(ringbuf->space < wait_bytes))
> + intel_ring_update_space(ring);
> + if (unlikely(ring->space < wait_bytes))
> return -EAGAIN;
> }
>
> if (unlikely(need_wrap)) {
> - GEM_BUG_ON(remain_actual > ringbuf->space);
> - GEM_BUG_ON(ringbuf->tail + remain_actual > ringbuf->size);
> + GEM_BUG_ON(remain_actual > ring->space);
> + GEM_BUG_ON(ring->tail + remain_actual > ring->size);
>
> /* Fill the tail with MI_NOOP */
> - memset(ringbuf->vaddr + ringbuf->tail, 0, remain_actual);
> - ringbuf->tail = 0;
> - ringbuf->space -= remain_actual;
> + memset(ring->vaddr + ring->tail, 0, remain_actual);
> + ring->tail = 0;
> + ring->space -= remain_actual;
> }
>
> - ringbuf->space -= bytes;
> - GEM_BUG_ON(ringbuf->space < 0);
> + ring->space -= bytes;
> + GEM_BUG_ON(ring->space < 0);
> return 0;
> }
>
> /* Align the ring tail to a cacheline boundary */
> int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
> {
> - struct intel_ringbuffer *ring = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> int num_dwords =
> (ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
> int ret;
> @@ -2518,7 +2518,7 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
> static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
> u32 invalidate, u32 flush)
> {
> - struct intel_ringbuffer *ring = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> uint32_t cmd;
> int ret;
>
> @@ -2564,7 +2564,7 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
> u64 offset, u32 len,
> unsigned dispatch_flags)
> {
> - struct intel_ringbuffer *ring = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> bool ppgtt = USES_PPGTT(req->i915) &&
> !(dispatch_flags & I915_DISPATCH_SECURE);
> int ret;
> @@ -2590,7 +2590,7 @@ hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
> u64 offset, u32 len,
> unsigned dispatch_flags)
> {
> - struct intel_ringbuffer *ring = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> int ret;
>
> ret = intel_ring_begin(req, 2);
> @@ -2615,7 +2615,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
> u64 offset, u32 len,
> unsigned dispatch_flags)
> {
> - struct intel_ringbuffer *ring = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> int ret;
>
> ret = intel_ring_begin(req, 2);
> @@ -2638,7 +2638,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
> static int gen6_ring_flush(struct drm_i915_gem_request *req,
> u32 invalidate, u32 flush)
> {
> - struct intel_ringbuffer *ring = req->ringbuf;
> + struct intel_ringbuffer *ring = req->ring;
> uint32_t cmd;
> int ret;
>
>