[Intel-gfx] [PATCH 22/29] drm/i915: Remove the now redundant 'obj->ring'
Thu Oct 30 19:41:14 CET 2014
From: John Harrison <John.C.Harrison@Intel.com>
The ring member of the object structure was always updated alongside the
last_read_seqno member. With the conversion to last_read_req, obj->ring is now
simply a copy of obj->last_read_req->ring. This makes it redundant and
potentially misleading, especially as there was no comment to explain its
purpose.

This patch removes the redundant field. Many users were only testing it for
non-NULL to see whether the object is active on the GPU. Some of these have
been converted to check 'obj->active' instead. Others (where last_read_req is
about to be used anyway) now check obj->last_read_req. The rest simply pull the
ring out of the request structure and proceed as before.
For: VIZ-4377
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
---
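Note for reviewers: the conversions below lean on the request-to-ring accessor
added earlier in this series being NULL safe, so call sites such as
i915_gem_object_sync() and capture_bo() can hand it a possibly NULL
obj->last_read_req directly. A minimal sketch of the helper being assumed here
(the authoritative definition is in the earlier patch of the series; the
req->ring field name is taken from that conversion):

static inline struct intel_engine_cs *
i915_gem_request_get_ring(struct drm_i915_gem_request *req)
{
	/*
	 * Tolerate a NULL request so that an inactive object maps to a
	 * NULL ring, preserving the old "obj->ring == NULL means idle"
	 * meaning at the converted call sites.
	 */
	return req ? req->ring : NULL;
}
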
drivers/gpu/drm/i915/i915_debugfs.c | 9 +++++----
drivers/gpu/drm/i915/i915_drv.h | 2 --
drivers/gpu/drm/i915/i915_gem.c | 32 +++++++++++++++++++------------
drivers/gpu/drm/i915/i915_gem_context.c | 3 ++-
drivers/gpu/drm/i915/i915_gpu_error.c | 3 ++-
drivers/gpu/drm/i915/intel_display.c | 14 ++++++++------
6 files changed, 37 insertions(+), 26 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index f26c2b2..0f40e61 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -168,8 +168,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
*t = '\0';
seq_printf(m, " (%s mappable)", s);
}
- if (obj->ring != NULL)
- seq_printf(m, " (%s)", obj->ring->name);
+ if (obj->last_read_req != NULL)
+ seq_printf(m, " (%s)",
+ i915_gem_request_get_ring(obj->last_read_req)->name);
if (obj->frontbuffer_bits)
seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
}
@@ -336,7 +337,7 @@ static int per_file_stats(int id, void *ptr, void *data)
if (ppgtt->file_priv != stats->file_priv)
continue;
- if (obj->ring) /* XXX per-vma statistic */
+ if (obj->active) /* XXX per-vma statistic */
stats->active += obj->base.size;
else
stats->inactive += obj->base.size;
@@ -346,7 +347,7 @@ static int per_file_stats(int id, void *ptr, void *data)
} else {
if (i915_gem_obj_ggtt_bound(obj)) {
stats->global += obj->base.size;
- if (obj->ring)
+ if (obj->active)
stats->active += obj->base.size;
else
stats->inactive += obj->base.size;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1d30dcb..41c2db3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1868,8 +1868,6 @@ struct drm_i915_gem_object {
void *dma_buf_vmapping;
int vmapping_count;
- struct intel_engine_cs *ring;
-
/** Breadcrumb of last rendering to the buffer. */
struct drm_i915_gem_request *last_read_req;
struct drm_i915_gem_request *last_write_req;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2daafeb..ea6d679 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2183,14 +2183,18 @@ static void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring)
{
- struct drm_i915_gem_request *req = intel_ring_get_request(ring);
+ struct drm_i915_gem_request *req;
+ struct intel_engine_cs *old_ring;
BUG_ON(ring == NULL);
- if (obj->ring != ring && obj->last_write_req) {
+
+ req = intel_ring_get_request(ring);
+ old_ring = i915_gem_request_get_ring(obj->last_read_req);
+
+ if (old_ring != ring && obj->last_write_req) {
/* Keep the request relative to the current ring */
i915_gem_request_assign(&obj->last_write_req, req);
}
- obj->ring = ring;
/* Add a reference if we're newly entering the active list. */
if (!obj->active) {
@@ -2229,7 +2233,6 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
intel_fb_obj_flush(obj, true);
list_del_init(&obj->ring_list);
- obj->ring = NULL;
i915_gem_request_assign(&obj->last_read_req, NULL);
i915_gem_request_assign(&obj->last_write_req, NULL);
@@ -2246,9 +2249,7 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
static void
i915_gem_object_retire(struct drm_i915_gem_object *obj)
{
- struct intel_engine_cs *ring = obj->ring;
-
- if (ring == NULL)
+ if (obj->last_read_req == NULL)
return;
if (i915_gem_request_completed(obj->last_read_req, true))
@@ -2786,14 +2787,17 @@ i915_gem_idle_work_handler(struct work_struct *work)
static int
i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
{
+ struct intel_engine_cs *ring;
int ret;
if (obj->active) {
+ ring = i915_gem_request_get_ring(obj->last_read_req);
+
ret = i915_gem_check_olr(obj->last_read_req);
if (ret)
return ret;
- i915_gem_retire_requests_ring(obj->ring);
+ i915_gem_retire_requests_ring(ring);
}
return 0;
@@ -2896,10 +2900,12 @@ int
i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_engine_cs *to)
{
- struct intel_engine_cs *from = obj->ring;
+ struct intel_engine_cs *from;
u32 seqno;
int ret, idx;
+ from = i915_gem_request_get_ring(obj->last_read_req);
+
if (from == NULL || to == from)
return 0;
@@ -3872,7 +3878,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
bool was_pin_display;
int ret;
- if (pipelined != obj->ring) {
+ if (pipelined != i915_gem_request_get_ring(obj->last_read_req)) {
ret = i915_gem_object_sync(obj, pipelined);
if (ret)
return ret;
@@ -4292,9 +4298,11 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_object_flush_active(obj);
args->busy = obj->active;
- if (obj->ring) {
+ if (obj->last_read_req) {
+ struct intel_engine_cs *ring;
BUILD_BUG_ON(I915_NUM_RINGS > 16);
- args->busy |= intel_ring_flag(obj->ring) << 16;
+ ring = i915_gem_request_get_ring(obj->last_read_req);
+ args->busy |= intel_ring_flag(ring) << 16;
}
drm_gem_object_unreference(&obj->base);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 7d32571..0e35aff 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -613,7 +613,8 @@ static int do_switch(struct intel_engine_cs *ring,
* swapped, but there is no way to do that yet.
*/
from->legacy_hw_ctx.rcs_state->dirty = 1;
- BUG_ON(from->legacy_hw_ctx.rcs_state->ring != ring);
+ BUG_ON(i915_gem_request_get_ring(
+ from->legacy_hw_ctx.rcs_state->last_read_req) != ring);
/* obj is kept alive until the next request by its active ref */
i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 10ba188..4c11e5bcf 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -683,7 +683,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
err->dirty = obj->dirty;
err->purgeable = obj->madv != I915_MADV_WILLNEED;
err->userptr = obj->userptr.mm != NULL;
- err->ring = obj->ring ? obj->ring->id : -1;
+ err->ring = obj->last_read_req ?
+ i915_gem_request_get_ring(obj->last_read_req)->id : -1;
err->cache_level = obj->cache_level;
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index eca4240..eea2e92 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -9304,7 +9304,7 @@ static bool use_mmio_flip(struct intel_engine_cs *ring,
else if (i915.enable_execlists)
return true;
else
- return ring != obj->ring;
+ return ring != i915_gem_request_get_ring(obj->last_read_req);
}
static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
@@ -9344,7 +9344,7 @@ static int intel_postpone_flip(struct drm_i915_gem_object *obj)
if (!obj->last_write_req)
return 0;
- ring = obj->ring;
+ ring = i915_gem_request_get_ring(obj->last_write_req);
if (i915_gem_request_completed(obj->last_write_req, true))
return 0;
@@ -9410,14 +9410,15 @@ static int intel_queue_mmio_flip(struct drm_device *dev,
spin_lock_irq(&dev_priv->mmio_flip_lock);
i915_gem_request_assign(&intel_crtc->mmio_flip.req,
obj->last_write_req);
- intel_crtc->mmio_flip.ring_id = obj->ring->id;
+ WARN_ON(ring != i915_gem_request_get_ring(intel_crtc->mmio_flip.req));
+ intel_crtc->mmio_flip.ring_id = ring->id;
spin_unlock_irq(&dev_priv->mmio_flip_lock);
/*
* Double check to catch cases where irq fired before
* mmio flip data was ready
*/
- intel_notify_mmio_flip(obj->ring);
+ intel_notify_mmio_flip(ring);
return 0;
}
@@ -9597,7 +9598,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
} else if (IS_IVYBRIDGE(dev)) {
ring = &dev_priv->ring[BCS];
} else if (INTEL_INFO(dev)->gen >= 7) {
- ring = obj->ring;
+ ring = i915_gem_request_get_ring(obj->last_read_req);
if (ring == NULL || ring->id != RCS)
ring = &dev_priv->ring[BCS];
} else {
@@ -9619,7 +9620,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
i915_gem_request_assign(&work->flip_queued_req,
obj->last_write_req);
- work->flip_queued_ring = obj->ring;
+ work->flip_queued_ring =
+ i915_gem_request_get_ring(obj->last_write_req);
} else {
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
page_flip_flags);
--
1.7.9.5