[Intel-gfx] [PATCH 03/15] drm/i915: Track last read/write seqno independently
Chris Wilson
chris@chris-wilson.co.uk
Tue Mar 22 14:51:48 CET 2011
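
Rather than tracking all GPU access to an object with a single
last_rendering_seqno (plus the pending_gpu_write flag), keep separate
breadcrumbs for the last GPU read and the last GPU write. A caller that
only needs the object's contents to be coherent, such as a read-only
domain change, then waits just for the last outstanding write, whilst a
write to the object must still wait for all outstanding reads to
complete.
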
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
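
For reference, the wait performed by the set-to-domain paths below now
reduces to picking the right breadcrumb. A minimal sketch of that rule
follows; the helper name wait_for_access is purely illustrative and is
not introduced by this patch:

static int
wait_for_access(struct drm_i915_gem_object *obj, bool write)
{
        u32 seqno;
        int ret;

        /* A write must wait for all prior GPU access to finish,
         * whereas a read need only wait for the last GPU write
         * to complete.
         */
        seqno = write ? obj->last_read_seqno : obj->last_write_seqno;
        if (seqno == 0)
                return 0;

        ret = i915_wait_request(obj->ring, seqno);
        if (ret)
                return ret;

        /* Once the wait returns, any pending write has completed. */
        obj->last_write_seqno = 0;
        return 0;
}

Since a write implies a read for tracking purposes, last_write_seqno
never passes last_read_seqno, so waiting on the read breadcrumb in the
write case also covers any outstanding write.
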
drivers/gpu/drm/i915/i915_debugfs.c | 5 ++-
drivers/gpu/drm/i915/i915_drv.h | 17 +++------
drivers/gpu/drm/i915/i915_gem.c | 55 +++++++++++++++++++---------
drivers/gpu/drm/i915/i915_gem_execbuffer.c | 1 -
drivers/gpu/drm/i915/i915_irq.c | 4 +-
5 files changed, 48 insertions(+), 34 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 87c8e29..2aedceb 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -118,14 +118,15 @@ static const char *agp_type_str(int type)
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
- seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s%s",
+ seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d %d%s%s%s",
&obj->base,
get_pin_flag(obj),
get_tiling_flag(obj),
obj->base.size,
obj->base.read_domains,
obj->base.write_domain,
- obj->last_rendering_seqno,
+ obj->last_read_seqno,
+ obj->last_write_seqno,
obj->last_fenced_seqno,
agp_type_str(obj->agp_type == AGP_USER_CACHED_MEMORY),
obj->dirty ? " dirty" : "",
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ed970bd..967a599 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -554,7 +554,7 @@ typedef struct drm_i915_private {
* List of objects currently involved in rendering.
*
* Includes buffers having the contents of their GPU caches
- * flushed, not necessarily primitives. last_rendering_seqno
+ * flushed, not necessarily primitives. last_read_seqno
* represents when the rendering involved will be completed.
*
* A reference is held on the buffer while on this list.
@@ -566,7 +566,7 @@ typedef struct drm_i915_private {
* still have a write_domain which needs to be flushed before
* unbinding.
*
- * last_rendering_seqno is 0 while an object is in this list.
+ * last_read_seqno is 0 while an object is in this list.
*
* A reference is held on the buffer while on this list.
*/
@@ -576,7 +576,7 @@ typedef struct drm_i915_private {
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
*
- * last_rendering_seqno is 0 while an object is in this list.
+ * All seqnos are 0 while an object is in this list.
*
* A reference is not held on the buffer while on this list,
* as merely being GTT-bound shouldn't prevent its being
@@ -734,12 +734,6 @@ struct drm_i915_gem_object {
unsigned int dirty : 1;
/**
- * This is set if the object has been written to since the last
- * GPU flush.
- */
- unsigned int pending_gpu_write : 1;
-
- /**
* Fence register bits (if any) for this object. Will be set
* as needed when mapped into the GTT.
* Protected by dev->struct_mutex.
@@ -814,7 +808,8 @@ struct drm_i915_gem_object {
uint32_t gtt_offset;
/** Breadcrumb of last rendering to the buffer. */
- uint32_t last_rendering_seqno;
+ uint32_t last_read_seqno;
+ uint32_t last_write_seqno;
struct intel_ring_buffer *ring;
/** Breadcrumb of last fenced GPU access to the buffer. */
@@ -859,7 +854,7 @@ struct drm_i915_gem_object {
* and may be associated with active buffers to be retired.
*
* By keeping this list, we can avoid having to do questionable
- * sequence-number comparisons on buffer last_rendering_seqnos, and associate
+ * sequence-number comparisons on buffer last_read_seqnos, and associate
* an emission time with seqnos for tracking how far ahead of the GPU we are.
*/
struct drm_i915_gem_request {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d8a0f7b..f63d33c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1623,13 +1623,17 @@ i915_gem_object_move_to_ring(struct drm_i915_gem_object *obj,
if (from == NULL || to == from)
return 0;
- /* XXX gpu semaphores are implicated in various hard hangs on SNB */
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
+ if (ret)
+ return ret;
+
if (to == NULL || !HAS_GPU_SEMAPHORES(obj->base.dev))
- return i915_gem_object_wait_rendering(obj);
+ return i915_wait_request(obj->ring,
+ obj->last_read_seqno);
idx = intel_ring_sync_index(from, to);
- seqno = obj->last_rendering_seqno;
+ seqno = obj->last_read_seqno;
if (seqno <= from->sync_seqno[idx])
return 0;
@@ -1672,7 +1676,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
list_move_tail(&obj->ring_list, &ring->active_list);
- obj->last_rendering_seqno = seqno;
+ obj->last_read_seqno = seqno;
if (obj->fenced_gpu_access) {
struct drm_i915_fence_reg *reg;
@@ -1689,7 +1693,7 @@ static void
i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
{
list_del_init(&obj->ring_list);
- obj->last_rendering_seqno = 0;
+ obj->last_read_seqno = 0;
}
static void
@@ -1722,10 +1726,10 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
i915_gem_object_move_off_active(obj);
obj->fenced_gpu_access = false;
+ obj->last_write_seqno = 0;
obj->last_fenced_seqno = 0;
obj->active = 0;
- obj->pending_gpu_write = false;
drm_gem_object_unreference(&obj->base);
WARN_ON(i915_verify_lists(dev));
@@ -1773,6 +1777,7 @@ i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
seqno = i915_gem_next_request_seqno(ring);
obj->base.write_domain = 0;
+ obj->last_write_seqno = seqno;
list_del_init(&obj->gpu_write_list);
i915_gem_object_move_to_active(obj, ring, seqno);
@@ -1967,7 +1972,7 @@ i915_ring_outstanding_dispatch(struct intel_ring_buffer *ring)
last_dispatch = list_entry(ring->active_list.prev,
struct drm_i915_gem_object,
- ring_list)->last_rendering_seqno;
+ ring_list)->last_read_seqno;
return !i915_seqno_passed(last_request, last_dispatch);
}
@@ -2027,7 +2032,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
struct drm_i915_gem_object,
ring_list);
- if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
+ if (!i915_seqno_passed(seqno, obj->last_read_seqno))
break;
if (obj->base.write_domain != 0)
@@ -2222,7 +2227,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
* it.
*/
if (obj->active) {
- ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
+ ret = i915_wait_request(obj->ring, obj->last_read_seqno);
if (ret)
return ret;
}
@@ -2565,7 +2570,7 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
return ret;
/* Since last_fence_seqno can retire much earlier than
- * last_rendering_seqno, we track that here for efficiency.
+ * last_read_seqno, we track that here for efficiency.
* (With a catch-all in move_to_inactive() to prevent very
* old seqno from lying around.)
*/
@@ -3048,6 +3053,7 @@ int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
uint32_t old_write_domain, old_read_domains;
+ u32 seqno;
int ret;
/* Not valid to be called on unbound objects. */
@@ -3061,10 +3067,13 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
- if (obj->pending_gpu_write || write) {
- ret = i915_gem_object_wait_rendering(obj);
+ seqno = write ? obj->last_read_seqno : obj->last_write_seqno;
+ if (seqno) {
+ ret = i915_wait_request(obj->ring, seqno);
if (ret)
return ret;
+
+ obj->last_write_seqno = 0;
}
i915_gem_object_flush_cpu_write_domain(obj);
@@ -3147,6 +3156,7 @@ static int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
uint32_t old_write_domain, old_read_domains;
+ u32 seqno;
int ret;
if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
@@ -3156,9 +3166,14 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
- ret = i915_gem_object_wait_rendering(obj);
- if (ret)
- return ret;
+ seqno = write ? obj->last_read_seqno : obj->last_write_seqno;
+ if (seqno) {
+ ret = i915_wait_request(obj->ring, seqno);
+ if (ret)
+ return ret;
+
+ obj->last_write_seqno = 0;
+ }
i915_gem_object_flush_gtt_write_domain(obj);
@@ -3254,9 +3269,13 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- ret = i915_gem_object_wait_rendering(obj);
- if (ret)
- return ret;
+ if (obj->last_write_seqno) {
+ ret = i915_wait_request(obj->ring, obj->last_write_seqno);
+ if (ret)
+ return ret;
+
+ obj->last_write_seqno = 0;
+ }
i915_gem_object_flush_gtt_write_domain(obj);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 60aaf99..3c54911 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -863,7 +863,6 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
i915_gem_object_move_to_active(obj, ring, seqno);
if (obj->base.write_domain) {
obj->dirty = 1;
- obj->pending_gpu_write = true;
list_move_tail(&obj->gpu_write_list,
&ring->gpu_write_list);
intel_mark_busy(ring->dev, obj);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 188b497..c1e4368 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -662,7 +662,7 @@ static u32 capture_bo_list(struct drm_i915_error_buffer *err,
list_for_each_entry(obj, head, mm_list) {
err->size = obj->base.size;
err->name = obj->base.name;
- err->seqno = obj->last_rendering_seqno;
+ err->seqno = obj->last_read_seqno;
err->gtt_offset = obj->gtt_offset;
err->read_domains = obj->base.read_domains;
err->write_domain = obj->base.write_domain;
@@ -731,7 +731,7 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
if (obj->ring != ring)
continue;
- if (i915_seqno_passed(seqno, obj->last_rendering_seqno))
+ if (i915_seqno_passed(seqno, obj->last_read_seqno))
continue;
if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
--
1.7.4.1