[Intel-gfx] [PATCH 09/15] drm/i915: Track fence setup separately from fenced object lifetime
Chris Wilson
chris at chris-wilson.co.uk
Sun Mar 20 09:58:53 CET 2011
This fixes a bookkeeping error causing an OOPS whilst waiting for an
object to finish using a fence. Now we can simply wait for the fence to
be written independent of the objects currently inhabiting it (past,
present and future).
Cc: Andy Whitcroft <apw at canonical.com>
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
Reviewed-by: Daniel Vetter <daniel.vetter at ffwll.ch>
---
drivers/gpu/drm/i915/i915_drv.h | 2 +-
drivers/gpu/drm/i915/i915_gem.c | 50 ++++++++++++++++++---------------------
2 files changed, 24 insertions(+), 28 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4496505..bcbcb53 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -129,6 +129,7 @@ struct drm_i915_master_private {
struct drm_i915_fence_reg {
struct list_head lru_list;
struct drm_i915_gem_object *obj;
+ struct intel_ring_buffer *setup_ring;
uint32_t setup_seqno;
};
@@ -818,7 +819,6 @@ struct drm_i915_gem_object {
/** Breadcrumb of last fenced GPU access to the buffer. */
uint32_t last_fenced_seqno;
- struct intel_ring_buffer *last_fenced_ring;
/** Current tiling stride for the object, if it's tiled. */
uint32_t stride;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c5dfb59..ef84c13 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1640,7 +1640,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE);
obj->last_fenced_seqno = seqno;
- obj->last_fenced_ring = ring;
reg = &dev_priv->fence_regs[obj->fence_reg];
list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
@@ -1684,6 +1683,8 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
i915_gem_object_move_off_active(obj);
obj->fenced_gpu_access = false;
+ obj->last_fenced_seqno = 0;
+
obj->active = 0;
obj->pending_gpu_write = false;
drm_gem_object_unreference(&obj->base);
@@ -1849,7 +1850,6 @@ static void i915_gem_reset_fences(struct drm_device *dev)
reg->obj->fence_reg = I915_FENCE_REG_NONE;
reg->obj->fenced_gpu_access = false;
reg->obj->last_fenced_seqno = 0;
- reg->obj->last_fenced_ring = NULL;
i915_gem_clear_fence_reg(dev, reg);
}
}
@@ -2450,7 +2450,7 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
if (obj->fenced_gpu_access) {
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
- ret = i915_gem_flush_ring(obj->last_fenced_ring,
+ ret = i915_gem_flush_ring(obj->ring,
0, obj->base.write_domain);
if (ret)
return ret;
@@ -2459,17 +2459,15 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
obj->fenced_gpu_access = false;
}
- if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
- if (!ring_passed_seqno(obj->last_fenced_ring,
- obj->last_fenced_seqno)) {
- ret = i915_wait_request(obj->last_fenced_ring,
+ if (obj->last_fenced_seqno && pipelined != obj->ring) {
+ if (!ring_passed_seqno(obj->ring, obj->last_fenced_seqno)) {
+ ret = i915_wait_request(obj->ring,
obj->last_fenced_seqno);
if (ret)
return ret;
}
obj->last_fenced_seqno = 0;
- obj->last_fenced_ring = NULL;
}
/* Ensure that all CPU reads are completed before installing a fence
@@ -2536,8 +2534,8 @@ i915_find_fence_reg(struct drm_device *dev,
first = reg;
if (!pipelined ||
- !reg->obj->last_fenced_ring ||
- reg->obj->last_fenced_ring == pipelined) {
+ !reg->obj->last_fenced_seqno ||
+ reg->obj->ring == pipelined) {
avail = reg;
break;
}
@@ -2589,30 +2587,23 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
pipelined = NULL;
- if (pipelined) {
- reg->setup_seqno =
- i915_gem_next_request_seqno(pipelined);
- obj->last_fenced_seqno = reg->setup_seqno;
- obj->last_fenced_ring = pipelined;
- }
-
goto update;
}
if (!pipelined) {
if (reg->setup_seqno) {
- if (!ring_passed_seqno(obj->last_fenced_ring,
+ if (!ring_passed_seqno(reg->setup_ring,
reg->setup_seqno)) {
- ret = i915_wait_request(obj->last_fenced_ring,
+ ret = i915_wait_request(reg->setup_ring,
reg->setup_seqno);
if (ret)
return ret;
}
reg->setup_seqno = 0;
+ reg->setup_ring = NULL;
}
- } else if (obj->last_fenced_ring &&
- obj->last_fenced_ring != pipelined) {
+ } else if (obj->last_fenced_seqno && obj->ring != pipelined) {
ret = i915_gem_object_flush_fence(obj, pipelined);
if (ret)
return ret;
@@ -2647,9 +2638,13 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
pipelined = NULL;
old->fence_reg = I915_FENCE_REG_NONE;
- old->last_fenced_ring = pipelined;
- old->last_fenced_seqno =
- pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
+ old->last_fenced_seqno = 0;
+ if (pipelined) {
+ old->last_fenced_seqno =
+ i915_gem_next_request_seqno(pipelined);
+ i915_gem_object_move_to_active(old, pipelined,
+ old->last_fenced_seqno);
+ }
drm_gem_object_unreference(&old->base);
} else if (obj->last_fenced_seqno == 0)
@@ -2658,13 +2653,13 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
reg->obj = obj;
list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
obj->fence_reg = reg - dev_priv->fence_regs;
- obj->last_fenced_ring = pipelined;
+update:
reg->setup_seqno =
pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
- obj->last_fenced_seqno = reg->setup_seqno;
+ reg->setup_ring = pipelined;
-update:
+ obj->last_fenced_seqno = reg->setup_seqno;
obj->tiling_changed = false;
switch (INTEL_INFO(dev)->gen) {
case 6:
@@ -2721,6 +2716,7 @@ i915_gem_clear_fence_reg(struct drm_device *dev,
list_del_init(&reg->lru_list);
reg->obj = NULL;
reg->setup_seqno = 0;
+ reg->setup_ring = NULL;
}
/**
--
1.7.4.1
More information about the Intel-gfx
mailing list