[Intel-gfx] [PATCH 3/6] drm/i915: Remove the per-ring write list
Daniel Vetter
daniel at ffwll.ch
Thu Jul 12 21:37:16 CEST 2012
On Thu, Jul 12, 2012 at 04:13:36PM +0100, Chris Wilson wrote:
> This is now handled by a global flag to ensure we emit a flush before
> the next serialisation point (if we failed to queue one previously).
>
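Just to restate the new scheme for my own understanding (rough, untested
sketch below; the flag and helper names are mine and not necessarily what
the earlier patches in this series actually use, and I've parked the flag
on the ring here even though where it lives doesn't matter for the idea):

	#include <stdbool.h>

	/* one dirty flag instead of a per-ring list of written objects */
	struct ring_sketch {
		bool gpu_caches_dirty;	/* a GPU write was emitted, no flush queued yet */
	};

	/* stand-in for ring->flush(ring, 0, I915_GEM_GPU_DOMAINS) */
	static int emit_gpu_flush(struct ring_sketch *ring)
	{
		(void)ring;
		return 0;
	}

	/* execbuffer: instead of moving every written object onto a per-ring
	 * gpu_write_list, just remember that the caches still need flushing */
	static void mark_batch_writes(struct ring_sketch *ring)
	{
		ring->gpu_caches_dirty = true;
	}

	/* every serialisation point (add_request, ring idle, ...) emits the
	 * deferred flush first, so a flush missed at execbuffer time always
	 * lands before the seqno that readers will wait on */
	static int flush_before_serialisation(struct ring_sketch *ring)
	{
		int ret;

		if (!ring->gpu_caches_dirty)
			return 0;

		ret = emit_gpu_flush(ring);
		if (ret)
			return ret;

		ring->gpu_caches_dirty = false;
		return 0;
	}
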
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> ---
> drivers/gpu/drm/i915/i915_drv.h | 2 -
> drivers/gpu/drm/i915/i915_gem.c | 55 ++--------------------------
> drivers/gpu/drm/i915/i915_gem_execbuffer.c | 10 ++---
> drivers/gpu/drm/i915/intel_ringbuffer.c | 2 -
> drivers/gpu/drm/i915/intel_ringbuffer.h | 9 -----
> 5 files changed, 7 insertions(+), 71 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 38b95be..7871760 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -865,8 +865,6 @@ struct drm_i915_gem_object {
> /** This object's place on the active/inactive lists */
> struct list_head ring_list;
> struct list_head mm_list;
> - /** This object's place on GPU write list */
> - struct list_head gpu_write_list;
> /** This object's place in the batchbuffer or on the eviction list */
> struct list_head exec_list;
>
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index f69d897..535dd61 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -1465,7 +1465,6 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
>
> list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
>
> - BUG_ON(!list_empty(&obj->gpu_write_list));
> BUG_ON(!obj->active);
>
> list_del_init(&obj->ring_list);
> @@ -1510,30 +1509,6 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
> return obj->madv == I915_MADV_DONTNEED;
> }
>
> -static void
> -i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
> - uint32_t flush_domains)
> -{
> - struct drm_i915_gem_object *obj, *next;
> -
> - list_for_each_entry_safe(obj, next,
> - &ring->gpu_write_list,
> - gpu_write_list) {
> - if (obj->base.write_domain & flush_domains) {
> - uint32_t old_write_domain = obj->base.write_domain;
> -
> - obj->base.write_domain = 0;
> - list_del_init(&obj->gpu_write_list);
> - i915_gem_object_move_to_active(obj, ring,
> - i915_gem_next_request_seqno(ring));
> -
> - trace_i915_gem_object_change_domain(obj,
> - obj->base.read_domains,
> - old_write_domain);
> - }
> - }
> -}
> -
> static u32
> i915_gem_get_seqno(struct drm_device *dev)
> {
> @@ -1633,8 +1608,6 @@ i915_add_request(struct intel_ring_buffer *ring,
> &dev_priv->mm.retire_work, HZ);
> }
>
> - WARN_ON(!list_empty(&ring->gpu_write_list));
> -
> return 0;
> }
>
> @@ -1677,7 +1650,7 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
> ring_list);
>
> obj->base.write_domain = 0;
> - list_del_init(&obj->gpu_write_list);
> + obj->pending_gpu_write = false;
> i915_gem_object_move_to_inactive(obj);
Hm, this hunk makes me wonder whether we leak some bogus state across a
gpu reset. Can't we just reuse the move_to_inactive helper here, ensuring
that we consistently clear up any active state?
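
I.e. something along these lines (untested sketch; it assumes that
move_to_inactive itself can safely clear write_domain and
pending_gpu_write, which is exactly the part I'm not sure about):

	while (!list_empty(&ring->active_list)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&ring->active_list,
				       struct drm_i915_gem_object,
				       ring_list);

		/* no open-coded clearing here; move_to_inactive would reset
		 * write_domain, pending_gpu_write, ring_list & friends, so
		 * the reset path can't drift out of sync with normal
		 * retiring */
		i915_gem_object_move_to_inactive(obj);
	}
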
Otherwise I couldn't poke any other holes in this patch series, but let
me sleep over it a bit first ...
-Daniel
> }
> }
> @@ -2005,11 +1978,6 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
> {
> int ret;
>
> - /* This function only exists to support waiting for existing rendering,
> - * not for emitting required flushes.
> - */
> - BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
> -
> /* If there is rendering queued on the buffer being evicted, wait for
> * it.
> */
> @@ -2017,6 +1985,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
> ret = i915_wait_seqno(obj->ring, obj->last_rendering_seqno);
> if (ret)
> return ret;
> +
> i915_gem_retire_requests_ring(obj->ring);
> }
>
> @@ -2288,26 +2257,14 @@ i915_gem_flush_ring(struct intel_ring_buffer *ring,
> if (ret)
> return ret;
>
> - if (flush_domains & I915_GEM_GPU_DOMAINS)
> - i915_gem_process_flushing_list(ring, flush_domains);
> -
> return 0;
> }
>
> static int i915_ring_idle(struct intel_ring_buffer *ring)
> {
> - int ret;
> -
> - if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
> + if (list_empty(&ring->active_list))
> return 0;
>
> - if (!list_empty(&ring->gpu_write_list)) {
> - ret = i915_gem_flush_ring(ring,
> - I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
> - if (ret)
> - return ret;
> - }
> -
> return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
> }
>
> @@ -2323,10 +2280,6 @@ int i915_gpu_idle(struct drm_device *dev)
> if (ret)
> return ret;
>
> - /* Is the device fubar? */
> - if (WARN_ON(!list_empty(&ring->gpu_write_list)))
> - return -EBUSY;
> -
> ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
> if (ret)
> return ret;
> @@ -3471,7 +3424,6 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
> INIT_LIST_HEAD(&obj->gtt_list);
> INIT_LIST_HEAD(&obj->ring_list);
> INIT_LIST_HEAD(&obj->exec_list);
> - INIT_LIST_HEAD(&obj->gpu_write_list);
> obj->madv = I915_MADV_WILLNEED;
> /* Avoid an unnecessary call to unbind on the first bind. */
> obj->map_and_fenceable = true;
> @@ -3892,7 +3844,6 @@ init_ring_lists(struct intel_ring_buffer *ring)
> {
> INIT_LIST_HEAD(&ring->active_list);
> INIT_LIST_HEAD(&ring->request_list);
> - INIT_LIST_HEAD(&ring->gpu_write_list);
> }
>
> void
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index 0c26692..ca48978 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -938,9 +938,8 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
> struct drm_i915_gem_object *obj;
>
> list_for_each_entry(obj, objects, exec_list) {
> - u32 old_read = obj->base.read_domains;
> - u32 old_write = obj->base.write_domain;
> -
> + u32 old_read = obj->base.read_domains;
> + u32 old_write = obj->base.write_domain;
>
> obj->base.read_domains = obj->base.pending_read_domains;
> obj->base.write_domain = obj->base.pending_write_domain;
> @@ -950,11 +949,10 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
> if (obj->base.write_domain) {
> obj->dirty = 1;
> obj->pending_gpu_write = true;
> - list_move_tail(&obj->gpu_write_list,
> - &ring->gpu_write_list);
> if (obj->pin_count) /* check for potential scanout */
> intel_mark_busy(ring->dev, obj);
> - }
> + } else
> + obj->pending_gpu_write = false;
>
> trace_i915_gem_object_change_domain(obj, old_read, old_write);
> }
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index ddc4859..7c166ff 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -1001,7 +1001,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
> ring->dev = dev;
> INIT_LIST_HEAD(&ring->active_list);
> INIT_LIST_HEAD(&ring->request_list);
> - INIT_LIST_HEAD(&ring->gpu_write_list);
> ring->size = 32 * PAGE_SIZE;
>
> init_waitqueue_head(&ring->irq_queue);
> @@ -1472,7 +1471,6 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
> ring->dev = dev;
> INIT_LIST_HEAD(&ring->active_list);
> INIT_LIST_HEAD(&ring->request_list);
> - INIT_LIST_HEAD(&ring->gpu_write_list);
>
> ring->size = size;
> ring->effective_size = ring->size;
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index 1d3c81f..7986f30 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -101,15 +101,6 @@ struct intel_ring_buffer {
> struct list_head request_list;
>
> /**
> - * List of objects currently pending a GPU write flush.
> - *
> - * All elements on this list will belong to either the
> - * active_list or flushing_list, last_rendering_seqno can
> - * be used to differentiate between the two elements.
> - */
> - struct list_head gpu_write_list;
> -
> - /**
> * Do we have some not yet emitted requests outstanding?
> */
> u32 outstanding_lazy_request;
> --
> 1.7.10.4
>
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx at lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/intel-gfx
--
Daniel Vetter
Mail: daniel at ffwll.ch
Mobile: +41 (0)79 365 57 48