[Intel-gfx] [PATCH 2/6] drm/i915: Remove the defunct flushing list
Daniel Vetter
daniel at ffwll.ch
Fri Jul 13 11:12:40 CEST 2012
On Thu, Jul 12, 2012 at 04:13:35PM +0100, Chris Wilson wrote:
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
I'd like to keep the analysis of why things blow up on the BUG_ON(seqno ==
0) in the commit message here, i.e. name the two patches that together
caused the regression (I think we need both the flushing list prep patch
and the -ERESTART from intel_ring_begin), plus the scenario of what exactly
needs to be submitted and interrupted that you've laid out on irc.
Although I have to admit that I still fail to understand why the
unconditional flush doesn't plug all the holes, i.e. if you can come up
with a recipe to blow things up with only that broken patch applied, I'd
much appreciate it ;-)
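Just so we're talking about the same thing: the assert I mean is the one
the wait path trips over, roughly like this (stripped-down sketch from
memory, not a verbatim quote of the current code):

	if (obj->active) {
		/* i915_wait_seqno() starts with BUG_ON(seqno == 0), so an
		 * object that is still marked active but never had a
		 * request/seqno attached to it blows up right here.
		 */
		ret = i915_wait_seqno(obj->ring, obj->last_rendering_seqno);
		if (ret)
			return ret;
	}

So as far as I can see the failure needs an object left on the active list
with last_rendering_seqno == 0 at the point where someone waits on it.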
/me feels way too dense about all the complexity we have here
One thing popped out while reading this patch again, comment below.
Thanks, Daniel
> ---
> drivers/gpu/drm/i915/i915_debugfs.c | 7 ----
> drivers/gpu/drm/i915/i915_drv.h | 19 +++--------
> drivers/gpu/drm/i915/i915_gem.c | 57 ++++++---------------------------
> drivers/gpu/drm/i915/i915_gem_evict.c | 20 ------------
> 4 files changed, 13 insertions(+), 90 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index 359f6e8..d149890 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -44,7 +44,6 @@
>
> enum {
> ACTIVE_LIST,
> - FLUSHING_LIST,
> INACTIVE_LIST,
> PINNED_LIST,
> };
> @@ -177,10 +176,6 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
> seq_printf(m, "Inactive:\n");
> head = &dev_priv->mm.inactive_list;
> break;
> - case FLUSHING_LIST:
> - seq_printf(m, "Flushing:\n");
> - head = &dev_priv->mm.flushing_list;
> - break;
> default:
> mutex_unlock(&dev->struct_mutex);
> return -EINVAL;
> @@ -238,7 +233,6 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
>
> size = count = mappable_size = mappable_count = 0;
> count_objects(&dev_priv->mm.active_list, mm_list);
> - count_objects(&dev_priv->mm.flushing_list, mm_list);
> seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
> count, mappable_count, size, mappable_size);
>
> @@ -2006,7 +2000,6 @@ static struct drm_info_list i915_debugfs_list[] = {
> {"i915_gem_gtt", i915_gem_gtt_info, 0},
> {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
> {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
> - {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
> {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
> {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
> {"i915_gem_request", i915_gem_request_info, 0},
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 51e234e..38b95be 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -696,17 +696,6 @@ typedef struct drm_i915_private {
> struct list_head active_list;
>
> /**
> - * List of objects which are not in the ringbuffer but which
> - * still have a write_domain which needs to be flushed before
> - * unbinding.
> - *
> - * last_rendering_seqno is 0 while an object is in this list.
> - *
> - * A reference is held on the buffer while on this list.
> - */
> - struct list_head flushing_list;
> -
> - /**
> * LRU list of objects which are not in the ringbuffer and
> * are ready to unbind, but are still in the GTT.
> *
> @@ -873,7 +862,7 @@ struct drm_i915_gem_object {
> struct drm_mm_node *gtt_space;
> struct list_head gtt_list;
>
> - /** This object's place on the active/flushing/inactive lists */
> + /** This object's place on the active/inactive lists */
> struct list_head ring_list;
> struct list_head mm_list;
> /** This object's place on GPU write list */
> @@ -882,9 +871,9 @@ struct drm_i915_gem_object {
> struct list_head exec_list;
>
> /**
> - * This is set if the object is on the active or flushing lists
> - * (has pending rendering), and is not set if it's on inactive (ready
> - * to be unbound).
> + * This is set if the object is on the active lists (has pending
> + * rendering and so a non-zero seqno), and is not set if it is on
> + * inactive (ready to be unbound) list.
> */
> unsigned int active:1;
>
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 875b745..f69d897 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -1458,26 +1458,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
> }
>
> static void
> -i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
> -{
> - list_del_init(&obj->ring_list);
> - obj->last_rendering_seqno = 0;
> - obj->last_fenced_seqno = 0;
> -}
> -
> -static void
> -i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
> -{
> - struct drm_device *dev = obj->base.dev;
> - drm_i915_private_t *dev_priv = dev->dev_private;
> -
> - BUG_ON(!obj->active);
> - list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
> -
> - i915_gem_object_move_off_active(obj);
> -}
> -
> -static void
> i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
> {
> struct drm_device *dev = obj->base.dev;
> @@ -1487,13 +1467,18 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
>
> BUG_ON(!list_empty(&obj->gpu_write_list));
> BUG_ON(!obj->active);
> +
> + list_del_init(&obj->ring_list);
> obj->ring = NULL;
>
> - i915_gem_object_move_off_active(obj);
> + obj->last_rendering_seqno = 0;
> + obj->last_fenced_seqno = 0;
> obj->fenced_gpu_access = false;
>
> - obj->active = 0;
> + obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
Clearing the write_domain here makes me rather uneasy ... I see that we
need to plug gpu write domains leaking out past obj->active (at least until
we've ripped out all that gpu domain tracking complexity for good). But I
also fear that this papers over some issues - at least with the old code we
should never have been able to hit this case. Maybe just add a comment to
explain what's going on?
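Something as simple as the below would do - purely illustrative, adjust the
wording as you see fit:

	/* With the flushing list gone we retire objects as soon as their
	 * seqno passes, so a GPU write domain may still be set when we
	 * get here; clear it so it cannot leak out past obj->active.
	 */
	obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;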
> obj->pending_gpu_write = false;
> +
> + obj->active = 0;
> drm_gem_object_unreference(&obj->base);
>
> WARN_ON(i915_verify_lists(dev));
> @@ -1728,20 +1713,6 @@ void i915_gem_reset(struct drm_device *dev)
> for_each_ring(ring, dev_priv, i)
> i915_gem_reset_ring_lists(dev_priv, ring);
>
> - /* Remove anything from the flushing lists. The GPU cache is likely
> - * to be lost on reset along with the data, so simply move the
> - * lost bo to the inactive list.
> - */
> - while (!list_empty(&dev_priv->mm.flushing_list)) {
> - obj = list_first_entry(&dev_priv->mm.flushing_list,
> - struct drm_i915_gem_object,
> - mm_list);
> -
> - obj->base.write_domain = 0;
> - list_del_init(&obj->gpu_write_list);
> - i915_gem_object_move_to_inactive(obj);
> - }
> -
> /* Move everything out of the GPU domains to ensure we do any
> * necessary invalidation upon reuse.
> */
> @@ -1812,10 +1783,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
> if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
> break;
>
> - if (obj->base.write_domain != 0)
> - i915_gem_object_move_to_flushing(obj);
> - else
> - i915_gem_object_move_to_inactive(obj);
> + i915_gem_object_move_to_inactive(obj);
> }
>
> if (unlikely(ring->trace_irq_seqno &&
> @@ -3877,7 +3845,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
> }
>
> BUG_ON(!list_empty(&dev_priv->mm.active_list));
> - BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
> BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
> mutex_unlock(&dev->struct_mutex);
>
> @@ -3935,7 +3902,6 @@ i915_gem_load(struct drm_device *dev)
> drm_i915_private_t *dev_priv = dev->dev_private;
>
> INIT_LIST_HEAD(&dev_priv->mm.active_list);
> - INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
> INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
> INIT_LIST_HEAD(&dev_priv->mm.fence_list);
> INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
> @@ -4186,12 +4152,7 @@ static int
> i915_gpu_is_active(struct drm_device *dev)
> {
> drm_i915_private_t *dev_priv = dev->dev_private;
> - int lists_empty;
> -
> - lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
> - list_empty(&dev_priv->mm.active_list);
> -
> - return !lists_empty;
> + return !list_empty(&dev_priv->mm.active_list);
> }
>
> static int
> diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
> index ae7c24e..f61af59 100644
> --- a/drivers/gpu/drm/i915/i915_gem_evict.c
> +++ b/drivers/gpu/drm/i915/i915_gem_evict.c
> @@ -92,23 +92,6 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
>
> /* Now merge in the soon-to-be-expired objects... */
> list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
> - /* Does the object require an outstanding flush? */
> - if (obj->base.write_domain)
> - continue;
> -
> - if (mark_free(obj, &unwind_list))
> - goto found;
> - }
> -
> - /* Finally add anything with a pending flush (in order of retirement) */
> - list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
> - if (mark_free(obj, &unwind_list))
> - goto found;
> - }
> - list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
> - if (!obj->base.write_domain)
> - continue;
> -
> if (mark_free(obj, &unwind_list))
> goto found;
> }
> @@ -171,7 +154,6 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
> int ret;
>
> lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
> - list_empty(&dev_priv->mm.flushing_list) &&
> list_empty(&dev_priv->mm.active_list));
> if (lists_empty)
> return -ENOSPC;
> @@ -188,8 +170,6 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
>
> i915_gem_retire_requests(dev);
>
> - BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
> -
> /* Having flushed everything, unbind() should never raise an error */
> list_for_each_entry_safe(obj, next,
> &dev_priv->mm.inactive_list, mm_list) {
> --
> 1.7.10.4
>
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx at lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/intel-gfx
--
Daniel Vetter
Mail: daniel at ffwll.ch
Mobile: +41 (0)79 365 57 48