[Intel-gfx] [PATCH] drm/i915: Update write_domains on active list after flush.

Eric Anholt eric at anholt.net
Fri Feb 13 21:14:42 CET 2009


On Wed, 2009-02-11 at 14:26 +0000, Chris Wilson wrote:
> Before changing the status of a buffer with a pending write, we wait
> upon a new flush for that buffer. We can therefore take advantage of any
> flushes posted whilst the buffer is active and pending processing by the
> GPU, by clearing its write_domain and updating its last_rendering_seqno
> -- thus saving a potential flush in deep queues and improving flushing
> behaviour upon eviction for both GTT space and fences.
> 
> In order to reduce the time spent searching the active list for matching
> write_domains, we move those objects to a separate list whose elements
> are the buffers from the active/flushing lists that have pending writes.

I was chatting with keithp about this one yesterday, and we're both a
little leery of adding more lists unless strictly necessary.  Did you
find walking the whole active list to be too expensive?  One thought to
mitigate that without more pointers for us to forget about would be to
record the last flush's seqno and walk the active list backwards from
the tail (newest first), stopping once we reach buffers the previous
flush already covered, i.e. once
i915_gem_seqno_passed(dev_priv->mm.last_flush_seqno,
obj_priv->last_rendering_seqno).
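
Something like this rough, untested sketch in i915_add_request() --
last_flush_seqno would be a new field in dev_priv->mm, and I'm
hand-waving the flushing list, which would want the same bound:

	if (flush_domains & I915_GEM_GPU_DOMAINS) {
		struct drm_i915_gem_object *obj_priv, *next;

		/* move_to_active() appends with list_move_tail(), so the
		 * newest buffers sit at the tail.  Walk backwards and stop
		 * at the first buffer the previous flush already covered.
		 */
		list_for_each_entry_safe_reverse(obj_priv, next,
						 &dev_priv->mm.active_list,
						 list) {
			struct drm_gem_object *obj = obj_priv->obj;

			if (i915_gem_seqno_passed(dev_priv->mm.last_flush_seqno,
						  obj_priv->last_rendering_seqno))
				break;

			if (obj->write_domain & flush_domains) {
				obj->write_domain = 0;
				i915_gem_object_move_to_active(obj, seqno);
			}
		}

		/* Remember this flush for the next walk. */
		dev_priv->mm.last_flush_seqno = seqno;
	}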

> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/i915/i915_drv.h |   11 +++++++++++
>  drivers/gpu/drm/i915/i915_gem.c |   33 ++++++++++++++++++++++++---------
>  2 files changed, 35 insertions(+), 9 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index b9253bd..3151da4 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -311,6 +311,15 @@ typedef struct drm_i915_private {
>  		struct list_head flushing_list;
>  
>  		/**
> +		 * List of objects currently pending a GPU write.
> +		 *
> +		 * All elements on this list will belong to either the
> +		 * active_list or the flushing_list; last_rendering_seqno
> +		 * can be used to differentiate between the two lists.
> +		 */
> +		struct list_head gpu_write_list;
> +
> +		/**
>  		 * LRU list of objects which are not in the ringbuffer and
>  		 * are ready to unbind, but are still in the GTT.
>  		 *
> @@ -387,6 +396,8 @@ struct drm_i915_gem_object {
>  
>  	/** This object's place on the active/flushing/inactive lists */
>  	struct list_head list;
> +	/** This object's place on the GPU write list */
> +	struct list_head gpu_write_list;
>  
>  	/**
>  	 * This is set if the object is on the active or flushing lists
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 8249c17..40bfbe9 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -866,6 +866,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
>  	else
>  		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
>  
> +	BUG_ON(!list_empty(&obj_priv->gpu_write_list));
> +
>  	obj_priv->last_rendering_seqno = 0;
>  	if (obj_priv->active) {
>  		obj_priv->active = 0;
> @@ -918,23 +920,23 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
>  	was_empty = list_empty(&dev_priv->mm.request_list);
>  	list_add_tail(&request->list, &dev_priv->mm.request_list);
>  
> -	/* Associate any objects on the flushing list matching the write
> +	/* Associate any objects on the gpu_write_list matching the write
>  	 * domain we're flushing with our flush.
>  	 */
> -	if (flush_domains != 0) {
> +	if (flush_domains & I915_GEM_GPU_DOMAINS) {
>  		struct drm_i915_gem_object *obj_priv, *next;
>  
>  		list_for_each_entry_safe(obj_priv, next,
> -					 &dev_priv->mm.flushing_list, list) {
> +					 &dev_priv->mm.gpu_write_list,
> +					 gpu_write_list) {
>  			struct drm_gem_object *obj = obj_priv->obj;
>  
> -			if ((obj->write_domain & flush_domains) ==
> -			    obj->write_domain) {
> +			if (obj->write_domain & flush_domains) {
>  				obj->write_domain = 0;
> +				list_del_init(&obj_priv->gpu_write_list);
>  				i915_gem_object_move_to_active(obj, seqno);
>  			}
>  		}
> -
>  	}
>  
>  	if (was_empty && !dev_priv->mm.suspended)
> @@ -1791,7 +1793,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
>  	/* Queue the GPU write cache flushing we need. */
>  	i915_gem_flush(dev, 0, obj->write_domain);
>  	seqno = i915_add_request(dev, obj->write_domain);
> -	obj->write_domain = 0;
> +	BUG_ON(obj->write_domain);
>  	i915_gem_object_move_to_active(obj, seqno);
>  }
>  
> @@ -2032,6 +2034,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
>  				  uint32_t write_domain)
>  {
>  	struct drm_device		*dev = obj->dev;
> +	drm_i915_private_t		*dev_priv = dev->dev_private;
>  	struct drm_i915_gem_object	*obj_priv = obj->driver_private;
>  	uint32_t			invalidate_domains = 0;
>  	uint32_t			flush_domains = 0;
> @@ -2077,8 +2080,14 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
>  		i915_gem_clflush_object(obj);
>  	}
>  
> -	if ((write_domain | flush_domains) != 0)
> +	if ((write_domain | flush_domains) != 0) {
>  		obj->write_domain = write_domain;
> +		if (write_domain)
> +			list_move_tail(&obj_priv->gpu_write_list,
> +				       &dev_priv->mm.gpu_write_list);
> +		else
> +			list_del_init(&obj_priv->gpu_write_list);
> +	}
>  	obj->read_domains = read_domains;
>  
>  	dev->invalidate_domains |= invalidate_domains;
> @@ -2606,7 +2615,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
>  		i915_gem_flush(dev,
>  			       dev->invalidate_domains,
>  			       dev->flush_domains);
> -		if (dev->flush_domains)
> +		if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
>  			(void)i915_add_request(dev, dev->flush_domains);
>  	}
>  
> @@ -2917,6 +2926,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
>  	obj_priv->obj = obj;
>  	obj_priv->fence_reg = I915_FENCE_REG_NONE;
>  	INIT_LIST_HEAD(&obj_priv->list);
> +	INIT_LIST_HEAD(&obj_priv->gpu_write_list);
>  
>  	return 0;
>  }
> @@ -3072,6 +3082,8 @@ i915_gem_idle(struct drm_device *dev)
>  					    struct drm_i915_gem_object,
>  					    list);
>  		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
> +		if (!list_empty(&obj_priv->gpu_write_list))
> +			list_del_init(&obj_priv->gpu_write_list);
>  		i915_gem_object_move_to_inactive(obj_priv->obj);
>  	}
>  
> @@ -3082,6 +3094,8 @@ i915_gem_idle(struct drm_device *dev)
>  					    struct drm_i915_gem_object,
>  					    list);
>  		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
> +		if (!list_empty(&obj_priv->gpu_write_list))
> +			list_del_init(&obj_priv->gpu_write_list);
>  		i915_gem_object_move_to_inactive(obj_priv->obj);
>  	}
>  
> @@ -3377,6 +3391,7 @@ i915_gem_load(struct drm_device *dev)
>  
>  	INIT_LIST_HEAD(&dev_priv->mm.active_list);
>  	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
> +	INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
>  	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
>  	INIT_LIST_HEAD(&dev_priv->mm.request_list);
>  	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
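
As an aside: list_del_init() is already a no-op on an empty
(initialized) node, since the node just points back at itself, so the
list_empty() guards in the i915_gem_idle() hunks above look redundant.
Untested, but each of those hunks could shrink to:

		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
		list_del_init(&obj_priv->gpu_write_list);
		i915_gem_object_move_to_inactive(obj_priv->obj);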
-- 
Eric Anholt
eric at anholt.net                         eric.anholt at intel.com

