[Intel-gfx] [PATCH] drm/i915: Avoid unbinding due to an interrupted pin_and_fence during execbuffer
Daniel Vetter
daniel at ffwll.ch
Fri Aug 24 21:03:26 CEST 2012
On Fri, Aug 24, 2012 at 07:18:18PM +0100, Chris Wilson wrote:
> If we need to stall in order to complete the pin_and_fence operation
> during execbuffer reservation, there is a high likelihood that the
> operation will be interrupted by a signal (thanks X!). In order to
> simplify the cleanup along that error path, the object was
> unconditionally unbound and the error propagated. However, being
> interrupted here is far more common than I would like and so we can
> strive to avoid the extra work by eliminating the forced unbind.
>
> v2: In discussion over the indecent colour of the new functions and
> unwind path, we realised that we can use the new unreserve function to
> clean up the code even further.
>
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
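
Worth spelling out what the restart path looks like, since that is
where the saving is: any of the waits under pin_and_fence can be
interrupted by a pending signal, and until now we unbound the
freshly-bound object before propagating the error, so the restarted
execbuffer ioctl had to redo the binding from scratch. A stand-alone
toy of the flag-tracking scheme below (simplified, made-up types and
names; -EINTR stands in for the kernel's -ERESTARTSYS, and the real
code keys off obj->exec_entry->flags and obj->gtt_space):

    #include <errno.h>
    #include <stdio.h>

    #define HAS_PIN   (1u << 31)    /* we took a pin reference */
    #define HAS_FENCE (1u << 30)    /* we took a fence reference */

    struct obj {
            unsigned int flags;
            int bound;              /* stands in for obj->gtt_space */
            int pins, fences;
    };

    /* Acquire bind -> pin -> fence, recording each step in flags. */
    static int reserve(struct obj *o, int interrupted)
    {
            o->bound = 1;
            o->pins++;
            o->flags |= HAS_PIN;

            if (interrupted)        /* signal while stalled on the fence */
                    return -EINTR;

            o->fences++;
            o->flags |= HAS_FENCE;
            return 0;
    }

    /* Drop exactly what flags recorded; the binding itself stays,
     * so the restarted call finds the object already in the GTT. */
    static void unreserve(struct obj *o)
    {
            if (!o->bound)
                    return;
            if (o->flags & HAS_FENCE)
                    o->fences--;
            if (o->flags & HAS_PIN)
                    o->pins--;
            o->flags &= ~(HAS_FENCE | HAS_PIN);
    }

    int main(void)
    {
            struct obj o = { 0 };

            if (reserve(&o, 1) == -EINTR)
                    unreserve(&o);                  /* note: no unbind */
            printf("still bound after signal: %d\n", o.bound);

            reserve(&o, 0);                         /* restarted ioctl */
            unreserve(&o);
            return 0;
    }

The invariant is that unreserve releases exactly what reserve recorded,
never more, which is what makes it safe to run over every object
regardless of where reservation bailed out.
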
Nice colours, merged to dinq (drm-intel-next-queued).
-Daniel
> ---
> drivers/gpu/drm/i915/i915_gem_execbuffer.c | 114 +++++++++++-----------------
> 1 file changed, 45 insertions(+), 69 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index dc87563..e6b2205 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -331,7 +331,8 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
> return ret;
> }
>
> -#define __EXEC_OBJECT_HAS_FENCE (1<<31)
> +#define __EXEC_OBJECT_HAS_PIN (1<<31)
> +#define __EXEC_OBJECT_HAS_FENCE (1<<30)
>
> static int
> need_reloc_mappable(struct drm_i915_gem_object *obj)
> @@ -341,9 +342,10 @@ need_reloc_mappable(struct drm_i915_gem_object *obj)
> }
>
> static int
> -pin_and_fence_object(struct drm_i915_gem_object *obj,
> - struct intel_ring_buffer *ring)
> +i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
> + struct intel_ring_buffer *ring)
> {
> + struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
> bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
> bool need_fence, need_mappable;
> @@ -359,11 +361,13 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
> if (ret)
> return ret;
>
> + entry->flags |= __EXEC_OBJECT_HAS_PIN;
> +
> if (has_fenced_gpu_access) {
> if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
> ret = i915_gem_object_get_fence(obj);
> if (ret)
> - goto err_unpin;
> + return ret;
>
> if (i915_gem_object_pin_fence(obj))
> entry->flags |= __EXEC_OBJECT_HAS_FENCE;
> @@ -372,12 +376,35 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
> }
> }
>
> + /* Ensure ppgtt mapping exists if needed */
> + if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
> + i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
> + obj, obj->cache_level);
> +
> + obj->has_aliasing_ppgtt_mapping = 1;
> + }
> +
> entry->offset = obj->gtt_offset;
> return 0;
> +}
>
> -err_unpin:
> - i915_gem_object_unpin(obj);
> - return ret;
> +static void
> +i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
> +{
> + struct drm_i915_gem_exec_object2 *entry;
> +
> + if (!obj->gtt_space)
> + return;
> +
> + entry = obj->exec_entry;
> +
> + if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
> + i915_gem_object_unpin_fence(obj);
> +
> + if (entry->flags & __EXEC_OBJECT_HAS_PIN)
> + i915_gem_object_unpin(obj);
> +
> + entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
> }
>
> static int
> @@ -385,11 +412,10 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
> struct drm_file *file,
> struct list_head *objects)
> {
> - drm_i915_private_t *dev_priv = ring->dev->dev_private;
> struct drm_i915_gem_object *obj;
> - int ret, retry;
> - bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
> struct list_head ordered_objects;
> + bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
> + int retry;
>
> INIT_LIST_HEAD(&ordered_objects);
> while (!list_empty(objects)) {
> @@ -427,12 +453,12 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
> * 2. Bind new objects.
> * 3. Decrement pin count.
> *
> - * This avoid unnecessary unbinding of later objects in order to makr
> + * This avoids unnecessary unbinding of later objects in order to make
> * room for the earlier objects *unless* we need to defragment.
> */
> retry = 0;
> do {
> - ret = 0;
> + int ret = 0;
>
> /* Unbind any ill-fitting objects or pin. */
> list_for_each_entry(obj, objects, exec_list) {
> @@ -452,7 +478,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
> (need_mappable && !obj->map_and_fenceable))
> ret = i915_gem_object_unbind(obj);
> else
> - ret = pin_and_fence_object(obj, ring);
> + ret = i915_gem_execbuffer_reserve_object(obj, ring);
> if (ret)
> goto err;
> }
> @@ -462,46 +488,14 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
> if (obj->gtt_space)
> continue;
>
> - ret = pin_and_fence_object(obj, ring);
> - if (ret) {
> - int ret_ignore;
> -
> - /* This can potentially raise a harmless
> - * -EINVAL if we failed to bind in the above
> - * call. It cannot raise -EINTR since we know
> - * that the bo is freshly bound and so will
> - * not need to be flushed or waited upon.
> - */
> - ret_ignore = i915_gem_object_unbind(obj);
> - (void)ret_ignore;
> - WARN_ON(obj->gtt_space);
> - break;
> - }
> + ret = i915_gem_execbuffer_reserve_object(obj, ring);
> + if (ret)
> + goto err;
> }
>
> - /* Decrement pin count for bound objects */
> - list_for_each_entry(obj, objects, exec_list) {
> - struct drm_i915_gem_exec_object2 *entry;
> -
> - if (!obj->gtt_space)
> - continue;
> -
> - entry = obj->exec_entry;
> - if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
> - i915_gem_object_unpin_fence(obj);
> - entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
> - }
> -
> - i915_gem_object_unpin(obj);
> -
> - /* ... and ensure ppgtt mapping exist if needed. */
> - if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
> - i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
> - obj, obj->cache_level);
> -
> - obj->has_aliasing_ppgtt_mapping = 1;
> - }
> - }
> +err: /* Decrement pin count for bound objects */
> + list_for_each_entry(obj, objects, exec_list)
> + i915_gem_execbuffer_unreserve_object(obj);
>
> if (ret != -ENOSPC || retry++)
> return ret;
> @@ -510,24 +504,6 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
> if (ret)
> return ret;
> } while (1);
> -
> -err:
> - list_for_each_entry_continue_reverse(obj, objects, exec_list) {
> - struct drm_i915_gem_exec_object2 *entry;
> -
> - if (!obj->gtt_space)
> - continue;
> -
> - entry = obj->exec_entry;
> - if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
> - i915_gem_object_unpin_fence(obj);
> - entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
> - }
> -
> - i915_gem_object_unpin(obj);
> - }
> -
> - return ret;
> }
>
> static int
> --
> 1.7.10.4
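
One closing note on the control flow this buys in
i915_gem_execbuffer_reserve: with an idempotent unreserve, success and
failure share the single cleanup pass at the err: label, and the old
reverse-walking unwind at the bottom of the function disappears. A
compilable miniature of that loop shape (toy reserve/unreserve over an
array instead of the object list; evict_everything is a stand-in for
i915_gem_evict_everything):

    #include <errno.h>

    #define N 3

    static int reserved[N];

    /* toy: pretend the third object never fits in the aperture */
    static int reserve(int i)
    {
            if (i == 2)
                    return -ENOSPC;
            reserved[i] = 1;
            return 0;
    }

    /* idempotent: safe whether or not reserve(i) ever succeeded */
    static void unreserve(int i)
    {
            reserved[i] = 0;
    }

    static int evict_everything(void)
    {
            return 0;
    }

    static int reserve_all(void)
    {
            int retry = 0;

            do {
                    int i, ret = 0;

                    for (i = 0; i < N; i++) {
                            ret = reserve(i);
                            if (ret)
                                    goto err;   /* no per-object unwind */
                    }

    err:            /* cleanup pass runs on success *and* failure */
                    for (i = 0; i < N; i++)
                            unreserve(i);

                    if (ret != -ENOSPC || retry++)
                            return ret;

                    ret = evict_everything();
                    if (ret)
                            return ret;
            } while (1);
    }

    int main(void)
    {
            return reserve_all() == -ENOSPC ? 0 : 1;
    }

The retry++ in the return test gives exactly one defragmentation
attempt before giving up with -ENOSPC, as before.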
--
Daniel Vetter
Mail: daniel at ffwll.ch
Mobile: +41 (0)79 365 57 48