[Intel-gfx] [PATCH 03/16] drm/i915: Remove domain flubbing from i915_gem_object_finish_gpu()
Daniel Vetter
daniel at ffwll.ch
Mon May 11 09:43:55 PDT 2015
On Mon, Apr 27, 2015 at 01:41:14PM +0100, Chris Wilson wrote:
> We no longer interpolate domains in the same manner, and even if we did,
> we should trust that setting either of the other write domains would trigger
> an invalidation rather than force it. Remove the tweaking of the
> read_domains since it serves no purpose and use
> i915_gem_object_wait_rendering() directly.
>
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
Merged with a bit of history digging added.
Thanks, Daniel
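
For anyone skimming the archive, the caller-side change boils down to
dropping the old helper and waiting directly. A rough sketch of the
before/after (kernel-style, not meant to apply; the readonly semantics
below are my reading of i915_gem_object_wait_rendering()):

	/* Before: wait for the GPU, then clear the GPU read domains by hand. */
	ret = i915_gem_object_finish_gpu(obj);
	if (ret)
		return ret;

	/* After: wait for outstanding rendering directly.  readonly=false
	 * waits for all outstanding requests (what unbind and a cache-level
	 * change want); readonly=true only waits for the last writer, which
	 * is why the new comment in intel_finish_fb() spells out that we
	 * rely on userspace rendering into the buffer it is waiting on.
	 */
	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

The read_domains tweak that finish_gpu() did on top of the wait is the
part the commit message calls out as serving no purpose anymore.
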
> ---
> drivers/gpu/drm/i915/i915_drv.h | 4 +++-
> drivers/gpu/drm/i915/i915_gem.c | 26 +++-----------------------
> drivers/gpu/drm/i915/intel_display.c | 13 ++++++++-----
> 3 files changed, 14 insertions(+), 29 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index f3c77ca6b838..c9161c3d1e34 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -2791,7 +2791,6 @@ static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv)
>
> void i915_gem_reset(struct drm_device *dev);
> bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
> -int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
> int __must_check i915_gem_init(struct drm_device *dev);
> int i915_gem_init_rings(struct drm_device *dev);
> int __must_check i915_gem_init_hw(struct drm_device *dev);
> @@ -2813,6 +2812,9 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
> int __must_check i915_wait_request(struct drm_i915_gem_request *req);
> int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
> int __must_check
> +i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
> + bool readonly);
> +int __must_check
> i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
> bool write);
> int __must_check
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index ec9e36e9ec78..cd0ac1b54122 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -40,9 +40,6 @@
>
> static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
> static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
> -static __must_check int
> -i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
> - bool readonly);
> static void
> i915_gem_object_retire(struct drm_i915_gem_object *obj);
>
> @@ -1399,7 +1396,7 @@ i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj)
> * Ensures that all rendering to the object has completed and the object is
> * safe to unbind from the GTT or access from the CPU.
> */
> -static __must_check int
> +int
> i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
> bool readonly)
> {
> @@ -3039,7 +3036,7 @@ int i915_vma_unbind(struct i915_vma *vma)
>
> BUG_ON(obj->pages == NULL);
>
> - ret = i915_gem_object_finish_gpu(obj);
> + ret = i915_gem_object_wait_rendering(obj, false);
> if (ret)
> return ret;
> /* Continue on if we fail due to EIO, the GPU is hung so we
> @@ -3792,7 +3789,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
> }
>
> if (bound) {
> - ret = i915_gem_object_finish_gpu(obj);
> + ret = i915_gem_object_wait_rendering(obj, false);
> if (ret)
> return ret;
>
> @@ -3996,23 +3993,6 @@ i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
> obj->pin_display--;
> }
>
> -int
> -i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
> -{
> - int ret;
> -
> - if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
> - return 0;
> -
> - ret = i915_gem_object_wait_rendering(obj, false);
> - if (ret)
> - return ret;
> -
> - /* Ensure that we invalidate the GPU's caches and TLBs. */
> - obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
> - return 0;
> -}
> -
> /**
> * Moves a single object to the CPU read, and possibly write domain.
> *
> diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
> index 3094b0807b40..4334e59bf0aa 100644
> --- a/drivers/gpu/drm/i915/intel_display.c
> +++ b/drivers/gpu/drm/i915/intel_display.c
> @@ -3235,27 +3235,30 @@ void intel_finish_reset(struct drm_device *dev)
> drm_modeset_unlock_all(dev);
> }
>
> -static int
> +static void
> intel_finish_fb(struct drm_framebuffer *old_fb)
> {
> struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
> - struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> + struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
> bool was_interruptible = dev_priv->mm.interruptible;
> int ret;
>
> /* Big Hammer, we also need to ensure that any pending
> * MI_WAIT_FOR_EVENT inside a user batch buffer on the
> * current scanout is retired before unpinning the old
> - * framebuffer.
> + * framebuffer. Note that we rely on userspace rendering
> + * into the buffer attached to the pipe they are waiting
> + * on. If not, userspace generates a GPU hang with IPEHR
> + * pointing to the MI_WAIT_FOR_EVENT.
> *
> * This should only fail upon a hung GPU, in which case we
> * can safely continue.
> */
> dev_priv->mm.interruptible = false;
> - ret = i915_gem_object_finish_gpu(obj);
> + ret = i915_gem_object_wait_rendering(obj, true);
> dev_priv->mm.interruptible = was_interruptible;
>
> - return ret;
> + WARN_ON(ret);
> }
>
> static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
> --
> 2.1.4
>
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx at lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/intel-gfx
--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch