[Intel-gfx] [PATCH 15/19] drm/i915: Remove support for unlocked i915_vma unbind
Niranjana Vishwanathapura
niranjana.vishwanathapura at intel.com
Wed Sep 8 03:35:00 UTC 2021
On Mon, Aug 30, 2021 at 02:10:02PM +0200, Maarten Lankhorst wrote:
>Now that we require the object lock for all ops, some code handling
>race conditions can be removed.
>
>This is required to not take short-term pins inside execbuf.
>
Ok, I get it: since i915_vma_unbind() is now called under the object lock,
this race handling is no longer needed. But I would like someone else to
take a look as well.
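
For my own understanding, roughly the call pattern I expect after this
series (a minimal sketch only, not the actual execbuf/eviction code; the
evict_vma_locked() helper name is made up here, and the ww context and
interruptible handling are simplified):

	/*
	 * Sketch: unbind a vma with the object lock held, so the old
	 * re-check under vm->mutex in try_qad_pin() is unnecessary.
	 */
	static int evict_vma_locked(struct i915_vma *vma)
	{
		struct drm_i915_gem_object *obj = vma->obj;
		int err;

		/* Serializes against concurrent pin/unbind on this object. */
		err = i915_gem_object_lock_interruptible(obj, NULL);
		if (err)
			return err;

		/* Pin count and bind flags are stable under the obj lock. */
		err = i915_vma_unbind(vma);

		i915_gem_object_unlock(obj);
		return err;
	}

If that matches the intent, the removal below looks correct to me.
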
Acked-by: Niranjana Vishwanathapura <niranjana.vishwanathapura at intel.com>
>Signed-off-by: Maarten Lankhorst <maarten.lankhorst at linux.intel.com>
>---
> drivers/gpu/drm/i915/i915_vma.c | 40 +++++----------------------------
> 1 file changed, 5 insertions(+), 35 deletions(-)
>
>diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
>index da54e6882650..f6dacfc3e840 100644
>--- a/drivers/gpu/drm/i915/i915_vma.c
>+++ b/drivers/gpu/drm/i915/i915_vma.c
>@@ -748,7 +748,6 @@ i915_vma_detach(struct i915_vma *vma)
> static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
> {
> unsigned int bound;
>- bool pinned = true;
>
> bound = atomic_read(&vma->flags);
> do {
>@@ -758,34 +757,10 @@ static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
> if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
> return false;
>
>- if (!(bound & I915_VMA_PIN_MASK))
>- goto unpinned;
>-
> GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
> } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
>
> return true;
>-
>-unpinned:
>- /*
>- * If pin_count==0, but we are bound, check under the lock to avoid
>- * racing with a concurrent i915_vma_unbind().
>- */
>- mutex_lock(&vma->vm->mutex);
>- do {
>- if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
>- pinned = false;
>- break;
>- }
>-
>- if (unlikely(flags & ~bound)) {
>- pinned = false;
>- break;
>- }
>- } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
>- mutex_unlock(&vma->vm->mutex);
>-
>- return pinned;
> }
>
>
>@@ -1085,13 +1060,7 @@ __i915_vma_get_pages(struct i915_vma *vma)
> vma->ggtt_view.type, ret);
> }
>
>- pages = xchg(&vma->pages, pages);
>-
>- /* did we race against a put_pages? */
>- if (pages && pages != vma->obj->mm.pages) {
>- sg_free_table(vma->pages);
>- kfree(vma->pages);
>- }
>+ vma->pages = pages;
>
> return ret;
> }
>@@ -1125,13 +1094,14 @@ I915_SELFTEST_EXPORT int i915_vma_get_pages(struct i915_vma *vma)
> static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
> {
> /* We allocate under vma_get_pages, so beware the shrinker */
>- struct sg_table *pages = READ_ONCE(vma->pages);
>+ struct sg_table *pages = vma->pages;
>
> GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
>
> if (atomic_sub_return(count, &vma->pages_count) == 0) {
>- if (pages == cmpxchg(&vma->pages, pages, NULL) &&
>- pages != vma->obj->mm.pages) {
>+ vma->pages = NULL;
>+
>+ if (pages != vma->obj->mm.pages) {
> sg_free_table(pages);
> kfree(pages);
> }
>--
>2.32.0
>