[Intel-gfx] [bug report] drm/i915: Add support for moving fence waiting

Dan Carpenter dan.carpenter at oracle.com
Mon Nov 29 07:43:27 UTC 2021


Hello Maarten Lankhorst,

This is a semi-automatic email about new static checker warnings.

The patch f6c466b84cfa: "drm/i915: Add support for moving fence 
waiting" from Nov 22, 2021, leads to the following Smatch complaint:

    drivers/gpu/drm/i915/i915_vma.c:1015 i915_vma_pin_ww()
    error: we previously assumed 'vma->obj' could be null (see line 924)

drivers/gpu/drm/i915/i915_vma.c
   923	
   924		moving = vma->obj ? i915_gem_object_get_moving_fence(vma->obj) : NULL;
                         ^^^^^^^^
The patch adds a NULL check here (the ternary), which implies that 'vma->obj' can be NULL at this point.

   925		if (flags & vma->vm->bind_async_flags || moving) {
   926			/* lock VM */
   927			err = i915_vm_lock_objects(vma->vm, ww);
   928			if (err)
   929				goto err_rpm;
   930	
   931			work = i915_vma_work();
   932			if (!work) {
   933				err = -ENOMEM;
   934				goto err_rpm;
   935			}
   936	
   937			work->vm = i915_vm_get(vma->vm);
   938	
   939			dma_fence_work_chain(&work->base, moving);
   940	
   941			/* Allocate enough page directories to used PTE */
   942			if (vma->vm->allocate_va_range) {
   943				err = i915_vm_alloc_pt_stash(vma->vm,
   944							     &work->stash,
   945							     vma->size);
   946				if (err)
   947					goto err_fence;
   948	
   949				err = i915_vm_map_pt_stash(vma->vm, &work->stash);
   950				if (err)
   951					goto err_fence;
   952			}
   953		}
   954	
   955		/*
   956		 * Differentiate between user/kernel vma inside the aliasing-ppgtt.
   957		 *
   958		 * We conflate the Global GTT with the user's vma when using the
   959		 * aliasing-ppgtt, but it is still vitally important to try and
   960		 * keep the use cases distinct. For example, userptr objects are
   961		 * not allowed inside the Global GTT as that will cause lock
   962		 * inversions when we have to evict them the mmu_notifier callbacks -
   963		 * but they are allowed to be part of the user ppGTT which can never
   964		 * be mapped. As such we try to give the distinct users of the same
   965		 * mutex, distinct lockclasses [equivalent to how we keep i915_ggtt
   966		 * and i915_ppgtt separate].
   967		 *
   968		 * NB this may cause us to mask real lock inversions -- while the
   969		 * code is safe today, lockdep may not be able to spot future
   970		 * transgressions.
   971		 */
   972		err = mutex_lock_interruptible_nested(&vma->vm->mutex,
   973						      !(flags & PIN_GLOBAL));
   974		if (err)
   975			goto err_fence;
   976	
   977		/* No more allocations allowed now we hold vm->mutex */
   978	
   979		if (unlikely(i915_vma_is_closed(vma))) {
   980			err = -ENOENT;
   981			goto err_unlock;
   982		}
   983	
   984		bound = atomic_read(&vma->flags);
   985		if (unlikely(bound & I915_VMA_ERROR)) {
   986			err = -ENOMEM;
   987			goto err_unlock;
   988		}
   989	
   990		if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
   991			err = -EAGAIN; /* pins are meant to be fairly temporary */
   992			goto err_unlock;
   993		}
   994	
   995		if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
   996			__i915_vma_pin(vma);
   997			goto err_unlock;
   998		}
   999	
  1000		err = i915_active_acquire(&vma->active);
  1001		if (err)
  1002			goto err_unlock;
  1003	
  1004		if (!(bound & I915_VMA_BIND_MASK)) {
  1005			err = i915_vma_insert(vma, size, alignment, flags);
  1006			if (err)
  1007				goto err_active;
  1008	
  1009			if (i915_is_ggtt(vma->vm))
  1010				__i915_vma_set_map_and_fenceable(vma);
  1011		}
  1012	
  1013		GEM_BUG_ON(!vma->pages);
  1014		err = i915_vma_bind(vma,
  1015				    vma->obj->cache_level,
                                    ^^^^^^^^^^
This older code dereferences 'vma->obj' without checking for NULL, so if the NULL check on line 924 is needed, this is a potential NULL pointer dereference.

  1016				    flags, work);
  1017		if (err)

regards,
dan carpenter


More information about the Intel-gfx mailing list