[Intel-gfx] [PATCH] drm/i915/blt: Remove recursive vma->lock

kbuild test robot <lkp@intel.com>
Tue Jun 18 17:28:26 UTC 2019


Hi Chris,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on drm-intel/for-linux-next]
[also build test WARNING on next-20190618]
[cannot apply to v5.2-rc5]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Chris-Wilson/drm-i915-blt-Remove-recursive-vma-lock/20190618-194749
base:   git://anongit.freedesktop.org/drm-intel for-linux-next
reproduce:
        # apt-get install sparse
        # sparse version: v0.6.1-rc1-7-g2b96cd8-dirty
        make ARCH=x86_64 allmodconfig
        make C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__'
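
To iterate on just this one file, the same sparse check can presumably be pointed at the single object instead of a full allmodconfig build; the target path below is taken from the warning and assumes the tree has already been configured for x86_64 allmodconfig:

        # C=2 forces the sparse check even if the object is already up to date
        make C=2 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__' \
                drivers/gpu/drm/i915/gem/i915_gem_client_blt.o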

If you fix the issue, kindly add the following tag:
Reported-by: kbuild test robot <lkp@intel.com>


sparse warnings: (new ones prefixed by >>)

>> drivers/gpu/drm/i915/gem/i915_gem_client_blt.c:201:65: sparse: sparse: incorrect type in argument 3 (different base types) @@    expected struct i915_request *rq @@    got struct dma_fence * @@
>> drivers/gpu/drm/i915/gem/i915_gem_client_blt.c:201:65: sparse:    expected struct i915_request *rq
>> drivers/gpu/drm/i915/gem/i915_gem_client_blt.c:201:65: sparse:    got struct dma_fence *
   include/linux/reservation.h:220:20: sparse: sparse: dereference of noderef expression
   include/linux/reservation.h:220:45: sparse: sparse: dereference of noderef expression
   include/linux/reservation.h:220:20: sparse: sparse: dereference of noderef expression
   include/linux/reservation.h:220:45: sparse: sparse: dereference of noderef expression
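
The new warning is a plain pointer-type mismatch in the third argument of the i915_active_ref() call: the expanded lines show sparse expecting a struct i915_request * but being handed a struct dma_fence *. A minimal sketch of the two shapes involved, with the prototype inferred from the warning rather than checked against this tree:

        /* prototype as implied by the warning (assumed, not verified in-tree) */
        int i915_active_ref(struct i915_active *ref, u64 timeline, struct i915_request *rq);

        /* the call at line 201 passes &rq->fence, i.e. a struct dma_fence *, as that argument */
        err = i915_active_ref(&vma->active, rq->fence.context, &rq->fence);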

vim +201 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c

   153	
   154	static void clear_pages_worker(struct work_struct *work)
   155	{
   156		struct clear_pages_work *w = container_of(work, typeof(*w), work);
   157		struct drm_i915_private *i915 = w->ce->gem_context->i915;
   158		struct drm_i915_gem_object *obj = w->sleeve->obj;
   159		struct i915_vma *vma = w->sleeve->vma;
   160		struct i915_request *rq;
   161		int err = w->dma.error;
   162	
   163		if (unlikely(err))
   164			goto out_signal;
   165	
   166		if (obj->cache_dirty) {
   167			obj->write_domain = 0;
   168			if (i915_gem_object_has_struct_page(obj))
   169				drm_clflush_sg(w->sleeve->pages);
   170			obj->cache_dirty = false;
   171		}
   172	
   173		/* XXX: we need to kill this */
   174		mutex_lock(&i915->drm.struct_mutex);
   175		err = i915_vma_pin(vma, 0, 0, PIN_USER);
   176		if (unlikely(err))
   177			goto out_unlock;
   178	
   179		rq = i915_request_create(w->ce);
   180		if (IS_ERR(rq)) {
   181			err = PTR_ERR(rq);
   182			goto out_unpin;
   183		}
   184	
   185		/* There's no way the fence has signalled */
   186		if (dma_fence_add_callback(&rq->fence, &w->cb,
   187					   clear_pages_dma_fence_cb))
   188			GEM_BUG_ON(1);
   189	
   190		if (w->ce->engine->emit_init_breadcrumb) {
   191			err = w->ce->engine->emit_init_breadcrumb(rq);
   192			if (unlikely(err))
   193				goto out_request;
   194		}
   195	
   196		/*
   197		 * w->dma is already exported via (vma|obj)->resv we need only
   198		 * keep track of the GPU activity within this vma/request, and
   199		 * propagate the signal from the request to w->dma.
   200		 */
 > 201		err = i915_active_ref(&vma->active, rq->fence.context, &rq->fence);
   202		if (err)
   203			goto out_request;
   204	
   205		err = intel_emit_vma_fill_blt(rq, vma, w->value);
   206	out_request:
   207		if (unlikely(err)) {
   208			i915_request_skip(rq, err);
   209			err = 0;
   210		}
   211	
   212		i915_request_add(rq);
   213	out_unpin:
   214		i915_vma_unpin(vma);
   215	out_unlock:
   216		mutex_unlock(&i915->drm.struct_mutex);
   217	out_signal:
   218		if (unlikely(err)) {
   219			dma_fence_set_error(&w->dma, err);
   220			dma_fence_signal(&w->dma);
   221			dma_fence_put(&w->dma);
   222		}
   223	}
   224	
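
For completeness, a hedged sketch of one way the flagged call could be adjusted so the types line up, assuming the request itself (rather than its fence) is what the active tracker is meant to take; whether i915_active_ref() should instead be taught to accept a dma_fence is a design decision left to the patch author:

-	err = i915_active_ref(&vma->active, rq->fence.context, &rq->fence);
+	err = i915_active_ref(&vma->active, rq->fence.context, rq);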

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

