[PATCH 10/10] drm/i915: Pull i915_vma_pin under the vm->mutex

Chris Wilson <chris@chris-wilson.co.uk>
Sat Aug 17 20:33:56 UTC 2019


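Replace the struct_mutex requirement for pinning the i915_vma with the
vm->mutex itself. As we cannot allocate underneath the new lock, the
actual PTE binding is packaged into a preallocated dma_fence_work that
is committed only after vm->mutex has been released, ordered behind
(and published as) the vma's exclusive fence via the new
i915_active_set_exclusive(). vma->flags becomes an atomic_t so that
the pin count and bind flags can be manipulated under the
finer-grained lock.
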
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/Makefile                 |   1 +
 drivers/gpu/drm/i915/gem/i915_gem_domain.c    |   2 +-
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    |   2 +-
 drivers/gpu/drm/i915/gem/i915_gem_object.c    |   2 +-
 drivers/gpu/drm/i915/gem/i915_gem_stolen.c    |   2 +-
 .../gpu/drm/i915/gem/selftests/huge_pages.c   |   2 +-
 drivers/gpu/drm/i915/i915_active.c            |  63 ++++++
 drivers/gpu/drm/i915/i915_active.h            |   2 +
 drivers/gpu/drm/i915/i915_active_types.h      |   4 +
 drivers/gpu/drm/i915/i915_gem_gtt.c           |  18 +-
 drivers/gpu/drm/i915/i915_sw_fence_work.c     |  80 +++++++
 drivers/gpu/drm/i915/i915_sw_fence_work.h     |  31 +++
 drivers/gpu/drm/i915/i915_vma.c               | 203 +++++++++++++-----
 drivers/gpu/drm/i915/i915_vma.h               |  83 ++++---
 drivers/gpu/drm/i915/selftests/mock_gtt.c     |   4 +-
 15 files changed, 390 insertions(+), 109 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/i915_sw_fence_work.c
 create mode 100644 drivers/gpu/drm/i915/i915_sw_fence_work.h

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 45add812048b..658b930d34a8 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -62,6 +62,7 @@ i915-y += \
 	i915_memcpy.o \
 	i915_mm.o \
 	i915_sw_fence.o \
+	i915_sw_fence_work.o \
 	i915_syncmap.o \
 	i915_user_extensions.o
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 60134c5aefa7..720e384dda2a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -277,7 +277,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 			if (!drm_mm_node_allocated(&vma->node))
 				continue;
 
-			ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
+			ret = i915_vma_bind(vma, cache_level, PIN_UPDATE, NULL);
 			if (ret)
 				return ret;
 		}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 49020720be5e..b72a10b143d2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1387,7 +1387,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
 		if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
 		    IS_GEN(eb->i915, 6)) {
 			err = i915_vma_bind(target, target->obj->cache_level,
-					    PIN_GLOBAL);
+					    PIN_GLOBAL, NULL);
 			if (WARN_ONCE(err,
 				      "Unexpected failure to bind target VMA!"))
 				return err;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index d7855dc5a5c5..0ef60dae23a7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -163,7 +163,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 
 		list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) {
 			GEM_BUG_ON(i915_vma_is_active(vma));
-			vma->flags &= ~I915_VMA_PIN_MASK;
+			atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
 			i915_vma_destroy(vma);
 		}
 		GEM_BUG_ON(!list_empty(&obj->vma.list));
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 2e1bfd5e4adf..f4504b7c479f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -685,7 +685,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 
 	vma->pages = obj->mm.pages;
-	vma->flags |= I915_VMA_GLOBAL_BIND;
+	atomic_or(I915_VMA_GLOBAL_BIND, &vma->flags);
 	__i915_vma_set_map_and_fenceable(vma);
 
 	mutex_lock(&ggtt->vm.mutex);
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 8de83c6d81f5..b66f02ee3ed7 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -1387,7 +1387,7 @@ static int igt_ppgtt_pin_update(void *arg)
 			goto out_unpin;
 		}
 
-		err = i915_vma_bind(vma, I915_CACHE_NONE, PIN_UPDATE);
+		err = i915_vma_bind(vma, I915_CACHE_NONE, PIN_UPDATE, NULL);
 		if (err)
 			goto out_unpin;
 
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index df6164591702..438a7f7af4dc 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -335,6 +335,45 @@ int i915_active_ref(struct i915_active *ref,
 	return err;
 }
 
+static void excl_cb(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+	struct i915_active *ref = container_of(cb, typeof(*ref), excl_cb);
+
+	RCU_INIT_POINTER(ref->excl, NULL);
+	dma_fence_put(f);
+
+	active_retire(ref);
+}
+
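+/*
+ * Track a single exclusive (e.g. write) fence on @ref, replacing any fence
+ * previously installed in the slot. When the fence signals, the callback
+ * drops our reference and retires @ref.
+ */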
+void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
+{
+	GEM_BUG_ON(i915_active_is_idle(ref));
+
+	dma_fence_get(f);
+
+	rcu_read_lock();
+	if (rcu_access_pointer(ref->excl)) {
+		struct dma_fence *old;
+
+		old = dma_fence_get_rcu_safe(&ref->excl);
+		if (old) {
+			if (dma_fence_remove_callback(old, &ref->excl_cb))
+				atomic_dec(&ref->count);
+			dma_fence_put(old);
+		}
+	}
+	rcu_read_unlock();
+
+	atomic_inc(&ref->count);
+	rcu_assign_pointer(ref->excl, f);
+
+	if (dma_fence_add_callback(f, &ref->excl_cb, excl_cb)) {
+		RCU_INIT_POINTER(ref->excl, NULL);
+		atomic_dec(&ref->count);
+		dma_fence_put(f);
+	}
+}
+
 int i915_active_acquire(struct i915_active *ref)
 {
 	int err;
@@ -393,6 +432,25 @@ void i915_active_ungrab(struct i915_active *ref)
 	__active_ungrab(ref);
 }
 
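+/* Wait for the exclusive fence to be signaled, if one is installed */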
+static int excl_wait(struct i915_active *ref)
+{
+	int err = 0;
+
+	rcu_read_lock();
+	if (rcu_access_pointer(ref->excl)) {
+		struct dma_fence *old;
+
+		old = dma_fence_get_rcu_safe(&ref->excl);
+		if (old) {
+			err = dma_fence_wait(old, true);
+			dma_fence_put(old);
+		}
+	}
+	rcu_read_unlock();
+
+	return err;
+}
+
 int i915_active_wait(struct i915_active *ref)
 {
 	struct active_node *it, *n;
@@ -413,6 +471,10 @@ int i915_active_wait(struct i915_active *ref)
 		return 0;
 	}
 
+	err = excl_wait(ref);
+	if (err)
+		goto out;
+
 	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
 		if (is_barrier(&it->base)) { /* unconnected idle-barrier */
 			err = -EBUSY;
@@ -424,6 +486,7 @@ int i915_active_wait(struct i915_active *ref)
 			break;
 	}
 
+out:
 	__active_retire(ref);
 	if (err)
 		return err;
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index f95058f99057..bc1c9d5d4f63 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -373,6 +373,8 @@ int i915_active_ref(struct i915_active *ref,
 		    struct intel_timeline *tl,
 		    struct i915_request *rq);
 
+void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f);
+
 int i915_active_wait(struct i915_active *ref);
 
 int i915_request_await_active(struct i915_request *rq,
diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h
index 1854e7d168c1..b2f227e7de0d 100644
--- a/drivers/gpu/drm/i915/i915_active_types.h
+++ b/drivers/gpu/drm/i915/i915_active_types.h
@@ -8,6 +8,7 @@
 #define _I915_ACTIVE_TYPES_H_
 
 #include <linux/atomic.h>
+#include <linux/dma-fence.h>
 #include <linux/llist.h>
 #include <linux/mutex.h>
 #include <linux/rbtree.h>
@@ -51,6 +52,9 @@ struct i915_active {
 	struct mutex mutex;
 	atomic_t count;
 
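+	/* Optional exclusive (write) fence, tracked alongside the rbtree */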
+	struct dma_fence __rcu *excl;
+	struct dma_fence_cb excl_cb;
+
 	unsigned long flags;
 #define I915_ACTIVE_GRAB_BIT 0
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 3e376ba35bb8..688ce50be107 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -143,12 +143,12 @@ static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
 
 static int ppgtt_bind_vma(struct i915_vma *vma,
 			  enum i915_cache_level cache_level,
-			  u32 unused)
+			  u32 flags)
 {
 	u32 pte_flags;
 	int err;
 
-	if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
+	if (flags & BIND_ALLOC) {
 		err = vma->vm->allocate_va_range(vma->vm,
 						 vma->node.start, vma->size);
 		if (err)
@@ -1863,7 +1863,7 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
 
 	vma->size = size;
 	vma->fence_size = size;
-	vma->flags = I915_VMA_GGTT;
+	atomic_or(I915_VMA_GGTT, &vma->flags);
 	vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
 
 	INIT_LIST_HEAD(&vma->obj_link);
@@ -2422,7 +2422,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
 	 * upgrade to both bound if we bind either to avoid double-binding.
 	 */
-	vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
+	atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
 
 	return 0;
 }
@@ -2452,7 +2452,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 	if (flags & I915_VMA_LOCAL_BIND) {
 		struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias;
 
-		if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
+		if (flags & BIND_ALLOC) {
 			ret = alias->vm.allocate_va_range(&alias->vm,
 							  vma->node.start,
 							  vma->size);
@@ -2480,7 +2480,7 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
 {
 	struct drm_i915_private *i915 = vma->vm->i915;
 
-	if (vma->flags & I915_VMA_GLOBAL_BIND) {
+	if (atomic_read(&vma->flags) & I915_VMA_GLOBAL_BIND) {
 		struct i915_address_space *vm = vma->vm;
 		intel_wakeref_t wakeref;
 
@@ -2488,7 +2488,7 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
 			vm->clear_range(vm, vma->node.start, vma->size);
 	}
 
-	if (vma->flags & I915_VMA_LOCAL_BIND) {
+	if (atomic_read(&vma->flags) & I915_VMA_LOCAL_BIND) {
 		struct i915_address_space *vm =
 			&i915_vm_to_ggtt(vma->vm)->alias->vm;
 
@@ -3272,7 +3272,7 @@ static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
 	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
 		struct drm_i915_gem_object *obj = vma->obj;
 
-		if (!(vma->flags & I915_VMA_GLOBAL_BIND))
+		if (!(atomic_read(&vma->flags) & I915_VMA_GLOBAL_BIND))
 			continue;
 
 		mutex_unlock(&ggtt->vm.mutex);
@@ -3282,7 +3282,7 @@ static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
 
 		WARN_ON(i915_vma_bind(vma,
 				      obj ? obj->cache_level : 0,
-				      PIN_UPDATE));
+				      PIN_UPDATE, NULL));
 		if (obj) {
 			i915_gem_object_lock(obj);
 			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
diff --git a/drivers/gpu/drm/i915/i915_sw_fence_work.c b/drivers/gpu/drm/i915/i915_sw_fence_work.c
new file mode 100644
index 000000000000..0725b1db9ab2
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_sw_fence_work.c
@@ -0,0 +1,80 @@
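+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+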
+#include "i915_sw_fence_work.h"
+
+static void fence_work(struct work_struct *work)
+{
+	struct dma_fence_work *f = container_of(work, typeof(*f), work);
+	int err;
+
+	err = f->fn(f);
+	if (err)
+		dma_fence_set_error(&f->base, err);
+	dma_fence_signal(&f->base);
+	dma_fence_put(&f->base);
+}
+
+static int __i915_sw_fence_call
+fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+	struct dma_fence_work *f = container_of(fence, typeof(*f), chain);
+
+	switch (state) {
+	case FENCE_COMPLETE:
+		queue_work(system_unbound_wq, &f->work);
+		break;
+
+	case FENCE_FREE:
+		dma_fence_put(&f->base);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static const char *get_driver_name(struct dma_fence *fence)
+{
+	return "dma-fence";
+}
+
+static const char *get_timeline_name(struct dma_fence *fence)
+{
+	return "work";
+}
+
+static void fence_release(struct dma_fence *fence)
+{
+	struct dma_fence_work *f = container_of(fence, typeof(*f), base);
+
+	i915_sw_fence_fini(&f->chain);
+
+	BUILD_BUG_ON(offsetof(typeof(*f), base));
+	dma_fence_free(&f->base);
+}
+
+static const struct dma_fence_ops fence_ops = {
+	.get_driver_name = get_driver_name,
+	.get_timeline_name = get_timeline_name,
+	.release = fence_release,
+};
+
+void dma_fence_work_init(struct dma_fence_work *f,
+			 int (*fn)(struct dma_fence_work *f))
+{
+	spin_lock_init(&f->lock);
+	dma_fence_init(&f->base, &fence_ops, &f->lock, 0, 0);
+
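+	/* One reference for the work item, dropped after the fence signals */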
+	dma_fence_get(&f->base);
+	INIT_WORK(&f->work, fence_work);
+
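+	/* One reference for the sw_fence chain, dropped on FENCE_FREE */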
+	dma_fence_get(&f->base);
+	i915_sw_fence_init(&f->chain, fence_notify);
+
+	f->fn = fn;
+}
+
+int dma_fence_work_chain(struct dma_fence_work *f, struct dma_fence *signal)
+{
+	if (!signal)
+		return 0;
+
+	return __i915_sw_fence_await_dma_fence(&f->chain, signal, &f->cb);
+}
diff --git a/drivers/gpu/drm/i915/i915_sw_fence_work.h b/drivers/gpu/drm/i915/i915_sw_fence_work.h
new file mode 100644
index 000000000000..488e0fb76a7a
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_sw_fence_work.h
@@ -0,0 +1,31 @@
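+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+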
+#ifndef I915_SW_FENCE_WORK_H
+#define I915_SW_FENCE_WORK_H
+
+#include <linux/dma-fence.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include "i915_sw_fence.h"
+
+struct dma_fence_work {
+	struct dma_fence base;
+	spinlock_t lock;
+
+	struct i915_sw_fence chain;
+	struct i915_sw_dma_fence_cb cb;
+
+	struct work_struct work;
+	int (*fn)(struct dma_fence_work *f);
+};
+
+void dma_fence_work_init(struct dma_fence_work *f,
+			 int (*fn)(struct dma_fence_work *f));
+int dma_fence_work_chain(struct dma_fence_work *f, struct dma_fence *signal);
+
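+/*
+ * Once committed and all its awaits have signaled, the chain queues the
+ * work; the embedded dma_fence then signals upon completion (or error).
+ */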
+static inline void dma_fence_work_commit(struct dma_fence_work *f)
+{
+	i915_sw_fence_commit(&f->chain);
+	dma_fence_put(&f->base);
+}
+
+#endif /* I915_SW_FENCE_WORK_H */
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index b75a409908f6..a3ee529b7c8c 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -32,6 +32,7 @@
 
 #include "i915_drv.h"
 #include "i915_globals.h"
+#include "i915_sw_fence_work.h"
 #include "i915_trace.h"
 #include "i915_vma.h"
 
@@ -172,7 +173,7 @@ vma_create(struct drm_i915_gem_object *obj,
 								i915_gem_object_get_stride(obj));
 		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
 
-		vma->flags |= I915_VMA_GGTT;
+		atomic_or(I915_VMA_GGTT, &vma->flags);
 	}
 
 	spin_lock(&obj->vma.lock);
@@ -288,6 +289,38 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
 	return vma;
 }
 
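+/*
+ * The actual PTE binding is packaged as a dma_fence_work so that it can be
+ * preallocated before taking vm->mutex and then run asynchronously, ordered
+ * by fences, after the lock has been dropped.
+ */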
+struct i915_vma_work {
+	struct dma_fence_work base;
+	struct i915_vma *vma;
+	enum i915_cache_level cache_level;
+	unsigned int flags;
+};
+
+static int __vma_bind(struct dma_fence_work *work)
+{
+	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
+	struct i915_vma *vma = vw->vma;
+	int err;
+
+	err = vma->ops->bind_vma(vma, vw->cache_level, vw->flags);
+	if (err)
+		atomic_or(I915_VMA_ERROR, &vma->flags);
+
+	return err;
+}
+
+struct i915_vma_work *i915_vma_work(void)
+{
+	struct i915_vma_work *vw;
+
+	vw = kzalloc(sizeof(*vw), GFP_KERNEL);
+	if (!vw)
+		return NULL;
+
+	dma_fence_work_init(&vw->base, __vma_bind);
+	return vw;
+}
+
 /**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
  * @vma: VMA to map
@@ -298,8 +331,10 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
  * this VMA in case of non-default GGTT views) and PTE entries set up.
  * Note that DMA addresses are also the only part of the SG table we care about.
  */
-int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
-		  u32 flags)
+int i915_vma_bind(struct i915_vma *vma,
+		  enum i915_cache_level cache_level,
+		  u32 flags,
+		  struct i915_vma_work *work)
 {
 	u32 bind_flags;
 	u32 vma_flags;
@@ -322,7 +357,8 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 	if (flags & PIN_USER)
 		bind_flags |= I915_VMA_LOCAL_BIND;
 
-	vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
+	vma_flags = atomic_read(&vma->flags);
+	vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
 	if (flags & PIN_UPDATE)
 		bind_flags |= vma_flags;
 	else
@@ -332,12 +368,28 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 
 	GEM_BUG_ON(!vma->pages);
 
+	if (bind_flags & ~vma_flags)
+		bind_flags |= BIND_ALLOC;
+
 	trace_i915_vma_bind(vma, bind_flags);
-	ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
-	if (ret)
-		return ret;
+	if (!work) {
+		ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
+		if (ret)
+			return ret;
+	} else {
+		work->vma = vma;
+		work->cache_level = cache_level;
+		work->flags = bind_flags;
+
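+		/*
+		 * Order the PTE update after any fence already tracking the
+		 * backing store, and publish this work as the new exclusive
+		 * fence so that waiters (e.g. unbind) wait for the bind.
+		 */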
+		i915_vma_lock(vma);
+		dma_fence_work_chain(&work->base, dma_resv_get_excl(vma->resv));
+		dma_resv_add_excl_fence(vma->resv, &work->base.base);
+		i915_vma_unlock(vma);
+
+		i915_active_set_exclusive(&vma->active, &work->base.base);
+	}
 
-	vma->flags |= bind_flags;
+	atomic_or(bind_flags, &vma->flags);
 	return 0;
 }
 
@@ -356,7 +408,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 	}
 
 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
-	GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
+	GEM_BUG_ON((atomic_read(&vma->flags) & I915_VMA_GLOBAL_BIND) == 0);
 
 	ptr = vma->iomap;
 	if (ptr == NULL) {
@@ -469,9 +521,9 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
 	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;
 
 	if (mappable && fenceable)
-		vma->flags |= I915_VMA_CAN_FENCE;
+		atomic_or(I915_VMA_CAN_FENCE, &vma->flags);
 	else
-		vma->flags &= ~I915_VMA_CAN_FENCE;
+		atomic_and(~I915_VMA_CAN_FENCE, &vma->flags);
 }
 
 static bool color_differs(struct drm_mm_node *node, unsigned long color)
@@ -544,7 +596,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	int ret;
 
 	GEM_BUG_ON(i915_vma_is_closed(vma));
-	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
+	GEM_BUG_ON(atomic_read(&vma->flags) & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
 	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
 
 	size = max(size, vma->size);
@@ -580,21 +632,14 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 		return -ENOSPC;
 	}
 
-	if (vma->obj) {
-		ret = i915_gem_object_pin_pages(vma->obj);
-		if (ret)
-			return ret;
-
-		cache_level = vma->obj->cache_level;
-	} else {
-		cache_level = 0;
-	}
-
 	GEM_BUG_ON(vma->pages);
-
 	ret = vma->ops->set_pages(vma);
 	if (ret)
-		goto err_unpin;
+		return ret;
+
+	cache_level = 0;
+	if (vma->obj)
+		cache_level = vma->obj->cache_level;
 
 	if (flags & PIN_OFFSET_FIXED) {
 		u64 offset = flags & PIN_OFFSET_MASK;
@@ -660,6 +705,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	mutex_unlock(&vma->vm->mutex);
 
 	if (vma->obj) {
+		atomic_inc(&vma->obj->mm.pages_pin_count);
 		atomic_inc(&vma->obj->bind_count);
 		assert_bind_count(vma->obj);
 	}
@@ -668,9 +714,6 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 
 err_clear:
 	vma->ops->clear_pages(vma);
-err_unpin:
-	if (vma->obj)
-		i915_gem_object_unpin_pages(vma->obj);
 	return ret;
 }
 
@@ -678,14 +721,12 @@ static void
 i915_vma_remove(struct i915_vma *vma)
 {
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
-	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
+	GEM_BUG_ON(atomic_read(&vma->flags) & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
 
 	vma->ops->clear_pages(vma);
 
-	mutex_lock(&vma->vm->mutex);
 	drm_mm_remove_node(&vma->node);
 	list_del(&vma->vm_link);
-	mutex_unlock(&vma->vm->mutex);
 
 	/*
 	 * Since the unbound list is global, only move to that list if
@@ -706,51 +747,115 @@ i915_vma_remove(struct i915_vma *vma)
 	}
 }
 
-int __i915_vma_do_pin(struct i915_vma *vma,
-		      u64 size, u64 alignment, u64 flags)
+static int __i915_vma_do_pin(struct i915_vma *vma,
+			     u64 size, u64 alignment, u64 flags,
+			     struct i915_vma_work *work)
 {
-	const unsigned int bound = vma->flags;
+	const unsigned int bound = atomic_read(&vma->flags);
 	int ret;
 
-	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+	lockdep_assert_held(&vma->vm->mutex);
 	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
 	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
 
-	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
-		ret = -EBUSY;
-		goto err_unpin;
-	}
-
 	if ((bound & I915_VMA_BIND_MASK) == 0) {
 		ret = i915_vma_insert(vma, size, alignment, flags);
 		if (ret)
-			goto err_unpin;
+			return ret;
 	}
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 
-	ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags);
+	ret = i915_vma_bind(vma,
+			    vma->obj ? vma->obj->cache_level : 0,
+			    flags, work);
 	if (ret)
 		goto err_remove;
 
-	GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0);
+	GEM_BUG_ON((atomic_read(&vma->flags) & I915_VMA_BIND_MASK) == 0);
 
-	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
+	if ((bound ^ atomic_read(&vma->flags)) & I915_VMA_GLOBAL_BIND)
 		__i915_vma_set_map_and_fenceable(vma);
 
 	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
+
+	atomic_inc(&vma->flags);
 	return 0;
 
 err_remove:
 	if ((bound & I915_VMA_BIND_MASK) == 0) {
 		i915_vma_remove(vma);
 		GEM_BUG_ON(vma->pages);
-		GEM_BUG_ON(vma->flags & I915_VMA_BIND_MASK);
+		GEM_BUG_ON(atomic_read(&vma->flags) & I915_VMA_BIND_MASK);
 	}
-err_unpin:
-	__i915_vma_unpin(vma);
 	return ret;
 }
 
+int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+{
+	struct i915_vma_work *work = NULL;
+	unsigned int bound;
+	int err;
+
+	BUILD_BUG_ON(PIN_MBZ != I915_VMA_PIN_OVERFLOW);
+	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
+	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
+
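+	/*
+	 * Pin early to prevent the shrinker/eviction logic from destroying
+	 * our vma as we insert and bind.
+	 */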
+	bound = atomic_inc_return(&vma->flags);
+	if (likely(((bound ^ flags) & I915_VMA_BIND_MASK) == 0)) {
+		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+		GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
+		return 0;
+	}
+
+	if (bound & I915_VMA_ERROR) {
+		err = -ENOMEM;
+		goto err_unpin;
+	}
+
+	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
+		err = -ENOMEM;
+		goto err_unpin;
+	}
+
+	if (vma->obj) {
+		err = i915_gem_object_pin_pages(vma->obj);
+		if (err)
+			goto err_unpin;
+	}
+
+	if (flags & PIN_USER) {
+		work = i915_vma_work();
+		if (!work) {
+			err = -ENOMEM;
+			goto err_pages;
+		}
+	}
+
+	err = i915_active_acquire(&vma->active);
+	if (err)
+		goto err_fence;
+
+	err = mutex_lock_interruptible(&vma->vm->mutex);
+	if (err)
+		goto err_active;
+
+	err = __i915_vma_do_pin(vma, size, alignment, flags, work);
+
+	mutex_unlock(&vma->vm->mutex);
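+	/* The labels below double as the success path, dropping the temporaries */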
+err_active:
+	i915_active_release(&vma->active);
+err_fence:
+	if (work)
+		dma_fence_work_commit(&work->base);
+err_pages:
+	if (vma->obj)
+		i915_gem_object_unpin_pages(vma->obj);
+err_unpin:
+	atomic_dec(&vma->flags);
+	return err;
+}
+
 void i915_vma_close(struct i915_vma *vma)
 {
 	struct drm_i915_private *i915 = vma->vm->i915;
@@ -985,7 +1090,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 		i915_vma_revoke_mmap(vma);
 
 		__i915_vma_iounmap(vma);
-		vma->flags &= ~I915_VMA_CAN_FENCE;
+		atomic_and(~I915_VMA_CAN_FENCE, &vma->flags);
 	}
 	GEM_BUG_ON(vma->fence);
 	GEM_BUG_ON(i915_vma_has_userfault(vma));
@@ -994,9 +1099,11 @@ int i915_vma_unbind(struct i915_vma *vma)
 		trace_i915_vma_unbind(vma);
 		vma->ops->unbind_vma(vma);
 	}
-	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
+	atomic_and(~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND), &vma->flags);
 
+	mutex_lock(&vma->vm->mutex);
 	i915_vma_remove(vma);
+	mutex_unlock(&vma->vm->mutex);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 910d365d5599..fee6715fd32b 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -72,7 +72,8 @@ struct i915_vma {
 	 * that exist in the ctx->handle_vmas LUT for this vma.
 	 */
 	atomic_t open_count;
-	unsigned long flags;
+	atomic_t flags;
+
 	/**
 	 * How many users have pinned this object in GTT space.
 	 *
@@ -97,18 +98,24 @@ struct i915_vma {
 	 * users.
 	 */
 #define I915_VMA_PIN_MASK 0xff
-#define I915_VMA_PIN_OVERFLOW	BIT(8)
+#define I915_VMA_PIN_OVERFLOW	(int)BIT(8)
+
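+/* Internal flag asking bind_vma to allocate the va range on first bind */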
+#define BIND_ALLOC I915_VMA_PIN_OVERFLOW
 
 	/** Flags and address space this VMA is bound to */
-#define I915_VMA_GLOBAL_BIND	BIT(9)
-#define I915_VMA_LOCAL_BIND	BIT(10)
-#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)
+#define I915_VMA_GLOBAL_BIND	(int)BIT(9)
+#define I915_VMA_LOCAL_BIND	(int)BIT(10)
+#define I915_VMA_ERROR		(int)BIT(11)
+#define I915_VMA_BIND_MASK (I915_VMA_PIN_OVERFLOW | \
+			    I915_VMA_GLOBAL_BIND | \
+			    I915_VMA_LOCAL_BIND | \
+			    I915_VMA_ERROR)
+
+#define I915_VMA_GGTT		(int)BIT(12)
+#define I915_VMA_CAN_FENCE	(int)BIT(13)
+#define I915_VMA_USERFAULT	(int)BIT(14)
+#define I915_VMA_GGTT_WRITE	(int)BIT(15)
 
-#define I915_VMA_GGTT		BIT(11)
-#define I915_VMA_CAN_FENCE	BIT(12)
-#define I915_VMA_USERFAULT_BIT	13
-#define I915_VMA_USERFAULT	BIT(I915_VMA_USERFAULT_BIT)
-#define I915_VMA_GGTT_WRITE	BIT(14)
 
 	struct i915_active active;
 
@@ -164,46 +171,46 @@ int __must_check i915_vma_move_to_active(struct i915_vma *vma,
 
 static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
 {
-	return vma->flags & I915_VMA_GGTT;
+	return atomic_read(&vma->flags) & I915_VMA_GGTT;
 }
 
 static inline bool i915_vma_has_ggtt_write(const struct i915_vma *vma)
 {
-	return vma->flags & I915_VMA_GGTT_WRITE;
+	return atomic_read(&vma->flags) & I915_VMA_GGTT_WRITE;
 }
 
 static inline void i915_vma_set_ggtt_write(struct i915_vma *vma)
 {
 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
-	vma->flags |= I915_VMA_GGTT_WRITE;
+	atomic_or(I915_VMA_GGTT_WRITE, &vma->flags);
 }
 
 static inline void i915_vma_unset_ggtt_write(struct i915_vma *vma)
 {
-	vma->flags &= ~I915_VMA_GGTT_WRITE;
+	atomic_and(~I915_VMA_GGTT_WRITE, &vma->flags);
 }
 
 void i915_vma_flush_writes(struct i915_vma *vma);
 
 static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
 {
-	return vma->flags & I915_VMA_CAN_FENCE;
+	return atomic_read(&vma->flags) & I915_VMA_CAN_FENCE;
 }
 
 static inline bool i915_vma_set_userfault(struct i915_vma *vma)
 {
 	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
-	return __test_and_set_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+	return atomic_fetch_or(I915_VMA_USERFAULT, &vma->flags);
 }
 
 static inline void i915_vma_unset_userfault(struct i915_vma *vma)
 {
-	return __clear_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+	atomic_and(~I915_VMA_USERFAULT, &vma->flags);
 }
 
 static inline bool i915_vma_has_userfault(const struct i915_vma *vma)
 {
-	return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+	return atomic_read(&vma->flags) & I915_VMA_USERFAULT;
 }
 
 static inline bool i915_vma_is_closed(const struct i915_vma *vma)
@@ -285,8 +292,12 @@ i915_vma_compare(struct i915_vma *vma,
 	return memcmp(&vma->ggtt_view.partial, &view->partial, view->type);
 }
 
-int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
-		  u32 flags);
+struct i915_vma_work *i915_vma_work(void);
+int i915_vma_bind(struct i915_vma *vma,
+		  enum i915_cache_level cache_level,
+		  u32 flags,
+		  struct i915_vma_work *work);
+
 bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level);
 bool i915_vma_misplaced(const struct i915_vma *vma,
 			u64 size, u64 alignment, u64 flags);
@@ -310,30 +321,12 @@ static inline void i915_vma_unlock(struct i915_vma *vma)
 	dma_resv_unlock(vma->resv);
 }
 
-int __i915_vma_do_pin(struct i915_vma *vma,
-		      u64 size, u64 alignment, u64 flags);
-static inline int __must_check
-i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
-{
-	BUILD_BUG_ON(PIN_MBZ != I915_VMA_PIN_OVERFLOW);
-	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
-	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
-
-	/* Pin early to prevent the shrinker/eviction logic from destroying
-	 * our vma as we insert and bind.
-	 */
-	if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0)) {
-		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
-		GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
-		return 0;
-	}
-
-	return __i915_vma_do_pin(vma, size, alignment, flags);
-}
+int __must_check
+i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags);
 
 static inline int i915_vma_pin_count(const struct i915_vma *vma)
 {
-	return vma->flags & I915_VMA_PIN_MASK;
+	return atomic_read(&vma->flags) & I915_VMA_PIN_MASK;
 }
 
 static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
@@ -343,13 +336,13 @@ static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
 
 static inline void __i915_vma_pin(struct i915_vma *vma)
 {
-	vma->flags++;
-	GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW);
+	atomic_inc(&vma->flags);
+	GEM_BUG_ON(atomic_read(&vma->flags) & I915_VMA_PIN_OVERFLOW);
 }
 
 static inline void __i915_vma_unpin(struct i915_vma *vma)
 {
-	vma->flags--;
+	atomic_dec(&vma->flags);
 }
 
 static inline void i915_vma_unpin(struct i915_vma *vma)
@@ -362,7 +355,7 @@ static inline void i915_vma_unpin(struct i915_vma *vma)
 static inline bool i915_vma_is_bound(const struct i915_vma *vma,
 				     unsigned int where)
 {
-	return vma->flags & where;
+	return atomic_read(&vma->flags) & where;
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index e62a67e0f79c..91cec91d6712 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -43,7 +43,7 @@ static int mock_bind_ppgtt(struct i915_vma *vma,
 			   u32 flags)
 {
 	GEM_BUG_ON(flags & I915_VMA_GLOBAL_BIND);
-	vma->flags |= I915_VMA_LOCAL_BIND;
+	atomic_or(I915_VMA_LOCAL_BIND, &vma->flags);
 	return 0;
 }
 
@@ -86,7 +86,7 @@ static int mock_bind_ggtt(struct i915_vma *vma,
 			  enum i915_cache_level cache_level,
 			  u32 flags)
 {
-	vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
+	atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
 	return 0;
 }
 
-- 
2.23.0.rc1


