[PATCH 3/3] drm/i915: Use refcount_t for struct address_space::open
Thomas Hellström
thomas.hellstrom at linux.intel.com
Tue Feb 8 10:37:35 UTC 2022
Replace atomic_t with refcount_t to help us find and debug problems with
vm open refcounting.
Signed-off-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
---
drivers/gpu/drm/i915/gt/gen6_ppgtt.c | 2 +-
drivers/gpu/drm/i915/gt/intel_ggtt.c | 22 +++++++++----------
drivers/gpu/drm/i915/gt/intel_gtt.c | 4 ++--
drivers/gpu/drm/i915/gt/intel_gtt.h | 8 +++----
drivers/gpu/drm/i915/i915_vma.c | 6 ++---
drivers/gpu/drm/i915/i915_vma_resource.c | 2 +-
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 2 +-
7 files changed, 22 insertions(+), 24 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
index be5b953ba07b..b63789252403 100644
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -320,7 +320,7 @@ int gen6_ppgtt_pin(struct i915_ppgtt *base, struct i915_gem_ww_ctx *ww)
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
int err;
- GEM_BUG_ON(!atomic_read(&ppgtt->base.vm.open));
+ GEM_BUG_ON(!refcount_read(&ppgtt->base.vm.open));
/*
* Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 8850d4e0f9cc..71e129dd3910 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -135,8 +135,14 @@ void i915_ggtt_suspend_vm(struct i915_address_space *vm)
mutex_lock(&vm->mutex);
- /* Skip rewriting PTE on VMA unbind. */
- open = atomic_xchg(&vm->open, 0);
+ /*
+ * Skip rewriting PTEs on VMA unbind. We shouldn't race with anyone
+ * here, so we could probably replace the atomic_xchg with a
+ * refcount_read, refcount_set sequence; we shouldn't really be
+ * accessing the internals of refcount_t directly like this.
+ */
+
+ open = atomic_xchg(&vm->open.refs, 0);
list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
struct drm_i915_gem_object *obj = vma->obj;
@@ -154,7 +160,7 @@ void i915_ggtt_suspend_vm(struct i915_address_space *vm)
*/
i915_gem_object_get(obj);
- atomic_set(&vm->open, open);
+ refcount_set(&vm->open, open);
mutex_unlock(&vm->mutex);
i915_gem_object_lock(obj, NULL);
@@ -179,7 +185,7 @@ void i915_ggtt_suspend_vm(struct i915_address_space *vm)
vm->clear_range(vm, 0, vm->total);
- atomic_set(&vm->open, open);
+ refcount_set(&vm->open, open);
mutex_unlock(&vm->mutex);
}
@@ -773,7 +779,7 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
{
struct i915_vma *vma, *vn;
- atomic_set(&ggtt->vm.open, 0);
+ refcount_set(&ggtt->vm.open, 0);
flush_workqueue(ggtt->vm.i915->wq);
i915_gem_drain_freed_objects(ggtt->vm.i915);
@@ -1307,16 +1313,12 @@ bool i915_ggtt_resume_vm(struct i915_address_space *vm)
{
struct i915_vma *vma;
bool write_domain_objs = false;
- int open;
drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
/* First fill our portion of the GTT with scratch pages */
vm->clear_range(vm, 0, vm->total);
- /* Skip rewriting PTE on VMA unbind. */
- open = atomic_xchg(&vm->open, 0);
-
/* clflush objects bound into the GGTT and rebind them. */
list_for_each_entry(vma, &vm->bound_list, vm_link) {
struct drm_i915_gem_object *obj = vma->obj;
@@ -1333,8 +1335,6 @@ bool i915_ggtt_resume_vm(struct i915_address_space *vm)
}
}
- atomic_set(&vm->open, open);
-
return write_domain_objs;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index c08c057399af..576ec55bf70d 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -123,7 +123,7 @@ static void clear_vm_list(struct list_head *list)
void __i915_vm_close(struct i915_address_space *vm)
{
- if (!atomic_dec_and_mutex_lock(&vm->open, &vm->mutex))
+ if (!refcount_dec_and_mutex_lock(&vm->open, &vm->mutex))
return;
clear_vm_list(&vm->bound_list);
@@ -210,7 +210,7 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass)
vm->pending_unbind = RB_ROOT_CACHED;
INIT_WORK(&vm->release_work, __i915_vm_release);
- atomic_set(&vm->open, 1);
+ refcount_set(&vm->open, 1);
/*
* The vm->mutex must be reclaim safe (for use in the shrinker).
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index ff566f6ee958..3254e7a5b894 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -233,7 +233,7 @@ struct i915_address_space {
* and do not allow any new attachments, and proceed to shutdown our
* vma and page directories.
*/
- atomic_t open;
+ refcount_t open;
struct mutex mutex; /* protects vma and our lists */
@@ -451,15 +451,14 @@ static inline void i915_vm_resv_put(struct i915_address_space *vm)
static inline struct i915_address_space *
i915_vm_open(struct i915_address_space *vm)
{
- GEM_BUG_ON(!atomic_read(&vm->open));
- atomic_inc(&vm->open);
+ refcount_inc(&vm->open);
return i915_vm_get(vm);
}
static inline bool
i915_vm_tryopen(struct i915_address_space *vm)
{
- if (atomic_add_unless(&vm->open, 1, 0))
+ if (refcount_inc_not_zero(&vm->open))
return i915_vm_get(vm);
return false;
@@ -470,7 +469,6 @@ void __i915_vm_close(struct i915_address_space *vm);
static inline void
i915_vm_close(struct i915_address_space *vm)
{
- GEM_BUG_ON(!atomic_read(&vm->open));
__i915_vm_close(vm);
i915_vm_put(vm);
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 8f1f47078bc8..4a1d6300455e 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -47,7 +47,7 @@ static inline void assert_vma_held_evict(const struct i915_vma *vma)
* This is the only exception to the requirement of the object lock
* being held.
*/
- if (atomic_read(&vma->vm->open))
+ if (refcount_read(&vma->vm->open))
assert_object_held_shared(vma->obj);
}
@@ -292,7 +292,7 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
struct i915_vma *vma;
GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));
- GEM_BUG_ON(!atomic_read(&vm->open));
+ GEM_BUG_ON(!refcount_read(&vm->open));
spin_lock(&obj->vma.lock);
vma = i915_vma_lookup(obj, vm, view);
@@ -1891,7 +1891,7 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
/* If vm is not open, unbind is a nop. */
vma_res->needs_wakeref = i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND) &&
- atomic_read(&vma->vm->open);
+ refcount_read(&vma->vm->open);
trace_i915_vma_unbind(vma);
unbind_fence = i915_vma_resource_unbind(vma_res);
diff --git a/drivers/gpu/drm/i915/i915_vma_resource.c b/drivers/gpu/drm/i915/i915_vma_resource.c
index 57ae92ba8af1..e639a05ae4c6 100644
--- a/drivers/gpu/drm/i915/i915_vma_resource.c
+++ b/drivers/gpu/drm/i915/i915_vma_resource.c
@@ -178,7 +178,7 @@ static void i915_vma_resource_unbind_work(struct work_struct *work)
bool lockdep_cookie;
lockdep_cookie = dma_fence_begin_signalling();
- if (likely(atomic_read(&vm->open)))
+ if (likely(refcount_read(&vm->open)))
vma_res->ops->unbind_vma(vm, vma_res);
dma_fence_end_signalling(lockdep_cookie);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index fb402789ef8d..ce512ad51401 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -1060,7 +1060,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
goto out_free;
}
GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
- GEM_BUG_ON(!atomic_read(&ppgtt->vm.open));
+ GEM_BUG_ON(!refcount_read(&ppgtt->vm.open));
err = func(&ppgtt->vm, 0, ppgtt->vm.total, end_time);
--
2.34.1
More information about the Intel-gfx-trybot
mailing list