[PATCH] drm/xe: Refcount VMA
Matthew Brost
matthew.brost at intel.com
Wed Apr 17 18:56:35 UTC 2024
xe_vma is one of the few objects that doesn't currently have a refcount;
instead, it relies on an internal scheme to determine when the VMA can be freed.
Switch to a refcount, as this is the standard way to manage object lifetime
and will future-proof the code for cases where additional references to VMAs
are needed.
One functional change in this patch is that the VMA is now always freed in a
worker, since the final put can occur from IRQ context and freeing a VMA is
never on a critical path.
Cc: Oak Zeng <oak.zeng at intel.com>
Signed-off-by: Matthew Brost <matthew.brost at intel.com>
---
drivers/gpu/drm/xe/xe_vm.c | 16 ++++++++++++----
drivers/gpu/drm/xe/xe_vm.h | 12 ++++++++++++
drivers/gpu/drm/xe/xe_vm_types.h | 3 +++
3 files changed, 27 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 8a858b8588bd..a2d3aacbf29f 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -791,6 +791,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
vma->gpuva.gem.obj = &bo->ttm.base;
}
+ kref_init(&vma->refcount);
INIT_LIST_HEAD(&vma->combined_links.rebind);
INIT_LIST_HEAD(&vma->gpuva.gem.entry);
@@ -899,13 +900,20 @@ static void vma_destroy_work_func(struct work_struct *w)
xe_vma_destroy_late(vma);
}
+void __xe_vma_destroy_async(struct kref *ref)
+{
+ struct xe_vma *vma = container_of(ref, struct xe_vma, refcount);
+
+ INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
+ queue_work(system_unbound_wq, &vma->destroy_work);
+}
+
static void vma_destroy_cb(struct dma_fence *fence,
struct dma_fence_cb *cb)
{
struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
- INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
- queue_work(system_unbound_wq, &vma->destroy_work);
+ xe_vma_put(vma);
}
static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
@@ -934,10 +942,10 @@ static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
if (ret) {
XE_WARN_ON(ret != -ENOENT);
- xe_vma_destroy_late(vma);
+ xe_vma_put(vma);
}
} else {
- xe_vma_destroy_late(vma);
+ xe_vma_put(vma);
}
}
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 306cd0934a19..1cfe175d99e0 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -39,6 +39,18 @@ static inline void xe_vm_put(struct xe_vm *vm)
drm_gpuvm_put(&vm->gpuvm);
}
+static inline struct xe_vma *xe_vma_get(struct xe_vma *vma)
+{
+ kref_get(&vma->refcount);
+ return vma;
+}
+
+void __xe_vma_destroy_async(struct kref *ref);
+static inline void xe_vma_put(struct xe_vma *vma)
+{
+ kref_put(&vma->refcount, __xe_vma_destroy_async);
+}
+
int xe_vm_lock(struct xe_vm *vm, bool intr);
void xe_vm_unlock(struct xe_vm *vm);
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 7570c2c6c463..9fd4a8939386 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -65,6 +65,9 @@ struct xe_vma {
/** @gpuva: Base GPUVA object */
struct drm_gpuva gpuva;
+ /** @refcount: ref count of this vma */
+ struct kref refcount;
+
/**
* @combined_links: links into lists which are mutually exclusive.
* Locking: vm lock in write mode OR vm lock in read mode and the vm's
--
2.34.1
More information about the Intel-xe
mailing list