[Intel-xe] [CI 14/16] drm/xe: Adjust to commit drm/gpuvm: reference count drm_gpuvm structures
Thomas Hellström
thomas.hellstrom@linux.intel.com
Tue Dec 5 15:19:10 UTC 2023
The gpuvm commit referenced in the subject makes struct drm_gpuvm
reference counted: drm_gpuvm_destroy() goes away, and the final
drm_gpuvm_put() instead invokes a new vm_free callback in struct
drm_gpuvm_ops. Adjust xe accordingly: drop the driver-private kref,
implement the vm_free callback, route xe_vm_get()/xe_vm_put() through
drm_gpuvm_get()/drm_gpuvm_put(), and destroy the page-table roots
already in xe_vm_close_and_put(), leaving only a warning in the
deferred free worker.

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
---
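Note for reviewers new to the refcounted drm_gpuvm lifetime model this
series adjusts to: a minimal sketch of the pattern follows. It assumes
the drm_gpuvm_get()/drm_gpuvm_put() helpers and the struct drm_gpuvm_ops
.vm_free callback from the gpuvm series; the names mirror the diff below
and are illustrative rather than new API:

	/*
	 * The embedded struct drm_gpuvm now carries the reference count.
	 * Dropping the last reference makes the gpuvm core call back into
	 * the driver through .vm_free; final teardown is deferred to a
	 * worker because it needs to sleep.
	 */
	static void xe_vm_free(struct drm_gpuvm *gpuvm)
	{
		struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);

		queue_work(system_unbound_wq, &vm->destroy_work);
	}

	/* Callers take and drop VM references through the gpuvm helpers: */
	static inline struct xe_vm *xe_vm_get(struct xe_vm *vm)
	{
		drm_gpuvm_get(&vm->gpuvm);	/* replaces kref_get(&vm->refcount) */
		return vm;
	}

	static inline void xe_vm_put(struct xe_vm *vm)
	{
		drm_gpuvm_put(&vm->gpuvm);	/* the last put ends up in xe_vm_free() */
	}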
drivers/gpu/drm/xe/xe_vm.c | 30 ++++++++++++------------------
drivers/gpu/drm/xe/xe_vm.h | 5 ++---
drivers/gpu/drm/xe/xe_vm_types.h | 2 --
3 files changed, 14 insertions(+), 23 deletions(-)
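The other functional change is that the PT roots are now destroyed in
xe_vm_close_and_put() under the VM lock instead of being delayed to the
deferred free worker, which only keeps a sanity check. Roughly (again
mirroring the diff, with surrounding code elided):

	/* In xe_vm_close_and_put(), with the VM locked: */
	xe_vm_lock(vm, false);
	for_each_tile(tile, xe, id) {
		/* ... scratch PTs are freed first, as before ... */
		if (vm->pt_root[id]) {
			xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
			vm->pt_root[id] = NULL;
		}
	}
	xe_vm_unlock(vm);

	/* vm_destroy_work_func() now only warns if a root survived: */
	for_each_tile(tile, xe, id)
		XE_WARN_ON(vm->pt_root[id]);

The old XXX comment tied the delayed destruction to xe_vm_lock() needing
the PT root; with the VM's reservation object provided by the embedded
drm_gpuvm that dependency appears to be gone, so the earlier teardown is
safe.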
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 034496ac2341..4c3a1b173646 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1197,8 +1197,11 @@ static struct drm_gpuva_op *xe_vm_op_alloc(void)
 	return &op->base;
 }
 
+static void xe_vm_free(struct drm_gpuvm *gpuvm);
+
 static struct drm_gpuvm_ops gpuvm_ops = {
 	.op_alloc = xe_vm_op_alloc,
+	.vm_free = xe_vm_free,
 };
 
 static u64 pde_encode_pat_index(struct xe_device *xe, u16 pat_index)
@@ -1346,7 +1349,6 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
 		return ERR_PTR(-ENOMEM);
 
 	vm->xe = xe;
-	kref_init(&vm->refcount);
 
 	vm->size = 1ull << xe->info.va_bits;
 
@@ -1498,7 +1500,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
 	}
 	dma_resv_unlock(xe_vm_resv(vm));
 err_put:
-	drm_gpuvm_destroy(&vm->gpuvm);
+	// drm_gpuvm_destroy(&vm->gpuvm);
 err_no_resv:
 	for_each_tile(tile, xe, id)
 		xe_range_fence_tree_fini(&vm->rftree[id]);
@@ -1586,6 +1588,10 @@ void xe_vm_close_and_put(struct xe_vm *vm)
 				xe_pt_destroy(vm->scratch_pt[id][i], vm->flags,
 					      NULL);
 		}
+		if (vm->pt_root[id]) {
+			xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
+			vm->pt_root[id] = NULL;
+		}
 	}
 
 	xe_vm_unlock(vm);
@@ -1639,29 +1645,17 @@ static void vm_destroy_work_func(struct work_struct *w)
 		}
 	}
 
-	/*
-	 * XXX: We delay destroying the PT root until the VM if freed as PT root
-	 * is needed for xe_vm_lock to work. If we remove that dependency this
-	 * can be moved to xe_vm_close_and_put.
-	 */
-	xe_vm_lock(vm, false);
-	for_each_tile(tile, xe, id) {
-		if (vm->pt_root[id]) {
-			xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
-			vm->pt_root[id] = NULL;
-		}
-	}
-	xe_vm_unlock(vm);
+	for_each_tile(tile, xe, id)
+		XE_WARN_ON(vm->pt_root[id]);
 
 	trace_xe_vm_free(vm);
 	dma_fence_put(vm->rebind_fence);
-	drm_gpuvm_destroy(&vm->gpuvm);
 	kfree(vm);
 }
 
-void xe_vm_free(struct kref *ref)
+static void xe_vm_free(struct drm_gpuvm *gpuvm)
 {
-	struct xe_vm *vm = container_of(ref, struct xe_vm, refcount);
+	struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
 
 	/* To destroy the VM we need to be able to sleep */
 	queue_work(system_unbound_wq, &vm->destroy_work);
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 1d2b1917b33a..811b57232520 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -24,20 +24,19 @@ struct xe_sync_entry;
 struct drm_exec;
 
 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags);
-void xe_vm_free(struct kref *ref);
 
 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id);
 int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node);
 
 static inline struct xe_vm *xe_vm_get(struct xe_vm *vm)
 {
-	kref_get(&vm->refcount);
+	drm_gpuvm_get(&vm->gpuvm);
 	return vm;
 }
 
 static inline void xe_vm_put(struct xe_vm *vm)
 {
-	kref_put(&vm->refcount, xe_vm_free);
+	drm_gpuvm_put(&vm->gpuvm);
 }
 
 int xe_vm_lock(struct xe_vm *vm, bool intr);
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 5b449ec9c3e5..29271a8dbe48 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -142,8 +142,6 @@ struct xe_vm {
 
 	struct xe_device *xe;
 
-	struct kref refcount;
-
 	/* exec queue used for (un)binding vma's */
 	struct xe_exec_queue *q[XE_MAX_TILES_PER_DEVICE];
 
--
2.42.0