[Intel-xe] [CI 15/16] drm/xe: Adjust to commit "drm/gpuvm: add an abstraction for a VM/BO combination"
Thomas Hellström
thomas.hellstrom at linux.intel.com
Tue Dec 5 15:19:11 UTC 2023
Signed-off-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
---
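With the VM/BO abstraction, walking the VAs that map a GEM object becomes a
two-level iteration: first over the object's drm_gpuvm_bo instances (one per
VM/BO combination), then over each combination's VAs. A minimal sketch of the
pattern used below (the helper name is hypothetical, and the object's dma-resv
is assumed to protect the lists):

#include <linux/dma-resv.h>
#include <drm/drm_gem.h>
#include <drm/drm_gpuvm.h>

static void sketch_walk_obj_vas(struct drm_gem_object *obj)
{
	struct drm_gpuvm_bo *vm_bo;
	struct drm_gpuva *va;

	dma_resv_assert_held(obj->resv);

	/* One drm_gpuvm_bo exists per (GPU VM, BO) combination. */
	drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
		/* Each combination tracks the VAs of that VM mapping the BO. */
		drm_gpuvm_bo_for_each_va(va, vm_bo) {
			/* Operate on va; vm_bo->vm is the owning drm_gpuvm. */
		}
	}
}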
drivers/gpu/drm/xe/xe_bo.c | 11 +++++++----
drivers/gpu/drm/xe/xe_vm.c | 32 +++++++++++++++++++++++++-------
2 files changed, 32 insertions(+), 11 deletions(-)
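The xe_vma_create() hunk follows the drm_gpuvm_bo reference rules:
drm_gpuvm_bo_obtain() returns the existing combination or creates a new one,
drm_gpuva_link() takes its own vm_bo reference when linking the VA, and the
local reference is then dropped. A simplified sketch of that sequence
(hypothetical helper, error handling reduced, BO dma-resv assumed held):

#include <linux/err.h>
#include <drm/drm_gem.h>
#include <drm/drm_gpuvm.h>

static int sketch_link_va(struct drm_gpuva *va, struct drm_gem_object *obj,
			  u64 offset)
{
	struct drm_gpuvm_bo *vm_bo;

	/* Returns the existing (VM, BO) combination or creates one. */
	vm_bo = drm_gpuvm_bo_obtain(va->vm, obj);
	if (IS_ERR(vm_bo))
		return PTR_ERR(vm_bo);

	drm_gem_object_get(obj);
	va->gem.obj = obj;
	va->gem.offset = offset;

	/* Links the VA to the vm_bo, taking a vm_bo reference of its own. */
	drm_gpuva_link(va, vm_bo);

	/* Drop the reference returned by drm_gpuvm_bo_obtain(). */
	drm_gpuvm_bo_put(vm_bo);
	return 0;
}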
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index ad9d8793db3e..7e25c8b7a01a 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -470,6 +470,7 @@ static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo,
 	struct dma_fence *fence;
 	struct drm_gpuva *gpuva;
 	struct drm_gem_object *obj = &bo->ttm.base;
+	struct drm_gpuvm_bo *vm_bo;
 	int ret = 0;
 
 	dma_resv_assert_held(bo->ttm.base.resv);
@@ -482,11 +483,12 @@ static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo,
 		dma_resv_iter_end(&cursor);
 	}
 
-	drm_gem_for_each_gpuva(gpuva, obj) {
-		struct xe_vma *vma = gpuva_to_vma(gpuva);
-		struct xe_vm *vm = xe_vma_vm(vma);
+	drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
+		drm_gpuvm_bo_for_each_va(gpuva, vm_bo) {
+			struct xe_vma *vma = gpuva_to_vma(gpuva);
+			struct xe_vm *vm = xe_vma_vm(vma);
 
-		trace_xe_vma_evict(vma);
+			trace_xe_vma_evict(vma);
 
 		if (xe_vm_in_fault_mode(vm)) {
 			/* Wait for pending binds / unbinds. */
@@ -540,6 +542,7 @@ static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo,
 			if (vm_resv_locked)
 				dma_resv_unlock(xe_vm_resv(vm));
 		}
+		}
 	}
 
 	return ret;
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 4c3a1b173646..0f290621889c 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -911,12 +911,21 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
 	vma->pat_index = pat_index;
 
 	if (bo) {
+		struct drm_gpuvm_bo *vm_bo;
+
 		xe_bo_assert_held(bo);
 
+		vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
+		if (IS_ERR(vm_bo)) {
+			kfree(vma);
+			return ERR_CAST(vm_bo);
+		}
+
 		drm_gem_object_get(&bo->ttm.base);
 		vma->gpuva.gem.obj = &bo->ttm.base;
 		vma->gpuva.gem.offset = bo_offset_or_userptr;
-		drm_gpuva_link(&vma->gpuva);
+		drm_gpuva_link(&vma->gpuva, vm_bo);
+		drm_gpuvm_bo_put(vm_bo);
 	} else /* userptr or null */ {
 		if (!is_null) {
 			u64 size = end - start + 1;
@@ -998,16 +1007,19 @@ static struct xe_vma *
 bo_has_vm_references_locked(struct xe_bo *bo, struct xe_vm *vm,
 			    struct xe_vma *ignore)
 {
-	struct drm_gpuva *gpuva;
+	struct drm_gpuvm_bo *vm_bo;
+	struct drm_gpuva *va;
 	struct drm_gem_object *obj = &bo->ttm.base;
 
 	xe_bo_assert_held(bo);
 
-	drm_gem_for_each_gpuva(gpuva, obj) {
-		struct xe_vma *vma = gpuva_to_vma(gpuva);
+	drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
+		drm_gpuvm_bo_for_each_va(va, vm_bo) {
+			struct xe_vma *vma = gpuva_to_vma(va);
 
-		if (vma != ignore && xe_vma_vm(vma) == vm)
-			return vma;
+			if (vma != ignore && xe_vma_vm(vma) == vm)
+				return vma;
+		}
 	}
 
 	return NULL;
@@ -2171,6 +2183,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
 	struct drm_gpuva_ops *ops;
 	struct drm_gpuva_op *__op;
 	struct xe_vma_op *op;
+	struct drm_gpuvm_bo *vm_bo;
 	int err;
 
 	lockdep_assert_held_write(&vm->lock);
@@ -2198,7 +2211,12 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
 		err = xe_bo_lock(bo, true);
 		if (err)
 			return ERR_PTR(err);
-		ops = drm_gpuvm_gem_unmap_ops_create(&vm->gpuvm, obj);
+
+		vm_bo = drm_gpuvm_bo_find(&vm->gpuvm, obj);
+		if (!vm_bo)
+			break;
+
+		ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
 		xe_bo_unlock(bo);
 		break;
 	default:
--
2.42.0
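For reference, the unmap-all path above now builds its ops per VM/BO
combination rather than per GEM object: drm_gpuvm_bo_find() looks up an
existing combination without creating one, and drm_gpuvm_bo_unmap_ops_create()
replaces drm_gpuvm_gem_unmap_ops_create(). A simplified sketch (hypothetical
helper, BO dma-resv assumed held; the reference returned by the lookup is
dropped once the ops are created):

#include <drm/drm_gem.h>
#include <drm/drm_gpuvm.h>

static struct drm_gpuva_ops *sketch_unmap_all_ops(struct drm_gpuvm *gpuvm,
						  struct drm_gem_object *obj)
{
	struct drm_gpuvm_bo *vm_bo;
	struct drm_gpuva_ops *ops;

	/* NULL when this VM has no mappings of obj. */
	vm_bo = drm_gpuvm_bo_find(gpuvm, obj);
	if (!vm_bo)
		return NULL;

	/* Ops that unmap every VA this VM holds for the BO. */
	ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);

	drm_gpuvm_bo_put(vm_bo);
	return ops;
}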