[Intel-xe] [RFC PATCH 1/2] drm/xe: Expose vma bind-unbind functions
Nirmoy Das
nirmoy.das at intel.com
Wed May 24 12:36:47 UTC 2023
Expose the VMA bind and unbind functions, which are
needed to implement VMA madvise support.
Signed-off-by: Nirmoy Das <nirmoy.das at intel.com>
---
drivers/gpu/drm/xe/xe_vm.c | 52 ++++++++++++++++++--------------------
drivers/gpu/drm/xe/xe_vm.h | 3 +++
2 files changed, 27 insertions(+), 28 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index a0306526b269..ae53f08923f5 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -668,10 +668,6 @@ static void preempt_rebind_work_func(struct work_struct *w)
}
struct async_op_fence;
-static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_engine *e, struct xe_sync_entry *syncs,
- u32 num_syncs, struct async_op_fence *afence);
-
static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
const struct mmu_notifier_range *range,
unsigned long cur_seq)
@@ -1704,13 +1700,21 @@ int xe_vm_async_fence_wait_start(struct dma_fence *fence)
}
static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_engine *e, struct xe_sync_entry *syncs,
+ struct xe_engine *e, struct xe_bo *bo,
+ struct xe_sync_entry *syncs,
u32 num_syncs, struct async_op_fence *afence)
{
struct dma_fence *fence;
+ int err;
xe_vm_assert_held(vm);
+ xe_bo_assert_held(bo);
+ if (bo) {
+ err = xe_bo_validate(bo, vm, true);
+ if (err)
+ return err;
+ }
fence = xe_vm_bind_vma(vma, e, syncs, num_syncs);
if (IS_ERR(fence))
return PTR_ERR(fence);
@@ -1721,27 +1725,14 @@ static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
return 0;
}
-static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_engine *e,
- struct xe_bo *bo, struct xe_sync_entry *syncs,
- u32 num_syncs, struct async_op_fence *afence)
+int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma)
{
- int err;
-
- xe_vm_assert_held(vm);
- xe_bo_assert_held(bo);
-
- if (bo) {
- err = xe_bo_validate(bo, vm, true);
- if (err)
- return err;
- }
-
- return __xe_vm_bind(vm, vma, e, syncs, num_syncs, afence);
+ return __xe_vm_bind(vm, vma, NULL, vma->bo, NULL, 0, NULL);
}
-static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_engine *e, struct xe_sync_entry *syncs,
- u32 num_syncs, struct async_op_fence *afence)
+int __xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
+ struct xe_engine *e, struct xe_sync_entry *syncs,
+ u32 num_syncs, struct async_op_fence *afence)
{
struct dma_fence *fence;
@@ -1760,6 +1751,11 @@ static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
return 0;
}
+int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma)
+{
+ return __xe_vm_unbind(vm, vma, NULL, NULL, 0, NULL);
+}
+
static int vm_set_error_capture_address(struct xe_device *xe, struct xe_vm *vm,
u64 value)
{
@@ -1984,8 +1980,8 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
}
if (vma->gt_mask != (vma->gt_present & ~vma->usm.gt_invalidated)) {
- return xe_vm_bind(vm, vma, e, vma->bo, syncs, num_syncs,
- afence);
+ return __xe_vm_bind(vm, vma, e, vma->bo, syncs, num_syncs,
+ afence);
} else {
int i;
@@ -2008,12 +2004,12 @@ static int __vm_bind_ioctl(struct xe_vm *vm, struct xe_vma *vma,
{
switch (VM_BIND_OP(op)) {
case XE_VM_BIND_OP_MAP:
- return xe_vm_bind(vm, vma, e, bo, syncs, num_syncs, afence);
+ return __xe_vm_bind(vm, vma, e, bo, syncs, num_syncs, afence);
case XE_VM_BIND_OP_UNMAP:
case XE_VM_BIND_OP_UNMAP_ALL:
- return xe_vm_unbind(vm, vma, e, syncs, num_syncs, afence);
+ return __xe_vm_unbind(vm, vma, e, syncs, num_syncs, afence);
case XE_VM_BIND_OP_MAP_USERPTR:
- return xe_vm_bind(vm, vma, e, NULL, syncs, num_syncs, afence);
+ return __xe_vm_bind(vm, vma, e, NULL, syncs, num_syncs, afence);
case XE_VM_BIND_OP_PREFETCH:
return xe_vm_prefetch(vm, vma, e, region, syncs, num_syncs,
afence);
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 748dc16ebed9..c8be6d7ec526 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -147,6 +147,9 @@ void xe_vm_fence_all_extobjs(struct xe_vm *vm, struct dma_fence *fence,
int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id);
+int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma);
+int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma);
+
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
#define vm_dbg drm_dbg
#else
--
2.39.0
More information about the Intel-xe
mailing list