[Intel-xe] [PATCH v3 05/30] drm/xe/vm: Remove VM_BIND_OP macro
Francois Dugast
francois.dugast at intel.com
Tue Sep 26 12:55:15 UTC 2023
This macro was necessary when bind operations were shifted, but this is
no longer the case, so remove it to simplify the code.
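For context (not part of the patch), a minimal user-space sketch of why the
mask is no longer needed, assuming the old layout packed flag bits above the
16-bit opcode; the opcode value and the flag bit below are placeholders for
illustration, not the real xe uAPI definitions:

    #include <stdint.h>
    #include <stdio.h>

    #define XE_VM_BIND_OP_MAP   0x0           /* placeholder opcode value for the sketch */
    #define SKETCH_FLAG_BIT     (1u << 16)    /* hypothetical flag packed above the opcode */
    #define VM_BIND_OP(op)      (op & 0xffff) /* the macro removed by this patch */

    int main(void)
    {
            /* Old scheme: opcode and flag bits shared one field, so the
             * opcode had to be masked out before comparing against
             * XE_VM_BIND_OP_*.
             */
            uint32_t packed = XE_VM_BIND_OP_MAP | SKETCH_FLAG_BIT;
            printf("masked compare: %d\n", VM_BIND_OP(packed) == XE_VM_BIND_OP_MAP); /* 1 */
            printf("direct compare: %d\n", packed == XE_VM_BIND_OP_MAP);             /* 0 */

            /* New scheme: op carries only the opcode and the flags live in
             * a separate field, so a direct comparison is enough.
             */
            uint32_t op = XE_VM_BIND_OP_MAP;
            printf("direct compare: %d\n", op == XE_VM_BIND_OP_MAP);                 /* 1 */
            return 0;
    }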
Signed-off-by: Francois Dugast <francois.dugast at intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi at intel.com>
Reviewed-by: Matthew Brost <matthew.brost at intel.com>
---
drivers/gpu/drm/xe/xe_vm.c | 42 +++++++++++++++++---------------------
1 file changed, 19 insertions(+), 23 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index e81d8cc5c29e..e84002555d34 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -2142,8 +2142,6 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
}
}
-#define VM_BIND_OP(op) (op & 0xffff)
-
struct ttm_buffer_object *xe_vm_ttm_bo(struct xe_vm *vm)
{
int idx = vm->flags & XE_VM_FLAG_MIGRATION ?
@@ -2168,7 +2166,7 @@ static int vm_bind_ioctl_lookup_vma(struct xe_vm *vm, struct xe_bo *bo,
lockdep_assert_held(&vm->lock);
- switch (VM_BIND_OP(op)) {
+ switch (op) {
case XE_VM_BIND_OP_MAP:
case XE_VM_BIND_OP_MAP_USERPTR:
vma = xe_vm_find_overlapping_vma(vm, addr, range);
@@ -2277,10 +2275,10 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
vm_dbg(&vm->xe->drm,
"op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx",
- VM_BIND_OP(operation), (ULL)addr, (ULL)range,
+ operation, (ULL)addr, (ULL)range,
(ULL)bo_offset_or_userptr);
- switch (VM_BIND_OP(operation)) {
+ switch (operation) {
case XE_VM_BIND_OP_MAP:
case XE_VM_BIND_OP_MAP_USERPTR:
ops = drm_gpuva_sm_map_ops_create(&vm->mgr, addr, range,
@@ -3118,50 +3116,48 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
*async = !!(flags & XE_VM_BIND_FLAG_ASYNC);
} else if (XE_IOCTL_DBG(xe, !*async) ||
XE_IOCTL_DBG(xe, !(flags & XE_VM_BIND_FLAG_ASYNC)) ||
- XE_IOCTL_DBG(xe, VM_BIND_OP(op) ==
- XE_VM_BIND_OP_RESTART)) {
+ XE_IOCTL_DBG(xe, op == XE_VM_BIND_OP_RESTART)) {
err = -EINVAL;
goto free_bind_ops;
}
if (XE_IOCTL_DBG(xe, !*async &&
- VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL)) {
+ op == XE_VM_BIND_OP_UNMAP_ALL)) {
err = -EINVAL;
goto free_bind_ops;
}
if (XE_IOCTL_DBG(xe, !*async &&
- VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH)) {
+ op == XE_VM_BIND_OP_PREFETCH)) {
err = -EINVAL;
goto free_bind_ops;
}
- if (XE_IOCTL_DBG(xe, VM_BIND_OP(op) >
- XE_VM_BIND_OP_PREFETCH) ||
+ if (XE_IOCTL_DBG(xe, op > XE_VM_BIND_OP_PREFETCH) ||
XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
XE_IOCTL_DBG(xe, obj && is_null) ||
XE_IOCTL_DBG(xe, obj_offset && is_null) ||
- XE_IOCTL_DBG(xe, VM_BIND_OP(op) != XE_VM_BIND_OP_MAP &&
+ XE_IOCTL_DBG(xe, op != XE_VM_BIND_OP_MAP &&
is_null) ||
XE_IOCTL_DBG(xe, !obj &&
- VM_BIND_OP(op) == XE_VM_BIND_OP_MAP &&
+ op == XE_VM_BIND_OP_MAP &&
!is_null) ||
XE_IOCTL_DBG(xe, !obj &&
- VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
+ op == XE_VM_BIND_OP_UNMAP_ALL) ||
XE_IOCTL_DBG(xe, addr &&
- VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
+ op == XE_VM_BIND_OP_UNMAP_ALL) ||
XE_IOCTL_DBG(xe, range &&
- VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
+ op == XE_VM_BIND_OP_UNMAP_ALL) ||
XE_IOCTL_DBG(xe, obj &&
- VM_BIND_OP(op) == XE_VM_BIND_OP_MAP_USERPTR) ||
+ op == XE_VM_BIND_OP_MAP_USERPTR) ||
XE_IOCTL_DBG(xe, obj &&
- VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH) ||
+ op == XE_VM_BIND_OP_PREFETCH) ||
XE_IOCTL_DBG(xe, region &&
- VM_BIND_OP(op) != XE_VM_BIND_OP_PREFETCH) ||
+ op != XE_VM_BIND_OP_PREFETCH) ||
XE_IOCTL_DBG(xe, !(BIT(region) &
xe->info.mem_region_mask)) ||
XE_IOCTL_DBG(xe, obj &&
- VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP)) {
+ op == XE_VM_BIND_OP_UNMAP)) {
err = -EINVAL;
goto free_bind_ops;
}
@@ -3169,9 +3165,9 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) ||
XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
- XE_IOCTL_DBG(xe, !range && VM_BIND_OP(op) !=
+ XE_IOCTL_DBG(xe, !range && op !=
XE_VM_BIND_OP_RESTART &&
- VM_BIND_OP(op) != XE_VM_BIND_OP_UNMAP_ALL)) {
+ op != XE_VM_BIND_OP_UNMAP_ALL)) {
err = -EINVAL;
goto free_bind_ops;
}
@@ -3235,7 +3231,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
goto release_vm_lock;
}
- if (VM_BIND_OP(bind_ops[0].op) == XE_VM_BIND_OP_RESTART) {
+ if (bind_ops[0].op == XE_VM_BIND_OP_RESTART) {
if (XE_IOCTL_DBG(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
err = -EOPNOTSUPP;
if (XE_IOCTL_DBG(xe, !err && args->num_syncs))
--
2.34.1