[PATCH] drm/xe: Remove the pat_index field from xe_vma_op_map
Himal Prasad Ghimiray
himal.prasad.ghimiray at intel.com
Tue May 14 11:03:41 UTC 2024
The pat_index field within xe_vma_op_map exists only to carry the PAT
index used when creating a new VMA for a DRM_GPUVA_OP_MAP gpuva
operation. Rather than initializing this field in
vm_bind_ioctl_ops_create and then reading it back in
vm_bind_ioctl_ops_parse, drop pat_index from xe_vma_op_map altogether
and pass the user-provided pat_index directly to
vm_bind_ioctl_ops_parse.
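
For reference, the resulting flow in xe_vm_bind_ioctl() looks roughly
like the sketch below (arguments abbreviated, not part of the patch
itself):

    ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], ..., prefetch_region);
    err = vm_bind_ioctl_ops_parse(vm, q, ops[i], pat_index, ...);

and vm_bind_ioctl_ops_parse() forwards pat_index when creating the VMA:

    vma = new_vma(vm, &op->base.map, pat_index, flags);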
Cc: Matthew Brost <matthew.brost at intel.com>
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
---
drivers/gpu/drm/xe/xe_vm.c | 16 +++++++---------
drivers/gpu/drm/xe/xe_vm_types.h | 2 --
2 files changed, 7 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index c5b1694b292f..a11685e9546b 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -2055,8 +2055,7 @@ static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
static struct drm_gpuva_ops *
vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
u64 bo_offset_or_userptr, u64 addr, u64 range,
- u32 operation, u32 flags,
- u32 prefetch_region, u16 pat_index)
+ u32 operation, u32 flags, u32 prefetch_region)
{
struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
struct drm_gpuva_ops *ops;
@@ -2117,7 +2116,6 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
flags & DRM_XE_VM_BIND_FLAG_READONLY;
op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
- op->map.pat_index = pat_index;
} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
op->prefetch.region = prefetch_region;
}
@@ -2278,7 +2276,7 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
- struct drm_gpuva_ops *ops,
+ struct drm_gpuva_ops *ops, u16 pat_index,
struct xe_sync_entry *syncs, u32 num_syncs,
struct xe_vma_ops *vops, bool last)
{
@@ -2322,8 +2320,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
flags |= op->map.dumpable ?
VMA_CREATE_FLAG_DUMPABLE : 0;
- vma = new_vma(vm, &op->base.map, op->map.pat_index,
- flags);
+ vma = new_vma(vm, &op->base.map, pat_index, flags);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -3242,15 +3239,16 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
addr, range, op, flags,
- prefetch_region, pat_index);
+ prefetch_region);
if (IS_ERR(ops[i])) {
err = PTR_ERR(ops[i]);
ops[i] = NULL;
goto unwind_ops;
}
- err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs,
- &vops, i == args->num_binds - 1);
+ err = vm_bind_ioctl_ops_parse(vm, q, ops[i], pat_index,
+ syncs, num_syncs, &vops,
+ i == args->num_binds - 1);
if (err)
goto unwind_ops;
}
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index ce1a63a5e3e7..e13400dd5ee8 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -284,8 +284,6 @@ struct xe_vma_op_map {
bool is_null;
/** @dumpable: whether BO is dumped on GPU hang */
bool dumpable;
- /** @pat_index: The pat index to use for this operation. */
- u16 pat_index;
};
/** struct xe_vma_op_remap - VMA remap operation */
--
2.25.1