[Intel-xe] [PATCH 04/17] drm/xe/uapi: Separate VM_BIND's operation and flag
Matthew Brost
matthew.brost at intel.com
Tue Sep 19 20:39:48 UTC 2023
On Tue, Sep 19, 2023 at 10:24:51AM -0400, Rodrigo Vivi wrote:
> From: Francois Dugast <francois.dugast at intel.com>
>
> Use different members in the drm_xe_vm_bind_op for op and for flags as
> it is done in other structures.
>
> Type is left to u32 to leave enough room for future operations and flags.
>
> v2: Remove the XE_VM_BIND_* flags shift (Rodrigo Vivi)
>
> Closes: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/303
> Signed-off-by: Francois Dugast <francois.dugast at intel.com>
> Signed-off-by: Rodrigo Vivi <rodrigo.vivi at intel.com>
This patch LGTM, but I noticed that in xe_drm.h we should probably also do:
s/define XE_/define DRM_XE_/
That said, this patch is fine on its own. With that:
Reviewed-by: Matthew Brost <matthew.brost at intel.com>
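
To make that concrete, the rename would be a mechanical prefix change on the
uapi defines, something like this (a sketch only, not part of this patch, and
shown on just two of the defines):

-#define XE_VM_BIND_OP_MAP		0x0
-#define XE_VM_BIND_FLAG_READONLY	(0x1 << 0)
+#define DRM_XE_VM_BIND_OP_MAP		0x0
+#define DRM_XE_VM_BIND_FLAG_READONLY	(0x1 << 0)

and likewise for the remaining XE_* defines in xe_drm.h.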
> ---
> drivers/gpu/drm/xe/xe_vm.c | 29 ++++++++++++++++-------------
> include/uapi/drm/xe_drm.h | 14 ++++++++------
> 2 files changed, 24 insertions(+), 19 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 2b225c0692a6..7579c9537bfa 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -2152,11 +2152,11 @@ static void vm_set_async_error(struct xe_vm *vm, int err)
> }
>
> static int vm_bind_ioctl_lookup_vma(struct xe_vm *vm, struct xe_bo *bo,
> - u64 addr, u64 range, u32 op)
> + u64 addr, u64 range, u32 op, u32 flags)
> {
> struct xe_device *xe = vm->xe;
> struct xe_vma *vma;
> - bool async = !!(op & XE_VM_BIND_FLAG_ASYNC);
> + bool async = !!(flags & XE_VM_BIND_FLAG_ASYNC);
>
> lockdep_assert_held(&vm->lock);
>
> @@ -2257,7 +2257,7 @@ static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
> static struct drm_gpuva_ops *
> vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
> u64 bo_offset_or_userptr, u64 addr, u64 range,
> - u32 operation, u8 tile_mask, u32 region)
> + u32 operation, u32 flags, u8 tile_mask, u32 region)
> {
> struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
> struct drm_gpuva_ops *ops;
> @@ -2285,10 +2285,10 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
>
> op->tile_mask = tile_mask;
> op->map.immediate =
> - operation & XE_VM_BIND_FLAG_IMMEDIATE;
> + flags & XE_VM_BIND_FLAG_IMMEDIATE;
> op->map.read_only =
> - operation & XE_VM_BIND_FLAG_READONLY;
> - op->map.is_null = operation & XE_VM_BIND_FLAG_NULL;
> + flags & XE_VM_BIND_FLAG_READONLY;
> + op->map.is_null = flags & XE_VM_BIND_FLAG_NULL;
> }
> break;
> case XE_VM_BIND_OP_UNMAP:
> @@ -3100,15 +3100,16 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
> u64 range = (*bind_ops)[i].range;
> u64 addr = (*bind_ops)[i].addr;
> u32 op = (*bind_ops)[i].op;
> + u32 flags = (*bind_ops)[i].flags;
> u32 obj = (*bind_ops)[i].obj;
> u64 obj_offset = (*bind_ops)[i].obj_offset;
> u32 region = (*bind_ops)[i].region;
> - bool is_null = op & XE_VM_BIND_FLAG_NULL;
> + bool is_null = flags & XE_VM_BIND_FLAG_NULL;
>
> if (i == 0) {
> - *async = !!(op & XE_VM_BIND_FLAG_ASYNC);
> + *async = !!(flags & XE_VM_BIND_FLAG_ASYNC);
> } else if (XE_IOCTL_DBG(xe, !*async) ||
> - XE_IOCTL_DBG(xe, !(op & XE_VM_BIND_FLAG_ASYNC)) ||
> + XE_IOCTL_DBG(xe, !(flags & XE_VM_BIND_FLAG_ASYNC)) ||
> XE_IOCTL_DBG(xe, VM_BIND_OP(op) ==
> XE_VM_BIND_OP_RESTART)) {
> err = -EINVAL;
> @@ -3129,7 +3130,7 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
>
> if (XE_IOCTL_DBG(xe, VM_BIND_OP(op) >
> XE_VM_BIND_OP_PREFETCH) ||
> - XE_IOCTL_DBG(xe, op & ~SUPPORTED_FLAGS) ||
> + XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
> XE_IOCTL_DBG(xe, obj && is_null) ||
> XE_IOCTL_DBG(xe, obj_offset && is_null) ||
> XE_IOCTL_DBG(xe, VM_BIND_OP(op) != XE_VM_BIND_OP_MAP &&
> @@ -3344,8 +3345,9 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> u64 range = bind_ops[i].range;
> u64 addr = bind_ops[i].addr;
> u32 op = bind_ops[i].op;
> + u32 flags = bind_ops[i].flags;
>
> - err = vm_bind_ioctl_lookup_vma(vm, bos[i], addr, range, op);
> + err = vm_bind_ioctl_lookup_vma(vm, bos[i], addr, range, op, flags);
> if (err)
> goto free_syncs;
> }
> @@ -3354,13 +3356,14 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> u64 range = bind_ops[i].range;
> u64 addr = bind_ops[i].addr;
> u32 op = bind_ops[i].op;
> + u32 flags = bind_ops[i].flags;
> u64 obj_offset = bind_ops[i].obj_offset;
> u8 tile_mask = bind_ops[i].tile_mask;
> u32 region = bind_ops[i].region;
>
> ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
> - addr, range, op, tile_mask,
> - region);
> + addr, range, op, flags,
> + tile_mask, region);
> if (IS_ERR(ops[i])) {
> err = PTR_ERR(ops[i]);
> ops[i] = NULL;
> diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
> index 3e249f197d90..59e9f2f87074 100644
> --- a/include/uapi/drm/xe_drm.h
> +++ b/include/uapi/drm/xe_drm.h
> @@ -663,8 +663,10 @@ struct drm_xe_vm_bind_op {
> #define XE_VM_BIND_OP_RESTART 0x3
> #define XE_VM_BIND_OP_UNMAP_ALL 0x4
> #define XE_VM_BIND_OP_PREFETCH 0x5
> + /** @op: Bind operation to perform */
> + __u32 op;
>
> -#define XE_VM_BIND_FLAG_READONLY (0x1 << 16)
> +#define XE_VM_BIND_FLAG_READONLY (0x1 << 0)
> /*
> * Bind op completions are always async, hence the support for out
> * sync. This flag indicates the allocation of the memory for new page
> @@ -689,12 +691,12 @@ struct drm_xe_vm_bind_op {
> * configured in the VM and must be set if the VM is configured with
> * DRM_XE_VM_CREATE_ASYNC_BIND_OPS and not in an error state.
> */
> -#define XE_VM_BIND_FLAG_ASYNC (0x1 << 17)
> +#define XE_VM_BIND_FLAG_ASYNC (0x1 << 1)
> /*
> * Valid on a faulting VM only, do the MAP operation immediately rather
> * than deferring the MAP to the page fault handler.
> */
> -#define XE_VM_BIND_FLAG_IMMEDIATE (0x1 << 18)
> +#define XE_VM_BIND_FLAG_IMMEDIATE (0x1 << 2)
> /*
> * When the NULL flag is set, the page tables are setup with a special
> * bit which indicates writes are dropped and all reads return zero. In
> @@ -702,9 +704,9 @@ struct drm_xe_vm_bind_op {
> * operations, the BO handle MBZ, and the BO offset MBZ. This flag is
> * intended to implement VK sparse bindings.
> */
> -#define XE_VM_BIND_FLAG_NULL (0x1 << 19)
> - /** @op: Operation to perform (lower 16 bits) and flags (upper 16 bits) */
> - __u32 op;
> +#define XE_VM_BIND_FLAG_NULL (0x1 << 3)
> + /** @flags: Bind flags */
> + __u32 flags;
>
> /** @mem_region: Memory region to prefetch VMA to, instance not a mask */
> __u32 region;
> --
> 2.41.0
>
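For anyone following along, here is a minimal userspace sketch of the new
layout (the handle and addresses are made up for illustration; only fields
shown or touched in this patch are used):

	__u32 bo_handle = 1;	/* hypothetical GEM handle from a prior BO create */
	struct drm_xe_vm_bind_op bind_op = {
		.obj        = bo_handle,
		.obj_offset = 0,
		.range      = 0x10000,			/* hypothetical length */
		.addr       = 0x1a0000,			/* hypothetical GPU VA */
		.op         = XE_VM_BIND_OP_MAP,	/* operation only */
		.flags      = XE_VM_BIND_FLAG_ASYNC |	/* flags in their own member */
			      XE_VM_BIND_FLAG_READONLY,
		.region     = 0,
	};

Previously both were packed into .op (e.g. XE_VM_BIND_OP_MAP |
XE_VM_BIND_FLAG_ASYNC, with the flags in bits 16 and up) and the kernel had
to split them with VM_BIND_OP(op); separate u32 members drop that masking
and leave a full 32 bits each for future operations and flags.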