[igt-dev] [PATCH i-g-t 1/8] vm_bind: import uapi definitions
Matthew Auld
matthew.auld at intel.com
Thu Sep 29 15:34:29 UTC 2022
On 28/09/2022 07:21, Niranjana Vishwanathapura wrote:
> Import required VM_BIND kernel uapi definitions.
>
> Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura at intel.com>
i915_drm.h changes usually have to land in drm-next or so before we add
them here. We can add these to i915_drm_local.h instead, and then remove
them and resync the proper header later, once everything has landed.
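
Something along these lines in lib/i915/i915_drm_local.h should be enough to
carry the new bits in the meantime (just a sketch; the guard and exactly which
definitions get copied are up to you):

/* lib/i915/i915_drm_local.h -- sketch only, kept in sync by hand */
#if !defined(DRM_I915_GEM_VM_BIND)
#define DRM_I915_GEM_VM_BIND		0x3d
#define DRM_I915_GEM_VM_UNBIND		0x3e
#define DRM_I915_GEM_EXECBUFFER3	0x3f
/*
 * Plus the matching DRM_IOCTL_* wrappers and struct drm_i915_gem_vm_bind,
 * struct drm_i915_gem_vm_unbind, struct drm_i915_gem_timeline_fence and
 * struct drm_i915_gem_execbuffer3, copied verbatim from this patch.
 */
#endif

Everything carried there then gets deleted again once the real header is
resynced.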
> ---
> include/drm-uapi/i915_drm.h | 285 +++++++++++++++++++++++++++++++++++-
> 1 file changed, 282 insertions(+), 3 deletions(-)
>
> diff --git a/include/drm-uapi/i915_drm.h b/include/drm-uapi/i915_drm.h
> index b4efc96c2e..ab38d0dfac 100644
> --- a/include/drm-uapi/i915_drm.h
> +++ b/include/drm-uapi/i915_drm.h
> @@ -470,6 +470,9 @@ typedef struct _drm_i915_sarea {
> #define DRM_I915_GEM_VM_CREATE 0x3a
> #define DRM_I915_GEM_VM_DESTROY 0x3b
> #define DRM_I915_GEM_CREATE_EXT 0x3c
> +#define DRM_I915_GEM_VM_BIND 0x3d
> +#define DRM_I915_GEM_VM_UNBIND 0x3e
> +#define DRM_I915_GEM_EXECBUFFER3 0x3f
> /* Must be kept compact -- no holes */
>
> #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
> @@ -534,6 +537,9 @@ typedef struct _drm_i915_sarea {
> #define DRM_IOCTL_I915_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
> #define DRM_IOCTL_I915_GEM_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)
> #define DRM_IOCTL_I915_GEM_VM_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)
> +#define DRM_IOCTL_I915_GEM_VM_BIND DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_BIND, struct drm_i915_gem_vm_bind)
> +#define DRM_IOCTL_I915_GEM_VM_UNBIND DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_UNBIND, struct drm_i915_gem_vm_unbind)
> +#define DRM_IOCTL_I915_GEM_EXECBUFFER3 DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER3, struct drm_i915_gem_execbuffer3)
>
> /* Allow drivers to submit batchbuffers directly to hardware, relying
> * on the security mechanisms provided by hardware.
> @@ -749,6 +755,25 @@ typedef struct drm_i915_irq_wait {
> /* Query if the kernel supports the I915_USERPTR_PROBE flag. */
> #define I915_PARAM_HAS_USERPTR_PROBE 56
>
> +/*
> + * VM_BIND feature version supported.
> + *
> + * The following versions of VM_BIND have been defined:
> + *
> + * 0: No VM_BIND support.
> + *
> + * 1: In VM_UNBIND calls, the UMD must specify the exact mappings created
> + * previously with VM_BIND; the ioctl will not support unbinding multiple
> + * mappings or splitting them. Similarly, VM_BIND calls will not replace
> + * any existing mappings.
> + *
> + * 2: The restrictions on unbinding partial or multiple mappings are
> + * lifted. Similarly, binding will replace any mappings in the given range.
> + *
> + * See struct drm_i915_gem_vm_bind and struct drm_i915_gem_vm_unbind.
> + */
> +#define I915_PARAM_VM_BIND_VERSION 57
> +
> /* Must be kept compact -- no holes and well documented */
>
> typedef struct drm_i915_getparam {
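
Just for illustration, probing this from a test is the usual GETPARAM dance;
query_vm_bind_version() below is a made-up helper name, and the include of the
local header is an assumption:

#include <string.h>
#include <xf86drm.h>
#include "i915/i915_drm_local.h"	/* or the resynced i915_drm.h */

/* Returns the VM_BIND version, or 0 if the kernel has no VM_BIND support. */
static int query_vm_bind_version(int fd)
{
	struct drm_i915_getparam gp;
	int value = 0;

	memset(&gp, 0, sizeof(gp));
	gp.param = I915_PARAM_VM_BIND_VERSION;
	gp.value = &value;

	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return 0;	/* older kernels reject the param */

	return value;
}

Tests can then require a suitable version up front and skip gracefully on
older kernels.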
> @@ -1254,7 +1279,8 @@ struct drm_i915_gem_exec_fence {
> /*
> * See drm_i915_gem_execbuffer_ext_timeline_fences.
> */
> -#define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 0
> +#define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 0
> +#define DRM_I915_GEM_EXECBUFFER3_EXT_TIMELINE_FENCES 0
>
> /*
> * This structure describes an array of drm_syncobj and associated points for
> @@ -1441,6 +1467,100 @@ struct drm_i915_gem_execbuffer2 {
> #define i915_execbuffer2_get_context_id(eb2) \
> ((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
>
> +/**
> + * struct drm_i915_gem_timeline_fence - An input or output timeline fence.
> + *
> + * The operation will wait for the input fence to signal.
> + *
> + * The returned output fence will be signaled after the completion of the
> + * operation.
> + */
> +struct drm_i915_gem_timeline_fence {
> + /** @handle: User's handle for a drm_syncobj to wait on or signal. */
> + __u32 handle;
> +
> + /**
> + * @flags: Supported flags are:
> + *
> + * I915_TIMELINE_FENCE_WAIT:
> + * Wait for the input fence before the operation.
> + *
> + * I915_TIMELINE_FENCE_SIGNAL:
> + * Return operation completion fence as output.
> + */
> + __u32 flags;
> +#define I915_TIMELINE_FENCE_WAIT (1 << 0)
> +#define I915_TIMELINE_FENCE_SIGNAL (1 << 1)
> +#define __I915_TIMELINE_FENCE_UNKNOWN_FLAGS (-(I915_TIMELINE_FENCE_SIGNAL << 1))
> +
> + /**
> + * @value: A point in the timeline.
> + * Value must be 0 for a binary drm_syncobj. A value of 0 for a
> + * timeline drm_syncobj is invalid as it turns a drm_syncobj into a
> + * binary one.
> + */
> + __u64 value;
> +};
> +
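
As an aside, an out-fence entry on a plain binary syncobj would look roughly
like this (make_out_fence() is just an illustrative name; error handling
omitted):

#include <stdint.h>
#include <xf86drm.h>
#include "i915_drm.h"	/* or the local header carrying these defs */

/* Create a binary syncobj and describe it as a signal-only timeline fence. */
static struct drm_i915_gem_timeline_fence make_out_fence(int fd)
{
	struct drm_i915_gem_timeline_fence fence = {};
	uint32_t syncobj = 0;

	drmSyncobjCreate(fd, 0, &syncobj);

	fence.handle = syncobj;
	fence.flags = I915_TIMELINE_FENCE_SIGNAL;	/* out fence only */
	fence.value = 0;				/* binary syncobj */

	return fence;
}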
> +/**
> + * struct drm_i915_gem_execbuffer3 - Structure for DRM_I915_GEM_EXECBUFFER3
> + * ioctl.
> + *
> + * The DRM_I915_GEM_EXECBUFFER3 ioctl only works in VM_BIND mode, and VM_BIND
> + * mode only works with this ioctl for submission.
> + * See I915_VM_CREATE_FLAGS_USE_VM_BIND.
> + */
> +struct drm_i915_gem_execbuffer3 {
> + /**
> + * @ctx_id: Context id
> + *
> + * Only contexts with a user engine map are allowed.
> + */
> + __u32 ctx_id;
> +
> + /**
> + * @engine_idx: Engine index
> + *
> + * An index in the user engine map of the context specified by @ctx_id.
> + */
> + __u32 engine_idx;
> +
> + /**
> + * @batch_address: Batch gpu virtual address(es).
> + *
> + * For normal submission, it is the gpu virtual address of the batch
> + * buffer. For parallel submission, it is a pointer to an array of
> + * batch buffer gpu virtual addresses with array size equal to the
> + * number of (parallel) engines involved in that submission (See
> + * struct i915_context_engines_parallel_submit).
> + */
> + __u64 batch_address;
> +
> + /** @flags: Currently reserved, MBZ */
> + __u64 flags;
> +#define __I915_EXEC3_UNKNOWN_FLAGS (~0)
> +
> + /** @fence_count: Number of fences in @timeline_fences array. */
> + __u64 fence_count;
> +
> + /**
> + * @timeline_fences: Pointer to an array of timeline fences.
> + *
> + * Timeline fences are of format struct drm_i915_gem_timeline_fence.
> + */
> + __u64 timeline_fences;
> +
> + /** @rsvd: Reserved, MBZ */
> + __u64 rsvd;
> +
> + /**
> + * @extensions: Zero-terminated chain of extensions.
> + *
> + * For future extensions. See struct i915_user_extension.
> + */
> + __u64 extensions;
> +};
> +
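
FWIW a minimal single-batch submission through this would look something like
below; exec3_submit() is a made-up name, and ctx_id/engine_idx/batch_va are
assumed to come from the usual context setup plus a prior VM_BIND of the
batch buffer:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "i915_drm.h"	/* or the local header carrying these defs */

static int exec3_submit(int fd, uint32_t ctx_id, uint32_t engine_idx,
			uint64_t batch_va,
			struct drm_i915_gem_timeline_fence *out_fence)
{
	struct drm_i915_gem_execbuffer3 eb;

	memset(&eb, 0, sizeof(eb));
	eb.ctx_id = ctx_id;
	eb.engine_idx = engine_idx;
	eb.batch_address = batch_va;		/* GPU VA, not a BO handle */
	eb.fence_count = out_fence ? 1 : 0;
	eb.timeline_fences = (uintptr_t)out_fence;

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER3, &eb);
}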
> struct drm_i915_gem_pin {
> /** Handle of the buffer to be pinned. */
> __u32 handle;
> @@ -2397,8 +2517,6 @@ struct drm_i915_gem_context_destroy {
> * The id of new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is
> * returned in the outparam @id.
> *
> - * No flags are defined, with all bits reserved and must be zero.
> - *
> * An extension chain maybe provided, starting with @extensions, and terminated
> * by the @next_extension being 0. Currently, no extensions are defined.
> *
> @@ -2410,6 +2528,9 @@ struct drm_i915_gem_context_destroy {
> */
> struct drm_i915_gem_vm_control {
> __u64 extensions;
> +#define I915_VM_CREATE_FLAGS_USE_VM_BIND (1u << 0)
> +#define I915_VM_CREATE_FLAGS_UNKNOWN \
> + (-(I915_VM_CREATE_FLAGS_USE_VM_BIND << 1))
> __u32 flags;
> __u32 vm_id;
> };
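
For completeness, creating an address space in VM_BIND mode is then just the
following (create_vm_bind_vm() is an invented name):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "i915_drm.h"	/* or the local header */

/* Returns the new vm_id, or 0 on failure. */
static uint32_t create_vm_bind_vm(int fd)
{
	struct drm_i915_gem_vm_control ctl;

	memset(&ctl, 0, sizeof(ctl));
	ctl.flags = I915_VM_CREATE_FLAGS_USE_VM_BIND;

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &ctl))
		return 0;

	return ctl.vm_id;
}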
> @@ -3400,9 +3521,13 @@ struct drm_i915_gem_create_ext {
> *
> * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
> * struct drm_i915_gem_create_ext_protected_content.
> + *
> + * For I915_GEM_CREATE_EXT_VM_PRIVATE usage see
> + * struct drm_i915_gem_create_ext_vm_private.
> */
> #define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
> #define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
> +#define I915_GEM_CREATE_EXT_VM_PRIVATE 2
> __u64 extensions;
> };
>
> @@ -3504,6 +3629,160 @@ struct drm_i915_gem_create_ext_protected_content {
> /* ID of the protected content session managed by i915 when PXP is active */
> #define I915_PROTECTED_CONTENT_DEFAULT_SESSION 0xf
>
> +/**
> + * struct drm_i915_gem_create_ext_vm_private - Extension to make the object
> + * private to the specified VM.
> + *
> + * See struct drm_i915_gem_create_ext.
> + *
> + * By default, BOs can be mapped on multiple VMs and can also be dma-buf
> + * exported. Hence these BOs are referred to as Shared BOs.
> + * During each execbuf3 submission, the request fence must be added to the
> + * dma-resv fence list of all Shared BOs mapped on the VM.
> + *
> + * Unlike Shared BOs, VM private BOs can only be mapped on the VM they are
> + * private to and can't be dma-buf exported. All private BOs of a VM share
> + * the same dma-resv object, hence during each execbuf3 submission only one
> + * dma-resv fence list needs to be updated. Thus, the fast path (where the
> + * required mappings are already bound) submission latency is O(1) w.r.t. the
> + * number of VM private BOs.
> + */
> +struct drm_i915_gem_create_ext_vm_private {
> + /** @base: Extension link. See struct i915_user_extension. */
> + struct i915_user_extension base;
> +
> + /** @vm_id: Id of the VM to which Object is private */
> + __u32 vm_id;
> +};
> +
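
A sketch of creating a BO private to a given VM with this extension
(create_vm_private_bo() is a made-up helper; no placement/memory-region
extensions are chained here):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "i915_drm.h"	/* or the local header */

static uint32_t create_vm_private_bo(int fd, uint32_t vm_id, uint64_t size)
{
	struct drm_i915_gem_create_ext_vm_private vm_priv;
	struct drm_i915_gem_create_ext create;

	memset(&vm_priv, 0, sizeof(vm_priv));
	vm_priv.base.name = I915_GEM_CREATE_EXT_VM_PRIVATE;
	vm_priv.vm_id = vm_id;

	memset(&create, 0, sizeof(create));
	create.size = size;
	create.extensions = (uintptr_t)&vm_priv;

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create))
		return 0;

	return create.handle;
}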
> +/**
> + * struct drm_i915_gem_vm_bind - VA to object mapping to bind.
> + *
> + * This structure is passed to VM_BIND ioctl and specifies the mapping of GPU
> + * virtual address (VA) range to the section of an object that should be bound
> + * in the device page table of the specified address space (VM).
> + * The VA range specified must be unique (i.e., not currently bound) and can
> + * be mapped to the whole object or to a section of the object (partial
> + * binding).
> + * Multiple VA mappings can be created to the same section of the object
> + * (aliasing).
> + *
> + * The @start, @offset and @length must be 4K page aligned. However, DG2 and
> + * XEHPSDV have a 64K page size for device local memory and a compact page
> + * table. On those platforms, for binding device local-memory objects, the
> + * @start, @offset and @length must be 64K aligned. Also, UMDs should not mix
> + * the local memory 64K page and the system memory 4K page bindings in the
> + * same 2M range.
> + *
> + * Error code -EINVAL will be returned if @start, @offset and @length are not
> + * properly aligned. In version 1 (See I915_PARAM_VM_BIND_VERSION), error code
> + * -ENOSPC will be returned if the VA range specified can't be reserved.
> + *
> + * VM_BIND/UNBIND ioctl calls executed on different CPU threads concurrently
> + * are not ordered. Furthermore, parts of the VM_BIND operation can be done
> + * asynchronously, if valid @fence is specified.
> + */
> +struct drm_i915_gem_vm_bind {
> + /** @vm_id: VM (address space) id to bind */
> + __u32 vm_id;
> +
> + /** @handle: Object handle */
> + __u32 handle;
> +
> + /** @start: Virtual Address start to bind */
> + __u64 start;
> +
> + /** @offset: Offset in object to bind */
> + __u64 offset;
> +
> + /** @length: Length of mapping to bind */
> + __u64 length;
> +
> + /**
> + * @flags: Currently reserved, MBZ.
> + *
> + * Note that @fence carries its own flags.
> + */
> + __u64 flags;
> +
> + /**
> + * @fence: Timeline fence for bind completion signaling.
> + *
> + * Timeline fence is of format struct drm_i915_gem_timeline_fence.
> + *
> + * It is an out fence, hence using I915_TIMELINE_FENCE_WAIT flag
> + * is invalid, and an error will be returned.
> + *
> + * If I915_TIMELINE_FENCE_SIGNAL flag is not set, then out fence
> + * is not requested and binding is completed synchronously.
> + */
> + struct drm_i915_gem_timeline_fence fence;
> +
> + /**
> + * @extensions: Zero-terminated chain of extensions.
> + *
> + * For future extensions. See struct i915_user_extension.
> + */
> + __u64 extensions;
> +};
> +
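
For reference, a synchronous bind (no out fence requested, per the kdoc above)
boils down to this; vm_bind_sync() is an invented name, and va/offset/length
are assumed to already match the platform's alignment requirements:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "i915_drm.h"	/* or the local header */

static int vm_bind_sync(int fd, uint32_t vm_id, uint32_t handle,
			uint64_t va, uint64_t offset, uint64_t length)
{
	struct drm_i915_gem_vm_bind bind;

	memset(&bind, 0, sizeof(bind));
	bind.vm_id = vm_id;
	bind.handle = handle;
	bind.start = va;
	bind.offset = offset;
	bind.length = length;
	/* fence.flags left clear: no I915_TIMELINE_FENCE_SIGNAL, so the
	 * bind completes synchronously. */

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_VM_BIND, &bind);
}

Setting I915_TIMELINE_FENCE_SIGNAL in bind.fence instead would make it an
asynchronous bind with a completion syncobj.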
> +/**
> + * struct drm_i915_gem_vm_unbind - VA to object mapping to unbind.
> + *
> + * This structure is passed to VM_UNBIND ioctl and specifies the GPU virtual
> + * address (VA) range that should be unbound from the device page table of the
> + * specified address space (VM). VM_UNBIND will force unbind the specified
> + * range from the device page table without waiting for any GPU job to
> + * complete. It is the UMD's responsibility to ensure the mapping is no longer
> + * in use before calling VM_UNBIND.
> + *
> + * If the specified mapping is not found, the ioctl will simply return without
> + * any error.
> + *
> + * VM_BIND/UNBIND ioctl calls executed on different CPU threads concurrently
> + * are not ordered. Furthermore, parts of the VM_UNBIND operation can be done
> + * asynchronously, if valid @fence is specified.
> + */
> +struct drm_i915_gem_vm_unbind {
> + /** @vm_id: VM (address space) id to unbind */
> + __u32 vm_id;
> +
> + /** @rsvd: Reserved, MBZ */
> + __u32 rsvd;
> +
> + /** @start: Virtual Address start to unbind */
> + __u64 start;
> +
> + /** @length: Length of mapping to unbind */
> + __u64 length;
> +
> + /**
> + * @flags: Currently reserved, MBZ.
> + *
> + * Note that @fence carries its own flags.
> + */
> + __u64 flags;
> +
> + /**
> + * @fence: Timeline fence for unbind completion signaling.
> + *
> + * Timeline fence is of format struct drm_i915_gem_timeline_fence.
> + *
> + * It is an out fence, hence using I915_TIMELINE_FENCE_WAIT flag
> + * is invalid, and an error will be returned.
> + *
> + * If I915_TIMELINE_FENCE_SIGNAL flag is not set, then out fence
> + * is not requested and unbinding is completed synchronously.
> + */
> + struct drm_i915_gem_timeline_fence fence;
> +
> + /**
> + * @extensions: Zero-terminated chain of extensions.
> + *
> + * For future extensions. See struct i915_user_extension.
> + */
> + __u64 extensions;
> +};
> +
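
And the matching synchronous unbind, same caveats as above (vm_unbind_sync()
is a made-up name):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "i915_drm.h"	/* or the local header */

static int vm_unbind_sync(int fd, uint32_t vm_id, uint64_t va, uint64_t length)
{
	struct drm_i915_gem_vm_unbind unbind;

	memset(&unbind, 0, sizeof(unbind));
	unbind.vm_id = vm_id;
	unbind.start = va;
	unbind.length = length;

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_VM_UNBIND, &unbind);
}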
> #if defined(__cplusplus)
> }
> #endif