[igt-dev] [PATCH i-g-t v1 4/5] drm-uapi/nouveau: sync with drm-next

Lyude Paul lyude at redhat.com
Wed Nov 8 20:41:53 UTC 2023


Reviewed-by: Lyude Paul <lyude at redhat.com>

On Tue, 2023-11-07 at 18:43 +0100, Kamil Konieczny wrote:
> Sync with drm-next commit f2cab4b318ee8023f4ad640b906ae268942a7db4
> 
> Cc: Lyude Paul <lyude at redhat.com>
> Signed-off-by: Kamil Konieczny <kamil.konieczny at linux.intel.com>
> ---
>  include/drm-uapi/nouveau_drm.h | 281 ++++++++++++++++++++++++++++++++-
>  1 file changed, 278 insertions(+), 3 deletions(-)
> 
> diff --git a/include/drm-uapi/nouveau_drm.h b/include/drm-uapi/nouveau_drm.h
> index 853a32743..0bade1592 100644
> --- a/include/drm-uapi/nouveau_drm.h
> +++ b/include/drm-uapi/nouveau_drm.h
> @@ -33,11 +33,61 @@
>  extern "C" {
>  #endif
>  
> +#define NOUVEAU_GETPARAM_PCI_VENDOR      3
> +#define NOUVEAU_GETPARAM_PCI_DEVICE      4
> +#define NOUVEAU_GETPARAM_BUS_TYPE        5
> +#define NOUVEAU_GETPARAM_FB_SIZE         8
> +#define NOUVEAU_GETPARAM_AGP_SIZE        9
> +#define NOUVEAU_GETPARAM_CHIPSET_ID      11
> +#define NOUVEAU_GETPARAM_VM_VRAM_BASE    12
> +#define NOUVEAU_GETPARAM_GRAPH_UNITS     13
> +#define NOUVEAU_GETPARAM_PTIMER_TIME     14
> +#define NOUVEAU_GETPARAM_HAS_BO_USAGE    15
> +#define NOUVEAU_GETPARAM_HAS_PAGEFLIP    16
> +
> +/*
> + * NOUVEAU_GETPARAM_EXEC_PUSH_MAX - query max pushes through getparam
> + *
> + * Query the maximum amount of IBs that can be pushed through a single
> + * &drm_nouveau_exec structure and hence a single &DRM_IOCTL_NOUVEAU_EXEC
> + * ioctl().
> + */
> +#define NOUVEAU_GETPARAM_EXEC_PUSH_MAX   17
> +
> +struct drm_nouveau_getparam {
> +	__u64 param;
> +	__u64 value;
> +};
> +
> +struct drm_nouveau_channel_alloc {
> +	__u32     fb_ctxdma_handle;
> +	__u32     tt_ctxdma_handle;
> +
> +	__s32     channel;
> +	__u32     pushbuf_domains;
> +
> +	/* Notifier memory */
> +	__u32     notifier_handle;
> +
> +	/* DRM-enforced subchannel assignments */
> +	struct {
> +		__u32 handle;
> +		__u32 grclass;
> +	} subchan[8];
> +	__u32 nr_subchan;
> +};
> +
> +struct drm_nouveau_channel_free {
> +	__s32 channel;
> +};
> +
>  #define NOUVEAU_GEM_DOMAIN_CPU       (1 << 0)
>  #define NOUVEAU_GEM_DOMAIN_VRAM      (1 << 1)
>  #define NOUVEAU_GEM_DOMAIN_GART      (1 << 2)
>  #define NOUVEAU_GEM_DOMAIN_MAPPABLE  (1 << 3)
>  #define NOUVEAU_GEM_DOMAIN_COHERENT  (1 << 4)
> +/* The BO will never be shared via import or export. */
> +#define NOUVEAU_GEM_DOMAIN_NO_SHARE  (1 << 5)
>  
>  #define NOUVEAU_GEM_TILE_COMP        0x00030000 /* nv50-only */
>  #define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00
> @@ -98,6 +148,7 @@ struct drm_nouveau_gem_pushbuf_push {
>  	__u32 pad;
>  	__u64 offset;
>  	__u64 length;
> +#define NOUVEAU_GEM_PUSHBUF_NO_PREFETCH (1 << 23)
>  };
>  
>  struct drm_nouveau_gem_pushbuf {
> @@ -126,16 +177,233 @@ struct drm_nouveau_gem_cpu_fini {
>  	__u32 handle;
>  };
>  
> -#define DRM_NOUVEAU_GETPARAM           0x00 /* deprecated */
> +/**
> + * struct drm_nouveau_sync - sync object
> + *
> + * This structure serves as synchronization mechanism for (potentially)
> + * asynchronous operations such as EXEC or VM_BIND.
> + */
> +struct drm_nouveau_sync {
> +	/**
> +	 * @flags: the flags for a sync object
> +	 *
> +	 * The first 8 bits are used to determine the type of the sync object.
> +	 */
> +	__u32 flags;
> +#define DRM_NOUVEAU_SYNC_SYNCOBJ 0x0
> +#define DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ 0x1
> +#define DRM_NOUVEAU_SYNC_TYPE_MASK 0xf
> +	/**
> +	 * @handle: the handle of the sync object
> +	 */
> +	__u32 handle;
> +	/**
> +	 * @timeline_value:
> +	 *
> +	 * The timeline point of the sync object in case the syncobj is of
> +	 * type DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ.
> +	 */
> +	__u64 timeline_value;
> +};
> +
> +/**
> + * struct drm_nouveau_vm_init - GPU VA space init structure
> + *
> + * Used to initialize the GPU's VA space for a user client, telling the kernel
> + * which portion of the VA space is managed by the UMD and kernel respectively.
> + *
> + * For the UMD to use the VM_BIND uAPI, this must be called before any BOs or
> + * channels are created; if called afterwards DRM_IOCTL_NOUVEAU_VM_INIT fails
> + * with -ENOSYS.
> + */
> +struct drm_nouveau_vm_init {
> +	/**
> +	 * @kernel_managed_addr: start address of the kernel managed VA space
> +	 * region
> +	 */
> +	__u64 kernel_managed_addr;
> +	/**
> +	 * @kernel_managed_size: size of the kernel managed VA space region in
> +	 * bytes
> +	 */
> +	__u64 kernel_managed_size;
> +};
> +
> +/**
> + * struct drm_nouveau_vm_bind_op - VM_BIND operation
> + *
> + * This structure represents a single VM_BIND operation. UMDs should pass
> + * an array of this structure via struct drm_nouveau_vm_bind's &op_ptr field.
> + */
> +struct drm_nouveau_vm_bind_op {
> +	/**
> +	 * @op: the operation type
> +	 */
> +	__u32 op;
> +/**
> + * @DRM_NOUVEAU_VM_BIND_OP_MAP:
> + *
> + * Map a GEM object to the GPU's VA space. Optionally, the
> + * &DRM_NOUVEAU_VM_BIND_SPARSE flag can be passed to instruct the kernel to
> + * create sparse mappings for the given range.
> + */
> +#define DRM_NOUVEAU_VM_BIND_OP_MAP 0x0
> +/**
> + * @DRM_NOUVEAU_VM_BIND_OP_UNMAP:
> + *
> + * Unmap an existing mapping in the GPU's VA space. If the region the mapping
> + * is located in is a sparse region, new sparse mappings are created where the
> + * unmapped (memory backed) mapping was mapped previously. To remove a sparse
> + * region the &DRM_NOUVEAU_VM_BIND_SPARSE must be set.
> + */
> +#define DRM_NOUVEAU_VM_BIND_OP_UNMAP 0x1
> +	/**
> +	 * @flags: the flags for a &drm_nouveau_vm_bind_op
> +	 */
> +	__u32 flags;
> +/**
> + * @DRM_NOUVEAU_VM_BIND_SPARSE:
> + *
> + * Indicates that an allocated VA space region should be sparse.
> + */
> +#define DRM_NOUVEAU_VM_BIND_SPARSE (1 << 8)
> +	/**
> +	 * @handle: the handle of the DRM GEM object to map
> +	 */
> +	__u32 handle;
> +	/**
> +	 * @pad: 32 bit padding, should be 0
> +	 */
> +	__u32 pad;
> +	/**
> +	 * @addr:
> +	 *
> +	 * the address the VA space region or (memory backed) mapping should be mapped to
> +	 */
> +	__u64 addr;
> +	/**
> +	 * @bo_offset: the offset within the BO backing the mapping
> +	 */
> +	__u64 bo_offset;
> +	/**
> +	 * @range: the size of the requested mapping in bytes
> +	 */
> +	__u64 range;
> +};
> +
> +/**
> + * struct drm_nouveau_vm_bind - structure for DRM_IOCTL_NOUVEAU_VM_BIND
> + */
> +struct drm_nouveau_vm_bind {
> +	/**
> +	 * @op_count: the number of &drm_nouveau_vm_bind_op
> +	 */
> +	__u32 op_count;
> +	/**
> +	 * @flags: the flags for a &drm_nouveau_vm_bind ioctl
> +	 */
> +	__u32 flags;
> +/**
> + * @DRM_NOUVEAU_VM_BIND_RUN_ASYNC:
> + *
> + * Indicates that the given VM_BIND operation should be executed asynchronously
> + * by the kernel.
> + *
> + * If this flag is not supplied the kernel executes the associated operations
> + * synchronously and doesn't accept any &drm_nouveau_sync objects.
> + */
> +#define DRM_NOUVEAU_VM_BIND_RUN_ASYNC 0x1
> +	/**
> +	 * @wait_count: the number of wait &drm_nouveau_syncs
> +	 */
> +	__u32 wait_count;
> +	/**
> +	 * @sig_count: the number of &drm_nouveau_syncs to signal when finished
> +	 */
> +	__u32 sig_count;
> +	/**
> +	 * @wait_ptr: pointer to &drm_nouveau_syncs to wait for
> +	 */
> +	__u64 wait_ptr;
> +	/**
> +	 * @sig_ptr: pointer to &drm_nouveau_syncs to signal when finished
> +	 */
> +	__u64 sig_ptr;
> +	/**
> +	 * @op_ptr: pointer to the &drm_nouveau_vm_bind_ops to execute
> +	 */
> +	__u64 op_ptr;
> +};
> +
> +/**
> + * struct drm_nouveau_exec_push - EXEC push operation
> + *
> + * This structure represents a single EXEC push operation. UMDs should pass an
> + * array of this structure via struct drm_nouveau_exec's &push_ptr field.
> + */
> +struct drm_nouveau_exec_push {
> +	/**
> +	 * @va: the virtual address of the push buffer mapping
> +	 */
> +	__u64 va;
> +	/**
> +	 * @va_len: the length of the push buffer mapping
> +	 */
> +	__u32 va_len;
> +	/**
> +	 * @flags: the flags for this push buffer mapping
> +	 */
> +	__u32 flags;
> +#define DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH 0x1
> +};
> +
> +/**
> + * struct drm_nouveau_exec - structure for DRM_IOCTL_NOUVEAU_EXEC
> + */
> +struct drm_nouveau_exec {
> +	/**
> +	 * @channel: the channel to execute the push buffer in
> +	 */
> +	__u32 channel;
> +	/**
> +	 * @push_count: the number of &drm_nouveau_exec_push ops
> +	 */
> +	__u32 push_count;
> +	/**
> +	 * @wait_count: the number of wait &drm_nouveau_syncs
> +	 */
> +	__u32 wait_count;
> +	/**
> +	 * @sig_count: the number of &drm_nouveau_syncs to signal when finished
> +	 */
> +	__u32 sig_count;
> +	/**
> +	 * @wait_ptr: pointer to &drm_nouveau_syncs to wait for
> +	 */
> +	__u64 wait_ptr;
> +	/**
> +	 * @sig_ptr: pointer to &drm_nouveau_syncs to signal when finished
> +	 */
> +	__u64 sig_ptr;
> +	/**
> +	 * @push_ptr: pointer to &drm_nouveau_exec_push ops
> +	 */
> +	__u64 push_ptr;
> +};
> +
> +#define DRM_NOUVEAU_GETPARAM           0x00
>  #define DRM_NOUVEAU_SETPARAM           0x01 /* deprecated */
> -#define DRM_NOUVEAU_CHANNEL_ALLOC      0x02 /* deprecated */
> -#define DRM_NOUVEAU_CHANNEL_FREE       0x03 /* deprecated */
> +#define DRM_NOUVEAU_CHANNEL_ALLOC      0x02
> +#define DRM_NOUVEAU_CHANNEL_FREE       0x03
>  #define DRM_NOUVEAU_GROBJ_ALLOC        0x04 /* deprecated */
>  #define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC  0x05 /* deprecated */
>  #define DRM_NOUVEAU_GPUOBJ_FREE        0x06 /* deprecated */
>  #define DRM_NOUVEAU_NVIF               0x07
>  #define DRM_NOUVEAU_SVM_INIT           0x08
>  #define DRM_NOUVEAU_SVM_BIND           0x09
> +#define DRM_NOUVEAU_VM_INIT            0x10
> +#define DRM_NOUVEAU_VM_BIND            0x11
> +#define DRM_NOUVEAU_EXEC               0x12
>  #define DRM_NOUVEAU_GEM_NEW            0x40
>  #define DRM_NOUVEAU_GEM_PUSHBUF        0x41
>  #define DRM_NOUVEAU_GEM_CPU_PREP       0x42
> @@ -188,6 +456,10 @@ struct drm_nouveau_svm_bind {
>  #define NOUVEAU_SVM_BIND_TARGET__GPU_VRAM               (1UL << 31)
>  
>  
> +#define DRM_IOCTL_NOUVEAU_GETPARAM           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GETPARAM, struct drm_nouveau_getparam)
> +#define DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC      DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_ALLOC, struct drm_nouveau_channel_alloc)
> +#define DRM_IOCTL_NOUVEAU_CHANNEL_FREE       DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_FREE, struct drm_nouveau_channel_free)
> +
>  #define DRM_IOCTL_NOUVEAU_SVM_INIT           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_INIT, struct drm_nouveau_svm_init)
>  #define DRM_IOCTL_NOUVEAU_SVM_BIND           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_BIND, struct drm_nouveau_svm_bind)
>  
> @@ -197,6 +469,9 @@ struct drm_nouveau_svm_bind {
>  #define DRM_IOCTL_NOUVEAU_GEM_CPU_FINI       DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_FINI, struct drm_nouveau_gem_cpu_fini)
>  #define DRM_IOCTL_NOUVEAU_GEM_INFO           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_INFO, struct drm_nouveau_gem_info)
>  
> +#define DRM_IOCTL_NOUVEAU_VM_INIT            DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_VM_INIT, struct drm_nouveau_vm_init)
> +#define DRM_IOCTL_NOUVEAU_VM_BIND            DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_VM_BIND, struct drm_nouveau_vm_bind)
> +#define DRM_IOCTL_NOUVEAU_EXEC               DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_EXEC, struct drm_nouveau_exec)
>  #if defined(__cplusplus)
>  }
>  #endif

-- 
Cheers,
 Lyude Paul (she/her)
 Software Engineer at Red Hat



More information about the igt-dev mailing list