[PATCH v1 08/14] drm/xe/uapi: Order sections

Lucas De Marchi lucas.demarchi at intel.com
Thu Dec 14 20:57:24 UTC 2023


On Thu, Dec 07, 2023 at 01:50:03PM +0000, Francois Dugast wrote:
>From: Rodrigo Vivi <rodrigo.vivi at intel.com>
>
>This patch doesn't modify any text or uapi entries themselves.
>It only moves things up and down, aiming at a better organization of the uAPI.
>
>While fixing the documentation I noticed that query_engine_cs_cycles
>was in the middle of the memory_region info. Then I noticed more
>mismatches on the order when compared to the order of the IOCTL
>and QUERY entries declaration. So this patch aims to bring some
>order to the uAPI so it gets easier to read and the documentation
>generated in the end is able to tell a consistent story.
>
>Overall order:
>
>1. IOCTL definition
>2. Extension definition and helper structs
>3. IOCTL's Query structs in the order of the Query's entries.
>4. The rest of IOCTL structs in the order of IOCTL declaration.
>5. uEvents
>6. PMU
>
>Signed-off-by: Rodrigo Vivi <rodrigo.vivi at intel.com>
>Signed-off-by: Francois Dugast <francois.dugast at intel.com>
>---
> include/uapi/drm/xe_drm.h | 255 ++++++++++++++++++++------------------
> 1 file changed, 132 insertions(+), 123 deletions(-)
>
>diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
>index f9178fe024a5..b36f3bd53611 100644
>--- a/include/uapi/drm/xe_drm.h
>+++ b/include/uapi/drm/xe_drm.h
>@@ -12,19 +12,51 @@
> extern "C" {
> #endif
>
>-/* Please note that modifications to all structs defined here are
>+/*
>+ * Please note that modifications to all structs defined here are
>  * subject to backwards-compatibility constraints.
>+ *
>+ * Sections in this file are organized as follows:
>+ *   1. IOCTL definition
>+ *   2. Extension definition and helper structs
>+ *   3. IOCTL's Query structs in the order of the Query's entries.
>+ *   4. The rest of IOCTL structs in the order of IOCTL declaration.
>+ *   5. uEvents
>+ *   6. PMU
>  */
>
>-/**
>- * DOC: uevent generated by xe on it's pci node.
>+/*
>+ * xe specific ioctls.
>  *
>- * DRM_XE_RESET_FAILED_UEVENT - Event is generated when attempt to reset gt
>- * fails. The value supplied with the event is always "NEEDS_RESET".
>- * Additional information supplied is tile id and gt id of the gt unit for
>- * which reset has failed.
>+ * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END) ie
>+ * [0x40, 0xa0) (a0 is excluded). The numbers below are defined as offset
>+ * against DRM_COMMAND_BASE and should be between [0x0, 0x60).
>  */
>-#define DRM_XE_RESET_FAILED_UEVENT "DEVICE_STATUS"
>+#define DRM_XE_DEVICE_QUERY		0x00
>+#define DRM_XE_GEM_CREATE		0x01
>+#define DRM_XE_GEM_MMAP_OFFSET		0x02
>+#define DRM_XE_VM_CREATE		0x03
>+#define DRM_XE_VM_DESTROY		0x04
>+#define DRM_XE_VM_BIND			0x05
>+#define DRM_XE_EXEC_QUEUE_CREATE	0x06
>+#define DRM_XE_EXEC_QUEUE_DESTROY	0x07
>+#define DRM_XE_EXEC_QUEUE_GET_PROPERTY	0x08
>+#define DRM_XE_EXEC			0x09
>+#define DRM_XE_WAIT_USER_FENCE		0x0a
>+/* Must be kept compact -- no holes */
>+
>+#define DRM_IOCTL_XE_DEVICE_QUERY		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query)
>+#define DRM_IOCTL_XE_GEM_CREATE			DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_CREATE, struct drm_xe_gem_create)
>+#define DRM_IOCTL_XE_GEM_MMAP_OFFSET		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_MMAP_OFFSET, struct drm_xe_gem_mmap_offset)
>+#define DRM_IOCTL_XE_VM_CREATE			DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_CREATE, struct drm_xe_vm_create)
>+#define DRM_IOCTL_XE_VM_DESTROY			DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_DESTROY, struct drm_xe_vm_destroy)
>+#define DRM_IOCTL_XE_VM_BIND			DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_BIND, struct drm_xe_vm_bind)
>+#define DRM_IOCTL_XE_EXEC_QUEUE_CREATE		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_CREATE, struct drm_xe_exec_queue_create)
>+#define DRM_IOCTL_XE_EXEC_QUEUE_DESTROY		DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_DESTROY, struct drm_xe_exec_queue_destroy)
>+#define DRM_IOCTL_XE_EXEC_QUEUE_SET_PROPERTY	DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_SET_PROPERTY, struct drm_xe_exec_queue_set_property)
>+#define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY	DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property)
>+#define DRM_IOCTL_XE_EXEC			DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
>+#define DRM_IOCTL_XE_WAIT_USER_FENCE		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
>
> /**
>  * struct xe_user_extension - Base class for defining a chain of extensions
>@@ -90,38 +122,25 @@ struct xe_user_extension {
> 	__u32 pad;
> };
>
>-/*
>- * xe specific ioctls.
>- *
>- * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END) ie
>- * [0x40, 0xa0) (a0 is excluded). The numbers below are defined as offset
>- * against DRM_COMMAND_BASE and should be between [0x0, 0x60).
>+/**
>+ * struct drm_xe_ext_set_property - XE set property extension
>  */
>-#define DRM_XE_DEVICE_QUERY		0x00
>-#define DRM_XE_GEM_CREATE		0x01
>-#define DRM_XE_GEM_MMAP_OFFSET		0x02
>-#define DRM_XE_VM_CREATE		0x03
>-#define DRM_XE_VM_DESTROY		0x04
>-#define DRM_XE_VM_BIND			0x05
>-#define DRM_XE_EXEC_QUEUE_CREATE	0x06
>-#define DRM_XE_EXEC_QUEUE_DESTROY	0x07
>-#define DRM_XE_EXEC_QUEUE_GET_PROPERTY	0x08
>-#define DRM_XE_EXEC			0x09
>-#define DRM_XE_WAIT_USER_FENCE		0x0a
>-/* Must be kept compact -- no holes */
>+struct drm_xe_ext_set_property {
>+	/** @base: base user extension */
>+	struct xe_user_extension base;
>
>-#define DRM_IOCTL_XE_DEVICE_QUERY		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query)
>-#define DRM_IOCTL_XE_GEM_CREATE			DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_CREATE, struct drm_xe_gem_create)
>-#define DRM_IOCTL_XE_GEM_MMAP_OFFSET		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_MMAP_OFFSET, struct drm_xe_gem_mmap_offset)
>-#define DRM_IOCTL_XE_VM_CREATE			DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_CREATE, struct drm_xe_vm_create)
>-#define DRM_IOCTL_XE_VM_DESTROY			DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_DESTROY, struct drm_xe_vm_destroy)
>-#define DRM_IOCTL_XE_VM_BIND			DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_BIND, struct drm_xe_vm_bind)
>-#define DRM_IOCTL_XE_EXEC_QUEUE_CREATE		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_CREATE, struct drm_xe_exec_queue_create)
>-#define DRM_IOCTL_XE_EXEC_QUEUE_DESTROY		DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_DESTROY, struct drm_xe_exec_queue_destroy)
>-#define DRM_IOCTL_XE_EXEC_QUEUE_SET_PROPERTY	DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_SET_PROPERTY, struct drm_xe_exec_queue_set_property)
>-#define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY	DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property)
>-#define DRM_IOCTL_XE_EXEC			DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
>-#define DRM_IOCTL_XE_WAIT_USER_FENCE		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
>+	/** @property: property to set */
>+	__u32 property;
>+
>+	/** @pad: MBZ */
>+	__u32 pad;
>+
>+	/** @value: property value */
>+	__u64 value;
>+
>+	/** @reserved: Reserved */
>+	__u64 reserved[2];
>+};
>
> /**
>  * struct drm_xe_engine_class_instance - instance of an engine class
>@@ -279,57 +298,6 @@ struct drm_xe_mem_region {
> 	__u64 reserved[6];
> };
>
>-/**
>- * struct drm_xe_query_engine_cycles - correlate CPU and GPU timestamps
>- *
>- * If a query is made with a struct drm_xe_device_query where .query is equal to
>- * DRM_XE_DEVICE_QUERY_ENGINE_CYCLES, then the reply uses struct drm_xe_query_engine_cycles
>- * in .data. struct drm_xe_query_engine_cycles is allocated by the user and
>- * .data points to this allocated structure.
>- *
>- * The query returns the engine cycles, which along with GT's @reference_clock,
>- * can be used to calculate the engine timestamp. In addition the
>- * query returns a set of cpu timestamps that indicate when the command
>- * streamer cycle count was captured.
>- */
>-struct drm_xe_query_engine_cycles {
>-	/**
>-	 * @eci: This is input by the user and is the engine for which command
>-	 * streamer cycles is queried.
>-	 */
>-	struct drm_xe_engine_class_instance eci;
>-
>-	/**
>-	 * @clockid: This is input by the user and is the reference clock id for
>-	 * CPU timestamp. For definition, see clock_gettime(2) and
>-	 * perf_event_open(2). Supported clock ids are CLOCK_MONOTONIC,
>-	 * CLOCK_MONOTONIC_RAW, CLOCK_REALTIME, CLOCK_BOOTTIME, CLOCK_TAI.
>-	 */
>-	__s32 clockid;
>-
>-	/** @width: Width of the engine cycle counter in bits. */
>-	__u32 width;
>-
>-	/**
>-	 * @engine_cycles: Engine cycles as read from its register
>-	 * at 0x358 offset.
>-	 */
>-	__u64 engine_cycles;
>-
>-	/**
>-	 * @cpu_timestamp: CPU timestamp in ns. The timestamp is captured before
>-	 * reading the engine_cycles register using the reference clockid set by the
>-	 * user.
>-	 */
>-	__u64 cpu_timestamp;
>-
>-	/**
>-	 * @cpu_delta: Time delta in ns captured around reading the lower dword
>-	 * of the engine_cycles register.
>-	 */
>-	__u64 cpu_delta;
>-};
>-
> /**
>  * struct drm_xe_query_mem_regions - describe memory regions
>  *
>@@ -487,6 +455,57 @@ struct drm_xe_query_topology_mask {
> 	__u8 mask[];
> };
>
>+/**
>+ * struct drm_xe_query_engine_cycles - correlate CPU and GPU timestamps
>+ *
>+ * If a query is made with a struct drm_xe_device_query where .query is equal to
>+ * DRM_XE_DEVICE_QUERY_ENGINE_CYCLES, then the reply uses struct drm_xe_query_engine_cycles
>+ * in .data. struct drm_xe_query_engine_cycles is allocated by the user and
>+ * .data points to this allocated structure.
>+ *
>+ * The query returns the engine cycles, which along with GT's @reference_clock,
>+ * can be used to calculate the engine timestamp. In addition the
>+ * query returns a set of cpu timestamps that indicate when the command
>+ * streamer cycle count was captured.
>+ */
>+struct drm_xe_query_engine_cycles {
>+	/**
>+	 * @eci: This is input by the user and is the engine for which command
>+	 * streamer cycles is queried.
>+	 */
>+	struct drm_xe_engine_class_instance eci;
>+
>+	/**
>+	 * @clockid: This is input by the user and is the reference clock id for
>+	 * CPU timestamp. For definition, see clock_gettime(2) and
>+	 * perf_event_open(2). Supported clock ids are CLOCK_MONOTONIC,
>+	 * CLOCK_MONOTONIC_RAW, CLOCK_REALTIME, CLOCK_BOOTTIME, CLOCK_TAI.
>+	 */
>+	__s32 clockid;
>+
>+	/** @width: Width of the engine cycle counter in bits. */
>+	__u32 width;
>+
>+	/**
>+	 * @engine_cycles: Engine cycles as read from its register
>+	 * at 0x358 offset.
>+	 */
>+	__u64 engine_cycles;
>+
>+	/**
>+	 * @cpu_timestamp: CPU timestamp in ns. The timestamp is captured before
>+	 * reading the engine_cycles register using the reference clockid set by the
>+	 * user.
>+	 */
>+	__u64 cpu_timestamp;
>+
>+	/**
>+	 * @cpu_delta: Time delta in ns captured around reading the lower dword
>+	 * of the engine_cycles register.
>+	 */
>+	__u64 cpu_delta;
>+};
>+
> /**
>  * struct drm_xe_device_query - Input of &DRM_IOCTL_XE_DEVICE_QUERY - main
>  * structure to query device information
>@@ -673,26 +692,6 @@ struct drm_xe_gem_mmap_offset {
> 	__u64 reserved[2];
> };
>
>-/**
>- * struct drm_xe_ext_set_property - XE set property extension
>- */
>-struct drm_xe_ext_set_property {
>-	/** @base: base user extension */
>-	struct xe_user_extension base;
>-
>-	/** @property: property to set */
>-	__u32 property;
>-
>-	/** @pad: MBZ */
>-	__u32 pad;
>-
>-	/** @value: property value */
>-	__u64 value;
>-
>-	/** @reserved: Reserved */
>-	__u64 reserved[2];
>-};
>-
> /**
>  * struct drm_xe_vm_create - Input of &DRM_IOCTL_XE_VM_CREATE
>  *
>@@ -984,6 +983,20 @@ struct drm_xe_exec_queue_create {
> 	__u64 reserved[2];
> };
>
>+/**
>+ * struct drm_xe_exec_queue_destroy - Input of &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY
>+ */
>+struct drm_xe_exec_queue_destroy {
>+	/** @exec_queue_id: Exec queue ID */
>+	__u32 exec_queue_id;
>+
>+	/** @pad: MBZ */
>+	__u32 pad;
>+
>+	/** @reserved: Reserved */
>+	__u64 reserved[2];
>+};
>+
> /**
>  * struct drm_xe_exec_queue_get_property - Input of &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY
>  *
>@@ -1008,20 +1021,6 @@ struct drm_xe_exec_queue_get_property {
> 	__u64 reserved[2];
> };
>
>-/**
>- * struct drm_xe_exec_queue_destroy - Input of &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY
>- */
>-struct drm_xe_exec_queue_destroy {
>-	/** @exec_queue_id: Exec queue ID */
>-	__u32 exec_queue_id;
>-
>-	/** @pad: MBZ */
>-	__u32 pad;
>-
>-	/** @reserved: Reserved */
>-	__u64 reserved[2];
>-};
>-
> /**
>  * struct drm_xe_sync - sync object
>  *
>@@ -1200,6 +1199,16 @@ struct drm_xe_wait_user_fence {
> 	__u64 reserved[2];
> };
>
>+/**
>+ * DOC: uevent generated by xe on its pci node.
>+ *
>+ * DRM_XE_RESET_FAILED_UEVENT - Event is generated when attempt to reset gt
>+ * fails. The value supplied with the event is always "NEEDS_RESET".
>+ * Additional information supplied is tile id and gt id of the gt unit for
>+ * which reset has failed.
>+ */
>+#define DRM_XE_RESET_FAILED_UEVENT "DEVICE_STATUS"

Unrelated to this... Aravind has a patch renaming this to make it
future-proof. We will need to either rename it now, or remove it and
leave its addition for later.


Reviewed-by: Lucas De Marchi <lucas.demarchi at intel.com>


Lucas De Marchi

> /**
>  * DOC: XE PMU event config IDs
>  *
>-- 
>2.34.1
>


More information about the Intel-xe mailing list