[igt-dev] [PATCH v2 07/64] drm-uapi/xe: Add missing DRM_ prefix in uAPI constants

Francois Dugast francois.dugast at intel.com
Fri Nov 3 14:43:02 UTC 2023


Align with the kernel commit ("Add missing DRM_ prefix in uAPI constants").

Signed-off-by: Francois Dugast <francois.dugast at intel.com>
---
 include/drm-uapi/xe_drm.h        | 136 +++++++++++++++----------------
 lib/intel_batchbuffer.c          |   8 +-
 lib/intel_blt.c                  |   2 +-
 lib/xe/xe_ioctl.c                |  22 ++---
 lib/xe/xe_query.c                |  14 ++--
 lib/xe/xe_query.h                |   4 +-
 lib/xe/xe_util.c                 |  10 +--
 lib/xe/xe_util.h                 |   4 +-
 tests/intel/xe_access_counter.c  |   4 +-
 tests/intel/xe_ccs.c             |   4 +-
 tests/intel/xe_copy_basic.c      |   4 +-
 tests/intel/xe_debugfs.c         |  14 ++--
 tests/intel/xe_exec_basic.c      |   8 +-
 tests/intel/xe_exec_fault_mode.c |   4 +-
 tests/intel/xe_exec_reset.c      |  20 ++---
 tests/intel/xe_exec_threads.c    |   4 +-
 tests/intel/xe_exercise_blt.c    |   4 +-
 tests/intel/xe_perf_pmu.c        |   8 +-
 tests/intel/xe_pm.c              |   2 +-
 tests/intel/xe_query.c           |  48 +++++------
 tests/intel/xe_vm.c              |  10 +--
 21 files changed, 167 insertions(+), 167 deletions(-)

diff --git a/include/drm-uapi/xe_drm.h b/include/drm-uapi/xe_drm.h
index bd8c705ee..599ee980c 100644
--- a/include/drm-uapi/xe_drm.h
+++ b/include/drm-uapi/xe_drm.h
@@ -19,12 +19,12 @@ extern "C" {
 /**
  * DOC: uevent generated by xe on it's pci node.
  *
- * XE_RESET_FAILED_UEVENT - Event is generated when attempt to reset gt
+ * DRM_XE_RESET_FAILED_UEVENT - Event is generated when attempt to reset gt
  * fails. The value supplied with the event is always "NEEDS_RESET".
  * Additional information supplied is tile id and gt id of the gt unit for
  * which reset has failed.
  */
-#define XE_RESET_FAILED_UEVENT "DEVICE_STATUS"
+#define DRM_XE_RESET_FAILED_UEVENT "DEVICE_STATUS"
 
 /**
  * struct xe_user_extension - Base class for defining a chain of extensions
@@ -103,8 +103,8 @@ struct xe_user_extension {
 #define DRM_XE_VM_CREATE		0x03
 #define DRM_XE_VM_DESTROY		0x04
 #define DRM_XE_VM_BIND			0x05
-#define DRM_XE_EXEC_QUEUE_CREATE		0x06
-#define DRM_XE_EXEC_QUEUE_DESTROY		0x07
+#define DRM_XE_EXEC_QUEUE_CREATE	0x06
+#define DRM_XE_EXEC_QUEUE_DESTROY	0x07
 #define DRM_XE_EXEC			0x08
 #define DRM_XE_EXEC_QUEUE_SET_PROPERTY	0x09
 #define DRM_XE_WAIT_USER_FENCE		0x0a
@@ -150,14 +150,14 @@ struct drm_xe_engine_class_instance {
  * enum drm_xe_memory_class - Supported memory classes.
  */
 enum drm_xe_memory_class {
-	/** @XE_MEM_REGION_CLASS_SYSMEM: Represents system memory. */
-	XE_MEM_REGION_CLASS_SYSMEM = 0,
+	/** @DRM_XE_MEM_REGION_CLASS_SYSMEM: Represents system memory. */
+	DRM_XE_MEM_REGION_CLASS_SYSMEM = 0,
 	/**
-	 * @XE_MEM_REGION_CLASS_VRAM: On discrete platforms, this
+	 * @DRM_XE_MEM_REGION_CLASS_VRAM: On discrete platforms, this
 	 * represents the memory that is local to the device, which we
 	 * call VRAM. Not valid on integrated platforms.
 	 */
-	XE_MEM_REGION_CLASS_VRAM
+	DRM_XE_MEM_REGION_CLASS_VRAM
 };
 
 /**
@@ -217,7 +217,7 @@ struct drm_xe_query_mem_region {
 	 * always equal the @total_size, since all of it will be CPU
 	 * accessible.
 	 *
-	 * Note this is only tracked for XE_MEM_REGION_CLASS_VRAM
+	 * Note this is only tracked for DRM_XE_MEM_REGION_CLASS_VRAM
 	 * regions (for other types the value here will always equal
 	 * zero).
 	 */
@@ -229,7 +229,7 @@ struct drm_xe_query_mem_region {
 	 * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
 	 * accounting. Without this the value here will always equal
 	 * zero.  Note this is only currently tracked for
-	 * XE_MEM_REGION_CLASS_VRAM regions (for other types the value
+	 * DRM_XE_MEM_REGION_CLASS_VRAM regions (for other types the value
 	 * here will always be zero).
 	 */
 	__u64 cpu_visible_used;
@@ -322,36 +322,36 @@ struct drm_xe_query_config {
 	 * Device ID (lower 16 bits) and the device revision (next
 	 * 8 bits)
 	 */
-#define XE_QUERY_CONFIG_REV_AND_DEVICE_ID	0
+#define DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID		0
 	/*
 	 * Flags describing the device configuration, see list below
 	 */
-#define XE_QUERY_CONFIG_FLAGS			1
+#define DRM_XE_QUERY_CONFIG_FLAGS			1
 	/*
 	 * Flag is set if the device has usable VRAM
 	 */
-	#define XE_QUERY_CONFIG_FLAGS_HAS_VRAM		(0x1 << 0)
+	#define DRM_XE_QUERY_CONFIG_FLAGS_HAS_VRAM	(0x1 << 0)
 	/*
 	 * Minimal memory alignment required by this device,
 	 * typically SZ_4K or SZ_64K
 	 */
-#define XE_QUERY_CONFIG_MIN_ALIGNMENT		2
+#define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT		2
 	/*
 	 * Maximum bits of a virtual address
 	 */
-#define XE_QUERY_CONFIG_VA_BITS			3
+#define DRM_XE_QUERY_CONFIG_VA_BITS			3
 	/*
 	 * Total number of GTs for the entire device
 	 */
-#define XE_QUERY_CONFIG_GT_COUNT		4
+#define DRM_XE_QUERY_CONFIG_GT_COUNT			4
 	/*
 	 * Total number of accessible memory regions
 	 */
-#define XE_QUERY_CONFIG_MEM_REGION_COUNT	5
+#define DRM_XE_QUERY_CONFIG_MEM_REGION_COUNT		5
 	/*
 	 * Value of the highest available exec queue priority
 	 */
-#define XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY	6
+#define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY	6
 	/** @info: array of elements containing the config info */
 	__u64 info[];
 };
@@ -365,9 +365,9 @@ struct drm_xe_query_config {
  * implementing graphics and/or media operations.
  */
 struct drm_xe_query_gt {
-#define XE_QUERY_GT_TYPE_MAIN		0
-#define XE_QUERY_GT_TYPE_REMOTE		1
-#define XE_QUERY_GT_TYPE_MEDIA		2
+#define DRM_XE_QUERY_GT_TYPE_MAIN		0
+#define DRM_XE_QUERY_GT_TYPE_REMOTE		1
+#define DRM_XE_QUERY_GT_TYPE_MEDIA		2
 	/** @type: GT type: Main, Remote, or Media */
 	__u16 type;
 	/** @gt_id: Unique ID of this GT within the PCI Device */
@@ -432,7 +432,7 @@ struct drm_xe_query_topology_mask {
 	 *   DSS_GEOMETRY    ff ff ff ff 00 00 00 00
 	 * means 32 DSS are available for geometry.
 	 */
-#define XE_TOPO_DSS_GEOMETRY	(1 << 0)
+#define DRM_XE_TOPO_DSS_GEOMETRY	(1 << 0)
 	/*
 	 * To query the mask of Dual Sub Slices (DSS) available for compute
 	 * operations. For example a query response containing the following
@@ -440,7 +440,7 @@ struct drm_xe_query_topology_mask {
 	 *   DSS_COMPUTE    ff ff ff ff 00 00 00 00
 	 * means 32 DSS are available for compute.
 	 */
-#define XE_TOPO_DSS_COMPUTE	(1 << 1)
+#define DRM_XE_TOPO_DSS_COMPUTE		(1 << 1)
 	/*
 	 * To query the mask of Execution Units (EU) available per Dual Sub
 	 * Slices (DSS). For example a query response containing the following
@@ -448,7 +448,7 @@ struct drm_xe_query_topology_mask {
 	 *   EU_PER_DSS    ff ff 00 00 00 00 00 00
 	 * means each DSS has 16 EU.
 	 */
-#define XE_TOPO_EU_PER_DSS	(1 << 2)
+#define DRM_XE_TOPO_EU_PER_DSS		(1 << 2)
 	/** @type: type of mask */
 	__u16 type;
 
@@ -584,8 +584,8 @@ struct drm_xe_gem_create {
 	 */
 	__u64 size;
 
-#define XE_GEM_CREATE_FLAG_DEFER_BACKING	(0x1 << 24)
-#define XE_GEM_CREATE_FLAG_SCANOUT		(0x1 << 25)
+#define DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING		(0x1 << 24)
+#define DRM_XE_GEM_CREATE_FLAG_SCANOUT			(0x1 << 25)
 /*
  * When using VRAM as a possible placement, ensure that the corresponding VRAM
  * allocation will always use the CPU accessible part of VRAM. This is important
@@ -601,7 +601,7 @@ struct drm_xe_gem_create {
  * display surfaces, therefore the kernel requires setting this flag for such
  * objects, otherwise an error is thrown on small-bar systems.
  */
-#define XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM	(0x1 << 26)
+#define DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM	(0x1 << 26)
 	/**
 	 * @flags: Flags, currently a mask of memory instances of where BO can
 	 * be placed
@@ -668,14 +668,14 @@ struct drm_xe_ext_set_property {
 };
 
 struct drm_xe_vm_create {
-#define XE_VM_EXTENSION_SET_PROPERTY	0
+#define DRM_XE_VM_EXTENSION_SET_PROPERTY	0
 	/** @extensions: Pointer to the first extension struct, if any */
 	__u64 extensions;
 
-#define DRM_XE_VM_CREATE_SCRATCH_PAGE	(0x1 << 0)
-#define DRM_XE_VM_CREATE_COMPUTE_MODE	(0x1 << 1)
-#define DRM_XE_VM_CREATE_ASYNC_DEFAULT	(0x1 << 2)
-#define DRM_XE_VM_CREATE_FAULT_MODE	(0x1 << 3)
+#define DRM_XE_VM_CREATE_SCRATCH_PAGE		(0x1 << 0)
+#define DRM_XE_VM_CREATE_COMPUTE_MODE		(0x1 << 1)
+#define DRM_XE_VM_CREATE_ASYNC_DEFAULT		(0x1 << 2)
+#define DRM_XE_VM_CREATE_FAULT_MODE		(0x1 << 3)
 	/** @flags: Flags */
 	__u32 flags;
 
@@ -734,29 +734,29 @@ struct drm_xe_vm_bind_op {
 	 */
 	__u64 tile_mask;
 
-#define XE_VM_BIND_OP_MAP		0x0
-#define XE_VM_BIND_OP_UNMAP		0x1
-#define XE_VM_BIND_OP_MAP_USERPTR	0x2
-#define XE_VM_BIND_OP_UNMAP_ALL		0x3
-#define XE_VM_BIND_OP_PREFETCH		0x4
+#define DRM_XE_VM_BIND_OP_MAP		0x0
+#define DRM_XE_VM_BIND_OP_UNMAP		0x1
+#define DRM_XE_VM_BIND_OP_MAP_USERPTR	0x2
+#define DRM_XE_VM_BIND_OP_UNMAP_ALL	0x3
+#define DRM_XE_VM_BIND_OP_PREFETCH	0x4
 	/** @op: Bind operation to perform */
 	__u32 op;
 
-#define XE_VM_BIND_FLAG_READONLY	(0x1 << 0)
-#define XE_VM_BIND_FLAG_ASYNC		(0x1 << 1)
+#define DRM_XE_VM_BIND_FLAG_READONLY	(0x1 << 0)
+#define DRM_XE_VM_BIND_FLAG_ASYNC	(0x1 << 1)
 	/*
 	 * Valid on a faulting VM only, do the MAP operation immediately rather
 	 * than deferring the MAP to the page fault handler.
 	 */
-#define XE_VM_BIND_FLAG_IMMEDIATE	(0x1 << 2)
+#define DRM_XE_VM_BIND_FLAG_IMMEDIATE	(0x1 << 2)
 	/*
 	 * When the NULL flag is set, the page tables are setup with a special
 	 * bit which indicates writes are dropped and all reads return zero.  In
-	 * the future, the NULL flags will only be valid for XE_VM_BIND_OP_MAP
+	 * the future, the NULL flags will only be valid for DRM_XE_VM_BIND_OP_MAP
 	 * operations, the BO handle MBZ, and the BO offset MBZ. This flag is
 	 * intended to implement VK sparse bindings.
 	 */
-#define XE_VM_BIND_FLAG_NULL		(0x1 << 3)
+#define DRM_XE_VM_BIND_FLAG_NULL	(0x1 << 3)
 	/** @flags: Bind flags */
 	__u32 flags;
 
@@ -823,14 +823,14 @@ struct drm_xe_exec_queue_set_property {
 	/** @exec_queue_id: Exec queue ID */
 	__u32 exec_queue_id;
 
-#define XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY		0
-#define XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE		1
-#define XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT	2
-#define XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE		3
-#define XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT		4
-#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER		5
-#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY		6
-#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY	7
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY			0
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE		1
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT	2
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE		3
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT		4
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER		5
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY		6
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY		7
 	/** @property: property to set */
 	__u32 property;
 
@@ -842,7 +842,7 @@ struct drm_xe_exec_queue_set_property {
 };
 
 struct drm_xe_exec_queue_create {
-#define XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY               0
+#define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY               0
 	/** @extensions: Pointer to the first extension struct, if any */
 	__u64 extensions;
 
@@ -881,7 +881,7 @@ struct drm_xe_exec_queue_get_property {
 	/** @exec_queue_id: Exec queue ID */
 	__u32 exec_queue_id;
 
-#define XE_EXEC_QUEUE_GET_PROPERTY_BAN			0
+#define DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN	0
 	/** @property: property to get */
 	__u32 property;
 
@@ -1070,8 +1070,8 @@ struct drm_xe_vm_madvise {
 	 * For DRM_XE_VM_MADVISE_PREFERRED_MEM_CLASS usage, see enum
 	 * drm_xe_memory_class.
 	 */
-#define DRM_XE_VM_MADVISE_PREFERRED_MEM_CLASS	0
-#define DRM_XE_VM_MADVISE_PREFERRED_GT		1
+#define DRM_XE_VM_MADVISE_PREFERRED_MEM_CLASS		0
+#define DRM_XE_VM_MADVISE_PREFERRED_GT			1
 	/*
 	 * In this case lower 32 bits are mem class, upper 32 are GT.
 	 * Combination provides a single IOCTL plus migrate VMA to preferred
@@ -1082,25 +1082,25 @@ struct drm_xe_vm_madvise {
 	 * The CPU will do atomic memory operations to this VMA. Must be set on
 	 * some devices for atomics to behave correctly.
 	 */
-#define DRM_XE_VM_MADVISE_CPU_ATOMIC		3
+#define DRM_XE_VM_MADVISE_CPU_ATOMIC			3
 	/*
 	 * The device will do atomic memory operations to this VMA. Must be set
 	 * on some devices for atomics to behave correctly.
 	 */
-#define DRM_XE_VM_MADVISE_DEVICE_ATOMIC		4
+#define DRM_XE_VM_MADVISE_DEVICE_ATOMIC			4
 	/*
 	 * Priority WRT to eviction (moving from preferred memory location due
 	 * to memory pressure). The lower the priority, the more likely to be
 	 * evicted.
 	 */
-#define DRM_XE_VM_MADVISE_PRIORITY		5
-#define		DRM_XE_VMA_PRIORITY_LOW		0
+#define DRM_XE_VM_MADVISE_PRIORITY			5
+#define		DRM_XE_VMA_PRIORITY_LOW			0
 		/* Default */
-#define		DRM_XE_VMA_PRIORITY_NORMAL	1
+#define		DRM_XE_VMA_PRIORITY_NORMAL		1
 		/* Must be user with elevated privileges */
-#define		DRM_XE_VMA_PRIORITY_HIGH	2
+#define		DRM_XE_VMA_PRIORITY_HIGH		2
 	/* Pin the VMA in memory, must be user with elevated privileges */
-#define DRM_XE_VM_MADVISE_PIN			6
+#define DRM_XE_VM_MADVISE_PIN				6
 	/** @property: property to set */
 	__u32 property;
 
@@ -1121,7 +1121,7 @@ struct drm_xe_vm_madvise {
  * in 'struct perf_event_attr' as part of perf_event_open syscall to read a
  * particular event.
  *
- * For example to open the XE_PMU_INTERRUPTS(0):
+ * For example to open the DRM_XE_PMU_INTERRUPTS(0):
  *
  * .. code-block:: C
  *
@@ -1135,7 +1135,7 @@ struct drm_xe_vm_madvise {
  *	attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED;
  *	attr.use_clockid = 1;
  *	attr.clockid = CLOCK_MONOTONIC;
- *	attr.config = XE_PMU_INTERRUPTS(0);
+ *	attr.config = DRM_XE_PMU_INTERRUPTS(0);
  *
  *	fd = syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
  */
@@ -1148,11 +1148,11 @@ struct drm_xe_vm_madvise {
 #define ___XE_PMU_OTHER(gt, x) \
 	(((__u64)(x)) | ((__u64)(gt) << __XE_PMU_GT_SHIFT))
 
-#define XE_PMU_INTERRUPTS(gt)			___XE_PMU_OTHER(gt, 0)
-#define XE_PMU_RENDER_GROUP_BUSY(gt)		___XE_PMU_OTHER(gt, 1)
-#define XE_PMU_COPY_GROUP_BUSY(gt)		___XE_PMU_OTHER(gt, 2)
-#define XE_PMU_MEDIA_GROUP_BUSY(gt)		___XE_PMU_OTHER(gt, 3)
-#define XE_PMU_ANY_ENGINE_GROUP_BUSY(gt)	___XE_PMU_OTHER(gt, 4)
+#define DRM_XE_PMU_INTERRUPTS(gt)		___XE_PMU_OTHER(gt, 0)
+#define DRM_XE_PMU_RENDER_GROUP_BUSY(gt)	___XE_PMU_OTHER(gt, 1)
+#define DRM_XE_PMU_COPY_GROUP_BUSY(gt)		___XE_PMU_OTHER(gt, 2)
+#define DRM_XE_PMU_MEDIA_GROUP_BUSY(gt)		___XE_PMU_OTHER(gt, 3)
+#define DRM_XE_PMU_ANY_ENGINE_GROUP_BUSY(gt)	___XE_PMU_OTHER(gt, 4)
 
 #if defined(__cplusplus)
 }
diff --git a/lib/intel_batchbuffer.c b/lib/intel_batchbuffer.c
index c32d04302..eb47ede50 100644
--- a/lib/intel_batchbuffer.c
+++ b/lib/intel_batchbuffer.c
@@ -1286,7 +1286,7 @@ static struct drm_xe_vm_bind_op *xe_alloc_bind_ops(struct intel_bb *ibb,
 {
 	struct drm_i915_gem_exec_object2 **objects = ibb->objects;
 	struct drm_xe_vm_bind_op *bind_ops, *ops;
-	bool set_obj = (op & 0xffff) == XE_VM_BIND_OP_MAP;
+	bool set_obj = (op & 0xffff) == DRM_XE_VM_BIND_OP_MAP;
 
 	bind_ops = calloc(ibb->num_objects, sizeof(*bind_ops));
 	igt_assert(bind_ops);
@@ -1325,8 +1325,8 @@ static void __unbind_xe_objects(struct intel_bb *ibb)
 
 	if (ibb->num_objects > 1) {
 		struct drm_xe_vm_bind_op *bind_ops;
-		uint32_t op = XE_VM_BIND_OP_UNMAP;
-		uint32_t flags = XE_VM_BIND_FLAG_ASYNC;
+		uint32_t op = DRM_XE_VM_BIND_OP_UNMAP;
+		uint32_t flags = DRM_XE_VM_BIND_FLAG_ASYNC;
 
 		bind_ops = xe_alloc_bind_ops(ibb, op, flags, 0);
 		xe_vm_bind_array(ibb->fd, ibb->vm_id, 0, bind_ops,
@@ -2357,7 +2357,7 @@ __xe_bb_exec(struct intel_bb *ibb, uint64_t flags, bool sync)
 
 	syncs[0].handle = syncobj_create(ibb->fd, 0);
 	if (ibb->num_objects > 1) {
-		bind_ops = xe_alloc_bind_ops(ibb, XE_VM_BIND_OP_MAP, XE_VM_BIND_FLAG_ASYNC, 0);
+		bind_ops = xe_alloc_bind_ops(ibb, DRM_XE_VM_BIND_OP_MAP, DRM_XE_VM_BIND_FLAG_ASYNC, 0);
 		xe_vm_bind_array(ibb->fd, ibb->vm_id, 0, bind_ops,
 				 ibb->num_objects, syncs, 1);
 		free(bind_ops);
diff --git a/lib/intel_blt.c b/lib/intel_blt.c
index 5b682c2b6..2edcd72f3 100644
--- a/lib/intel_blt.c
+++ b/lib/intel_blt.c
@@ -1804,7 +1804,7 @@ blt_create_object(const struct blt_copy_data *blt, uint32_t region,
 		uint64_t flags = region;
 
 		if (create_mapping && region != system_memory(blt->fd))
-			flags |= XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
+			flags |= DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
 
 		size = ALIGN(size, xe_get_default_alignment(blt->fd));
 		handle = xe_bo_create_flags(blt->fd, 0, size, flags);
diff --git a/lib/xe/xe_ioctl.c b/lib/xe/xe_ioctl.c
index 895e3bd4e..21a85225f 100644
--- a/lib/xe/xe_ioctl.c
+++ b/lib/xe/xe_ioctl.c
@@ -67,7 +67,7 @@ void xe_vm_unbind_all_async(int fd, uint32_t vm, uint32_t exec_queue,
 			    uint32_t num_syncs)
 {
 	__xe_vm_bind_assert(fd, vm, exec_queue, bo, 0, 0, 0,
-			    XE_VM_BIND_OP_UNMAP_ALL, XE_VM_BIND_FLAG_ASYNC,
+			    DRM_XE_VM_BIND_OP_UNMAP_ALL, DRM_XE_VM_BIND_FLAG_ASYNC,
 			    sync, num_syncs, 0, 0);
 }
 
@@ -130,7 +130,7 @@ void xe_vm_bind(int fd, uint32_t vm, uint32_t bo, uint64_t offset,
 		struct drm_xe_sync *sync, uint32_t num_syncs)
 {
 	__xe_vm_bind_assert(fd, vm, 0, bo, offset, addr, size,
-			    XE_VM_BIND_OP_MAP, 0, sync, num_syncs, 0, 0);
+			    DRM_XE_VM_BIND_OP_MAP, 0, sync, num_syncs, 0, 0);
 }
 
 void xe_vm_unbind(int fd, uint32_t vm, uint64_t offset,
@@ -138,7 +138,7 @@ void xe_vm_unbind(int fd, uint32_t vm, uint64_t offset,
 		  struct drm_xe_sync *sync, uint32_t num_syncs)
 {
 	__xe_vm_bind_assert(fd, vm, 0, 0, offset, addr, size,
-			    XE_VM_BIND_OP_UNMAP, 0, sync, num_syncs, 0, 0);
+			    DRM_XE_VM_BIND_OP_UNMAP, 0, sync, num_syncs, 0, 0);
 }
 
 void xe_vm_prefetch_async(int fd, uint32_t vm, uint32_t exec_queue, uint64_t offset,
@@ -147,7 +147,7 @@ void xe_vm_prefetch_async(int fd, uint32_t vm, uint32_t exec_queue, uint64_t off
 			  uint32_t region)
 {
 	__xe_vm_bind_assert(fd, vm, exec_queue, 0, offset, addr, size,
-			    XE_VM_BIND_OP_PREFETCH, XE_VM_BIND_FLAG_ASYNC,
+			    DRM_XE_VM_BIND_OP_PREFETCH, DRM_XE_VM_BIND_FLAG_ASYNC,
 			    sync, num_syncs, region, 0);
 }
 
@@ -156,7 +156,7 @@ void xe_vm_bind_async(int fd, uint32_t vm, uint32_t exec_queue, uint32_t bo,
 		      struct drm_xe_sync *sync, uint32_t num_syncs)
 {
 	__xe_vm_bind_assert(fd, vm, exec_queue, bo, offset, addr, size,
-			    XE_VM_BIND_OP_MAP, XE_VM_BIND_FLAG_ASYNC, sync,
+			    DRM_XE_VM_BIND_OP_MAP, DRM_XE_VM_BIND_FLAG_ASYNC, sync,
 			    num_syncs, 0, 0);
 }
 
@@ -166,7 +166,7 @@ void xe_vm_bind_async_flags(int fd, uint32_t vm, uint32_t exec_queue, uint32_t b
 			    uint32_t flags)
 {
 	__xe_vm_bind_assert(fd, vm, exec_queue, bo, offset, addr, size,
-			    XE_VM_BIND_OP_MAP, XE_VM_BIND_FLAG_ASYNC | flags,
+			    DRM_XE_VM_BIND_OP_MAP, DRM_XE_VM_BIND_FLAG_ASYNC | flags,
 			    sync, num_syncs, 0, 0);
 }
 
@@ -175,7 +175,7 @@ void xe_vm_bind_userptr_async(int fd, uint32_t vm, uint32_t exec_queue,
 			      struct drm_xe_sync *sync, uint32_t num_syncs)
 {
 	__xe_vm_bind_assert(fd, vm, exec_queue, 0, userptr, addr, size,
-			    XE_VM_BIND_OP_MAP_USERPTR, XE_VM_BIND_FLAG_ASYNC,
+			    DRM_XE_VM_BIND_OP_MAP_USERPTR, DRM_XE_VM_BIND_FLAG_ASYNC,
 			    sync, num_syncs, 0, 0);
 }
 
@@ -185,7 +185,7 @@ void xe_vm_bind_userptr_async_flags(int fd, uint32_t vm, uint32_t exec_queue,
 				    uint32_t num_syncs, uint32_t flags)
 {
 	__xe_vm_bind_assert(fd, vm, exec_queue, 0, userptr, addr, size,
-			    XE_VM_BIND_OP_MAP_USERPTR, XE_VM_BIND_FLAG_ASYNC |
+			    DRM_XE_VM_BIND_OP_MAP_USERPTR, DRM_XE_VM_BIND_FLAG_ASYNC |
 			    flags, sync, num_syncs, 0, 0);
 }
 
@@ -194,7 +194,7 @@ void xe_vm_unbind_async(int fd, uint32_t vm, uint32_t exec_queue,
 			struct drm_xe_sync *sync, uint32_t num_syncs)
 {
 	__xe_vm_bind_assert(fd, vm, exec_queue, 0, offset, addr, size,
-			    XE_VM_BIND_OP_UNMAP, XE_VM_BIND_FLAG_ASYNC, sync,
+			    DRM_XE_VM_BIND_OP_UNMAP, DRM_XE_VM_BIND_FLAG_ASYNC, sync,
 			    num_syncs, 0, 0);
 }
 
@@ -208,13 +208,13 @@ static void __xe_vm_bind_sync(int fd, uint32_t vm, uint32_t bo, uint64_t offset,
 void xe_vm_bind_sync(int fd, uint32_t vm, uint32_t bo, uint64_t offset,
 		     uint64_t addr, uint64_t size)
 {
-	__xe_vm_bind_sync(fd, vm, bo, offset, addr, size, XE_VM_BIND_OP_MAP);
+	__xe_vm_bind_sync(fd, vm, bo, offset, addr, size, DRM_XE_VM_BIND_OP_MAP);
 }
 
 void xe_vm_unbind_sync(int fd, uint32_t vm, uint64_t offset,
 		       uint64_t addr, uint64_t size)
 {
-	__xe_vm_bind_sync(fd, vm, 0, offset, addr, size, XE_VM_BIND_OP_UNMAP);
+	__xe_vm_bind_sync(fd, vm, 0, offset, addr, size, DRM_XE_VM_BIND_OP_UNMAP);
 }
 
 void xe_vm_destroy(int fd, uint32_t vm)
diff --git a/lib/xe/xe_query.c b/lib/xe/xe_query.c
index cd0e29dcc..bb54f0fd6 100644
--- a/lib/xe/xe_query.c
+++ b/lib/xe/xe_query.c
@@ -247,9 +247,9 @@ struct xe_device *xe_device_get(int fd)
 
 	xe_dev->fd = fd;
 	xe_dev->config = xe_query_config_new(fd);
-	xe_dev->number_gt = xe_dev->config->info[XE_QUERY_CONFIG_GT_COUNT];
-	xe_dev->va_bits = xe_dev->config->info[XE_QUERY_CONFIG_VA_BITS];
-	xe_dev->dev_id = xe_dev->config->info[XE_QUERY_CONFIG_REV_AND_DEVICE_ID] & 0xffff;
+	xe_dev->number_gt = xe_dev->config->info[DRM_XE_QUERY_CONFIG_GT_COUNT];
+	xe_dev->va_bits = xe_dev->config->info[DRM_XE_QUERY_CONFIG_VA_BITS];
+	xe_dev->dev_id = xe_dev->config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] & 0xffff;
 	xe_dev->gt_list = xe_query_gt_list_new(fd);
 	xe_dev->memory_regions = __memory_regions(xe_dev->gt_list);
 	xe_dev->hw_engines = xe_query_engines_new(fd, &xe_dev->number_hw_engines);
@@ -413,7 +413,7 @@ static uint64_t __xe_visible_vram_size(int fd, int gt)
  * @gt: gt id
  *
  * Returns vram memory bitmask for xe device @fd and @gt id, with
- * XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM also set, to ensure that CPU access is
+ * DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM also set, to ensure that CPU access is
  * possible.
  */
 uint64_t visible_vram_memory(int fd, int gt)
@@ -423,7 +423,7 @@ uint64_t visible_vram_memory(int fd, int gt)
 	 * has landed.
 	 */
 	if (__xe_visible_vram_size(fd, gt))
-		return vram_memory(fd, gt) | XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
+		return vram_memory(fd, gt) | DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
 	else
 		return vram_memory(fd, gt); /* older kernel */
 }
@@ -448,7 +448,7 @@ uint64_t vram_if_possible(int fd, int gt)
  *
  * Returns vram memory bitmask for xe device @fd and @gt id or system memory if
  * there's no vram memory available for @gt. Also attaches the
- * XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM to ensure that CPU access is possible
+ * DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM to ensure that CPU access is possible
  * when using vram.
  */
 uint64_t visible_vram_if_possible(int fd, int gt)
@@ -462,7 +462,7 @@ uint64_t visible_vram_if_possible(int fd, int gt)
 	 * has landed.
 	 */
 	if (__xe_visible_vram_size(fd, gt))
-		return vram ? vram | XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM : system_memory;
+		return vram ? vram | DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM : system_memory;
 	else
 		return vram ? vram : system_memory; /* older kernel */
 }
diff --git a/lib/xe/xe_query.h b/lib/xe/xe_query.h
index 7c0849bc1..c63b97044 100644
--- a/lib/xe/xe_query.h
+++ b/lib/xe/xe_query.h
@@ -74,8 +74,8 @@ struct xe_device {
 	for (uint64_t __i = 0; __i < igt_fls(__memreg); __i++) \
 		for_if(__r = (__memreg & (1ull << __i)))
 
-#define XE_IS_CLASS_SYSMEM(__region) ((__region)->mem_class == XE_MEM_REGION_CLASS_SYSMEM)
-#define XE_IS_CLASS_VRAM(__region) ((__region)->mem_class == XE_MEM_REGION_CLASS_VRAM)
+#define XE_IS_CLASS_SYSMEM(__region) ((__region)->mem_class == DRM_XE_MEM_REGION_CLASS_SYSMEM)
+#define XE_IS_CLASS_VRAM(__region) ((__region)->mem_class == DRM_XE_MEM_REGION_CLASS_VRAM)
 
 unsigned int xe_number_gt(int fd);
 uint64_t all_memory_regions(int fd);
diff --git a/lib/xe/xe_util.c b/lib/xe/xe_util.c
index 5fa4d4610..780125f92 100644
--- a/lib/xe/xe_util.c
+++ b/lib/xe/xe_util.c
@@ -134,12 +134,12 @@ static struct drm_xe_vm_bind_op *xe_alloc_bind_ops(struct igt_list_head *obj_lis
 		ops = &bind_ops[i];
 
 		if (obj->bind_op == XE_OBJECT_BIND) {
-			op = XE_VM_BIND_OP_MAP;
-			flags = XE_VM_BIND_FLAG_ASYNC;
+			op = DRM_XE_VM_BIND_OP_MAP;
+			flags = DRM_XE_VM_BIND_FLAG_ASYNC;
 			ops->obj = obj->handle;
 		} else {
-			op = XE_VM_BIND_OP_UNMAP;
-			flags = XE_VM_BIND_FLAG_ASYNC;
+			op = DRM_XE_VM_BIND_OP_UNMAP;
+			flags = DRM_XE_VM_BIND_FLAG_ASYNC;
 		}
 
 		ops->op = op;
@@ -211,7 +211,7 @@ void xe_bind_unbind_async(int xe, uint32_t vm, uint32_t bind_engine,
 		  tabsyncs[0].handle, tabsyncs[1].handle);
 
 	if (num_binds == 1) {
-		if ((bind_ops[0].op & 0xffff) == XE_VM_BIND_OP_MAP)
+		if ((bind_ops[0].op & 0xffff) == DRM_XE_VM_BIND_OP_MAP)
 			xe_vm_bind_async(xe, vm, bind_engine, bind_ops[0].obj, 0,
 					 bind_ops[0].addr, bind_ops[0].range,
 					 syncs, num_syncs);
diff --git a/lib/xe/xe_util.h b/lib/xe/xe_util.h
index e97d236b8..21b312071 100644
--- a/lib/xe/xe_util.h
+++ b/lib/xe/xe_util.h
@@ -13,9 +13,9 @@
 #include <xe_drm.h>
 
 #define XE_IS_SYSMEM_MEMORY_REGION(fd, region) \
-	(xe_region_class(fd, region) == XE_MEM_REGION_CLASS_SYSMEM)
+	(xe_region_class(fd, region) == DRM_XE_MEM_REGION_CLASS_SYSMEM)
 #define XE_IS_VRAM_MEMORY_REGION(fd, region) \
-	(xe_region_class(fd, region) == XE_MEM_REGION_CLASS_VRAM)
+	(xe_region_class(fd, region) == DRM_XE_MEM_REGION_CLASS_VRAM)
 
 struct igt_collection *
 __xe_get_memory_region_set(int xe, uint32_t *mem_regions_type, int num_regions);
diff --git a/tests/intel/xe_access_counter.c b/tests/intel/xe_access_counter.c
index b738ebc86..8966bfc9c 100644
--- a/tests/intel/xe_access_counter.c
+++ b/tests/intel/xe_access_counter.c
@@ -47,8 +47,8 @@ igt_main
 
 		struct drm_xe_ext_set_property ext = {
 			.base.next_extension = 0,
-			.base.name = XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
-			.property = XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY,
+			.base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
+			.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY,
 			.value = SIZE_64M + 1,
 		};
 
diff --git a/tests/intel/xe_ccs.c b/tests/intel/xe_ccs.c
index 876c239e4..bb844b641 100644
--- a/tests/intel/xe_ccs.c
+++ b/tests/intel/xe_ccs.c
@@ -634,8 +634,8 @@ igt_main_args("bf:pst:W:H:", NULL, help_str, opt_handler, NULL)
 		xe_device_get(xe);
 
 		set = xe_get_memory_region_set(xe,
-					       XE_MEM_REGION_CLASS_SYSMEM,
-					       XE_MEM_REGION_CLASS_VRAM);
+					       DRM_XE_MEM_REGION_CLASS_SYSMEM,
+					       DRM_XE_MEM_REGION_CLASS_VRAM);
 	}
 
 	igt_describe("Check block-copy uncompressed blit");
diff --git a/tests/intel/xe_copy_basic.c b/tests/intel/xe_copy_basic.c
index fe78ac50f..1dafbb276 100644
--- a/tests/intel/xe_copy_basic.c
+++ b/tests/intel/xe_copy_basic.c
@@ -164,8 +164,8 @@ igt_main
 		fd = drm_open_driver(DRIVER_XE);
 		xe_device_get(fd);
 		set = xe_get_memory_region_set(fd,
-					       XE_MEM_REGION_CLASS_SYSMEM,
-					       XE_MEM_REGION_CLASS_VRAM);
+					       DRM_XE_MEM_REGION_CLASS_SYSMEM,
+					       DRM_XE_MEM_REGION_CLASS_VRAM);
 	}
 
 	for (int i = 0; i < ARRAY_SIZE(size); i++) {
diff --git a/tests/intel/xe_debugfs.c b/tests/intel/xe_debugfs.c
index e5bbb364c..9ffc6f6d8 100644
--- a/tests/intel/xe_debugfs.c
+++ b/tests/intel/xe_debugfs.c
@@ -91,20 +91,20 @@ test_base(int fd, struct drm_xe_query_config *config)
 
 	igt_assert(config);
 	sprintf(reference, "devid 0x%llx",
-			config->info[XE_QUERY_CONFIG_REV_AND_DEVICE_ID] & 0xffff);
+			config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] & 0xffff);
 	igt_assert(igt_debugfs_search(fd, "info", reference));
 
 	sprintf(reference, "revid %lld",
-			config->info[XE_QUERY_CONFIG_REV_AND_DEVICE_ID] >> 16);
+			config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] >> 16);
 	igt_assert(igt_debugfs_search(fd, "info", reference));
 
-	sprintf(reference, "is_dgfx %s", config->info[XE_QUERY_CONFIG_FLAGS] &
-		XE_QUERY_CONFIG_FLAGS_HAS_VRAM ? "yes" : "no");
+	sprintf(reference, "is_dgfx %s", config->info[DRM_XE_QUERY_CONFIG_FLAGS] &
+		DRM_XE_QUERY_CONFIG_FLAGS_HAS_VRAM ? "yes" : "no");
 
 	igt_assert(igt_debugfs_search(fd, "info", reference));
 
 	if (!AT_LEAST_GEN(devid, 20)) {
-		switch (config->info[XE_QUERY_CONFIG_VA_BITS]) {
+		switch (config->info[DRM_XE_QUERY_CONFIG_VA_BITS]) {
 		case 48:
 			val = 3;
 			break;
@@ -121,13 +121,13 @@ test_base(int fd, struct drm_xe_query_config *config)
 	igt_assert(igt_debugfs_search(fd, "info", reference));
 
 	igt_assert(igt_debugfs_exists(fd, "gt0", O_RDONLY));
-	if (config->info[XE_QUERY_CONFIG_GT_COUNT] > 1)
+	if (config->info[DRM_XE_QUERY_CONFIG_GT_COUNT] > 1)
 		igt_assert(igt_debugfs_exists(fd, "gt1", O_RDONLY));
 
 	igt_assert(igt_debugfs_exists(fd, "gtt_mm", O_RDONLY));
 	igt_debugfs_dump(fd, "gtt_mm");
 
-	if (config->info[XE_QUERY_CONFIG_FLAGS] & XE_QUERY_CONFIG_FLAGS_HAS_VRAM) {
+	if (config->info[DRM_XE_QUERY_CONFIG_FLAGS] & DRM_XE_QUERY_CONFIG_FLAGS_HAS_VRAM) {
 		igt_assert(igt_debugfs_exists(fd, "vram0_mm", O_RDONLY));
 		igt_debugfs_dump(fd, "vram0_mm");
 	}
diff --git a/tests/intel/xe_exec_basic.c b/tests/intel/xe_exec_basic.c
index 8dbce524d..232ddde8e 100644
--- a/tests/intel/xe_exec_basic.c
+++ b/tests/intel/xe_exec_basic.c
@@ -138,7 +138,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
 
 		bo_flags = visible_vram_if_possible(fd, eci->gt_id);
 		if (flags & DEFER_ALLOC)
-			bo_flags |= XE_GEM_CREATE_FLAG_DEFER_BACKING;
+			bo_flags |= DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING;
 
 		bo = xe_bo_create_flags(fd, n_vm == 1 ? vm[0] : 0,
 					bo_size, bo_flags);
@@ -172,9 +172,9 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
 		if (flags & SPARSE)
 			__xe_vm_bind_assert(fd, vm[i], bind_exec_queues[i],
 					    0, 0, sparse_addr[i], bo_size,
-					    XE_VM_BIND_OP_MAP,
-					    XE_VM_BIND_FLAG_ASYNC |
-					    XE_VM_BIND_FLAG_NULL, sync,
+					    DRM_XE_VM_BIND_OP_MAP,
+					    DRM_XE_VM_BIND_FLAG_ASYNC |
+					    DRM_XE_VM_BIND_FLAG_NULL, sync,
 					    1, 0, 0);
 	}
 
diff --git a/tests/intel/xe_exec_fault_mode.c b/tests/intel/xe_exec_fault_mode.c
index 92d8690a1..92359d1a7 100644
--- a/tests/intel/xe_exec_fault_mode.c
+++ b/tests/intel/xe_exec_fault_mode.c
@@ -175,12 +175,12 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
 		if (bo)
 			xe_vm_bind_async_flags(fd, vm, bind_exec_queues[0], bo, 0,
 					       addr, bo_size, sync, 1,
-					       XE_VM_BIND_FLAG_IMMEDIATE);
+					       DRM_XE_VM_BIND_FLAG_IMMEDIATE);
 		else
 			xe_vm_bind_userptr_async_flags(fd, vm, bind_exec_queues[0],
 						       to_user_pointer(data),
 						       addr, bo_size, sync, 1,
-						       XE_VM_BIND_FLAG_IMMEDIATE);
+						       DRM_XE_VM_BIND_FLAG_IMMEDIATE);
 	} else {
 		if (bo)
 			xe_vm_bind_async(fd, vm, bind_exec_queues[0], bo, 0, addr,
diff --git a/tests/intel/xe_exec_reset.c b/tests/intel/xe_exec_reset.c
index 44248776b..39647b736 100644
--- a/tests/intel/xe_exec_reset.c
+++ b/tests/intel/xe_exec_reset.c
@@ -187,14 +187,14 @@ test_balancer(int fd, int gt, int class, int n_exec_queues, int n_execs,
 	for (i = 0; i < n_exec_queues; i++) {
 		struct drm_xe_ext_set_property job_timeout = {
 			.base.next_extension = 0,
-			.base.name = XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
-			.property = XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT,
+			.base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
+			.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT,
 			.value = 50,
 		};
 		struct drm_xe_ext_set_property preempt_timeout = {
 			.base.next_extension = 0,
-			.base.name = XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
-			.property = XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT,
+			.base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
+			.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT,
 			.value = 1000,
 		};
 		struct drm_xe_exec_queue_create create = {
@@ -374,14 +374,14 @@ test_legacy_mode(int fd, struct drm_xe_engine_class_instance *eci,
 	for (i = 0; i < n_exec_queues; i++) {
 		struct drm_xe_ext_set_property job_timeout = {
 			.base.next_extension = 0,
-			.base.name = XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
-			.property = XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT,
+			.base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
+			.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT,
 			.value = 50,
 		};
 		struct drm_xe_ext_set_property preempt_timeout = {
 			.base.next_extension = 0,
-			.base.name = XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
-			.property = XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT,
+			.base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
+			.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT,
 			.value = 1000,
 		};
 		uint64_t ext = 0;
@@ -542,8 +542,8 @@ test_compute_mode(int fd, struct drm_xe_engine_class_instance *eci,
 	for (i = 0; i < n_exec_queues; i++) {
 		struct drm_xe_ext_set_property preempt_timeout = {
 			.base.next_extension = 0,
-			.base.name = XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
-			.property = XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT,
+			.base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
+			.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT,
 			.value = 1000,
 		};
 		uint64_t ext = 0;
diff --git a/tests/intel/xe_exec_threads.c b/tests/intel/xe_exec_threads.c
index a0c96d08d..b814dcdf5 100644
--- a/tests/intel/xe_exec_threads.c
+++ b/tests/intel/xe_exec_threads.c
@@ -520,8 +520,8 @@ test_legacy_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
 	for (i = 0; i < n_exec_queues; i++) {
 		struct drm_xe_ext_set_property preempt_timeout = {
 			.base.next_extension = 0,
-			.base.name = XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
-			.property = XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT,
+			.base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
+			.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT,
 			.value = 1000,
 		};
 		uint64_t ext = to_user_pointer(&preempt_timeout);
diff --git a/tests/intel/xe_exercise_blt.c b/tests/intel/xe_exercise_blt.c
index 2f349b16d..df774130f 100644
--- a/tests/intel/xe_exercise_blt.c
+++ b/tests/intel/xe_exercise_blt.c
@@ -358,8 +358,8 @@ igt_main_args("b:pst:W:H:", NULL, help_str, opt_handler, NULL)
 		xe_device_get(xe);
 
 		set = xe_get_memory_region_set(xe,
-					       XE_MEM_REGION_CLASS_SYSMEM,
-					       XE_MEM_REGION_CLASS_VRAM);
+					       DRM_XE_MEM_REGION_CLASS_SYSMEM,
+					       DRM_XE_MEM_REGION_CLASS_VRAM);
 	}
 
 	igt_describe("Check fast-copy blit");
diff --git a/tests/intel/xe_perf_pmu.c b/tests/intel/xe_perf_pmu.c
index 0b25a859f..a0dd30e50 100644
--- a/tests/intel/xe_perf_pmu.c
+++ b/tests/intel/xe_perf_pmu.c
@@ -51,15 +51,15 @@ static uint64_t engine_group_get_config(int gt, int class)
 
 	switch (class) {
 	case DRM_XE_ENGINE_CLASS_COPY:
-		config = XE_PMU_COPY_GROUP_BUSY(gt);
+		config = DRM_XE_PMU_COPY_GROUP_BUSY(gt);
 		break;
 	case DRM_XE_ENGINE_CLASS_RENDER:
 	case DRM_XE_ENGINE_CLASS_COMPUTE:
-		config = XE_PMU_RENDER_GROUP_BUSY(gt);
+		config = DRM_XE_PMU_RENDER_GROUP_BUSY(gt);
 		break;
 	case DRM_XE_ENGINE_CLASS_VIDEO_DECODE:
 	case DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE:
-		config = XE_PMU_MEDIA_GROUP_BUSY(gt);
+		config = DRM_XE_PMU_MEDIA_GROUP_BUSY(gt);
 		break;
 	}
 
@@ -112,7 +112,7 @@ static void test_any_engine_busyness(int fd, struct drm_xe_engine_class_instance
 	sync[0].handle = syncobj_create(fd, 0);
 	xe_vm_bind_async(fd, vm, 0, bo, 0, addr, bo_size, sync, 1);
 
-	pmu_fd = open_pmu(fd, XE_PMU_ANY_ENGINE_GROUP_BUSY(eci->gt_id));
+	pmu_fd = open_pmu(fd, DRM_XE_PMU_ANY_ENGINE_GROUP_BUSY(eci->gt_id));
 	idle = pmu_read(pmu_fd);
 	igt_assert(!idle);
 
diff --git a/tests/intel/xe_pm.c b/tests/intel/xe_pm.c
index b2976ec84..d07ed4535 100644
--- a/tests/intel/xe_pm.c
+++ b/tests/intel/xe_pm.c
@@ -400,7 +400,7 @@ static void test_vram_d3cold_threshold(device_t device, int sysfs_fd)
 	igt_assert_eq(igt_ioctl(device.fd_xe, DRM_IOCTL_XE_DEVICE_QUERY, &query), 0);
 
 	for (i = 0; i < mem_usage->num_regions; i++) {
-		if (mem_usage->regions[i].mem_class == XE_MEM_REGION_CLASS_VRAM) {
+		if (mem_usage->regions[i].mem_class == DRM_XE_MEM_REGION_CLASS_VRAM) {
 			vram_used_mb +=  (mem_usage->regions[i].used / (1024 * 1024));
 			vram_total_mb += (mem_usage->regions[i].total_size / (1024 * 1024));
 		}
diff --git a/tests/intel/xe_query.c b/tests/intel/xe_query.c
index dc19a9d32..c8d886bfe 100644
--- a/tests/intel/xe_query.c
+++ b/tests/intel/xe_query.c
@@ -163,9 +163,9 @@ void process_hwconfig(void *data, uint32_t len)
 const char *get_topo_name(int value)
 {
 	switch(value) {
-	case XE_TOPO_DSS_GEOMETRY: return "DSS_GEOMETRY";
-	case XE_TOPO_DSS_COMPUTE: return "DSS_COMPUTE";
-	case XE_TOPO_EU_PER_DSS: return "EU_PER_DSS";
+	case DRM_XE_TOPO_DSS_GEOMETRY: return "DSS_GEOMETRY";
+	case DRM_XE_TOPO_DSS_COMPUTE: return "DSS_COMPUTE";
+	case DRM_XE_TOPO_EU_PER_DSS: return "EU_PER_DSS";
 	}
 	return "??";
 }
@@ -221,9 +221,9 @@ test_query_mem_usage(int fd)
 	for (i = 0; i < mem_usage->num_regions; i++) {
 		igt_info("mem region %d: %s\t%#llx / %#llx\n", i,
 			mem_usage->regions[i].mem_class ==
-			XE_MEM_REGION_CLASS_SYSMEM ? "SYSMEM"
+			DRM_XE_MEM_REGION_CLASS_SYSMEM ? "SYSMEM"
 			:mem_usage->regions[i].mem_class ==
-			XE_MEM_REGION_CLASS_VRAM ? "VRAM" : "?",
+			DRM_XE_MEM_REGION_CLASS_VRAM ? "VRAM" : "?",
 			mem_usage->regions[i].used,
 			mem_usage->regions[i].total_size
 		);
@@ -359,27 +359,27 @@ test_query_config(int fd)
 	query.data = to_user_pointer(config);
 	igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query), 0);
 
-	igt_info("XE_QUERY_CONFIG_REV_AND_DEVICE_ID\t%#llx\n",
-		config->info[XE_QUERY_CONFIG_REV_AND_DEVICE_ID]);
+	igt_info("DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID\t%#llx\n",
+		config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID]);
 	igt_info("  REV_ID\t\t\t\t%#llx\n",
-		config->info[XE_QUERY_CONFIG_REV_AND_DEVICE_ID] >> 16);
+		config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] >> 16);
 	igt_info("  DEVICE_ID\t\t\t\t%#llx\n",
-		config->info[XE_QUERY_CONFIG_REV_AND_DEVICE_ID] & 0xffff);
-	igt_info("XE_QUERY_CONFIG_FLAGS\t\t\t%#llx\n",
-		config->info[XE_QUERY_CONFIG_FLAGS]);
-	igt_info("  XE_QUERY_CONFIG_FLAGS_HAS_VRAM\t%s\n",
-		config->info[XE_QUERY_CONFIG_FLAGS] &
-		XE_QUERY_CONFIG_FLAGS_HAS_VRAM ? "ON":"OFF");
-	igt_info("XE_QUERY_CONFIG_MIN_ALIGNMENT\t\t%#llx\n",
-		config->info[XE_QUERY_CONFIG_MIN_ALIGNMENT]);
-	igt_info("XE_QUERY_CONFIG_VA_BITS\t\t\t%llu\n",
-		config->info[XE_QUERY_CONFIG_VA_BITS]);
-	igt_info("XE_QUERY_CONFIG_GT_COUNT\t\t%llu\n",
-		config->info[XE_QUERY_CONFIG_GT_COUNT]);
-	igt_info("XE_QUERY_CONFIG_MEM_REGION_COUNT\t%llu\n",
-		config->info[XE_QUERY_CONFIG_MEM_REGION_COUNT]);
-	igt_info("XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY\t%llu\n",
-		config->info[XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY]);
+		config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] & 0xffff);
+	igt_info("DRM_XE_QUERY_CONFIG_FLAGS\t\t\t%#llx\n",
+		config->info[DRM_XE_QUERY_CONFIG_FLAGS]);
+	igt_info("  DRM_XE_QUERY_CONFIG_FLAGS_HAS_VRAM\t%s\n",
+		config->info[DRM_XE_QUERY_CONFIG_FLAGS] &
+		DRM_XE_QUERY_CONFIG_FLAGS_HAS_VRAM ? "ON":"OFF");
+	igt_info("DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT\t\t%#llx\n",
+		config->info[DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT]);
+	igt_info("DRM_XE_QUERY_CONFIG_VA_BITS\t\t\t%llu\n",
+		config->info[DRM_XE_QUERY_CONFIG_VA_BITS]);
+	igt_info("DRM_XE_QUERY_CONFIG_GT_COUNT\t\t%llu\n",
+		config->info[DRM_XE_QUERY_CONFIG_GT_COUNT]);
+	igt_info("DRM_XE_QUERY_CONFIG_MEM_REGION_COUNT\t%llu\n",
+		config->info[DRM_XE_QUERY_CONFIG_MEM_REGION_COUNT]);
+	igt_info("DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY\t%llu\n",
+		config->info[DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY]);
 	dump_hex_debug(config, query.size);
 
 	free(config);
diff --git a/tests/intel/xe_vm.c b/tests/intel/xe_vm.c
index dd3302337..73dc23771 100644
--- a/tests/intel/xe_vm.c
+++ b/tests/intel/xe_vm.c
@@ -316,7 +316,7 @@ static void userptr_invalid(int fd)
 	vm = xe_vm_create(fd, 0, 0);
 	munmap(data, size);
 	ret = __xe_vm_bind(fd, vm, 0, 0, to_user_pointer(data), 0x40000,
-			   size, XE_VM_BIND_OP_MAP_USERPTR, 0, NULL, 0, 0, 0);
+			   size, DRM_XE_VM_BIND_OP_MAP_USERPTR, 0, NULL, 0, 0, 0);
 	igt_assert(ret == -EFAULT);
 
 	xe_vm_destroy(fd, vm);
@@ -752,8 +752,8 @@ test_bind_array(int fd, struct drm_xe_engine_class_instance *eci, int n_execs,
 		bind_ops[i].range = bo_size;
 		bind_ops[i].addr = addr;
 		bind_ops[i].tile_mask = 0x1 << eci->gt_id;
-		bind_ops[i].op = XE_VM_BIND_OP_MAP;
-		bind_ops[i].flags = XE_VM_BIND_FLAG_ASYNC;
+		bind_ops[i].op = DRM_XE_VM_BIND_OP_MAP;
+		bind_ops[i].flags = DRM_XE_VM_BIND_FLAG_ASYNC;
 		bind_ops[i].region = 0;
 		bind_ops[i].reserved[0] = 0;
 		bind_ops[i].reserved[1] = 0;
@@ -797,8 +797,8 @@ test_bind_array(int fd, struct drm_xe_engine_class_instance *eci, int n_execs,
 
 	for (i = 0; i < n_execs; ++i) {
 		bind_ops[i].obj = 0;
-		bind_ops[i].op = XE_VM_BIND_OP_UNMAP;
-		bind_ops[i].flags = XE_VM_BIND_FLAG_ASYNC;
+		bind_ops[i].op = DRM_XE_VM_BIND_OP_UNMAP;
+		bind_ops[i].flags = DRM_XE_VM_BIND_FLAG_ASYNC;
 	}
 
 	syncobj_reset(fd, &sync[0].handle, 1);
-- 
2.34.1


