[PATCH v3 17/19] drm/xe/uapi: Add UAPI for querying VMA count and memory attributes
Himal Prasad Ghimiray
himal.prasad.ghimiray at intel.com
Tue May 27 16:40:01 UTC 2025
Introduce the DRM_IOCTL_XE_VM_QUERY_VMAS_ATTRS ioctl to allow userspace
to query memory attributes of VMAs within a specified virtual address
range.
If num_vmas == 0 and vector_of_vma_mem_attr == NULL, the ioctl returns
the number of VMAs in the specified range.
If num_vmas > 0 and a valid user pointer is provided in
vector_of_vma_mem_attr, the ioctl fills the buffer with memory
attributes for each VMA.
This two-step interface lets userspace first query the required
buffer size and then retrieve the attributes, as sketched below.
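For illustration, a minimal userspace sketch of the two-step flow
(assumes an open Xe DRM fd and a valid vm_id; not part of this patch):

    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    static int query_vma_attrs(int fd, uint32_t vm_id, uint64_t start,
                               uint64_t range)
    {
            struct drm_xe_vm_query_vmas_attr args = {
                    .vm_id = vm_id,
                    .start = start,
                    .range = range,
            };
            struct drm_xe_vma_mem_attr *attrs;

            /* Pass 1: num_vmas == 0, NULL vector -> kernel returns VMA count */
            if (ioctl(fd, DRM_IOCTL_XE_VM_QUERY_VMAS_ATTRS, &args))
                    return -1;

            attrs = calloc(args.num_vmas, sizeof(*attrs));
            if (!attrs)
                    return -1;

            /* Pass 2: buffer sized from pass 1 -> per-VMA attributes */
            args.vector_of_vma_mem_attr = (uint64_t)(uintptr_t)attrs;
            if (ioctl(fd, DRM_IOCTL_XE_VM_QUERY_VMAS_ATTRS, &args)) {
                    free(attrs);
                    return -1;
            }

            /* args.num_vmas now holds the number of entries written */
            free(attrs);
            return 0;
    }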
v2 (Matthew Brost):
- Use the same ioctl for both the VMA-count and the attribute query
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
---
drivers/gpu/drm/xe/xe_device.c | 1 +
drivers/gpu/drm/xe/xe_vm.c | 87 ++++++++++++++++++++++++++++++++++
drivers/gpu/drm/xe/xe_vm.h | 2 +-
include/uapi/drm/xe_drm.h | 82 ++++++++++++++++++++++++++++++++
4 files changed, 171 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index b9791c614749..8c965d15c187 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -199,6 +199,7 @@ static const struct drm_ioctl_desc xe_ioctls[] = {
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(XE_OBSERVATION, xe_observation_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(XE_MADVISE, xe_vm_madvise_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(XE_VM_QUERY_VMAS_ATTRS, xe_vm_query_vmas_attrs_ioctl, DRM_RENDER_ALLOW),
};
static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 4520e475399e..9611d7ca2bed 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -2162,6 +2162,93 @@ int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
return err;
}
+static void xe_vm_query_vmas(struct xe_vm *vm, u32 *num_vmas, u64 start, u64 end)
+{
+ struct drm_gpuva *gpuva;
+
+ lockdep_assert_held(&vm->lock);
+ drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, start, end)
+ (*num_vmas)++;
+}
+
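+/*
+ * Copy the attributes of each VMA intersecting [start, end) into @mem_attrs.
+ * @num_vmas is in/out: buffer capacity on entry, entries written on return.
+ */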
+static int get_mem_attrs(struct xe_vm *vm, u32 *num_vmas, u64 start,
+ u64 end, struct drm_xe_vma_mem_attr *mem_attrs)
+{
+ struct drm_gpuva *gpuva;
+ int i = 0;
+
+ lockdep_assert_held(&vm->lock);
+
+ drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, start, end) {
+ struct xe_vma *vma = gpuva_to_vma(gpuva);
+
+ if (i == *num_vmas)
+ return -EINVAL;
+
+ mem_attrs[i].start = xe_vma_start(vma);
+ mem_attrs[i].end = xe_vma_end(vma);
+ mem_attrs[i].atomic.val = vma->attr.atomic_access;
+ mem_attrs[i].pat_index.val = vma->attr.pat_index;
+ mem_attrs[i].preferred_mem_loc.devmem_fd = vma->attr.preferred_loc.devmem_fd;
+ mem_attrs[i].preferred_mem_loc.migration_policy = vma->attr.preferred_loc.migration_policy;
+
+ i++;
+ }
+
+ if (i < *num_vmas)
+ *num_vmas = i;
+ return 0;
+}
+
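+/*
+ * Two modes: num_vmas == 0 with a NULL vector_of_vma_mem_attr returns the
+ * number of VMAs in [start, start + range); num_vmas > 0 with a valid
+ * pointer fills the user buffer with the attributes of up to num_vmas VMAs.
+ */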
+int xe_vm_query_vmas_attrs_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct xe_device *xe = to_xe_device(dev);
+ struct xe_file *xef = to_xe_file(file);
+ struct drm_xe_vma_mem_attr *mem_attrs;
+ struct drm_xe_vm_query_vmas_attr *args = data;
+ u64 __user *attrs_user = NULL;
+ struct xe_vm *vm;
+ int err = 0;
+
+ if (XE_IOCTL_DBG(xe, !args->num_vmas != !args->vector_of_vma_mem_attr))
+ return -EINVAL;
+
+ vm = xe_vm_lookup(xef, args->vm_id);
+ if (XE_IOCTL_DBG(xe, !vm))
+ return -EINVAL;
+
+ down_read(&vm->lock);
+
+ attrs_user = u64_to_user_ptr(args->vector_of_vma_mem_attr);
+
+ if (args->num_vmas == 0 && !attrs_user) {
+ xe_vm_query_vmas(vm, &args->num_vmas, args->start, args->start + args->range);
+ goto unlock_vm;
+ }
+
+ mem_attrs = kvmalloc_array(args->num_vmas, sizeof(struct drm_xe_vma_mem_attr),
+ GFP_KERNEL | __GFP_ACCOUNT |
+ __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+ if (!mem_attrs) {
+ err = args->num_vmas > 1 ? -ENOBUFS : -ENOMEM;
+ goto unlock_vm;
+ }
+
+ err = get_mem_attrs(vm, &args->num_vmas, args->start,
+ args->start + args->range, mem_attrs);
+ if (err)
+ goto free_mem_attrs;
+
+ if (copy_to_user(attrs_user, mem_attrs,
+ sizeof(struct drm_xe_vma_mem_attr) * args->num_vmas))
+ err = -EFAULT;
+
+free_mem_attrs:
+ kvfree(mem_attrs);
+unlock_vm:
+ up_read(&vm->lock);
+ return err;
+}
+
static bool vma_matches(struct xe_vma *vma, u64 page_addr)
{
if (page_addr > xe_vma_end(vma) - 1 ||
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 340ac34936f4..b1e94b536c80 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -195,7 +195,7 @@ int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int xe_vm_bind_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-
+int xe_vm_query_vmas_attrs_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
void xe_vm_close_and_put(struct xe_vm *vm);
static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index 03adfdc20dde..6ff9ff0c09dd 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -82,6 +82,7 @@ extern "C" {
* - &DRM_IOCTL_XE_WAIT_USER_FENCE
* - &DRM_IOCTL_XE_OBSERVATION
* - &DRM_IOCTL_XE_MADVISE
+ * - &DRM_IOCTL_XE_VM_QUERY_VMAS_ATTRS
*/
/*
@@ -104,6 +105,7 @@ extern "C" {
#define DRM_XE_WAIT_USER_FENCE 0x0a
#define DRM_XE_OBSERVATION 0x0b
#define DRM_XE_MADVISE 0x0c
+#define DRM_XE_VM_QUERY_VMAS_ATTRS 0x0d
/* Must be kept compact -- no holes */
@@ -120,6 +122,7 @@ extern "C" {
#define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
#define DRM_IOCTL_XE_OBSERVATION DRM_IOW(DRM_COMMAND_BASE + DRM_XE_OBSERVATION, struct drm_xe_observation_param)
#define DRM_IOCTL_XE_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_MADVISE, struct drm_xe_madvise)
+#define DRM_IOCTL_XE_VM_QUERY_VMAS_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_QUERY_VMAS_ATTRS, struct drm_xe_vm_query_vmas_attr)
/**
* DOC: Xe IOCTL Extensions
@@ -2066,6 +2069,85 @@ struct drm_xe_madvise {
};
+/**
+ * struct drm_xe_vma_mem_attr - Memory attributes of a single VMA
+ *
+ * Output array element of &DRM_IOCTL_XE_VM_QUERY_VMAS_ATTRS.
+ */
+struct drm_xe_vma_mem_attr {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @start: start of the vma */
+ __u64 start;
+
+ /** @end: end of the vma */
+ __u64 end;
+
+ struct {
+ struct {
+ /** @val: value of the atomic access attribute */
+ __u32 val;
+
+ /** @reserved: Reserved */
+ __u32 reserved;
+ } atomic;
+
+ struct {
+ /** @val: value for DRM_XE_VMA_ATTR_PURGEABLE_STATE */
+ __u32 val;
+
+ /** @reserved: Reserved */
+ __u32 reserved;
+ } purge_state_val;
+
+ struct {
+ /** @val: PAT index of the vma */
+ __u32 val;
+
+ /** @reserved: Reserved */
+ __u32 reserved;
+ } pat_index;
+
+ /** @preferred_mem_loc: preferred memory location */
+ struct {
+ /** @devmem_fd: fd of the preferred memory location */
+ __u32 devmem_fd;
+
+ /** @migration_policy: migration policy for the preferred location */
+ __u32 migration_policy;
+ } preferred_mem_loc;
+ };
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_vm_query_vmas_attr - Input of &DRM_IOCTL_XE_VM_QUERY_VMAS_ATTRS
+ *
+ * Get memory attributes of the VMAs within a virtual address range
+ */
+struct drm_xe_vm_query_vmas_attr {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @vm_id: vm_id of the virtual range */
+ __u32 vm_id;
+
+ /**
+ * @num_vmas: On input, the capacity of @vector_of_vma_mem_attr, or 0
+ * to query the count. On output, the number of VMAs in the range.
+ */
+ __u32 num_vmas;
+
+ /** @start: start of the virtual address range */
+ __u64 start;
+
+ /** @range: size of the virtual address range */
+ __u64 range;
+
+ /**
+ * @vector_of_vma_mem_attr: userptr to an array of struct
+ * drm_xe_vma_mem_attr
+ */
+ __u64 vector_of_vma_mem_attr;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
#if defined(__cplusplus)
}
#endif
--
2.34.1