[PATCH 15/17] drm/xe/oa/uapi: OA buffer mmap

Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
Sat Dec 23 02:39:14 UTC 2023


On Thu, Dec 07, 2023 at 10:43:27PM -0800, Ashutosh Dixit wrote:
>Allow the OA buffer to be mmap'd to userspace. This is needed for the MMIO
>trigger use case. Even otherwise, with whitelisted OA head/tail ptr
>registers, userspace can receive/interpret OA data from the mmap'd buffer
>without issuing read()'s on the OA stream fd.
>
>Suggested-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
>Signed-off-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
>---
> drivers/gpu/drm/xe/xe_oa.c | 53 ++++++++++++++++++++++++++++++++++++++
> 1 file changed, 53 insertions(+)
>
>diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
>index 42f32d4359f2c..97779cbb83ee8 100644
>--- a/drivers/gpu/drm/xe/xe_oa.c
>+++ b/drivers/gpu/drm/xe/xe_oa.c
>@@ -898,6 +898,8 @@ static int xe_oa_alloc_oa_buffer(struct xe_oa_stream *stream)
> 		return PTR_ERR(bo);
>
> 	stream->oa_buffer.bo = bo;
>+	/* mmap implementation requires OA buffer to be in system memory */
>+	xe_assert(stream->oa->xe, bo->vmap.is_iomem == 0);
> 	stream->oa_buffer.vaddr = bo->vmap.vaddr;
> 	return 0;
> }
>@@ -1174,6 +1176,9 @@ static int xe_oa_release(struct inode *inode, struct file *file)
> 	struct xe_oa_stream *stream = file->private_data;
> 	struct xe_gt *gt = stream->gt;
>
>+	/* Zap mmap's */
>+	unmap_mapping_range(file->f_mapping, 0, -1, 1);
>+
> 	mutex_lock(&gt->oa.gt_lock);
> 	xe_oa_destroy_locked(stream);
> 	mutex_unlock(&gt->oa.gt_lock);
>@@ -1184,6 +1189,53 @@ static int xe_oa_release(struct inode *inode, struct file *file)
> 	return 0;
> }
>
>+static int xe_oa_mmap(struct file *file, struct vm_area_struct *vma)
>+{
>+	struct xe_oa_stream *stream = file->private_data;
>+	struct xe_bo *bo = stream->oa_buffer.bo;
>+	unsigned long start = vma->vm_start;
>+	int i, ret;
>+
>+	if (xe_perf_stream_paranoid && !perfmon_capable()) {
>+		drm_dbg(&stream->oa->xe->drm, "Insufficient privilege to map OA buffer\n");
>+		return -EACCES;
>+	}
>+
>+	/* Can mmap the entire OA buffer or nothing (no partial OA buffer mmaps) */
>+	if (vma->vm_end - vma->vm_start != XE_OA_BUFFER_SIZE) {
>+		drm_dbg(&stream->oa->xe->drm, "Wrong mmap size, must be OA buffer size\n");
>+		return -EINVAL;
>+	}
>+
>+	/* Only support VM_READ, enforce MAP_PRIVATE by checking for VM_MAYSHARE */
>+	if (vma->vm_flags & (VM_WRITE | VM_EXEC | VM_SHARED | VM_MAYSHARE)) {
>+		drm_dbg(&stream->oa->xe->drm, "mmap must be read only\n");
>+		return -EINVAL;
>+	}
>+
>+	vm_flags_clear(vma, VM_MAYWRITE | VM_MAYEXEC);
>+
>+	/*
>+	 * If the privileged parent forks and child drops root privilege, we do not want
>+	 * the child to retain access to the mapped OA buffer. Explicitly set VM_DONTCOPY
>+	 * to avoid such cases.
>+	 */
>+	vm_flags_set(vma, vma->vm_flags | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY);

It would help to just use vm_flags_mod() here, which lets you specify 
both the set and clear flags in a single call.
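
Untested, but something along these lines:

	/* Set and clear the mmap vm_flags in a single vm_flags_mod() call */
	vm_flags_mod(vma,
		     VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY,
		     VM_MAYWRITE | VM_MAYEXEC);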

And then, just to be paranoid about it, maybe add an assert to check 
that the flags were applied correctly.
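
Untested sketch of such an assert, checking the flags set/cleared above:

	vm_flags_t mmap_flags = VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
				VM_DONTCOPY;

	xe_assert(stream->oa->xe, (vma->vm_flags & mmap_flags) == mmap_flags);
	xe_assert(stream->oa->xe, !(vma->vm_flags & (VM_MAYWRITE | VM_MAYEXEC)));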

Assuming you ran the existing mmap tests for this, I think we should 
also add an mremap case. I think that should fail with EINVAL since 
this is a private mapping.
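
Rough userspace sketch of such a check (untested; "stream_fd" is assumed 
to be an already-open OA stream fd and "oa_buf_size" the full OA buffer 
size):

	#define _GNU_SOURCE
	#include <assert.h>
	#include <sys/mman.h>

	static void check_oa_mremap(int stream_fd, size_t oa_buf_size)
	{
		/* Map the full OA buffer read-only and private, as required above */
		void *ptr = mmap(NULL, oa_buf_size, PROT_READ, MAP_PRIVATE,
				 stream_fd, 0);
		assert(ptr != MAP_FAILED);

		/* Growing a VM_PFNMAP/VM_DONTEXPAND mapping should be rejected */
		assert(mremap(ptr, oa_buf_size, 2 * oa_buf_size,
			      MREMAP_MAYMOVE) == MAP_FAILED);

		munmap(ptr, oa_buf_size);
	}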

>+
>+	xe_assert(stream->oa->xe, bo->ttm.ttm->num_pages ==
>+		  (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
>+	for (i = 0; i < bo->ttm.ttm->num_pages; i++) {
>+		ret = remap_pfn_range(vma, start, page_to_pfn(bo->ttm.ttm->pages[i]),
>+				      PAGE_SIZE, vma->vm_page_prot);

vma->vm_page_prot is set from the state of vm_flags as it existed at 
the mmap_region() level. We have modified those flags here, so we must 
update vma->vm_page_prot with vm_get_page_prot(vma->vm_flags).
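
i.e. after the vm_flags manipulation, something like:

	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);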

Thanks,
Umesh

>+		if (ret)
>+			break;
>+
>+		start += PAGE_SIZE;
>+	}
>+
>+	return ret;
>+}
>+
> static const struct file_operations xe_oa_fops = {
> 	.owner		= THIS_MODULE,
> 	.llseek		= no_llseek,
>@@ -1191,6 +1243,7 @@ static const struct file_operations xe_oa_fops = {
> 	.poll		= xe_oa_poll,
> 	.read		= xe_oa_read,
> 	.unlocked_ioctl	= xe_oa_ioctl,
>+	.mmap		= xe_oa_mmap,
> };
>
> static bool engine_supports_mi_query(struct xe_hw_engine *hwe)
>-- 
>2.41.0
>

