[PATCH 15/17] drm/xe/oa/uapi: OA buffer mmap
Thomas Hellström
thomas.hellstrom at linux.intel.com
Tue Jan 2 11:16:12 UTC 2024
On Thu, 2023-12-07 at 22:43 -0800, Ashutosh Dixit wrote:
> Allow the OA buffer to be mmap'd to userspace. This is needed for the
> MMIO trigger use case. Even otherwise, with whitelisted OA head/tail
> ptr registers, userspace can receive/interpret OA data from the mmap'd
> buffer without issuing read()'s on the OA stream fd.
>
> Suggested-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa at intel.com>
> Signed-off-by: Ashutosh Dixit <ashutosh.dixit at intel.com>
> ---
> drivers/gpu/drm/xe/xe_oa.c | 53 ++++++++++++++++++++++++++++++++++++++
> 1 file changed, 53 insertions(+)
>
> diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
> index 42f32d4359f2c..97779cbb83ee8 100644
> --- a/drivers/gpu/drm/xe/xe_oa.c
> +++ b/drivers/gpu/drm/xe/xe_oa.c
> @@ -898,6 +898,8 @@ static int xe_oa_alloc_oa_buffer(struct xe_oa_stream *stream)
> return PTR_ERR(bo);
>
> stream->oa_buffer.bo = bo;
> + /* mmap implementation requires OA buffer to be in system memory */
> + xe_assert(stream->oa->xe, bo->vmap.is_iomem == 0);
> stream->oa_buffer.vaddr = bo->vmap.vaddr;
> return 0;
> }
> @@ -1174,6 +1176,9 @@ static int xe_oa_release(struct inode *inode, struct file *file)
> struct xe_oa_stream *stream = file->private_data;
> struct xe_gt *gt = stream->gt;
>
> + /* Zap mmap's */
> + unmap_mapping_range(file->f_mapping, 0, -1, 1);
> +
Can release() get called at all if there is a live mapping? If not, the
unmap_mapping_range() shouldn't be needed.
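
Rough (untested) sketch of what I mean from the userspace side, assuming
mmap() pins the stream's struct file via the vma, so the final fput() -
and hence release() - only happens once the last mapping is gone
(stream_fd and oa_buf_size are placeholders here):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static void release_vs_mmap(int stream_fd, size_t oa_buf_size)
{
	void *buf = mmap(NULL, oa_buf_size, PROT_READ, MAP_PRIVATE, stream_fd, 0);

	if (buf == MAP_FAILED)
		return;

	/* Dropping the fd does not drop the mapping's reference on the file,
	 * so xe_oa_release() should not run here and the pages stay mapped.
	 */
	close(stream_fd);
	printf("first byte: %d\n", *(volatile char *)buf);

	/* Only when the last mapping goes away does the final fput() happen
	 * and release() get called - at which point there is nothing left
	 * for unmap_mapping_range() to zap.
	 */
	munmap(buf, oa_buf_size);
}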
/Thomas
> mutex_lock(&gt->oa.gt_lock);
> xe_oa_destroy_locked(stream);
> mutex_unlock(&gt->oa.gt_lock);
> @@ -1184,6 +1189,53 @@ static int xe_oa_release(struct inode *inode, struct file *file)
> return 0;
> }
>
> +static int xe_oa_mmap(struct file *file, struct vm_area_struct *vma)
> +{
> + struct xe_oa_stream *stream = file->private_data;
> + struct xe_bo *bo = stream->oa_buffer.bo;
> + unsigned long start = vma->vm_start;
> + int i, ret;
> +
> + if (xe_perf_stream_paranoid && !perfmon_capable()) {
> + drm_dbg(&stream->oa->xe->drm, "Insufficient
> privilege to map OA buffer\n");
> + return -EACCES;
> + }
> +
> + /* Can mmap the entire OA buffer or nothing (no partial OA buffer mmaps) */
> + if (vma->vm_end - vma->vm_start != XE_OA_BUFFER_SIZE) {
> + drm_dbg(&stream->oa->xe->drm, "Wrong mmap size, must
> be OA buffer size\n");
> + return -EINVAL;
> + }
> +
> + /* Only support VM_READ, enforce MAP_PRIVATE by checking for VM_MAYSHARE */
> + if (vma->vm_flags & (VM_WRITE | VM_EXEC | VM_SHARED | VM_MAYSHARE)) {
> + drm_dbg(&stream->oa->xe->drm, "mmap must be read only\n");
> + return -EINVAL;
> + }
> +
> + vm_flags_clear(vma, VM_MAYWRITE | VM_MAYEXEC);
> +
> + /*
> + * If the privileged parent forks and child drops root privilege, we do not want
> + * the child to retain access to the mapped OA buffer. Explicitly set VM_DONTCOPY
> + * to avoid such cases.
> + */
> + vm_flags_set(vma, vma->vm_flags | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY);
> +
> + xe_assert(stream->oa->xe, bo->ttm.ttm->num_pages ==
> + (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
> + for (i = 0; i < bo->ttm.ttm->num_pages; i++) {
> + ret = remap_pfn_range(vma, start, page_to_pfn(bo->ttm.ttm->pages[i]),
> + PAGE_SIZE, vma->vm_page_prot);
> + if (ret)
> + break;
> +
> + start += PAGE_SIZE;
> + }
> +
> + return ret;
> +}
> +
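
As an aside, the VM_DONTCOPY above means a forked child does not get the
mapping at all - the vma simply isn't duplicated - which is what covers
the "child drops root privilege" case in the comment. A rough, untested
illustration (oa_buf is a mapping of the OA buffer made by the
privileged parent):

#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

static void fork_check(void *oa_buf)
{
	int status;
	pid_t pid = fork();

	if (pid == 0) {
		/* With VM_DONTCOPY the vma is not copied into the child, so
		 * this access is expected to fault (SIGSEGV) rather than let
		 * a child that may have dropped privilege read OA data.
		 */
		char c = *(volatile char *)oa_buf;
		_exit(c);
	}
	waitpid(pid, &status, 0);
	printf("child %s\n", WIFSIGNALED(status) ?
	       "faulted as expected" : "could still read the buffer");
}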
> static const struct file_operations xe_oa_fops = {
> .owner = THIS_MODULE,
> .llseek = no_llseek,
> @@ -1191,6 +1243,7 @@ static const struct file_operations xe_oa_fops = {
> .poll = xe_oa_poll,
> .read = xe_oa_read,
> .unlocked_ioctl = xe_oa_ioctl,
> + .mmap = xe_oa_mmap,
> };
>
> static bool engine_supports_mi_query(struct xe_hw_engine *hwe)