[PATCH] drm/amdgpu: replace iova debugfs file with iomem (v2)
Tom St Denis
tom.stdenis@amd.com
Fri Feb 9 17:28:58 UTC 2018
On 09/02/18 12:27 PM, Tom St Denis wrote:
> From: Christian König <ckoenig.leichtzumerken@gmail.com>
Oops, I'll remove this from the commit message before pushing :-)
I did give you credit below though.
Tom
>
> This allows access to pages allocated through the driver, with the
> IOVA translated through the IOMMU when one is present.
>
> v2: Fix number of bytes copied and add write method
>
> Original-by: Christian König <christian.koenig@amd.com>
> Signed-off-by: Tom St Denis <tom.stdenis@amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 110 ++++++++++++++++++++++++++------
> 1 file changed, 89 insertions(+), 21 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> index b372d8d650a5..d6c56b001a2c 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> @@ -1929,38 +1929,106 @@ static const struct file_operations amdgpu_ttm_gtt_fops = {
>
> #endif
>
> -static ssize_t amdgpu_iova_to_phys_read(struct file *f, char __user *buf,
> - size_t size, loff_t *pos)
> +static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
> + size_t size, loff_t *pos)
> {
> struct amdgpu_device *adev = file_inode(f)->i_private;
> - int r;
> - uint64_t phys;
> struct iommu_domain *dom;
> + ssize_t result = 0;
> + int r;
>
> - // always return 8 bytes
> - if (size != 8)
> - return -EINVAL;
> + dom = iommu_get_domain_for_dev(adev->dev);
>
> - // only accept page addresses
> - if (*pos & 0xFFF)
> - return -EINVAL;
> + while (size) {
> + phys_addr_t addr = *pos & PAGE_MASK;
> + loff_t off = *pos & ~PAGE_MASK;
> + size_t bytes = PAGE_SIZE - off;
> + unsigned long pfn;
> + struct page *p;
> + void *ptr;
> +
> + bytes = bytes < size ? bytes : size;
> +
> + addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
> +
> + pfn = addr >> PAGE_SHIFT;
> + if (!pfn_valid(pfn))
> + return -EPERM;
> +
> + p = pfn_to_page(pfn);
> + if (p->mapping != adev->mman.bdev.dev_mapping)
> + return -EPERM;
> +
> + ptr = kmap(p);
> + if (ptr) {
> + r = copy_to_user(buf + result, ptr + off, bytes);
> + kunmap(p);
> + if (r)
> + return -EFAULT;
> + } else {
> + return -EFAULT;
> + }
> +
> + size -= bytes;
> + *pos += bytes;
> + result += bytes;
> + }
> +
> + return result;
> +}
> +
> +static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
> + size_t size, loff_t *pos)
> +{
> + struct amdgpu_device *adev = file_inode(f)->i_private;
> + struct iommu_domain *dom;
> + ssize_t result = 0;
> + int r;
>
> dom = iommu_get_domain_for_dev(adev->dev);
> - if (dom)
> - phys = iommu_iova_to_phys(dom, *pos);
> - else
> - phys = *pos;
>
> - r = copy_to_user(buf, &phys, 8);
> - if (r)
> - return -EFAULT;
> + while (size) {
> + phys_addr_t addr = *pos & PAGE_MASK;
> + loff_t off = *pos & ~PAGE_MASK;
> + size_t bytes = PAGE_SIZE - off;
> + unsigned long pfn;
> + struct page *p;
> + void *ptr;
> +
> + bytes = bytes < size ? bytes : size;
> +
> + addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
>
> - return 8;
> + pfn = addr >> PAGE_SHIFT;
> + if (!pfn_valid(pfn))
> + return -EPERM;
> +
> + p = pfn_to_page(pfn);
> + if (p->mapping != adev->mman.bdev.dev_mapping)
> + return -EPERM;
> +
> + ptr = kmap(p);
> + if (ptr) {
> + r = copy_from_user(ptr + off, buf + result, bytes);
> + kunmap(p);
> + if (r)
> + return -EFAULT;
> + } else {
> + return -EFAULT;
> + }
> +
> + size -= bytes;
> + *pos += bytes;
> + result += bytes;
> + }
> +
> + return result;
> }
>
> -static const struct file_operations amdgpu_ttm_iova_fops = {
> +static const struct file_operations amdgpu_ttm_iomem_fops = {
> .owner = THIS_MODULE,
> - .read = amdgpu_iova_to_phys_read,
> + .read = amdgpu_iomem_read,
> + .write = amdgpu_iomem_write,
> .llseek = default_llseek
> };
>
> @@ -1973,7 +2041,7 @@ static const struct {
> #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
> { "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
> #endif
> - { "amdgpu_iova", &amdgpu_ttm_iova_fops, TTM_PL_SYSTEM },
> + { "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM },
> };
>
> #endif
>
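For anyone who wants to poke at the new interface by hand, a userspace
read looks roughly like the sketch below. This is not part of the patch:
the debugfs path assumes DRI instance 0, the IOVA is a made-up
placeholder, and debugfs must be mounted with root access for any of it
to work.

/* Minimal sketch: read 8 bytes from a driver-mapped IOVA through the
 * new amdgpu_iomem debugfs file.  Build with -D_FILE_OFFSET_BITS=64
 * on 32-bit hosts so the (off_t) cast below doesn't truncate the IOVA.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/dri/0/amdgpu_iomem";
	uint64_t iova = 0x100000000ULL;	/* hypothetical IOVA */
	uint64_t data;
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* pread() seeks to the IOVA; the kernel translates it to a
	 * physical page (identity mapping if no IOMMU is active) and
	 * copies the contents out.
	 */
	if (pread(fd, &data, sizeof(data), (off_t)iova) != sizeof(data)) {
		perror("pread");
		close(fd);
		return 1;
	}
	printf("0x%llx: 0x%llx\n",
	       (unsigned long long)iova, (unsigned long long)data);
	close(fd);
	return 0;
}

Unlike the old amdgpu_iova file, which only handed back the 8-byte
physical address of a page-aligned position, this reads the memory
contents themselves, so unaligned offsets and multi-page transfers
work too.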