[PATCH 2/4] drm/amd/amdgpu: add support for iova_to_phys to replace TTM trace (v3)
Christian König
ckoenig.leichtzumerken at gmail.com
Tue Sep 19 11:11:54 UTC 2017
Am 18.09.2017 um 19:33 schrieb Tom St Denis:
> Signed-off-by: Tom St Denis <tom.stdenis at amd.com>
>
> (v2): Add domain to iova debugfs
> (v3): Add true read/write methods to access system memory of pages
> mapped to the device
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 104 ++++++++++++++++++++++++++++++++
> 1 file changed, 104 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> index 50d20903de4f..02ae32378e1c 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> @@ -43,6 +43,7 @@
> #include <linux/swap.h>
> #include <linux/pagemap.h>
> #include <linux/debugfs.h>
> +#include <linux/iommu.h>
> #include "amdgpu.h"
> #include "amdgpu_trace.h"
> #include "bif/bif_4_1_d.h"
> @@ -1810,6 +1811,108 @@ static const struct file_operations amdgpu_ttm_gtt_fops = {
> #endif
>
>
> +static void *transform_page(uint64_t phys)
> +{
> + if (PageHighMem(pfn_to_page(PFN_DOWN(phys))))
> + return kmap(pfn_to_page(PFN_DOWN(phys)));
> + else
> + return __va(phys);
> +}
> +
> +static void untransform_page(uint64_t phys)
> +{
> + if (PageHighMem(pfn_to_page(PFN_DOWN(phys))))
> + return kunmap(pfn_to_page(PFN_DOWN(phys)));
> +}
No need for the extra PageHighMem check; just use kmap()/kunmap() — they
should do the right thing IIRC.
> +
> +static ssize_t amdgpu_iova_to_phys_read(struct file *f, char __user *buf,
> + size_t size, loff_t *pos)
> +{
> + struct amdgpu_device *adev = file_inode(f)->i_private;
> + ssize_t result, n;
> + int r;
> + uint64_t phys;
> + void *ptr;
> +
> + result = 0;
> + while (size) {
> + // get physical address and map
> + phys = iommu_iova_to_phys(iommu_get_domain_for_dev(adev->dev), *pos);
Not sure what iommu_get_domain_for_dev() does exactly, but the IOMMU
domain for the device should always be the same, so I would call the
function only once, before the loop.
Also, failing with -ENODEV here when iommu_get_domain_for_dev() returns
NULL sounds like a good idea to me.
> +
> + // copy upto one page
> + if (size > PAGE_SIZE)
> + n = PAGE_SIZE;
> + else
> + n = size;
> +
> + // to end of the page
> + if (((*pos & (PAGE_SIZE - 1)) + n) >= PAGE_SIZE)
> + n = PAGE_SIZE - (*pos & (PAGE_SIZE - 1));
> +
> + ptr = transform_page(phys);
> + if (!ptr)
> + return -EFAULT;
> +
> + r = copy_to_user(buf, ptr, n);
> + untransform_page(phys);
> + if (r)
> + return -EFAULT;
> +
> + *pos += n;
> + size -= n;
> + result += n;
> + }
> +
> + return result;
> +}
> +
> +static ssize_t amdgpu_iova_to_phys_write(struct file *f, const char __user *buf,
> + size_t size, loff_t *pos)
> +{
> + struct amdgpu_device *adev = file_inode(f)->i_private;
> + ssize_t result, n;
> + int r;
> + uint64_t phys;
> + void *ptr;
> +
> + result = 0;
> + while (size) {
> + // get physical address and map
> + phys = iommu_iova_to_phys(iommu_get_domain_for_dev(adev->dev), *pos);
Same comment as above.
Apart from that looks good to me,
Christian.
> +
> + // copy upto one page
> + if (size > PAGE_SIZE)
> + n = PAGE_SIZE;
> + else
> + n = size;
> +
> + // to end of the page
> + if (((*pos & (PAGE_SIZE - 1)) + n) >= PAGE_SIZE)
> + n = PAGE_SIZE - (*pos & (PAGE_SIZE - 1));
> +
> + ptr = transform_page(phys);
> + if (!ptr)
> + return -EFAULT;
> +
> + r = copy_from_user(ptr, buf, n);
> + untransform_page(phys);
> + if (r)
> + return -EFAULT;
> +
> + *pos += n;
> + size -= n;
> + result += n;
> + }
> +
> + return result;
> +}
> +
> +static const struct file_operations amdgpu_ttm_iova_fops = {
> + .owner = THIS_MODULE,
> + .read = amdgpu_iova_to_phys_read,
> + .write = amdgpu_iova_to_phys_write,
> + .llseek = default_llseek
> +};
>
> static const struct {
> char *name;
> @@ -1820,6 +1923,7 @@ static const struct {
> #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
> { "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
> #endif
> + { "amdgpu_iova", &amdgpu_ttm_iova_fops, TTM_PL_SYSTEM },
> };
>
> #endif
More information about the amd-gfx
mailing list