[PATCH v8 32/42] drm/msm: Support IO_PGTABLE_QUIRK_NO_WARN
Rob Clark
robin.clark at oss.qualcomm.com
Sun Jun 29 14:03:35 UTC 2025
From: Rob Clark <robdclark at chromium.org>
With user-managed VMs and multiple queues, it is in theory possible to
trigger map/unmap errors. These will (in a later patch) mark the VM as
unusable, but since the errors are handled we want to tell the
io-pgtable helpers not to spam the log. In addition, in the unmap path
we don't want to bail out early, to ensure we don't leave any dangling
pages mapped.
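As a quick illustration (this only summarizes the diff below, it is not
additional code), the two pieces look roughly like:

	/* Request the quirk only for user-managed pagetables, so the
	 * io-pgtable helpers skip their WARN_ON splats for map/unmap
	 * failures that we now expect and handle ourselves: */
	if (!kernel_managed)
		ttbr0_cfg.quirks |= IO_PGTABLE_QUIRK_NO_WARN;

	/* In the unmap path, record the first failure but keep walking
	 * the range page by page instead of bailing out, so no pages
	 * are left dangling: */
	while (size) {
		ssize_t unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL);

		if (unmapped <= 0) {
			ret = -EINVAL;
			unmapped = PAGE_SIZE;	/* keep going */
		}
		iova += unmapped;
		size -= unmapped;
	}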
Signed-off-by: Rob Clark <robdclark at chromium.org>
Signed-off-by: Rob Clark <robin.clark at oss.qualcomm.com>
Tested-by: Antonino Maniscalco <antomani103 at gmail.com>
Reviewed-by: Antonino Maniscalco <antomani103 at gmail.com>
---
drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 2 +-
drivers/gpu/drm/msm/msm_iommu.c | 23 ++++++++++++++++++-----
drivers/gpu/drm/msm/msm_mmu.h | 2 +-
3 files changed, 20 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 62b5f294a2aa..5e115abe7692 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -2280,7 +2280,7 @@ a6xx_create_private_vm(struct msm_gpu *gpu, bool kernel_managed)
{
struct msm_mmu *mmu;
- mmu = msm_iommu_pagetable_create(to_msm_vm(gpu->vm)->mmu);
+ mmu = msm_iommu_pagetable_create(to_msm_vm(gpu->vm)->mmu, kernel_managed);
if (IS_ERR(mmu))
return ERR_CAST(mmu);
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index a0c74ecdb11b..bd67431cb25f 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -94,15 +94,24 @@ static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
{
struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
+ int ret = 0;
while (size) {
- size_t unmapped, pgsize, count;
+ size_t pgsize, count;
+ ssize_t unmapped;
pgsize = calc_pgsize(pagetable, iova, iova, size, &count);
unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL);
- if (!unmapped)
- break;
+ if (unmapped <= 0) {
+ ret = -EINVAL;
+ /*
+ * Continue attempting to unmap the remainder of the
+ * range, so we don't end up with some dangling
+ * mapped pages.
+ */
+ unmapped = PAGE_SIZE;
+ }
iova += unmapped;
size -= unmapped;
@@ -110,7 +119,7 @@ static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
iommu_flush_iotlb_all(to_msm_iommu(pagetable->parent)->domain);
- return (size == 0) ? 0 : -EINVAL;
+ return ret;
}
static int msm_iommu_pagetable_map_prr(struct msm_mmu *mmu, u64 iova, size_t len, int prot)
@@ -324,7 +333,7 @@ static const struct iommu_flush_ops tlb_ops = {
static int msm_gpu_fault_handler(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags, void *arg);
-struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
+struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_managed)
{
struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev);
struct msm_iommu *iommu = to_msm_iommu(parent);
@@ -358,6 +367,10 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
ttbr0_cfg.tlb = &tlb_ops;
+ if (!kernel_managed) {
+ ttbr0_cfg.quirks |= IO_PGTABLE_QUIRK_NO_WARN;
+ }
+
pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
&ttbr0_cfg, pagetable);
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 9d61999f4d42..04dce0faaa3a 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -51,7 +51,7 @@ static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
mmu->handler = handler;
}
-struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent);
+struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_managed);
int msm_iommu_pagetable_params(struct msm_mmu *mmu, phys_addr_t *ttbr,
int *asid);
--
2.50.0