[PATCH v2 7/8] drm/panfrost: Rework page table flushing and runtime PM interaction
Rob Herring
robh at kernel.org
Fri Aug 23 02:12:15 UTC 2019
There is no point in resuming the h/w just to do flush operations, and
doing so takes several locks that cause lockdep issues with the
shrinker. Rework the flush operations so they happen only when the h/w
is already awake. This avoids taking any locks associated with
resuming.
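
The key primitive is pm_runtime_get_if_in_use(), which takes a PM
reference only if the device is already active and never resumes it,
so none of the resume-path locks are involved. As a rough sketch of
the pattern (do_flush() here is a hypothetical stand-in for the actual
mmu_hw_do_operation() call):

	/* Take a PM reference only if the h/w is already awake */
	if (!pm_runtime_get_if_in_use(dev))
		return;	/* suspended: nothing cached to flush */

	do_flush(dev);	/* h/w access is safe while we hold the ref */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
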
Cc: Tomeu Vizoso <tomeu.vizoso at collabora.com>
Cc: Steven Price <steven.price at arm.com>
Cc: Alyssa Rosenzweig <alyssa.rosenzweig at collabora.com>
Cc: David Airlie <airlied at linux.ie>
Cc: Daniel Vetter <daniel at ffwll.ch>
Signed-off-by: Rob Herring <robh at kernel.org>
---
v2: new patch
drivers/gpu/drm/panfrost/panfrost_mmu.c | 41 ++++++++++++-------------
1 file changed, 20 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 842bdd7cf6be..ccf671a9c3fb 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -220,6 +220,23 @@ static size_t get_pgsize(u64 addr, size_t size)
 	return SZ_2M;
 }
 
+void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
+			      struct panfrost_mmu *mmu,
+			      u64 iova, size_t size)
+{
+	if (mmu->as < 0)
+		return;
+
+	/* Flush the PTs only if we're already awake */
+	if (!pm_runtime_get_if_in_use(pfdev->dev))
+		return;
+
+	mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);
+
+	pm_runtime_mark_last_busy(pfdev->dev);
+	pm_runtime_put_autosuspend(pfdev->dev);
+}
+
 static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
 		      u64 iova, int prot, struct sg_table *sgt)
 {
@@ -246,11 +263,10 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
 		}
 	}
 
-	mmu_hw_do_operation(pfdev, mmu, start_iova, iova - start_iova,
-			    AS_COMMAND_FLUSH_PT);
-
 	mutex_unlock(&mmu->lock);
 
+	panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);
+
 	return 0;
 }
 
@@ -259,7 +275,6 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo)
 	struct drm_gem_object *obj = &bo->base.base;
 	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
 	struct sg_table *sgt;
-	int ret;
 	int prot = IOMMU_READ | IOMMU_WRITE;
 
 	if (WARN_ON(bo->is_mapped))
@@ -272,14 +287,7 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo)
 	if (WARN_ON(IS_ERR(sgt)))
 		return PTR_ERR(sgt);
 
-	ret = pm_runtime_get_sync(pfdev->dev);
-	if (ret < 0)
-		return ret;
-
 	mmu_map_sg(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, prot, sgt);
-
-	pm_runtime_mark_last_busy(pfdev->dev);
-	pm_runtime_put_autosuspend(pfdev->dev);
 	bo->is_mapped = true;
 
 	return 0;
@@ -293,17 +301,12 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
 	u64 iova = bo->node.start << PAGE_SHIFT;
 	size_t len = bo->node.size << PAGE_SHIFT;
 	size_t unmapped_len = 0;
-	int ret;
 
 	if (WARN_ON(!bo->is_mapped))
 		return;
 
 	dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", bo->mmu->as, iova, len);
 
-	ret = pm_runtime_get_sync(pfdev->dev);
-	if (ret < 0)
-		return;
-
 	mutex_lock(&bo->mmu->lock);
 
 	while (unmapped_len < len) {
@@ -318,13 +321,9 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
 		unmapped_len += pgsize;
 	}
 
-	mmu_hw_do_operation(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT,
-			    bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT);
-
 	mutex_unlock(&bo->mmu->lock);
 
-	pm_runtime_mark_last_busy(pfdev->dev);
-	pm_runtime_put_autosuspend(pfdev->dev);
+	panfrost_mmu_flush_range(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, len);
 
 	bo->is_mapped = false;
 }
--
2.20.1