[PATCH v1 6/6] drm/msm/a6xx: Support per-instance pagetables
Jordan Crouse
jcrouse at codeaurora.org
Tue Jan 28 22:16:10 UTC 2020
Add support for per-instance pagetables for a6xx targets: handle
split pagetables, create a new address space instance for each context
when the needed IOMMU support exists, and insert the PM4 commands
required to trigger a pagetable switch at the beginning of each user
command.
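For reference, the TTBR0 value handed to CP_SMMU_TABLE_UPDATE packs
the ASID into the upper bits of the pagetable base address. A minimal
sketch of that packing (the helper name and the explicit mask are
illustrative only, not part of this patch):

    #include <linux/bits.h>	/* GENMASK_ULL */
    #include <linux/types.h>	/* u64, u32 */

    /* The pagetable base lives in TTBR0 bits [47:0]; the ASID goes
     * in bits [63:48], matching the value a6xx_set_pagetable()
     * builds below.
     */
    static u64 pack_ttbr0(u64 ttbr, u32 asid)
    {
    	return (ttbr & GENMASK_ULL(47, 0)) | ((u64)asid << 48);
    }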
Signed-off-by: Jordan Crouse <jcrouse at codeaurora.org>
---
drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 89 +++++++++++++++++++++++++++++++++++
1 file changed, 89 insertions(+)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 9bec603c..e1a257e 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -12,6 +12,62 @@
#define GPU_PAS_ID 13
+static void a6xx_set_pagetable(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
+ struct msm_file_private *ctx)
+{
+ u64 ttbr;
+ u32 asid;
+
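+ /* No per-instance pagetable for this context (default aspace) - nothing to do */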
+ if (!msm_iommu_get_ptinfo(ctx->aspace->mmu, &ttbr, &asid))
+ return;
+
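+ /* The SMMU samples the ASID from bits [63:48] of TTBR0 */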
+ ttbr = ttbr | ((u64) asid) << 48;
+
+ /* Turn off protected mode */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 0);
+
+ /* Turn on APRIV mode to access critical regions */
+ OUT_PKT4(ring, REG_A6XX_CP_MISC_CNTL, 1);
+ OUT_RING(ring, 1);
+
+ /* Make sure the ME is synchronized before starting the update */
+ OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);
+
+ /* Execute the table update */
+ OUT_PKT7(ring, CP_SMMU_TABLE_UPDATE, 4);
+ OUT_RING(ring, lower_32_bits(ttbr));
+ OUT_RING(ring, upper_32_bits(ttbr));
+ /* CONTEXTIDR is currently unused */
+ OUT_RING(ring, 0);
+ /* CONTEXTBANK is currently unused */
+ OUT_RING(ring, 0);
+
+ /*
+ * Write the new TTBR0 to the preemption records - this will be used to
+ * reload the pagetable if the current ring gets preempted out.
+ */
+ OUT_PKT7(ring, CP_MEM_WRITE, 4);
+ OUT_RING(ring, lower_32_bits(rbmemptr(ring, ttbr0)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(ring, ttbr0)));
+ OUT_RING(ring, lower_32_bits(ttbr));
+ OUT_RING(ring, upper_32_bits(ttbr));
+
+ /* Invalidate the draw state so we start off fresh */
+ OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
+ OUT_RING(ring, 0x40000);
+ OUT_RING(ring, 1);
+ OUT_RING(ring, 0);
+
+ /* Turn off APRIV */
+ OUT_PKT4(ring, REG_A6XX_CP_MISC_CNTL, 1);
+ OUT_RING(ring, 0);
+
+ /* Turn protected mode back on */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+}
+
static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -89,6 +145,8 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_ringbuffer *ring = submit->ring;
unsigned int i;
+ a6xx_set_pagetable(gpu, ring, ctx);
+
get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
rbmemptr_stats(ring, index, cpcycles_start));
@@ -878,6 +936,36 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
return (unsigned long)busy_time;
}
+static struct msm_gem_address_space *
+a6xx_create_instance_space(struct msm_gpu *gpu)
+{
+ struct msm_gem_address_space *aspace;
+ struct iommu_domain *iommu;
+ struct msm_mmu *mmu;
+
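+ /* Fall back to the global address space if the SMMU can't support aux domains */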
+ if (!iommu_dev_has_feature(&gpu->pdev->dev, IOMMU_DEV_FEAT_AUX))
+ return gpu->aspace;
+
+ iommu = iommu_domain_alloc(&platform_bus_type);
+ if (!iommu)
+ return gpu->aspace;
+
+ mmu = msm_iommu_new_instance(&gpu->pdev->dev, iommu);
+ if (IS_ERR(mmu)) {
+ iommu_domain_free(iommu);
+ return gpu->aspace;
+ }
+
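+ /* Give the instance its own 4GB VA window above the 32 bit boundary */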
+ aspace = msm_gem_address_space_create(mmu, "gpu",
+ 0x100000000ULL, 0x1ffffffffULL);
+ if (IS_ERR(aspace)) {
+ mmu->funcs->destroy(mmu);
+ return gpu->aspace;
+ }
+
+ return aspace;
+}
+
static struct msm_gem_address_space *
a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
{
@@ -951,6 +1039,7 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_state_put = a6xx_gpu_state_put,
#endif
.create_address_space = a6xx_create_address_space,
+ .create_instance_space = a6xx_create_instance_space,
},
.get_timestamp = a6xx_get_timestamp,
};
--
2.7.4