[Freedreno] [v2 7/7] drm/msm/a6xx: Add support for using system cache(LLC)
Jordan Crouse
jcrouse at codeaurora.org
Mon Oct 8 14:18:02 UTC 2018
On Mon, Oct 08, 2018 at 07:29:03PM +0530, Sharat Masetty wrote:
>
>
> On 10/5/2018 8:37 PM, Jordan Crouse wrote:
> >On Fri, Oct 05, 2018 at 06:38:35PM +0530, Sharat Masetty wrote:
> >>The last level system cache can be partitioned into 32 different slices,
> >>of which the GPU has two slices preallocated. One slice is used for caching GPU
> >>buffers and the other slice is used for caching the GPU SMMU pagetables.
> >>This patch talks to the core system cache driver to acquire the slice handles,
> >>configure the SCIDs for those slices, and activate and deactivate the slices
> >>upon GPU power collapse and restore.
> >>
> >>Some support from the IOMMU driver is also needed to make use of the
> >>system cache. IOMMU_SYS_CACHE is a buffer protection flag which enables
> >>caching GPU data buffers in the system cache with memory attributes such
> >>as outer cacheable, read-allocate, write-allocate for buffers. The GPU
> >>then has the ability to override a few cacheability parameters which it
> >>does to override write-allocate to write-no-allocate as the GPU hardware
> >>does not benefit much from it.
> >>
> >>Similarly DOMAIN_ATTR_USE_SYS_CACHE is another domain level attribute
> >>used by the IOMMU driver to set the right attributes to cache the hardware
> >>pagetables into the system cache.
> >>
> >>Signed-off-by: Sharat Masetty <smasetty at codeaurora.org>
> >>---
> >> drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 159 +++++++++++++++++++++++++++++++++-
> >> drivers/gpu/drm/msm/adreno/a6xx_gpu.h | 9 ++
> >> drivers/gpu/drm/msm/msm_iommu.c | 13 +++
> >> drivers/gpu/drm/msm/msm_mmu.h | 3 +
> >> 4 files changed, 183 insertions(+), 1 deletion(-)
> >>
> >>diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
> >>index 177dbfc..1790dde 100644
> >>--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
> >>+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
> >>@@ -8,6 +8,7 @@
> >> #include "a6xx_gmu.xml.h"
> >> #include <linux/devfreq.h>
> >>+#include <linux/soc/qcom/llcc-qcom.h>
> >> static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
> >> {
> >>@@ -674,6 +675,151 @@ static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
> >> ~0
> >> };
> >>+#define A6XX_LLC_NUM_GPU_SCIDS 5
> >>+#define A6XX_GPU_LLC_SCID_NUM_BITS 5
> >>+
> >>+#define A6XX_GPU_LLC_SCID_MASK \
> >>+ ((1 << (A6XX_LLC_NUM_GPU_SCIDS * A6XX_GPU_LLC_SCID_NUM_BITS)) - 1)
> >>+
> >>+#define A6XX_GPUHTW_LLC_SCID_SHIFT 25
> >>+#define A6XX_GPUHTW_LLC_SCID_MASK \
> >>+ (((1 << A6XX_GPU_LLC_SCID_NUM_BITS) - 1) << A6XX_GPUHTW_LLC_SCID_SHIFT)
> >>+
> >>+static inline void a6xx_gpu_cx_rmw(struct a6xx_llc *llc,
> >>+ u32 reg, u32 mask, u32 or)
> >>+{
> >>+ msm_rmw(llc->mmio + (reg << 2), mask, or);
> >>+}
> >>+
> >>+static void a6xx_llc_deactivate(struct msm_gpu *gpu)
> >>+{
> >>+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
> >>+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
> >>+ struct a6xx_llc *llc = &a6xx_gpu->llc;
> >>+
> >>+ llcc_slice_deactivate(llc->gpu_llc_slice);
> >>+ llcc_slice_deactivate(llc->gpuhtw_llc_slice);
> >>+}
> >>+
> >>+static void a6xx_llc_activate(struct msm_gpu *gpu)
> >>+{
> >>+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
> >>+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
> >>+ struct a6xx_llc *llc = &a6xx_gpu->llc;
> >>+
> >>+ if (!llc->mmio)
> >>+ return;
> >>+
> >>+ /*
> >>+ * If the LLCC_GPU slice activated, program the sub-cache ID for all
> >>+ * GPU blocks
> >>+ */
> >>+ if (!llcc_slice_activate(llc->gpu_llc_slice))
> >>+ a6xx_gpu_cx_rmw(llc,
> >>+ REG_A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_1,
> >>+ A6XX_GPU_LLC_SCID_MASK,
> >>+ (llc->cntl1_regval &
> >>+ A6XX_GPU_LLC_SCID_MASK));
> >>+
> >>+ /*
> >>+ * If the LLCC_GPUHTW slice activated, program the sub-cache ID for the
> >>+ * GPU pagetables
> >>+ */
> >>+ if (!llcc_slice_activate(llc->gpuhtw_llc_slice))
> >>+ a6xx_gpu_cx_rmw(llc,
> >>+ REG_A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_1,
> >>+ A6XX_GPUHTW_LLC_SCID_MASK,
> >>+ (llc->cntl1_regval &
> >>+ A6XX_GPUHTW_LLC_SCID_MASK));
> >>+
> >>+ /* Program cacheability overrides */
> >>+ a6xx_gpu_cx_rmw(llc, REG_A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_0, 0xF,
> >>+ llc->cntl0_regval);
> >>+}
> >>+
> >>+void a6xx_llc_slices_destroy(struct a6xx_llc *llc)
> >>+{
> >>+ if (llc->mmio) {
> >>+ iounmap(llc->mmio);
> >>+ llc->mmio = NULL;
> >>+ }
> >>+
> >>+ llcc_slice_putd(llc->gpu_llc_slice);
> >>+ llc->gpu_llc_slice = NULL;
> >
> >I don't think these need to be put back to NULL - we shouldn't touch them again
> >after this point.
> >
> >>+
> >>+ llcc_slice_putd(llc->gpuhtw_llc_slice);
> >>+ llc->gpuhtw_llc_slice = NULL;
> >>+}
> >>+
> >>+static int a6xx_llc_slices_init(struct platform_device *pdev,
> >>+ struct a6xx_llc *llc)
> >>+{
> >>+ int i;
> >>+
> >>+ /* Map registers */
> >>+ llc->mmio = msm_ioremap(pdev, "cx_mem", "gpu_cx");
> >>+ if (IS_ERR(llc->mmio)) {
> >>+ llc->mmio = NULL;
> >>+ return -1;
> >
> >Return a valid error code here even if we don't care what it is. -ENODEV maybe.
> >And in fact, if we don't care what it is (LLCC is very optional) then just don't
> >return anything at all.
> Hi Jordan,
>
> We do need the error code, as we have to let the IOMMU layer know, so
> that it can set additional properties for the buffers and the page
> tables.
Oh, I see what you did (you should set the flag outside of the function call
so it is clearer what is going on).
But why is the MMU flag conditional? If we don't end up activating our slice,
does it hurt to set the UPSTREAM_HINT in the pagetable?
Jordan
--
The Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum,
a Linux Foundation Collaborative Project
More information about the dri-devel
mailing list