[PATCH 2/7] drm/msm/gpu: Name GMU bos
Rob Clark
robdclark at gmail.com
Wed Nov 24 21:41:29 UTC 2021
From: Rob Clark <robdclark at chromium.org>
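Assign names to the GMU buffers so they can be identified in GEM debug output.
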
Signed-off-by: Rob Clark <robdclark at chromium.org>
---
drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 22 +++++++++++++---------
1 file changed, 13 insertions(+), 9 deletions(-)
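
Note for reviewers (illustration only, not part of the patch): the helper now
takes a name which is attached to the GEM object via msm_gem_object_set_name(),
so each GMU buffer can be identified by name in GEM debug output. A minimal
caller sketch, assuming a hypothetical gmu->scratch a6xx_gmu_bo field:

	/* Allocate a 4K GMU buffer at a dynamically assigned GMU address
	 * (iova == 0, as for the hfi and log buffers) and tag it "scratch"
	 * so it shows up by name in GEM debug listings.
	 * (gmu->scratch is hypothetical, for illustration only.)
	 */
	ret = a6xx_gmu_memory_alloc(gmu, &gmu->scratch, SZ_4K, 0, "scratch");
	if (ret)
		goto err_memory;
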
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 71e52b2b2025..e1774ea342b1 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -1146,7 +1146,7 @@ static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
}
static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
- size_t size, u64 iova)
+ size_t size, u64 iova, const char *name)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct drm_device *dev = a6xx_gpu->base.base.dev;
@@ -1181,6 +1181,8 @@ static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
bo->virt = msm_gem_get_vaddr(bo->obj);
bo->size = size;
+ msm_gem_object_set_name(bo->obj, name);
+
return 0;
}
@@ -1515,7 +1517,8 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
*/
gmu->dummy.size = SZ_4K;
if (adreno_is_a660_family(adreno_gpu)) {
- ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7, 0x60400000);
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7,
+ 0x60400000, "debug");
if (ret)
goto err_memory;
@@ -1523,23 +1526,24 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
}
/* Allocate memory for the GMU dummy page */
- ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size, 0x60000000);
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size,
+ 0x60000000, "dummy");
if (ret)
goto err_memory;
if (adreno_is_a650_family(adreno_gpu)) {
ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
- SZ_16M - SZ_16K, 0x04000);
+ SZ_16M - SZ_16K, 0x04000, "icache");
if (ret)
goto err_memory;
} else if (adreno_is_a640_family(adreno_gpu)) {
ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
- SZ_256K - SZ_16K, 0x04000);
+ SZ_256K - SZ_16K, 0x04000, "icache");
if (ret)
goto err_memory;
ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache,
- SZ_256K - SZ_16K, 0x44000);
+ SZ_256K - SZ_16K, 0x44000, "dcache");
if (ret)
goto err_memory;
} else {
@@ -1547,18 +1551,18 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
gmu->legacy = true;
/* Allocate memory for the GMU debug region */
- ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0);
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0, "debug");
if (ret)
goto err_memory;
}
/* Allocate memory for for the HFI queues */
- ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0);
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0, "hfi");
if (ret)
goto err_memory;
/* Allocate memory for the GMU log region */
- ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_4K, 0);
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_4K, 0, "log");
if (ret)
goto err_memory;
--
2.33.1