[PATCH 1/2] drm/nouveau: explicitly specify caching to use

Christian König ckoenig.leichtzumerken at gmail.com
Fri Sep 11 15:24:45 UTC 2020


Instead of letting TTM mask the caching bits,
specify directly what the driver needs.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
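[Note for reviewers, not part of the patch: a minimal sketch of what
"specify the caching directly" means for a single placement entry.
The helper name and the bar_supports_wc parameter are invented for
illustration only; the real logic lives in set_placement_list() in
the diff below.]

	#include <linux/types.h>
	#include <drm/ttm/ttm_placement.h>

	/*
	 * Illustrative sketch only: the driver picks the exact caching
	 * bits for each ttm_place itself, instead of passing
	 * TTM_PL_MASK_CACHING and letting TTM intersect that with the
	 * resource manager's available_caching.
	 */
	static void example_fill_vram_place(struct ttm_place *pl,
					    bool bar_supports_wc)
	{
		pl->fpfn = 0;
		pl->lpfn = 0;
		pl->mem_type = TTM_PL_VRAM;
		/* VRAM is never CPU-cached; use WC only if the BAR supports it */
		pl->flags = bar_supports_wc ?
			    (TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED) :
			    TTM_PL_FLAG_UNCACHED;
	}
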
 drivers/gpu/drm/nouveau/nouveau_bo.c  | 28 +++++++++++++++++++++------
 drivers/gpu/drm/nouveau/nouveau_ttm.c | 19 +++---------------
 2 files changed, 25 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 97e1908eada0..b062ea8afffd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -340,18 +340,33 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
 }
 
 static void
-set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t domain,
-		   uint32_t flags)
+set_placement_list(struct nouveau_drm *drm, struct ttm_place *pl, unsigned *n,
+		   uint32_t domain, uint32_t flags)
 {
 	*n = 0;
 
 	if (domain & NOUVEAU_GEM_DOMAIN_VRAM) {
+		struct nvif_mmu *mmu = &drm->client.mmu;
+		const u8 type = mmu->type[drm->ttm.type_vram].type;
+
 		pl[*n].mem_type = TTM_PL_VRAM;
-		pl[(*n)++].flags = flags;
+		pl[*n].flags = flags & ~TTM_PL_FLAG_CACHED;
+
+		/* Some BARs do not support being ioremapped WC */
+		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
+		    type & NVIF_MEM_UNCACHED)
+			pl[*n].flags &= ~TTM_PL_FLAG_WC;
+
+		(*n)++;
 	}
 	if (domain & NOUVEAU_GEM_DOMAIN_GART) {
 		pl[*n].mem_type = TTM_PL_TT;
-		pl[(*n)++].flags = flags;
+		pl[*n].flags = flags;
+
+		if (drm->agp.bridge)
+			pl[*n].flags &= ~TTM_PL_FLAG_CACHED;
+
+		(*n)++;
 	}
 	if (domain & NOUVEAU_GEM_DOMAIN_CPU) {
 		pl[*n].mem_type = TTM_PL_SYSTEM;
@@ -397,17 +412,18 @@ void
 nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain,
 			 uint32_t busy)
 {
+	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 	struct ttm_placement *pl = &nvbo->placement;
 	uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
 						 TTM_PL_MASK_CACHING) |
 			 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
 
 	pl->placement = nvbo->placements;
-	set_placement_list(nvbo->placements, &pl->num_placement,
+	set_placement_list(drm, nvbo->placements, &pl->num_placement,
 			   domain, flags);
 
 	pl->busy_placement = nvbo->busy_placements;
-	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
+	set_placement_list(drm, nvbo->busy_placements, &pl->num_busy_placement,
 			   domain | busy, flags);
 
 	set_placement_range(nvbo, domain);
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index cf18f75cd0f1..1b00f32b3849 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -194,19 +194,13 @@ nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
 static int
 nouveau_ttm_init_vram(struct nouveau_drm *drm)
 {
-	struct nvif_mmu *mmu = &drm->client.mmu;
 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
-		/* Some BARs do not support being ioremapped WC */
-		const u8 type = mmu->type[drm->ttm.type_vram].type;
 		struct ttm_resource_manager *man = kzalloc(sizeof(*man), GFP_KERNEL);
+
 		if (!man)
 			return -ENOMEM;
 
 		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
-
-		if (type & NVIF_MEM_UNCACHED)
-			man->available_caching = TTM_PL_FLAG_UNCACHED;
-
 		man->func = &nouveau_vram_manager;
 
 		ttm_resource_manager_init(man,
@@ -243,13 +237,6 @@ nouveau_ttm_init_gtt(struct nouveau_drm *drm)
 	struct ttm_resource_manager *man;
 	unsigned long size_pages = drm->gem.gart_available >> PAGE_SHIFT;
 	const struct ttm_resource_manager_func *func = NULL;
-	unsigned available_caching;
-
-	if (drm->agp.bridge)
-		available_caching = TTM_PL_FLAG_UNCACHED |
-			TTM_PL_FLAG_WC;
-	else
-		available_caching = TTM_PL_MASK_CACHING;
 
 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
 		func = &nouveau_gart_manager;
@@ -257,7 +244,7 @@ nouveau_ttm_init_gtt(struct nouveau_drm *drm)
 		func = &nv04_gart_manager;
 	else
 		return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_TT,
-					  available_caching, true,
+					  TTM_PL_MASK_CACHING, true,
 					  size_pages);
 
 	man = kzalloc(sizeof(*man), GFP_KERNEL);
@@ -265,7 +252,7 @@ nouveau_ttm_init_gtt(struct nouveau_drm *drm)
 		return -ENOMEM;
 
 	man->func = func;
-	man->available_caching = available_caching;
+	man->available_caching = TTM_PL_MASK_CACHING;
 	man->use_tt = true;
 	ttm_resource_manager_init(man, size_pages);
 	ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, man);
-- 
2.17.1


