[PATCH] drm/ttm: ioremap buffer according to TTM mem caching setting
Oak Zeng
Oak.Zeng at amd.com
Wed Mar 3 21:12:05 UTC 2021
If tbo.mem.bus.caching is ttm_cached, the buffer is intended to be
mapped as cached from the CPU side, so map it with ioremap_cache.

This wasn't necessary before, as device memory was never mapped as
cached from the CPU side. It becomes necessary for aldebaran because
its device memory is mapped cached from the CPU.
Signed-off-by: Oak Zeng <Oak.Zeng at amd.com>
Reviewed-by: Christian König <Christian.Koenig at amd.com>
---
drivers/gpu/drm/ttm/ttm_bo_util.c | 14 ++++++++++++++
1 file changed, 14 insertions(+)
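
Note (not part of this patch): the hunks below only take effect when the
driver marks the resource as CPU-cacheable. A minimal sketch of a
driver-side io_mem_reserve() callback doing that is shown here for
illustration; struct my_device and its aper_base field are hypothetical,
and only the bus.caching = ttm_cached assignment is what TTM keys off.

#include <linux/mm.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_caching.h>

struct my_device {			/* hypothetical driver device */
	struct ttm_device bdev;
	resource_size_t aper_base;	/* CPU-visible VRAM aperture base */
};

static int my_io_mem_reserve(struct ttm_device *bdev,
			     struct ttm_resource *mem)
{
	struct my_device *mdev = container_of(bdev, struct my_device, bdev);

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = (mem->start << PAGE_SHIFT) + mdev->aper_base;
		mem->bus.is_iomem = true;
		/* Device memory is coherent with the CPU caches on this
		 * part, so ask TTM for a cached CPU mapping; TTM will
		 * then use ioremap_cache() in the paths patched below. */
		mem->bus.caching = ttm_cached;
		return 0;
	default:
		return -EINVAL;
	}
}
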
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 031e581..7c848e2 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -91,6 +91,10 @@ static int ttm_resource_ioremap(struct ttm_device *bdev,
 		if (mem->bus.caching == ttm_write_combined)
 			addr = ioremap_wc(mem->bus.offset, bus_size);
+#ifdef __x86_64__
+		else if (mem->bus.caching == ttm_cached)
+			addr = ioremap_cache(mem->bus.offset, bus_size);
+#endif
 		else
 			addr = ioremap(mem->bus.offset, bus_size);
 		if (!addr) {
@@ -372,6 +376,11 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
 		if (mem->bus.caching == ttm_write_combined)
 			map->virtual = ioremap_wc(bo->mem.bus.offset + offset,
 						  size);
+#ifdef __x86_64__
+		else if (mem->bus.caching == ttm_cached)
+			map->virtual = ioremap_cache(bo->mem.bus.offset + offset,
+						     size);
+#endif
 		else
 			map->virtual = ioremap(bo->mem.bus.offset + offset,
 					       size);
@@ -490,6 +499,11 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
 		else if (mem->bus.caching == ttm_write_combined)
 			vaddr_iomem = ioremap_wc(mem->bus.offset,
 						 bo->base.size);
+#ifdef __x86_64__
+		else if (mem->bus.caching == ttm_cached)
+			vaddr_iomem = ioremap_cache(mem->bus.offset,
+						    bo->base.size);
+#endif
 		else
 			vaddr_iomem = ioremap(mem->bus.offset, bo->base.size);
--
2.7.4
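
Not part of the patch: for clarity, here is the mapping choice that all
three hunks implement, collapsed into one hypothetical helper (the name
ttm_io_map is made up; TTM open-codes this logic in each caller):

#include <linux/io.h>
#include <drm/ttm/ttm_caching.h>

static void __iomem *ttm_io_map(phys_addr_t offset, size_t size,
				enum ttm_caching caching)
{
	/* Write-combined is the common case for VRAM. */
	if (caching == ttm_write_combined)
		return ioremap_wc(offset, size);
#ifdef __x86_64__
	/* Cached device memory is only expected on x86-64 here
	 * (e.g. aldebaran); other architectures fall through to
	 * the uncached default below. */
	if (caching == ttm_cached)
		return ioremap_cache(offset, size);
#endif
	/* Default: uncached MMIO mapping. */
	return ioremap(offset, size);
}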