[Intel-gfx] [PATCH 7/7] drm/i915: Avoid dereferencing io mapped memory

Balasubramani Vivekanandan balasubramani.vivekanandan at intel.com
Tue Feb 22 14:52:06 UTC 2022


The pointer passed to zlib_deflate() for compression can point to io
mapped memory, which would then be dereferenced directly. Today the io
mapped memory is copied into a temporary buffer, which is then handed
to zlib_deflate(), only when the platform supports fast copy using
non-temporal instructions. If the platform lacks that support, the io
mapped memory is used directly.

Directly dereferencing io memory makes the driver non-portable outside
x86 and should be avoided.

With this patch, io memory is always copied to a temporary buffer,
irrespective of platform support for fast copy. The
i915_has_memcpy_from_wc() check is removed, and drm_memcpy_from_wc_vaddr()
is now used for copying instead of i915_memcpy_from_wc(), for two reasons:
- i915_memcpy_from_wc() will be deprecated.
- drm_memcpy_from_wc_vaddr() does not fail when fast copy is
  unsupported; it falls back to memcpy_fromio() instead.
The resulting caller pattern is sketched below.
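
A minimal, illustrative sketch (not part of this patch; capture_one_page()
is a hypothetical helper) of how a caller wraps an __iomem pointer in a
struct iosys_map before handing it to the reworked compress_page():

	#include <linux/iosys-map.h>

	static int capture_one_page(struct i915_vma_compress *compress,
				    struct i915_vma_coredump *dst,
				    void __iomem *s)
	{
		struct iosys_map src;

		/* Record that the source lives in io memory. */
		iosys_map_set_vaddr_iomem(&src, s);

		/*
		 * compress_page() now checks src->is_iomem and copies the
		 * page through drm_memcpy_from_wc_vaddr() instead of
		 * letting zlib dereference io memory directly.
		 */
		return compress_page(compress, &src, dst);
	}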

Signed-off-by: Balasubramani Vivekanandan <balasubramani.vivekanandan at intel.com>
---
 drivers/gpu/drm/i915/i915_gpu_error.c | 45 +++++++++++++++------------
 1 file changed, 25 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 1d042551619e..0c5917a7a545 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -258,9 +258,12 @@ static bool compress_init(struct i915_vma_compress *c)
 		return false;
 	}
 
-	c->tmp = NULL;
-	if (i915_has_memcpy_from_wc())
-		c->tmp = pool_alloc(&c->pool, ALLOW_FAIL);
+	c->tmp = pool_alloc(&c->pool, ALLOW_FAIL);
+	if (!c->tmp) {
+		kfree(zstream->workspace);
+		pool_fini(&c->pool);
+		return false;
+	}
 
 	return true;
 }
@@ -292,15 +295,17 @@ static void *compress_next_page(struct i915_vma_compress *c,
 }
 
 static int compress_page(struct i915_vma_compress *c,
-			 void *src,
-			 struct i915_vma_coredump *dst,
-			 bool wc)
+			 struct iosys_map *src,
+			 struct i915_vma_coredump *dst)
 {
 	struct z_stream_s *zstream = &c->zstream;
 
-	zstream->next_in = src;
-	if (wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
+	if (src->is_iomem) {
+		drm_memcpy_from_wc_vaddr(c->tmp, src, PAGE_SIZE);
 		zstream->next_in = c->tmp;
+	} else {
+		zstream->next_in = src->vaddr;
+	}
 	zstream->avail_in = PAGE_SIZE;
 
 	do {
@@ -389,9 +394,8 @@ static bool compress_start(struct i915_vma_compress *c)
 }
 
 static int compress_page(struct i915_vma_compress *c,
-			 void *src,
-			 struct i915_vma_coredump *dst,
-			 bool wc)
+			 struct iosys_map *src,
+			 struct i915_vma_coredump *dst)
 {
 	void *ptr;
 
@@ -399,8 +403,7 @@ static int compress_page(struct i915_vma_compress *c,
 	if (!ptr)
 		return -ENOMEM;
 
-	if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
-		memcpy(ptr, src, PAGE_SIZE);
+	drm_memcpy_from_wc_vaddr(ptr, src, PAGE_SIZE);
 	list_add_tail(&virt_to_page(ptr)->lru, &dst->page_list);
 	cond_resched();
 
@@ -1054,6 +1057,7 @@ i915_vma_coredump_create(const struct intel_gt *gt,
 	if (drm_mm_node_allocated(&ggtt->error_capture)) {
 		void __iomem *s;
 		dma_addr_t dma;
+		struct iosys_map src;
 
 		for_each_sgt_daddr(dma, iter, vma_res->bi.pages) {
 			mutex_lock(&ggtt->error_mutex);
@@ -1062,9 +1066,8 @@ i915_vma_coredump_create(const struct intel_gt *gt,
 			mb();
 
 			s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
-			ret = compress_page(compress,
-					    (void  __force *)s, dst,
-					    true);
+			iosys_map_set_vaddr_iomem(&src, s);
+			ret = compress_page(compress, &src, dst);
 			io_mapping_unmap(s);
 
 			mb();
@@ -1076,6 +1079,7 @@ i915_vma_coredump_create(const struct intel_gt *gt,
 	} else if (vma_res->bi.lmem) {
 		struct intel_memory_region *mem = vma_res->mr;
 		dma_addr_t dma;
+		struct iosys_map src;
 
 		for_each_sgt_daddr(dma, iter, vma_res->bi.pages) {
 			void __iomem *s;
@@ -1083,15 +1087,15 @@ i915_vma_coredump_create(const struct intel_gt *gt,
 			s = io_mapping_map_wc(&mem->iomap,
 					      dma - mem->region.start,
 					      PAGE_SIZE);
-			ret = compress_page(compress,
-					    (void __force *)s, dst,
-					    true);
+			iosys_map_set_vaddr_iomem(&src, s);
+			ret = compress_page(compress, &src, dst);
 			io_mapping_unmap(s);
 			if (ret)
 				break;
 		}
 	} else {
 		struct page *page;
+		struct iosys_map src;
 
 		for_each_sgt_page(page, iter, vma_res->bi.pages) {
 			void *s;
@@ -1099,7 +1103,8 @@ i915_vma_coredump_create(const struct intel_gt *gt,
 			drm_clflush_pages(&page, 1);
 
 			s = kmap(page);
-			ret = compress_page(compress, s, dst, false);
+			iosys_map_set_vaddr(&src, s);
+			ret = compress_page(compress, &src, dst);
 			kunmap(page);
 
 			drm_clflush_pages(&page, 1);
-- 
2.25.1
