[PATCH] drm/ttm: set TTM allocated pages as reserved

Christian König <ckoenig.leichtzumerken@gmail.com>
Wed Mar 29 13:54:01 UTC 2023


KVM tries to grab references to pages in VMAs marked with VM_PFNMAP.

This is illegal and can cause data corruption with TTM pages because
only some of them are actually reference counted.
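
A rough sketch of the failure mode (illustrative only, not code taken
from TTM or KVM): TTM's higher order allocations are made without
__GFP_COMP, so only the first struct page of such an allocation is
reference counted; grabbing and dropping a reference on one of the
following pages can free that single page while the block as a whole
is still in use.

	/* Illustrative sketch, not actual driver or KVM code. */
	struct page *p = alloc_pages(GFP_KERNEL, 4); /* 16 pages, not compound */

	get_page(&p[1]);	/* 0 -> 1 on a non-head page */
	put_page(&p[1]);	/* 1 -> 0: frees this one page behind TTM's back */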

Mark all pages allocated by TTM as reserved, so that KVM handles the
PFNs as if they pointed to MMIO space.

This still results in a warning, but at least causes no other problems.
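
For illustration, the effect on the KVM side can be sketched roughly
like this (a hypothetical helper showing the idea, not the actual KVM
code): a PFN whose struct page is marked reserved is not treated as an
ordinary refcounted page and therefore gets the same hands-off handling
as an MMIO PFN.

	/* Hypothetical sketch of the check KVM effectively applies. */
	static bool pfn_is_refcounted_page(kvm_pfn_t pfn)
	{
		if (!pfn_valid(pfn))
			return false;	/* no struct page at all, MMIO */

		return !PageReserved(pfn_to_page(pfn));
	}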

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/ttm/ttm_pool.c | 62 ++++++++++++++++++++--------------
 1 file changed, 36 insertions(+), 26 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index aa116a7bbae3..c665a8bf366a 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -82,6 +82,7 @@ static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
 	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
 	struct ttm_pool_dma *dma;
 	struct page *p;
+	unsigned int i;
 	void *vaddr;
 
 	/* Don't set the __GFP_COMP flag for higher order allocations.
@@ -94,38 +95,43 @@ static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
 
 	if (!pool->use_dma_alloc) {
 		p = alloc_pages(gfp_flags, order);
-		if (p)
-			p->private = order;
-		return p;
-	}
+		if (!p)
+			return NULL;
 
-	dma = kmalloc(sizeof(*dma), GFP_KERNEL);
-	if (!dma)
-		return NULL;
+		p->private = order;
+	} else {
 
-	if (order)
-		attr |= DMA_ATTR_NO_WARN;
+		dma = kmalloc(sizeof(*dma), GFP_KERNEL);
+		if (!dma)
+			return NULL;
 
-	vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
-				&dma->addr, gfp_flags, attr);
-	if (!vaddr)
-		goto error_free;
+		if (order)
+			attr |= DMA_ATTR_NO_WARN;
 
-	/* TODO: This is an illegal abuse of the DMA API, but we need to rework
-	 * TTM page fault handling and extend the DMA API to clean this up.
-	 */
-	if (is_vmalloc_addr(vaddr))
-		p = vmalloc_to_page(vaddr);
-	else
-		p = virt_to_page(vaddr);
+		vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
+					&dma->addr, gfp_flags, attr);
+		if (!vaddr) {
+			kfree(dma);
+			return NULL;
+		}
 
-	dma->vaddr = (unsigned long)vaddr | order;
-	p->private = (unsigned long)dma;
-	return p;
+		/* TODO: This is an illegal abuse of the DMA API, but we need
+		 * to rework TTM page fault handling and extend the DMA API to
+		 * clean this up.
+		 */
+		if (is_vmalloc_addr(vaddr))
+			p = vmalloc_to_page(vaddr);
+		else
+			p = virt_to_page(vaddr);
 
-error_free:
-	kfree(dma);
-	return NULL;
+		dma->vaddr = (unsigned long)vaddr | order;
+		p->private = (unsigned long)dma;
+	}
+
+	for (i = 0; i < (1 << order); ++i)
+		SetPageReserved(&p[i]);
+
+	return p;
 }
 
 /* Reset the caching and pages of size 1 << order */
@@ -134,6 +140,7 @@ static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
 {
 	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
 	struct ttm_pool_dma *dma;
+	unsigned int i;
 	void *vaddr;
 
 #ifdef CONFIG_X86
@@ -144,6 +151,9 @@ static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
 		set_pages_wb(p, 1 << order);
 #endif
 
+	for (i = 0; i < (1 << order); ++i)
+		ClearPageReserved(&p[i]);
+
 	if (!pool || !pool->use_dma_alloc) {
 		__free_pages(p, order);
 		return;
-- 
2.34.1


