[PATCH 2/2] drm/amdgpu: add full TMZ support into amdgpu_ttm_map_buffer

Christian König ckoenig.leichtzumerken at gmail.com
Thu Mar 19 13:41:02 UTC 2020


Map the buffer through the GART window and set AMDGPU_PTE_TMZ on the
mapping even when the memory is directly accessible, so that the copy
engine can access encrypted buffers in VRAM as well. This should allow
us to also support VRAM->GTT moves.

Signed-off-by: Christian König <christian.koenig at amd.com>
---
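Not part of the patch, just a sketch of the intended caller side: a copy
path only needs to derive the TMZ state of the BO once and pass it down,
and amdgpu_ttm_map_buffer() then maps both VRAM and GTT through the GART
window with AMDGPU_PTE_TMZ set. The helper amdgpu_bo_encrypted(), the
example function and the exact parameter order below are assumptions for
illustration, not code from this series.

/* Illustrative only: map source and destination windows for a copy,
 * honouring TMZ.  amdgpu_bo_encrypted() and the parameter order of
 * amdgpu_ttm_map_buffer() are assumed here.
 */
static int example_map_copy_windows(struct ttm_buffer_object *src_bo,
				    struct ttm_mem_reg *src_mem,
				    struct drm_mm_node *src_node,
				    struct ttm_buffer_object *dst_bo,
				    struct ttm_mem_reg *dst_mem,
				    struct drm_mm_node *dst_node,
				    unsigned num_pages,
				    struct amdgpu_ring *ring,
				    uint64_t *from, uint64_t *to)
{
	/* Encrypted BOs must always go through the GART window so the
	 * copy engine sees pages with AMDGPU_PTE_TMZ set, even if they
	 * live in directly accessible VRAM.
	 */
	bool tmz = amdgpu_bo_encrypted(ttm_to_amdgpu_bo(src_bo));
	int r;

	/* Window 0 for the source, window 1 for the destination. */
	r = amdgpu_ttm_map_buffer(src_bo, src_mem, src_node,
				  num_pages, 0, 0, ring, tmz, from);
	if (r)
		return r;

	return amdgpu_ttm_map_buffer(dst_bo, dst_mem, dst_node,
				     num_pages, 0, 1, ring, tmz, to);
}

The two returned GART addresses can then be used as the source and
destination of the SDMA copy.
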
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 36 +++++++++++++++++++------
 1 file changed, 28 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 2b5974268e63..c7ed4e2f8460 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -307,21 +307,21 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
 				 unsigned window, struct amdgpu_ring *ring,
 				 bool tmz, uint64_t *addr)
 {
-	struct ttm_dma_tt *dma = container_of(bo->ttm, struct ttm_dma_tt, ttm);
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_job *job;
 	unsigned num_dw, num_bytes;
-	dma_addr_t *dma_address;
 	struct dma_fence *fence;
 	uint64_t src_addr, dst_addr;
+	void *cpu_addr;
 	uint64_t flags;
+	unsigned int i;
 	int r;
 
 	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
 	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
 
 	/* Map only what can't be accessed directly */
-	if (mem->start != AMDGPU_BO_INVALID_OFFSET) {
+	if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
 		*addr = amdgpu_mm_node_addr(bo, mm_node, mem) + offset;
 		return 0;
 	}
@@ -349,15 +349,35 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
 	WARN_ON(job->ibs[0].length_dw > num_dw);
 
-	dma_address = &dma->dma_address[offset >> PAGE_SHIFT];
 	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
 	if (tmz)
 		flags |= AMDGPU_PTE_TMZ;
 
-	r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
-			    &job->ibs[0].ptr[num_dw]);
-	if (r)
-		goto error_free;
+	cpu_addr = &job->ibs[0].ptr[num_dw];
+
+	if (mem->mem_type == TTM_PL_TT) {
+		struct ttm_dma_tt *dma;
+		dma_addr_t *dma_address;
+
+		dma = container_of(bo->ttm, struct ttm_dma_tt, ttm);
+		dma_address = &dma->dma_address[offset >> PAGE_SHIFT];
+		r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
+				    cpu_addr);
+		if (r)
+			goto error_free;
+	} else {
+		dma_addr_t dma_address;
+
+		dma_address = (mm_node->start << PAGE_SHIFT) + offset;
+		for (i = 0; i < num_pages; ++i) {
+			r = amdgpu_gart_map(adev, i << PAGE_SHIFT, 1,
+					    &dma_address, flags, cpu_addr);
+			if (r)
+				goto error_free;
+
+			dma_address += PAGE_SIZE;
+		}
+	}
 
 	r = amdgpu_job_submit(job, &adev->mman.entity,
 			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
-- 
2.17.1


