[PATCH 2/3] drm/amdgpu: add TMZ handling to amdgpu_move_blit

Christian König <ckoenig.leichtzumerken at gmail.com>
Mon Mar 2 12:17:58 UTC 2020


With this we should at least be able to move buffers from VRAM to GTT: when the BO
is encrypted, the GART window mappings used for the copy are marked with
AMDGPU_PTE_TMZ and the copy itself is submitted to SDMA as a secure copy.
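
The tmz flag is derived once in amdgpu_move_blit() and just threaded
through, so callers of amdgpu_ttm_copy_mem_to_mem() don't need to know
about TMZ themselves. As a minimal sketch, the helper this relies on
(introduced earlier in this series; the body shown here is assumed from
the flag name, not taken from this patch) would look roughly like:

    /* Sketch: true if the BO was created with the encrypted/TMZ flag.
     * AMDGPU_GEM_CREATE_ENCRYPTED is assumed to be the creation flag
     * added by the earlier patch in the series.
     */
    static inline bool amdgpu_bo_encrypted(struct amdgpu_bo *bo)
    {
            return bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED;
    }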

Signed-off-by: Christian König <christian.koenig at amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 28 +++++++++++++++++--------
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h |  2 +-
 2 files changed, 20 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 18f6caa7b6a2..25d3b93f446e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -65,7 +65,7 @@
 static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
 			     struct ttm_mem_reg *mem, unsigned num_pages,
 			     uint64_t offset, unsigned window,
-			     struct amdgpu_ring *ring,
+			     struct amdgpu_ring *ring, bool tmz,
 			     uint64_t *addr);
 
 static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
@@ -295,17 +295,23 @@ static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
 
 /**
  * amdgpu_copy_ttm_mem_to_mem - Helper function for copy
+ * @adev: amdgpu device
+ * @src: buffer/address where to read from
+ * @dst: buffer/address where to write to
+ * @size: number of bytes to copy
+ * @tmz: if a secure copy should be used
+ * @resv: resv object to sync to
+ * @f: Returns the last fence if multiple jobs are submitted.
  *
  * The function copies @size bytes from {src->mem + src->offset} to
  * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
  * move and different for a BO to BO copy.
  *
- * @f: Returns the last fence if multiple jobs are submitted.
  */
 int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 			       struct amdgpu_copy_mem *src,
 			       struct amdgpu_copy_mem *dst,
-			       uint64_t size,
+			       uint64_t size, bool tmz,
 			       struct dma_resv *resv,
 			       struct dma_fence **f)
 {
@@ -357,7 +363,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 		if (src->mem->start == AMDGPU_BO_INVALID_OFFSET) {
 			r = amdgpu_map_buffer(src->bo, src->mem,
 					PFN_UP(cur_size + src_page_offset),
-					src_node_start, 0, ring,
+					src_node_start, 0, ring, tmz,
 					&from);
 			if (r)
 				goto error;
@@ -370,7 +376,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 		if (dst->mem->start == AMDGPU_BO_INVALID_OFFSET) {
 			r = amdgpu_map_buffer(dst->bo, dst->mem,
 					PFN_UP(cur_size + dst_page_offset),
-					dst_node_start, 1, ring,
+					dst_node_start, 1, ring, tmz,
 					&to);
 			if (r)
 				goto error;
@@ -378,7 +384,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 		}
 
 		r = amdgpu_copy_buffer(ring, from, to, cur_size,
-				       resv, &next, false, true, false);
+				       resv, &next, false, true, tmz);
 		if (r)
 			goto error;
 
@@ -430,6 +436,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 			    struct ttm_mem_reg *old_mem)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
 	struct amdgpu_copy_mem src, dst;
 	struct dma_fence *fence = NULL;
 	int r;
@@ -443,14 +450,14 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 
 	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
 				       new_mem->num_pages << PAGE_SHIFT,
+				       amdgpu_bo_encrypted(abo),
 				       bo->base.resv, &fence);
 	if (r)
 		goto error;
 
 	/* clear the space being freed */
 	if (old_mem->mem_type == TTM_PL_VRAM &&
-	    (ttm_to_amdgpu_bo(bo)->flags &
-	     AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
+	    (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
 		struct dma_fence *wipe_fence = NULL;
 
 		r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON,
@@ -2023,7 +2030,7 @@ int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
 static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
 			     struct ttm_mem_reg *mem, unsigned num_pages,
 			     uint64_t offset, unsigned window,
-			     struct amdgpu_ring *ring,
+			     struct amdgpu_ring *ring, bool tmz,
 			     uint64_t *addr)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
@@ -2064,6 +2071,9 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
 
 	dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
 	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
+	if (tmz)
+		flags |= AMDGPU_PTE_TMZ;
+
 	r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
 			    &job->ibs[0].ptr[num_dw]);
 	if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 2c4ad5b589d0..6001ab9bd932 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -91,7 +91,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
 int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 			       struct amdgpu_copy_mem *src,
 			       struct amdgpu_copy_mem *dst,
-			       uint64_t size,
+			       uint64_t size, bool tmz,
 			       struct dma_resv *resv,
 			       struct dma_fence **f);
 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
-- 
2.17.1


