[PATCH 3/3] drm/amdgpu: Implement clearing of sensitive VRAM

Kuehling, Felix Felix.Kuehling at amd.com
Tue Jul 9 05:32:53 UTC 2019


Clear VRAM memory containing sensitive data before freeing it. Doing
this in the VRAM manager's put_node callback covers all cases that
release memory, including freeing, moving, or evicting BOs. To
minimize the performance impact, use the mman->move fence to delay
future memory allocations rather than waiting for completion inside
the memory free code path. This is the same mechanism used to wait
for pipelined evictions from VRAM.
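
The allocation side picks that fence up again through TTM's move-fence
handling. Purely as illustration (a simplified sketch, not the exact
ttm_bo.c code; error handling and the reservation-object update are
omitted), the consumer looks roughly like this:

/* Sketch: after the VRAM manager hands out space, TTM attaches the
 * manager's move fence to the new BO, so users of the fresh
 * allocation implicitly wait for the pending clear or eviction.
 */
static int add_move_fence_sketch(struct ttm_buffer_object *bo,
				 struct ttm_mem_type_manager *man)
{
	struct dma_fence *fence;

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);	/* may be NULL */
	spin_unlock(&man->move_lock);

	if (!fence)
		return 0;

	/* New GPU work on this BO now depends on the pending clear. */
	dma_fence_put(bo->moving);
	bo->moving = fence;

	return 0;
}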

Signed-off-by: Felix Kuehling <Felix.Kuehling at amd.com>
---
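
Not part of the patch, just for context: a kernel-internal allocation
that wants this scrubbing behaviour would set the
AMDGPU_GEM_CREATE_VRAM_SENSITIVE flag introduced earlier in this series
at creation time. A minimal sketch, assuming the usual
amdgpu_bo_create() path and that adev and size are in scope; userspace
would presumably pass the same flag through the GEM create ioctl like
the other AMDGPU_GEM_CREATE_* flags:

	struct amdgpu_bo_param bp = {
		.size       = size,
		.byte_align = PAGE_SIZE,
		.domain     = AMDGPU_GEM_DOMAIN_VRAM,
		.flags      = AMDGPU_GEM_CREATE_VRAM_SENSITIVE,
		.type       = ttm_bo_type_kernel,
		.resv       = NULL,
	};
	struct amdgpu_bo *bo;
	int r;

	/* The VRAM manager records the flag in the node color on
	 * allocation and clears the memory in put_node on release.
	 */
	r = amdgpu_bo_create(adev, &bp, &bo);
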
 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 94 ++++++++++++++++++++
 1 file changed, 94 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 1150e34bc28f..02594230d29f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -359,6 +359,12 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
 	atomic64_add(vis_usage, &mgr->vis_usage);
 
 	mem->mm_node = nodes;
+	/* Remember the sensitive flag in the color of the first
+	 * node. While the node is allocated, drm_mm doesn't use
+	 * it for anything else.
+	 */
+	nodes[0].color = (ttm_to_amdgpu_bo(tbo)->flags &
+			  AMDGPU_GEM_CREATE_VRAM_SENSITIVE);
 
 	return 0;
 
@@ -372,6 +378,69 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
 	return r == -ENOSPC ? 0 : r;
 }
 
+/**
+ * clear_mem_reg - clear the VRAM described by a ttm_mem_reg
+ *
+ * @adev: amdgpu device pointer
+ * @mem: TTM memory object
+ * @fence: used to store a pointer to the fence signaling completion
+ *
+ * This is the fast version using SDMA.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
+ */
+static int clear_mem_reg(struct amdgpu_device *adev, struct ttm_mem_reg *mem,
+			 struct dma_fence **fence)
+{
+	const uint32_t max_pages =
+		adev->mman.buffer_funcs->fill_max_bytes >> PAGE_SHIFT;
+	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+	struct drm_mm_node *nodes;
+	struct amdgpu_job *job;
+	unsigned int pages, num_dw = 64; /* for IB padding */
+	int r;
+
+	if (unlikely(!adev->mman.buffer_funcs_enabled))
+		return -ENODEV;
+
+	for (pages = mem->num_pages, nodes = mem->mm_node; pages;
+	     pages -= nodes->size, ++nodes)
+		num_dw += DIV_ROUND_UP(nodes->size, max_pages) *
+			adev->mman.buffer_funcs->fill_num_dw;
+
+	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
+	if (unlikely(r))
+		return r;
+
+	for (pages = mem->num_pages, nodes = mem->mm_node; pages;
+	     pages -= nodes->size, ++nodes) {
+		uint64_t page_count = nodes->size;
+		uint64_t dst_addr = (nodes->start << PAGE_SHIFT) +
+			adev->gmc.vram_start;
+
+		while (page_count) {
+			uint32_t cur_size_in_pages =
+				min_t(uint64_t, page_count, max_pages);
+
+			amdgpu_emit_fill_buffer(adev, &job->ibs[0], 0, dst_addr,
+						cur_size_in_pages
+						<< PAGE_SHIFT);
+			dst_addr += (uint64_t)cur_size_in_pages << PAGE_SHIFT;
+			page_count -= cur_size_in_pages;
+		}
+	}
+
+	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
+	WARN_ON(job->ibs[0].length_dw > num_dw);
+	r = amdgpu_job_submit(job, &adev->mman.entity,
+			      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+	if (unlikely(r))
+		amdgpu_job_free(job);
+
+	return r;
+}
+
 /**
  * amdgpu_vram_mgr_del - free ranges
  *
@@ -390,10 +459,35 @@ static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
 	struct drm_mm_node *nodes = mem->mm_node;
 	uint64_t usage = 0, vis_usage = 0;
 	unsigned pages = mem->num_pages;
+	struct dma_fence *fence;
 
 	if (!mem->mm_node)
 		return;
 
+	if (nodes[0].color & AMDGPU_GEM_CREATE_VRAM_SENSITIVE) {
+		int r = clear_mem_reg(adev, mem, &fence);
+
+		/* FIXME: It may be better to leak memory than to
+		 * allow reusing it without clearing.
+		 */
+		WARN_ON(r);
+
+		/* Clearing memory happens in the background. But new
+		 * memory cannot be allocated until clearing is
+		 * completed. Update the move fence to ensure that.
+		 */
+		if (!r && fence) {
+			spin_lock(&man->move_lock);
+			if (!man->move ||
+			    dma_fence_is_later(fence, man->move)) {
+				dma_fence_put(man->move);
+				man->move = dma_fence_get(fence);
+			}
+			spin_unlock(&man->move_lock);
+			dma_fence_put(fence);
+		}
+	}
+
 	spin_lock(&mgr->lock);
 	while (pages) {
 		pages -= nodes->size;
-- 
2.17.1


