[PATCH 1/4] drm/amdgpu: fix amdgpu_fill_buffer

Christian König deathsimple at vodafone.de
Thu Nov 17 15:00:07 UTC 2016


From: Christian König <christian.koenig at amd.com>

The mem start is an offset relative to the domain, not an absolute address
in the GPU address space, so this only worked while VRAM was mapped at
offset 0.

It also didn't handle the multiple drm_mm_nodes of a split VRAM allocation.

Signed-off-by: Christian König <christian.koenig at amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 70 ++++++++++++++++++++++-----------
 1 file changed, 48 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 5951666..c4dccb3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1378,29 +1378,40 @@ error_free:
 	return r;
 }
 
-int amdgpu_fill_buffer(struct amdgpu_bo *bo,
-		uint32_t src_data,
-		struct reservation_object *resv,
-		struct fence **fence)
+int amdgpu_fill_buffer(struct amdgpu_bo *bo, uint32_t src_data,
+		       struct reservation_object *resv,
+		       struct fence **fence)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	struct amdgpu_job *job;
+	uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 
-	uint32_t max_bytes, byte_count;
-	uint64_t dst_offset;
+	struct drm_mm_node *mm_node;
+	unsigned long num_pages;
 	unsigned int num_loops, num_dw;
-	unsigned int i;
+
+	struct amdgpu_job *job;
 	int r;
 
-	byte_count = bo->tbo.num_pages << PAGE_SHIFT;
-	max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
-	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
+	if (!ring->ready) {
+		DRM_ERROR("Trying to clear memory with ring turned off.\n");
+		return -EINVAL;
+	}
+
+	num_pages = bo->tbo.num_pages;
+	mm_node = bo->tbo.mem.mm_node;
+	num_loops = 0;
+	while (num_pages) {
+		uint32_t byte_count = mm_node->size << PAGE_SHIFT;
+
+		num_loops += DIV_ROUND_UP(byte_count, max_bytes);
+		num_pages -= mm_node->size;
+		++mm_node;
+	}
 	num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
 
 	/* for IB padding */
-	while (num_dw & 0x7)
-		num_dw++;
+	num_dw += 64;
 
 	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
 	if (r)
@@ -1408,28 +1419,43 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 
 	if (resv) {
 		r = amdgpu_sync_resv(adev, &job->sync, resv,
-				AMDGPU_FENCE_OWNER_UNDEFINED);
+				     AMDGPU_FENCE_OWNER_UNDEFINED);
 		if (r) {
 			DRM_ERROR("sync failed (%d).\n", r);
 			goto error_free;
 		}
 	}
 
-	dst_offset = bo->tbo.mem.start << PAGE_SHIFT;
-	for (i = 0; i < num_loops; i++) {
-		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
+	num_pages = bo->tbo.num_pages;
+	mm_node = bo->tbo.mem.mm_node;
 
-		amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
-				dst_offset, cur_size_in_bytes);
+	while (num_pages) {
+		uint32_t byte_count = mm_node->size << PAGE_SHIFT;
+		uint64_t dst_addr;
 
-		dst_offset += cur_size_in_bytes;
-		byte_count -= cur_size_in_bytes;
+		r = amdgpu_mm_node_addr(&bo->tbo, mm_node,
+					&bo->tbo.mem, &dst_addr);
+		if (r)
+			goto error_free;
+
+		while (byte_count) {
+			uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
+
+			amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
+						dst_addr, cur_size_in_bytes);
+
+			dst_addr += cur_size_in_bytes;
+			byte_count -= cur_size_in_bytes;
+		}
+
+		num_pages -= mm_node->size;
+		++mm_node;
 	}
 
 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
 	WARN_ON(job->ibs[0].length_dw > num_dw);
 	r = amdgpu_job_submit(job, ring, &adev->mman.entity,
-			AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+			      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
 	if (r)
 		goto error_free;
 
-- 
2.5.0


