[PATCH 4/5] drm/amdgpu: enable amdgpu_move_blit to handle multiple MM nodes
Felix Kuehling
felix.kuehling at amd.com
Mon Aug 29 22:09:12 UTC 2016
This requires the assumption that there is no partial overlap between the
mm_nodes in the old and new memory. As long as BOs are always split into
fixed-size portions, that should work OK for copying between linear and
split BOs. But it can fail when copying between split BOs whose portions
have different sizes.
For example, this won't work:
+-------+-------+-------+
Old: | o0 | o1 | o2 |
+-------+-------+-------+
+-----------+-----------+
New: | n0 | n1 |
+-----------+-----------+
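
To make this concrete, here is a quick userspace simulation of the loop's
bookkeeping (not kernel code; the node layouts are made up to match the
diagram: three 2-page old nodes, two 3-page new nodes, counted in pages
rather than bytes). Because cur_pages is taken as min() of the full node
sizes even when the cursor is already partway into a node, the second and
third copies spill past the end of n0:

/* Simulates the patch's copy-loop bookkeeping in user space.
 * Node layouts are hypothetical, chosen to match the diagram above. */
#include <stdio.h>

struct node { unsigned long start, size; };     /* units: pages */

int main(void)
{
        struct node old_nodes[] = { { 0, 2 }, { 10, 2 }, { 20, 2 } };
        struct node new_nodes[] = { { 40, 3 }, { 50, 3 } };
        struct node *o = old_nodes, *n = new_nodes;
        unsigned long old_pos = o->start, new_pos = n->start;
        unsigned long num_pages = 6;

        while (num_pages) {
                /* min() of the full node sizes, as in the patch */
                unsigned long cur = o->size < n->size ? o->size : n->size;

                if (new_pos + cur > n->start + n->size)
                        printf("overrun: %lu pages at page %lu spill past node end %lu\n",
                               cur, new_pos, n->start + n->size);

                num_pages -= cur;
                if (num_pages) {
                        old_pos += cur;
                        if (old_pos == o->start + o->size)
                                old_pos = (++o)->start;
                        new_pos += cur;
                        if (new_pos == n->start + n->size)
                                new_pos = (++n)->start;
                }
        }
        return 0;
}

For the pictured layout this prints two overrun lines: once the destination
cursor has jumped past the end of n0 without ever landing on it exactly,
the equality check that switches to n1 never fires, and the copy keeps
writing outside the node.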
Regards,
Felix
On 16-08-29 05:20 AM, Christian König wrote:
> From: Christian König <christian.koenig at amd.com>
>
> This allows us to move scattered buffers around.
>
> Signed-off-by: Christian König <christian.koenig at amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 96 ++++++++++++++++++++++-----------
> 1 file changed, 64 insertions(+), 32 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> index ea480bb..db8638b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> @@ -239,52 +239,84 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo,
> new_mem->mm_node = NULL;
> }
>
> -static int amdgpu_move_blit(struct ttm_buffer_object *bo,
> - bool evict, bool no_wait_gpu,
> - struct ttm_mem_reg *new_mem,
> - struct ttm_mem_reg *old_mem)
> +static uint64_t amdgpu_mm_node_addr(struct amdgpu_device *adev,
> + struct drm_mm_node *mm_node,
> + struct ttm_mem_reg *mem)
> {
> - struct amdgpu_device *adev;
> - struct amdgpu_ring *ring;
> - uint64_t old_start, new_start;
> - struct fence *fence;
> - int r;
> + uint64_t addr = mm_node->start << PAGE_SHIFT;
>
> - adev = amdgpu_get_adev(bo->bdev);
> - ring = adev->mman.buffer_funcs_ring;
> - old_start = (u64)old_mem->start << PAGE_SHIFT;
> - new_start = (u64)new_mem->start << PAGE_SHIFT;
> -
> - switch (old_mem->mem_type) {
> + switch (mem->mem_type) {
> case TTM_PL_VRAM:
> case TTM_PL_TT:
> - old_start += bo->bdev->man[old_mem->mem_type].gpu_offset;
> + addr += adev->mman.bdev.man[mem->mem_type].gpu_offset;
> break;
> default:
> - DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
> - return -EINVAL;
> - }
> - switch (new_mem->mem_type) {
> - case TTM_PL_VRAM:
> - case TTM_PL_TT:
> - new_start += bo->bdev->man[new_mem->mem_type].gpu_offset;
> + DRM_ERROR("Unknown placement %d\n", mem->mem_type);
> break;
> - default:
> - DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
> - return -EINVAL;
> }
> +
> + return addr;
> +}
> +
> +static int amdgpu_move_blit(struct ttm_buffer_object *bo,
> + bool evict, bool no_wait_gpu,
> + struct ttm_mem_reg *new_mem,
> + struct ttm_mem_reg *old_mem)
> +{
> + struct amdgpu_device *adev = amdgpu_get_adev(bo->bdev);
> + struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
> +
> + struct drm_mm_node *old_mm, *new_mm;
> + uint64_t old_start, new_start;
> + unsigned long num_pages;
> + struct fence *fence = NULL;
> + int r;
> +
> + BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
> +
> if (!ring->ready) {
> DRM_ERROR("Trying to move memory with ring turned off.\n");
> return -EINVAL;
> }
>
> - BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
> + old_mm = old_mem->mm_node;
> + new_mm = new_mem->mm_node;
> + num_pages = new_mem->num_pages;
>
> - r = amdgpu_copy_buffer(ring, old_start, new_start,
> - new_mem->num_pages * PAGE_SIZE, /* bytes */
> - bo->resv, &fence, false);
> - if (r)
> - return r;
> + old_start = amdgpu_mm_node_addr(adev, old_mm, old_mem);
> + new_start = amdgpu_mm_node_addr(adev, new_mm, new_mem);
> +
> + while (num_pages) {
> + unsigned long cur_pages = min(old_mm->size, new_mm->size);
> + struct fence *next;
> +
> + r = amdgpu_copy_buffer(ring, old_start, new_start,
> + cur_pages * PAGE_SIZE,
> + bo->resv, &next, false);
> + if (r) {
> + if (fence)
> + fence_wait(fence, false);
> + fence_put(fence);
> + return r;
> + }
> + fence_put(fence);
> + fence = next;
> +
> + num_pages -= cur_pages;
> + if (num_pages) {
> + old_start += cur_pages * PAGE_SIZE;
> + if (old_start == ((old_mm->start + old_mm->size) *
> + PAGE_SIZE))
> + old_start = amdgpu_mm_node_addr(adev, ++old_mm,
> + old_mem);
> +
> + new_start += cur_pages * PAGE_SIZE;
> + if (new_start == ((new_mm->start + new_mm->size) *
> + PAGE_SIZE))
> + new_start = amdgpu_mm_node_addr(adev, ++new_mm,
> + new_mem);
> + }
> + }
>
> r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
> fence_put(fence);
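
If copying between BOs that are split differently should work too, one
option might be to track how many pages remain in the current source and
destination nodes and clamp each copy to the smaller remainder. An
untested sketch, reusing the patch's variables and helpers (min3() is
from linux/kernel.h):

        unsigned long old_left = old_mm->size;
        unsigned long new_left = new_mm->size;

        while (num_pages) {
                unsigned long cur_pages = min3(num_pages, old_left, new_left);
                struct fence *next;

                r = amdgpu_copy_buffer(ring, old_start, new_start,
                                       cur_pages * PAGE_SIZE,
                                       bo->resv, &next, false);
                if (r) {
                        if (fence)
                                fence_wait(fence, false);
                        fence_put(fence);
                        return r;
                }
                fence_put(fence);
                fence = next;

                num_pages -= cur_pages;
                if (!num_pages)
                        break;

                old_left -= cur_pages;
                if (old_left) {
                        old_start += cur_pages * PAGE_SIZE;
                } else {
                        /* source node exhausted, step to the next one */
                        old_start = amdgpu_mm_node_addr(adev, ++old_mm, old_mem);
                        old_left = old_mm->size;
                }

                new_left -= cur_pages;
                if (new_left) {
                        new_start += cur_pages * PAGE_SIZE;
                } else {
                        /* destination node exhausted, step to the next one */
                        new_start = amdgpu_mm_node_addr(adev, ++new_mm, new_mem);
                        new_left = new_mm->size;
                }
        }

That would drop the no-partial-overlap assumption, at the cost of issuing
more, smaller copies when the node boundaries don't line up.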