[PATCH] drm/amdgpu: add enumeration for PDB/PTB v2

Christian König ckoenig.leichtzumerken at gmail.com
Thu Dec 14 09:07:07 UTC 2017


On 14.12.2017 at 06:32, Chunming Zhou wrote:
> v2:
>    remove SUBPTB member
>
> Change-Id: Ic1f39d3bc853e9e4259d3e03a22920eda822eec5
> Signed-off-by: Chunming Zhou <david1.zhou at amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 70 +++++++++++++++++++++++++---------
>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 12 ++++++
>   2 files changed, 63 insertions(+), 19 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> index 709587d8a77f..7e4a78179296 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> @@ -148,12 +148,23 @@ struct amdgpu_prt_cb {
>   static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
>   				      unsigned level)
>   {
> -	if (level != adev->vm_manager.num_level)
> -		return 9 * (adev->vm_manager.num_level - level - 1) +
> +	unsigned shift = 0xff;
> +
> +	switch (level) {
> +	case AMDGPU_VM_PDB2:
> +	case AMDGPU_VM_PDB1:
> +	case AMDGPU_VM_PDB0:
> +		shift = 9 * (adev->vm_manager.last_level - level - 1) +
>   			adev->vm_manager.block_size;
> -	else
> -		/* For the page tables on the leaves */
> -		return 0;
> +		break;
> +	case AMDGPU_VM_PTB:
> +		shift = 0;
> +		break;
> +	default:
> +		dev_err(adev->dev, "the level %d isn't supported.\n", level);
> +	}
> +
> +	return shift;
>   }
>   
>   /**
> @@ -166,12 +177,13 @@ static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
>   static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
>   				      unsigned level)
>   {
> -	unsigned shift = amdgpu_vm_level_shift(adev, 0);
> +	unsigned shift = amdgpu_vm_level_shift(adev,
> +					       adev->vm_manager.root_level);
>   
> -	if (level == 0)
> +	if (level == adev->vm_manager.root_level)
>   		/* For the root directory */
>   		return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
> -	else if (level != adev->vm_manager.num_level)
> +	else if (level != adev->vm_manager.last_level)
>   		/* Everything in between */
>   		return 512;
>   	else
> @@ -343,7 +355,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
>   
>   	if (vm->pte_support_ats) {
>   		init_value = AMDGPU_PTE_DEFAULT_ATC;
> -		if (level != adev->vm_manager.num_level)
> +		if (level != adev->vm_manager.last_level)
>   			init_value |= AMDGPU_PDE_PTE;
>   
>   	}
> @@ -385,7 +397,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
>   			spin_unlock(&vm->status_lock);
>   		}
>   
> -		if (level < adev->vm_manager.num_level) {
> +		if (level < adev->vm_manager.last_level) {
>   			uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
>   			uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
>   				((1 << shift) - 1);
> @@ -431,7 +443,8 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
>   	saddr /= AMDGPU_GPU_PAGE_SIZE;
>   	eaddr /= AMDGPU_GPU_PAGE_SIZE;
>   
> -	return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr, 0);
> +	return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr,
> +				      adev->vm_manager.root_level);
>   }
>   
>   /**
> @@ -1091,6 +1104,7 @@ static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
>   	for (level = 0, pbo = parent->base.bo->parent; pbo; ++level)
>   		pbo = pbo->parent;
>   
> +	level += params->adev->vm_manager.root_level;
>   	pt = amdgpu_bo_gpu_offset(bo);
>   	flags = AMDGPU_PTE_VALID;
>   	amdgpu_gart_get_vm_pde(params->adev, level, &pt, &flags);
> @@ -1247,7 +1261,8 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
>   	return 0;
>   
>   error:
> -	amdgpu_vm_invalidate_level(adev, vm, &vm->root, 0);
> +	amdgpu_vm_invalidate_level(adev, vm, &vm->root,
> +				   adev->vm_manager.root_level);
>   	amdgpu_job_free(job);
>   	return r;
>   }
> @@ -1266,7 +1281,7 @@ void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
>   			 struct amdgpu_vm_pt **entry,
>   			 struct amdgpu_vm_pt **parent)
>   {
> -	unsigned level = 0;
> +	unsigned level = p->adev->vm_manager.root_level;
>   
>   	*parent = NULL;
>   	*entry = &p->vm->root;
> @@ -1278,7 +1293,7 @@ void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
>   		addr &= (1ULL << shift) - 1;
>   	}
>   
> -	if (level != p->adev->vm_manager.num_level)
> +	if (level != p->adev->vm_manager.last_level)
>   		*entry = NULL;
>   }
>   
> @@ -1320,7 +1335,7 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
>   		return;
>   	entry->huge = !!(flags & AMDGPU_PDE_PTE);
>   
> -	amdgpu_gart_get_vm_pde(p->adev, p->adev->vm_manager.num_level - 1,
> +	amdgpu_gart_get_vm_pde(p->adev, p->adev->vm_manager.last_level - 1,
>   			       &dst, &flags);
>   
>   	if (use_cpu_update) {
> @@ -1636,7 +1651,8 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
>   
>   error_free:
>   	amdgpu_job_free(job);
> -	amdgpu_vm_invalidate_level(adev, vm, &vm->root, 0);
> +	amdgpu_vm_invalidate_level(adev, vm, &vm->root,
> +				   adev->vm_manager.root_level);
>   	return r;
>   }
>   
> @@ -2552,7 +2568,20 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
>   		tmp >>= amdgpu_vm_block_size - 9;
>   	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
>   	adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
> -
> +	switch (adev->vm_manager.num_level) {
> +	case 3:
> +		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
> +		break;
> +	case 2:
> +		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
> +		break;
> +	case 1:
> +		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
> +		break;
> +	default:
> +		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
> +	}
> +	adev->vm_manager.last_level = AMDGPU_VM_PTB;

I would drop the last_level variable and use AMDGPU_VM_PTB directly in 
the code.

That is way more descriptive and makes it easier to understand what is 
going on.
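
For example, just a sketch of what I mean (not compile-tested), every 
last_level comparison in the hunks above becomes a comparison against 
the enum:

	/* in amdgpu_vm_num_entries() */
	else if (level != AMDGPU_VM_PTB)
		/* Everything in between */
		return 512;

	/* in amdgpu_vm_alloc_levels() */
	if (level != AMDGPU_VM_PTB)
		init_value |= AMDGPU_PDE_PTE;

	/* in amdgpu_vm_handle_huge_pages(), last_level - 1 is the parent
	 * of the page tables, i.e. PDB0 */
	amdgpu_gart_get_vm_pde(p->adev, AMDGPU_VM_PDB0, &dst, &flags);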

>   	/* block size depends on vm size and hw setup*/
>   	if (amdgpu_vm_block_size != -1)
>   		adev->vm_manager.block_size =
> @@ -2646,7 +2675,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
>   		flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
>   				AMDGPU_GEM_CREATE_SHADOW);
>   
> -	r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true,
> +	r = amdgpu_bo_create(adev,
> +			     amdgpu_vm_bo_size(adev, adev->vm_manager.root_level),
> +			     align, true,
>   			     AMDGPU_GEM_DOMAIN_VRAM,
>   			     flags,
>   			     NULL, NULL, init_pde_value, &vm->root.base.bo);
> @@ -2782,7 +2813,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
>   	if (r) {
>   		dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
>   	} else {
> -		amdgpu_vm_free_levels(adev, &vm->root, 0);
> +		amdgpu_vm_free_levels(adev, &vm->root,
> +				      adev->vm_manager.root_level);
>   		amdgpu_bo_unreserve(root);
>   	}
>   	amdgpu_bo_unref(&root);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> index e52bf980669f..a06e81688fdf 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> @@ -120,6 +120,16 @@ struct amdgpu_bo_list_entry;
>   #define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0)
>   #define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1)
>   
> +/* VMPT level enumeration, and the hierarchy is:
> + * PDB2->PDB1->PDB0->PTB
> + */
> +enum amdgpu_vm_level {
> +	AMDGPU_VM_PDB2,
> +	AMDGPU_VM_PDB1,
> +	AMDGPU_VM_PDB0,
> +	AMDGPU_VM_PTB
> +};
> +
>   /* base structure for tracking BO usage in a VM */
>   struct amdgpu_vm_bo_base {
>   	/* constant after initialization */
> @@ -236,6 +246,8 @@ struct amdgpu_vm_manager {
>   	uint32_t				num_level;
>   	uint32_t				block_size;
>   	uint32_t				fragment_size;
> +	uint32_t				root_level;

I would use enum amdgpu_vm_level for root_level instead of uint32_t here.

> +	uint32_t				last_level;

And drop this. Apart from that, this change looks really good to me now.
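
I.e. roughly this in amdgpu_vm.h (sketch only, not compile-tested):

	struct amdgpu_vm_manager {
		...
		uint32_t				fragment_size;
		enum amdgpu_vm_level			root_level;
		/* no last_level member, the leaves are always AMDGPU_VM_PTB */
		...
	};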

Regards,
Christian.

>   	/* vram base address for page table entry  */
>   	u64					vram_base_offset;
>   	/* vm pte handling */


