[PATCH 5/5] drm/amdgpu: Support page table update via CPU

Christian König <deathsimple at vodafone.de>
Tue May 16 12:55:39 UTC 2017


Am 15.05.2017 um 23:32 schrieb Harish Kasiviswanathan:
> Signed-off-by: Harish Kasiviswanathan <Harish.Kasiviswanathan at amd.com>

Reviewed-by: Christian König <christian.koenig at amd.com>

> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 86 +++++++++++++++++++++++++++++++++-
>   1 file changed, 85 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> index d72a624..e98d558 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> @@ -79,6 +79,12 @@ struct amdgpu_pte_update_params {
>   		     uint64_t flags);
>   	/* indicate update pt or its shadow */
>   	bool shadow;
> +	/* The next two are used during VM update by CPU:
> +	 *  pages_addr - DMA addresses to use for mapping
> +	 *  kptr - kernel pointer of the PD/PT BO that needs to be updated
> +	 */
> +	dma_addr_t *pages_addr;
> +	void *kptr;
>   };
>   
>   /* Helper to disable partial resident texture feature from a fence callback */
> @@ -974,10 +980,14 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
>   				   uint64_t flags)
>   {
>   	unsigned int i;
> +	uint64_t value;
>   
>   	for (i = 0; i < count; i++) {
> +		value = params->pages_addr ?
> +			amdgpu_vm_map_gart(params->pages_addr, addr) :
> +			addr;
>   		amdgpu_gart_set_pte_pde(params->adev, (void *)pe,
> -					i, addr, flags);
> +					i, value, flags);
>   		addr += incr;
>   	}
>   
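
For readers following along: with this hunk the CPU writer picks, per entry, between a GART-translated system-memory address (when params->pages_addr is set) and the address as given (e.g. VRAM). A minimal, self-contained sketch of that selection, with a hypothetical map_gart() lookup standing in for amdgpu_vm_map_gart() and a plain OR standing in for amdgpu_gart_set_pte_pde()'s per-ASIC encoding (4K pages assumed):

#include <stdint.h>
#include <stdio.h>

#define GPU_PAGE_SIZE 4096ULL

/* Hypothetical stand-in for amdgpu_vm_map_gart(): translate an address
 * into the DMA address of the backing system page, keeping the offset. */
static uint64_t map_gart(const uint64_t *pages_addr, uint64_t addr)
{
	uint64_t dma = pages_addr[addr >> 12];	/* page-granular lookup */
	return dma | (addr & (GPU_PAGE_SIZE - 1));
}

/* Simplified CPU PTE writer: pages_addr == NULL means "addr" is already
 * a valid (e.g. VRAM) address and is written as-is. */
static void cpu_set_ptes(uint64_t *pe, const uint64_t *pages_addr,
			 uint64_t addr, unsigned count, uint64_t incr,
			 uint64_t flags)
{
	for (unsigned i = 0; i < count; i++) {
		uint64_t value = pages_addr ?
			map_gart(pages_addr, addr) : addr;
		pe[i] = value | flags;	/* real code encodes per ASIC */
		addr += incr;
	}
}

int main(void)
{
	uint64_t pages_addr[4] = { 0x10000, 0x20000, 0x30000, 0x40000 };
	uint64_t pte[4] = { 0 };

	cpu_set_ptes(pte, pages_addr, 0, 4, GPU_PAGE_SIZE, 0x1 /* valid */);
	for (int i = 0; i < 4; i++)
		printf("pte[%d] = 0x%llx\n", i, (unsigned long long)pte[i]);
	return 0;
}
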
> @@ -1220,6 +1230,59 @@ static struct amdgpu_bo *amdgpu_vm_get_pt(struct amdgpu_pte_update_params *p,
>   }
>   
>   /**
> + * amdgpu_vm_update_ptes_cpu - Update the page tables in the range
> + *  @start - @end using the CPU.
> + * See amdgpu_vm_update_ptes for parameter description.
> + *
> + */
> +static int amdgpu_vm_update_ptes_cpu(struct amdgpu_pte_update_params *params,
> +				     uint64_t start, uint64_t end,
> +				     uint64_t dst, uint64_t flags)
> +{
> +	struct amdgpu_device *adev = params->adev;
> +	const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;
> +	void *pe_ptr;
> +	uint64_t addr;
> +	struct amdgpu_bo *pt;
> +	unsigned int nptes;
> +	int r;
> +
> +	/* initialize the variables */
> +	addr = start;
> +
> +	/* walk over the address space and update the page tables */
> +	while (addr < end) {
> +		pt = amdgpu_vm_get_pt(params, addr);
> +		if (!pt) {
> +			pr_err("PT not found, aborting update_ptes\n");
> +			return -EINVAL;
> +		}
> +
> +		WARN_ON(params->shadow);
> +
> +		r = amdgpu_bo_kmap(pt, &pe_ptr);
> +		if (r)
> +			return r;
> +
> +		pe_ptr += (addr & mask) * 8;
> +
> +		if ((addr & ~mask) == (end & ~mask))
> +			nptes = end - addr;
> +		else
> +			nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
> +
> +		params->func(params, (uint64_t)pe_ptr, dst, nptes,
> +			     AMDGPU_GPU_PAGE_SIZE, flags);
> +
> +		amdgpu_bo_kunmap(pt);
> +		addr += nptes;
> +		dst += nptes * AMDGPU_GPU_PAGE_SIZE;
> +	}
> +
> +	return 0;
> +}
> +
> +/**
>    * amdgpu_vm_update_ptes - make sure that page tables are valid
>    *
>    * @params: see amdgpu_pte_update_params definition
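
The arithmetic in the new function keeps every write inside a single page table: (addr & mask) * 8 is the byte offset of the first entry in the mapped PT, and nptes is clipped either at @end or at the PT boundary. A small stand-alone illustration of that clipping, assuming 512 entries per page table (AMDGPU_VM_PTE_COUNT depends on the ASIC/configuration):

#include <stdint.h>
#include <stdio.h>

#define PTE_COUNT 512ULL	/* assumed entries per page table */

int main(void)
{
	const uint64_t mask = PTE_COUNT - 1;
	uint64_t addr = 500, end = 1500;	/* PTE indices spanning three PTs */

	while (addr < end) {
		/* byte offset of the first entry inside the current PT */
		uint64_t offset = (addr & mask) * 8;
		uint64_t nptes;

		if ((addr & ~mask) == (end & ~mask))
			nptes = end - addr;			/* last, partial PT */
		else
			nptes = PTE_COUNT - (addr & mask);	/* run to the PT boundary */

		printf("PT %llu: %llu entries starting at byte offset %llu\n",
		       (unsigned long long)(addr / PTE_COUNT),
		       (unsigned long long)nptes,
		       (unsigned long long)offset);

		addr += nptes;
	}
	return 0;
}
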
> @@ -1245,6 +1308,10 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
>   	unsigned nptes; /* next number of ptes to be updated */
>   	uint64_t next_pe_start;
>   
> +	if (params->func == amdgpu_vm_cpu_set_ptes)
> +		return amdgpu_vm_update_ptes_cpu(params, start, end,
> +						 dst, flags);
> +
>   	/* initialize the variables */
>   	addr = start;
>   	pt = amdgpu_vm_get_pt(params, addr);
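
The dispatch added here is a plain function-pointer check: the caller selects the backend once by setting params->func, and the generic walker hands the whole range to the CPU variant when that pointer is the CPU writer. Roughly, as a pattern (hypothetical names, not the driver's):

#include <stdio.h>

struct update_params;
typedef void (*set_ptes_fn)(struct update_params *p);

struct update_params {
	set_ptes_fn func;	/* selected once by the caller */
};

static void cpu_set_ptes(struct update_params *p)  { (void)p; puts("CPU backend"); }
static void sdma_set_ptes(struct update_params *p) { (void)p; puts("SDMA backend"); }

static void update_ptes(struct update_params *p)
{
	/* Dispatch early: the CPU path walks the page tables itself. */
	if (p->func == cpu_set_ptes) {
		p->func(p);
		return;
	}
	/* ...GPU/SDMA-specific command building would go here... */
	p->func(p);
}

int main(void)
{
	struct update_params a = { .func = cpu_set_ptes };
	struct update_params b = { .func = sdma_set_ptes };
	update_ptes(&a);
	update_ptes(&b);
	return 0;
}
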
> @@ -1431,6 +1498,23 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
>   	params.vm = vm;
>   	params.src = src;
>   
> +	if (vm->use_cpu_for_update) {
> +		/* params.src is used as a flag to indicate system memory */
> +		if (pages_addr)
> +			params.src = ~0;
> +
> +		/* Wait for PT BOs to be free. PTs share the same resv. object
> +		 * as the root PD BO
> +		 */
> +		amdgpu_vm_bo_wait(adev, vm->root.bo);
> +		params.func = amdgpu_vm_cpu_set_ptes;
> +		params.pages_addr = pages_addr;
> +		params.shadow = false;
> +		r = amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
> +		if (!r)
> +			return r;
> +	}
> +
>   	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
>   
>   	/* sync to everything on unmapping */
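
To summarize the control flow added in this last hunk: the CPU path first waits for the fences on the root PD's reservation object (all PTs share it), then writes the entries directly; only if that fails does the function fall through to the existing SDMA submission below. Waiting up front replaces the synchronization the GPU path gets from the scheduler, so the PD/PT BOs must be idle before the CPU touches them. A condensed sketch of that fallback, with hypothetical helpers in place of the amdgpu calls:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical helpers standing in for the amdgpu functions above. */
static void wait_for_pd_fences(void)   { puts("wait: root PD resv idle"); }
static int  update_ptes_cpu(void)      { puts("update: CPU write");  return 0; }
static int  update_ptes_sdma(void)     { puts("update: SDMA job");   return 0; }

static int update_mapping(bool use_cpu_for_update)
{
	if (use_cpu_for_update) {
		/* PTs share the root PD's reservation object, so one wait
		 * covers every page table about to be touched. */
		wait_for_pd_fences();
		int r = update_ptes_cpu();
		if (!r)
			return 0;	/* CPU path succeeded, done */
		/* otherwise fall through to the GPU path */
	}
	return update_ptes_sdma();
}

int main(void)
{
	update_mapping(true);
	update_mapping(false);
	return 0;
}
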
