[PATCH 08/12] drm/amdgpu: rework synchronization of VM updates
Felix Kuehling
felix.kuehling at amd.com
Mon Jan 6 21:38:24 UTC 2020
On 2020-01-06 10:03 a.m., Christian König wrote:
> If provided we only sync to the BO's reservation
> object and no longer to the root PD.
>
> Signed-off-by: Christian König <christian.koenig at amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 7 -----
> drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 34 ++++++++++++---------
> drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 4 +--
> drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c | 25 ++++++---------
> drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 15 +++------
> 5 files changed, 35 insertions(+), 50 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
> index c124f64e7aae..5816df9f8531 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
> @@ -240,13 +240,6 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
> owner != AMDGPU_FENCE_OWNER_UNDEFINED)
> continue;
>
> - /* VM updates only sync with moves but not with user
> - * command submissions or KFD evictions fences
> - */
> - if (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
> - owner == AMDGPU_FENCE_OWNER_VM)
> - continue;
> -
> /* Ignore fences depending on the sync mode */
> switch (mode) {
> case AMDGPU_SYNC_ALWAYS:
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> index 54430c9f7a27..a03cfbe670c4 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> @@ -777,7 +777,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
> params.vm = vm;
> params.direct = direct;
>
> - r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_KFD, NULL);
> + r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
> if (r)
> return r;
>
> @@ -1273,7 +1273,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
> params.vm = vm;
> params.direct = direct;
>
> - r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_VM, NULL);
> + r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
> if (r)
> return r;
>
> @@ -1524,7 +1524,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
> * @adev: amdgpu_device pointer
> * @vm: requested vm
> * @direct: direct submission in a page fault
> - * @exclusive: fence we need to sync to
> + * @resv: fences we need to sync to
> * @start: start of mapped range
> * @last: last mapped entry
> * @flags: flags for the entries
> @@ -1539,14 +1539,14 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
> */
> static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
> struct amdgpu_vm *vm, bool direct,
> - struct dma_fence *exclusive,
> + struct dma_resv *resv,
> uint64_t start, uint64_t last,
> uint64_t flags, uint64_t addr,
> dma_addr_t *pages_addr,
> struct dma_fence **fence)
> {
> struct amdgpu_vm_update_params params;
> - void *owner = AMDGPU_FENCE_OWNER_VM;
> + enum amdgpu_sync_mode sync_mode;
> int r;
>
> memset(&params, 0, sizeof(params));
> @@ -1557,7 +1557,9 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
>
> /* sync to everything except eviction fences on unmapping */
I think this comment needs to be updated. Something like "Implicitly
sync to command submissions in the same VM before unmapping. Sync to
moving fences before mapping."
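For illustration, the hunk could then read something like this (only a
sketch of the suggested comment wording in context; the code itself is
unchanged from the patch):

	/* Implicitly sync to command submissions in the same VM before
	 * unmapping. Sync to moving fences before mapping.
	 */
	if (!(flags & AMDGPU_PTE_VALID))
		sync_mode = AMDGPU_SYNC_EQ_OWNER;
	else
		sync_mode = AMDGPU_SYNC_EXPLICIT;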
> if (!(flags & AMDGPU_PTE_VALID))
> - owner = AMDGPU_FENCE_OWNER_KFD;
> + sync_mode = AMDGPU_SYNC_EQ_OWNER;
> + else
> + sync_mode = AMDGPU_SYNC_EXPLICIT;
>
> mutex_lock(&vm->eviction_lock);
> if (vm->evicting) {
> @@ -1565,7 +1567,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
> goto error_unlock;
> }
>
> - r = vm->update_funcs->prepare(&params, owner, exclusive);
> + r = vm->update_funcs->prepare(&params, resv, sync_mode);
> if (r)
> goto error_unlock;
>
> @@ -1584,7 +1586,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
> * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
> *
> * @adev: amdgpu_device pointer
> - * @exclusive: fence we need to sync to
> + * @resv: fences we need to sync to
> * @pages_addr: DMA addresses to use for mapping
> * @vm: requested vm
> * @mapping: mapped range and flags to use for the update
> @@ -1600,7 +1602,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
> * 0 for success, -EINVAL for failure.
> */
> static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
> - struct dma_fence *exclusive,
> + struct dma_resv *resv,
> dma_addr_t *pages_addr,
> struct amdgpu_vm *vm,
> struct amdgpu_bo_va_mapping *mapping,
> @@ -1676,7 +1678,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
> }
>
> last = min((uint64_t)mapping->last, start + max_entries - 1);
> - r = amdgpu_vm_bo_update_mapping(adev, vm, false, exclusive,
> + r = amdgpu_vm_bo_update_mapping(adev, vm, false, resv,
> start, last, flags, addr,
> dma_addr, fence);
> if (r)
> @@ -1715,7 +1717,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
> dma_addr_t *pages_addr = NULL;
> struct ttm_mem_reg *mem;
> struct drm_mm_node *nodes;
> - struct dma_fence *exclusive, **last_update;
> + struct dma_fence **last_update;
> + struct dma_resv *resv;
> uint64_t flags;
> struct amdgpu_device *bo_adev = adev;
> int r;
> @@ -1723,7 +1726,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
> if (clear || !bo) {
> mem = NULL;
> nodes = NULL;
> - exclusive = NULL;
> } else {
> struct ttm_dma_tt *ttm;
>
> @@ -1733,7 +1735,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
> ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
> pages_addr = ttm->dma_address;
> }
> - exclusive = bo->tbo.moving;
> }
>
> if (bo) {
> @@ -1743,11 +1744,14 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
> flags |= AMDGPU_PTE_TMZ;
>
> bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
> + resv = bo->tbo.base.resv;
> } else {
> flags = 0x0;
> + resv = NULL;
> }
>
> - if (clear || (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv))
> + if (clear || (bo && bo->tbo.base.resv ==
> + vm->root.base.bo->tbo.base.resv))
> last_update = &vm->last_update;
> else
> last_update = &bo_va->last_pt_update;
> @@ -1761,7 +1765,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
> }
>
> list_for_each_entry(mapping, &bo_va->invalids, list) {
> - r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
> + r = amdgpu_vm_bo_split_mapping(adev, resv, pages_addr, vm,
> mapping, flags, bo_adev, nodes,
> last_update);
> if (r)
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> index d5613d184e99..8fb09648bc97 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> @@ -229,8 +229,8 @@ struct amdgpu_vm_update_params {
>
> struct amdgpu_vm_update_funcs {
> int (*map_table)(struct amdgpu_bo *bo);
> - int (*prepare)(struct amdgpu_vm_update_params *p, void * owner,
> - struct dma_fence *exclusive);
> + int (*prepare)(struct amdgpu_vm_update_params *p, struct dma_resv *resv,
> + enum amdgpu_sync_mode sync_mode);
> int (*update)(struct amdgpu_vm_update_params *p,
> struct amdgpu_bo *bo, uint64_t pe, uint64_t addr,
> unsigned count, uint32_t incr, uint64_t flags);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
> index 68b013be3837..0be72660db4c 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
> @@ -44,26 +44,21 @@ static int amdgpu_vm_cpu_map_table(struct amdgpu_bo *table)
> * Returns:
> * Negativ errno, 0 for success.
> */
> -static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner,
> - struct dma_fence *exclusive)
> +static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
> + struct dma_resv *resv,
> + enum amdgpu_sync_mode sync_mode)
> {
> + struct amdgpu_sync sync;
> int r;
>
> - /* Wait for any BO move to be completed */
> - if (exclusive) {
> - r = dma_fence_wait(exclusive, true);
> - if (unlikely(r))
> - return r;
> - }
> -
> - /* Don't wait for submissions during page fault */
> - if (p->direct)
> + if (!resv)
> return 0;
>
> - /* Wait for PT BOs to be idle. PTs share the same resv. object
> - * as the root PD BO
> - */
> - return amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true);
> + amdgpu_sync_create(&sync);
> + amdgpu_sync_resv(p->adev, &sync, resv, sync_mode, p->vm);
> + r = amdgpu_sync_wait(&sync, true);
> + amdgpu_sync_free(&sync);
This looks like it's pretty much duplicating amdgpu_bo_sync_wait. In
fact, when I first wrote that function it was meant to be
amdgpu_sync_wait_resv (and the comment above that function still
incorrectly uses that name).
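Something like the following shared helper could replace the open-coded
create/resv/wait/free sequence (just a sketch; the exact name and
signature are an assumption and would need the new sync_mode/owner
parameters wired through):

	/* Wait for all fences in @resv that are selected by @sync_mode/@owner */
	int amdgpu_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
				  enum amdgpu_sync_mode sync_mode, void *owner,
				  bool intr)
	{
		struct amdgpu_sync sync;
		int r;

		amdgpu_sync_create(&sync);
		r = amdgpu_sync_resv(adev, &sync, resv, sync_mode, owner);
		if (!r)
			r = amdgpu_sync_wait(&sync, intr);
		amdgpu_sync_free(&sync);
		return r;
	}

amdgpu_vm_cpu_prepare() would then boil down to:

	if (!resv)
		return 0;
	return amdgpu_sync_wait_resv(p->adev, resv, sync_mode, p->vm, true);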
Regards,
Felix
> + return r;
> }
>
> /**
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
> index 3b61317c0f08..e7a383134521 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
> @@ -58,9 +58,9 @@ static int amdgpu_vm_sdma_map_table(struct amdgpu_bo *table)
> * Negativ errno, 0 for success.
> */
> static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
> - void *owner, struct dma_fence *exclusive)
> + struct dma_resv *resv,
> + enum amdgpu_sync_mode sync_mode)
> {
> - struct amdgpu_bo *root = p->vm->root.base.bo;
> unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
> int r;
>
> @@ -70,17 +70,10 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
>
> p->num_dw_left = ndw;
>
> - /* Wait for moves to be completed */
> - r = amdgpu_sync_fence(&p->job->sync, exclusive, false);
> - if (r)
> - return r;
> -
> - /* Don't wait for any submissions during page fault handling */
> - if (p->direct)
> + if (!resv)
> return 0;
>
> - return amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv,
> - AMDGPU_SYNC_NE_OWNER, owner);
> + return amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode, p->vm);
> }
>
> /**