[PATCH 1/6] drm/amdgpu: rework VM state machine lock handling v2

Zhang, Jerry Jerry.Zhang at amd.com
Sat May 19 08:00:27 UTC 2018


> Patch 6:
> I can read the code, but I'm not sure about its purpose.

On second thought, I guess the intention is to reduce the LRU movement of per-VM BOs to improve performance.

However, the LRU tail move in amdgpu_vm_validate_pt_bos() looks like it changes the per-VM BO order again (see the sketch below).

IIRC, a relatively stable order of the per-VM BOs is expected in order to get better performance.

Please confirm that.
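
To be concrete, the move I mean is the LRU tail handling in amdgpu_vm_validate_pt_bos(), roughly like this (a sketch from memory, not quoting the exact hunk; glob is adev->mman.bdev.glob as in the patch):

	/* every PT BO that validates OK gets pushed to the LRU tail,
	 * which re-orders the per-VM BOs relative to each other
	 */
	spin_lock(&glob->lru_lock);
	ttm_bo_move_to_lru_tail(&bo->tbo);
	if (bo->shadow)
		ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
	spin_unlock(&glob->lru_lock);

Since that runs for every PT BO on the evicted list, it seems to shuffle the per-VM BOs on the LRU each time we validate.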

Regards,
Jerry(Junwei Zhang)

________________________________________
From: amd-gfx <amd-gfx-bounces at lists.freedesktop.org> on behalf of Zhang, Jerry (Junwei) <Jerry.Zhang at amd.com>
Sent: Friday, May 18, 2018 5:55:31 PM
To: Christian König; amd-gfx at lists.freedesktop.org
Subject: Re: [PATCH 1/6] drm/amdgpu: rework VM state machine lock handling v2

Patches 2, 3, 4 and 5 are
Reviewed-by: Junwei Zhang <Jerry.Zhang at amd.com>

Patch 1:
Could you show where the VM is reserved?
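
Just to check my understanding: I assume "reserving the VM" means holding the reservation object of the root PD BO (vm->root.base.bo, as referenced in the patch), something like the sketch below?

	struct amdgpu_bo *root = vm->root.base.bo;
	int r;

	/* "reserving the VM" = holding the reservation object of the
	 * root PD BO; the per-VM BOs share root->tbo.resv, so the
	 * evicted and relocated lists can be walked without an extra
	 * spinlock while it is held.
	 */
	r = amdgpu_bo_reserve(root, true);
	if (r)
		return r;

	/* ... safe to touch vm->evicted / vm->relocated here ... */

	amdgpu_bo_unreserve(root);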

Patch 6:
I can read the code, but I'm not sure about its purpose.

Jerry

On 05/17/2018 05:49 PM, Christian König wrote:
> Only the moved state needs a separate spin lock protection. All other
> states are protected by reserving the VM anyway.
>
> v2: fix some more incorrect cases
>
> Signed-off-by: Christian König <christian.koenig at amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 66 +++++++++++-----------------------
>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h |  4 +--
>   2 files changed, 21 insertions(+), 49 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> index 1a8f4e0dd023..f0deedcaf1c9 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> @@ -119,9 +119,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
>        * is currently evicted. add the bo to the evicted list to make sure it
>        * is validated on next vm use to avoid fault.
>        * */
> -     spin_lock(&vm->status_lock);
>       list_move_tail(&base->vm_status, &vm->evicted);
> -     spin_unlock(&vm->status_lock);
>   }
>
>   /**
> @@ -228,7 +226,6 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
>       struct ttm_bo_global *glob = adev->mman.bdev.glob;
>       int r;
>
> -     spin_lock(&vm->status_lock);
>       while (!list_empty(&vm->evicted)) {
>               struct amdgpu_vm_bo_base *bo_base;
>               struct amdgpu_bo *bo;
> @@ -236,10 +233,8 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
>               bo_base = list_first_entry(&vm->evicted,
>                                          struct amdgpu_vm_bo_base,
>                                          vm_status);
> -             spin_unlock(&vm->status_lock);
>
>               bo = bo_base->bo;
> -             BUG_ON(!bo);
>               if (bo->parent) {
>                       r = validate(param, bo);
>                       if (r)
> @@ -259,13 +254,14 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
>                               return r;
>               }
>
> -             spin_lock(&vm->status_lock);
> -             if (bo->tbo.type != ttm_bo_type_kernel)
> +             if (bo->tbo.type != ttm_bo_type_kernel) {
> +                     spin_lock(&vm->moved_lock);
>                       list_move(&bo_base->vm_status, &vm->moved);
> -             else
> +                     spin_unlock(&vm->moved_lock);
> +             } else {
>                       list_move(&bo_base->vm_status, &vm->relocated);
> +             }
>       }
> -     spin_unlock(&vm->status_lock);
>
>       return 0;
>   }
> @@ -279,13 +275,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
>    */
>   bool amdgpu_vm_ready(struct amdgpu_vm *vm)
>   {
> -     bool ready;
> -
> -     spin_lock(&vm->status_lock);
> -     ready = list_empty(&vm->evicted);
> -     spin_unlock(&vm->status_lock);
> -
> -     return ready;
> +     return list_empty(&vm->evicted);
>   }
>
>   /**
> @@ -477,9 +467,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
>                       pt->parent = amdgpu_bo_ref(parent->base.bo);
>
>                       amdgpu_vm_bo_base_init(&entry->base, vm, pt);
> -                     spin_lock(&vm->status_lock);
>                       list_move(&entry->base.vm_status, &vm->relocated);
> -                     spin_unlock(&vm->status_lock);
>               }
>
>               if (level < AMDGPU_VM_PTB) {
> @@ -926,10 +914,8 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
>               if (!entry->base.bo)
>                       continue;
>
> -             spin_lock(&vm->status_lock);
>               if (list_empty(&entry->base.vm_status))
>                       list_add(&entry->base.vm_status, &vm->relocated);
> -             spin_unlock(&vm->status_lock);
>               amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
>       }
>   }
> @@ -974,7 +960,6 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
>               params.func = amdgpu_vm_do_set_ptes;
>       }
>
> -     spin_lock(&vm->status_lock);
>       while (!list_empty(&vm->relocated)) {
>               struct amdgpu_vm_bo_base *bo_base, *parent;
>               struct amdgpu_vm_pt *pt, *entry;
> @@ -984,13 +969,10 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
>                                          struct amdgpu_vm_bo_base,
>                                          vm_status);
>               list_del_init(&bo_base->vm_status);
> -             spin_unlock(&vm->status_lock);
>
>               bo = bo_base->bo->parent;
> -             if (!bo) {
> -                     spin_lock(&vm->status_lock);
> +             if (!bo)
>                       continue;
> -             }
>
>               parent = list_first_entry(&bo->va, struct amdgpu_vm_bo_base,
>                                         bo_list);
> @@ -999,12 +981,10 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
>
>               amdgpu_vm_update_pde(&params, vm, pt, entry);
>
> -             spin_lock(&vm->status_lock);
>               if (!vm->use_cpu_for_update &&
>                   (ndw - params.ib->length_dw) < 32)
>                       break;
>       }
> -     spin_unlock(&vm->status_lock);
>
>       if (vm->use_cpu_for_update) {
>               /* Flush HDP */
> @@ -1107,9 +1087,7 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
>               if (entry->huge) {
>                       /* Add the entry to the relocated list to update it. */
>                       entry->huge = false;
> -                     spin_lock(&p->vm->status_lock);
>                       list_move(&entry->base.vm_status, &p->vm->relocated);
> -                     spin_unlock(&p->vm->status_lock);
>               }
>               return;
>       }
> @@ -1588,8 +1566,9 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
>               amdgpu_asic_flush_hdp(adev, NULL);
>       }
>
> -     spin_lock(&vm->status_lock);
> +     spin_lock(&vm->moved_lock);
>       list_del_init(&bo_va->base.vm_status);
> +     spin_unlock(&vm->moved_lock);
>
>       /* If the BO is not in its preferred location add it back to
>        * the evicted list so that it gets validated again on the
> @@ -1599,7 +1578,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
>           !(bo->preferred_domains &
>           amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type)))
>               list_add_tail(&bo_va->base.vm_status, &vm->evicted);
> -     spin_unlock(&vm->status_lock);
>
>       list_splice_init(&bo_va->invalids, &bo_va->valids);
>       bo_va->cleared = clear;
> @@ -1811,14 +1789,14 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
>       bool clear;
>       int r = 0;
>
> -     spin_lock(&vm->status_lock);
> +     spin_lock(&vm->moved_lock);
>       while (!list_empty(&vm->moved)) {
>               struct amdgpu_bo_va *bo_va;
>               struct reservation_object *resv;
>
>               bo_va = list_first_entry(&vm->moved,
>                       struct amdgpu_bo_va, base.vm_status);
> -             spin_unlock(&vm->status_lock);
> +             spin_unlock(&vm->moved_lock);
>
>               resv = bo_va->base.bo->tbo.resv;
>
> @@ -1839,9 +1817,9 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
>               if (!clear && resv != vm->root.base.bo->tbo.resv)
>                       reservation_object_unlock(resv);
>
> -             spin_lock(&vm->status_lock);
> +             spin_lock(&vm->moved_lock);
>       }
> -     spin_unlock(&vm->status_lock);
> +     spin_unlock(&vm->moved_lock);
>
>       return r;
>   }
> @@ -1903,10 +1881,10 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
>               amdgpu_vm_prt_get(adev);
>
>       if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
> -             spin_lock(&vm->status_lock);
> +             spin_lock(&vm->moved_lock);
>               if (list_empty(&bo_va->base.vm_status))
>                       list_add(&bo_va->base.vm_status, &vm->moved);
> -             spin_unlock(&vm->status_lock);
> +             spin_unlock(&vm->moved_lock);
>       }
>       trace_amdgpu_vm_bo_map(bo_va, mapping);
>   }
> @@ -2216,9 +2194,9 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
>
>       list_del(&bo_va->base.bo_list);
>
> -     spin_lock(&vm->status_lock);
> +     spin_lock(&vm->moved_lock);
>       list_del(&bo_va->base.vm_status);
> -     spin_unlock(&vm->status_lock);
> +     spin_unlock(&vm->moved_lock);
>
>       list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
>               list_del(&mapping->list);
> @@ -2261,28 +2239,24 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
>
>               bo_base->moved = true;
>               if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
> -                     spin_lock(&bo_base->vm->status_lock);
>                       if (bo->tbo.type == ttm_bo_type_kernel)
>                               list_move(&bo_base->vm_status, &vm->evicted);
>                       else
>                               list_move_tail(&bo_base->vm_status,
>                                              &vm->evicted);
> -                     spin_unlock(&bo_base->vm->status_lock);
>                       continue;
>               }
>
>               if (bo->tbo.type == ttm_bo_type_kernel) {
> -                     spin_lock(&bo_base->vm->status_lock);
>                       if (list_empty(&bo_base->vm_status))
>                               list_add(&bo_base->vm_status, &vm->relocated);
> -                     spin_unlock(&bo_base->vm->status_lock);
>                       continue;
>               }
>
> -             spin_lock(&bo_base->vm->status_lock);
> +             spin_lock(&bo_base->vm->moved_lock);
>               if (list_empty(&bo_base->vm_status))
>                       list_add(&bo_base->vm_status, &vm->moved);
> -             spin_unlock(&bo_base->vm->status_lock);
> +             spin_unlock(&bo_base->vm->moved_lock);
>       }
>   }
>
> @@ -2391,9 +2365,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
>       vm->va = RB_ROOT_CACHED;
>       for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
>               vm->reserved_vmid[i] = NULL;
> -     spin_lock_init(&vm->status_lock);
>       INIT_LIST_HEAD(&vm->evicted);
>       INIT_LIST_HEAD(&vm->relocated);
> +     spin_lock_init(&vm->moved_lock);
>       INIT_LIST_HEAD(&vm->moved);
>       INIT_LIST_HEAD(&vm->freed);
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> index d6827083572a..0196b9a782f2 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> @@ -168,9 +168,6 @@ struct amdgpu_vm {
>       /* tree of virtual addresses mapped */
>       struct rb_root_cached   va;
>
> -     /* protecting invalidated */
> -     spinlock_t              status_lock;
> -
>       /* BOs who needs a validation */
>       struct list_head        evicted;
>
> @@ -179,6 +176,7 @@ struct amdgpu_vm {
>
>       /* BOs moved, but not yet updated in the PT */
>       struct list_head        moved;
> +     spinlock_t              moved_lock;
>
>       /* BO mappings freed, but not yet updated in the PT */
>       struct list_head        freed;
>
_______________________________________________
amd-gfx mailing list
amd-gfx at lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

