[PATCH 3/6] drm/amdgpu: reserve/unreserve vmid by vm ioctl v4
Zhang, Jerry (Junwei)
Jerry.Zhang at amd.com
Thu Apr 27 05:41:40 UTC 2017
On 04/27/2017 01:00 PM, Chunming Zhou wrote:
> add reserve/unreserve vmid functions.
> v3:
> only reserve vmid from gfxhub
> v4:
> fix race condition
>
> Change-Id: I5f80dc39dc9d44660a96a2b710b0dbb4d3b9039d
> Signed-off-by: Chunming Zhou <David1.Zhou at amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 62 +++++++++++++++++++++++++++-------
> 1 file changed, 49 insertions(+), 13 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> index 261aaea..e37421e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> @@ -546,6 +546,45 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
> return r;
> }
>
> +static void amdgpu_vm_free_reserved_vmid(struct amdgpu_device *adev,
> + struct amdgpu_vm *vm,
> + unsigned vmhub)
> +{
> + struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
> +
> + mutex_lock(&id_mgr->lock);
> + if (vm->reserved_vmid[vmhub]) {
> + list_add(&vm->reserved_vmid[vmhub]->list,
> + &id_mgr->ids_lru);
> + vm->reserved_vmid[vmhub] = NULL;
> + }
> + mutex_unlock(&id_mgr->lock);
> +}
> +
> +static int amdgpu_vm_alloc_reserved_vmid(struct amdgpu_device *adev,
> + struct amdgpu_vm *vm,
> + unsigned vmhub)
> +{
> + struct amdgpu_vm_id_manager *id_mgr;
> + struct amdgpu_vm_id *idle;
> + int r = 0;
> +
> + id_mgr = &adev->vm_manager.id_mgr[vmhub];
> + mutex_lock(&id_mgr->lock);
> + if (vm->reserved_vmid[vmhub])
> + goto unlock;
> + /* Select the first entry VMID */
> + idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vm_id, list);
> + list_del_init(&idle->list);
Could you give a hint about how a busy id should be handled here? What
happens if the head of the LRU is not idle when allocating the reserved vmid?
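For example, would something along these lines be needed? (Just a sketch to
illustrate the question, not a suggested implementation: the idle->active
field and amdgpu_sync_peek_fence() are borrowed from the existing
amdgpu_vm_grab_id() path, it only waits on a single peeked fence so a real
fix would likely need a loop or a dedicated sync-wait helper, and blocking
while holding id_mgr->lock is itself debatable.)

	struct fence *active;

	idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vm_id, list);
	/* wait for the previous users of this id before reserving it */
	active = amdgpu_sync_peek_fence(&idle->active, NULL);
	if (active) {
		r = fence_wait(active, true);
		if (r)
			goto unlock;
	}
	list_del_init(&idle->list);
	vm->reserved_vmid[vmhub] = idle;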
Jerry
> + vm->reserved_vmid[vmhub] = idle;
> + mutex_unlock(&id_mgr->lock);
> +
> + return 0;
> +unlock:
> + mutex_unlock(&id_mgr->lock);
> + return r;
> +}
> +
> static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
> {
> struct amdgpu_device *adev = ring->adev;
> @@ -2316,18 +2355,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
>
> amdgpu_vm_free_levels(&vm->root);
> fence_put(vm->last_dir_update);
> - for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) {
> - struct amdgpu_vm_id_manager *id_mgr =
> - &adev->vm_manager.id_mgr[i];
> -
> - mutex_lock(&id_mgr->lock);
> - if (vm->reserved_vmid[i]) {
> - list_add(&vm->reserved_vmid[i]->list,
> - &id_mgr->ids_lru);
> - vm->reserved_vmid[i] = NULL;
> - }
> - mutex_unlock(&id_mgr->lock);
> - }
> + for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
> + amdgpu_vm_free_reserved_vmid(adev, vm, i);
> }
>
> /**
> @@ -2400,11 +2429,18 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
> union drm_amdgpu_vm *args = data;
> struct amdgpu_device *adev = dev->dev_private;
> struct amdgpu_fpriv *fpriv = filp->driver_priv;
> + int r;
>
> switch (args->in.op) {
> case AMDGPU_VM_OP_RESERVE_VMID:
> + /* currently, we only need to reserve a vmid from the gfxhub */
> + r = amdgpu_vm_alloc_reserved_vmid(adev, &fpriv->vm,
> + AMDGPU_GFXHUB);
> + if (r)
> + return r;
> + break;
> case AMDGPU_VM_OP_UNRESERVE_VMID:
> - return -EINVAL;
> + amdgpu_vm_free_reserved_vmid(adev, &fpriv->vm, AMDGPU_GFXHUB);
> break;
> default:
> return -EINVAL;
>
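As an aside, for anyone wanting to exercise the new ioctl from userspace, a
minimal test sketch could look like the following. This assumes the
drm_amdgpu_vm union and the AMDGPU_VM_OP_* ops added earlier in this series
are available in the installed amdgpu_drm.h, and uses plain libdrm
(drmCommandWriteRead) rather than any dedicated amdgpu helper; "fd" is an
already-opened render node.

	#include <stdio.h>
	#include <string.h>
	#include <xf86drm.h>
	#include <amdgpu_drm.h>

	/* sketch only: reserve a vmid for this fd's VM, then release it again */
	static int test_reserve_vmid(int fd)
	{
		union drm_amdgpu_vm args;
		int r;

		memset(&args, 0, sizeof(args));
		args.in.op = AMDGPU_VM_OP_RESERVE_VMID;
		r = drmCommandWriteRead(fd, DRM_AMDGPU_VM, &args, sizeof(args));
		if (r) {
			fprintf(stderr, "reserve vmid failed: %d\n", r);
			return r;
		}

		memset(&args, 0, sizeof(args));
		args.in.op = AMDGPU_VM_OP_UNRESERVE_VMID;
		return drmCommandWriteRead(fd, DRM_AMDGPU_VM, &args, sizeof(args));
	}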