[PATCH 3/6] drm/amdgpu: reserve vmid by vm ioctl
Christian König
deathsimple at vodafone.de
Wed Apr 26 08:51:04 UTC 2017
On 26.04.2017 at 09:09, Zhang, Jerry (Junwei) wrote:
>
>
> On 04/24/2017 01:57 PM, Chunming Zhou wrote:
>> Change-Id: I5f80dc39dc9d44660a96a2b710b0dbb4d3b9039d
>> Signed-off-by: Chunming Zhou <David1.Zhou at amd.com>
>> ---
>> drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 56 ++++++++++++++++++++++++++++++++++
>> 1 file changed, 56 insertions(+)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> index acf9102..5f4dcc9 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> @@ -397,6 +397,17 @@ static bool amdgpu_vm_had_gpu_reset(struct amdgpu_device *adev,
>> atomic_read(&adev->gpu_reset_counter);
>> }
>>
>> +static bool amdgpu_vm_dedicated_vmid_ready(struct amdgpu_vm *vm)
>> +{
>> + unsigned vmhub;
>> +
>> + for (vmhub = 0; vmhub < AMDGPU_MAX_VMHUBS; vmhub++) {
>> + if (!vm->dedicated_vmid[vmhub])
>> + return false;
>
> Just a note:
> It seems that for pre-gmcv9 this will always allocate one extra
> dedicated/reserved vmid for the second, unused vmhub.
> It won't actually be used, though.
It's even worse. IIRC we don't initialize the 2nd hub at all on
pre-gmcv9, so this could end up operating on uninitialized data.
Christian.
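
For illustration only, a minimal sketch of how the loop could be bounded
to the hubs the ASIC actually initializes, assuming some per-device hub
count were available (the num_vmhubs parameter below is hypothetical and
not something this series introduces):

static bool amdgpu_vm_dedicated_vmid_ready(struct amdgpu_vm *vm,
					   unsigned num_vmhubs)
{
	unsigned vmhub;

	/* Only inspect hubs that were actually set up for this ASIC,
	 * so pre-gmcv9 never looks at the uninitialized 2nd hub entry.
	 */
	for (vmhub = 0; vmhub < num_vmhubs; vmhub++) {
		if (!vm->dedicated_vmid[vmhub])
			return false;
	}
	return true;
}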
>
> Jerry
>> + }
>> + return true;
>> +}
>> +
>> /**
>> * amdgpu_vm_grab_id - allocate the next free VMID
>> *
>> @@ -546,6 +557,45 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
>> return r;
>> }
>>
>> +static int amdgpu_vm_alloc_dedicated_vmid(struct amdgpu_device *adev,
>> + struct amdgpu_vm *vm)
>> +{
>> + struct amdgpu_vm_id_manager *id_mgr;
>> + struct amdgpu_vm_id *idle;
>> + unsigned vmhub;
>> + int r;
>> +
>> + for (vmhub = 0; vmhub < AMDGPU_MAX_VMHUBS; vmhub++) {
>> + id_mgr = &adev->vm_manager.id_mgr[vmhub];
>> +
>> + mutex_lock(&id_mgr->lock);
>> + /* Select the first entry VMID */
>> + idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vm_id,
>> + list);
>> + list_del_init(&idle->list);
>> + vm->dedicated_vmid[vmhub] = idle;
>> + mutex_unlock(&id_mgr->lock);
>> +
>> + r = amdgpu_sync_wait(&idle->active);
>> + if (r)
>> + goto err;
>> + }
>> +
>> + return 0;
>> +err:
>> + for (vmhub = 0; vmhub < AMDGPU_MAX_VMHUBS; vmhub++) {
>> + id_mgr = &adev->vm_manager.id_mgr[vmhub];
>> +
>> + mutex_lock(&id_mgr->lock);
>> + if (vm->dedicated_vmid[vmhub])
>> + list_add(&vm->dedicated_vmid[vmhub]->list,
>> + &id_mgr->ids_lru);
>> + vm->dedicated_vmid[vmhub] = NULL;
>> + mutex_unlock(&id_mgr->lock);
>> + }
>> + return r;
>> +}
>> +
>> static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
>> {
>> struct amdgpu_device *adev = ring->adev;
>> @@ -2379,9 +2429,15 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
>> union drm_amdgpu_vm *args = data;
>> struct amdgpu_device *adev = dev->dev_private;
>> struct amdgpu_fpriv *fpriv = filp->driver_priv;
>> + int r;
>>
>> switch (args->in.op) {
>> case AMDGPU_VM_OP_RESERVE_VMID:
>> + if (!amdgpu_vm_dedicated_vmid_ready(&fpriv->vm)) {
>> + r = amdgpu_vm_alloc_dedicated_vmid(adev, &fpriv->vm);
>
> Could you change the return value of this ready-check function?
> That usually makes this kind of check easier to follow, e.g.:
> if (sth_ready())
>         do_sth();
>
>
>> + if (r)
>> + return r;
>> + }
>> break;
>> default:
>> return -EINVAL;
>>
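Picking up Jerry's point above about the ready check, one way to read
that suggestion (an illustrative sketch only, not part of the series) is
to keep the call site positive and bail out early when the vmids are
already reserved, using only the helpers this patch adds:

	case AMDGPU_VM_OP_RESERVE_VMID:
		/* Nothing to do if dedicated vmids already exist for this VM. */
		if (amdgpu_vm_dedicated_vmid_ready(&fpriv->vm))
			break;
		r = amdgpu_vm_alloc_dedicated_vmid(adev, &fpriv->vm);
		if (r)
			return r;
		break;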
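For context, a minimal userspace sketch of how the new op could be
exercised, assuming the AMDGPU_VM_OP_RESERVE_VMID op, union drm_amdgpu_vm
and DRM_IOCTL_AMDGPU_VM definitions introduced earlier in this series;
the reserve_dedicated_vmid helper name is made up for illustration:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>	/* uapi header touched by this series */

/* Ask the kernel to reserve dedicated vmids for the VM of this DRM fd. */
static int reserve_dedicated_vmid(int drm_fd)
{
	union drm_amdgpu_vm args;

	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_VM_OP_RESERVE_VMID;

	return ioctl(drm_fd, DRM_IOCTL_AMDGPU_VM, &args);
}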