[PATCH 13/13] drm/amdgpu: add AMDGPU_VM_OP_ENABLE_SVM IOCTL
Christian König
ckoenig.leichtzumerken at gmail.com
Tue Jan 30 14:02:09 UTC 2018
On 29.01.2018 at 23:27, Felix Kuehling wrote:
> Enabling SVM after the VM has been created and the PASID allocated is
> problematic because the IOMMU can support a smaller range of PASIDs than
> the GPU. Ideally SVM would be a flag during VM creation, but I see that
> doesn't work as it's done in amdgpu_driver_open_kms, not in an ioctl.
>
> Could the PASID be changed on an existing VM if necessary?
Yeah, that shouldn't be much of a problem.
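
For context on why re-binding might become necessary: Felix's point above is
that the IOMMU can support fewer PASIDs than the GPU. The per-device limit can
be queried through the existing AMD IOMMU API; the helper name below is
hypothetical and how amdgpu would consume the result is an assumption, so
treat it as a sketch only.

#include <linux/amd-iommu.h>
#include <linux/pci.h>

/* Sketch: number of PASIDs the IOMMU supports for this device, or 0 if
 * PASID support is missing entirely.  A caller could use this to clamp
 * or re-allocate the PASID before binding it to a process. */
static unsigned amdgpu_svm_max_pasids(struct pci_dev *pdev)
{
	struct amd_iommu_device_info info;

	if (amd_iommu_device_info(pdev, &info))
		return 0;

	if (!(info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP))
		return 0;

	return info.max_pasids;
}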
Another issue is that the VM can potentially be created by the X server,
but then used by the client with DRI3.
So we would always need a separate IOCTL to specify which process a VM
should be bound to.
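
A DRI3 client could do that binding itself, on its own render-node fd, through
the op added below. A minimal userspace sketch, assuming this patch is applied
so that AMDGPU_VM_OP_ENABLE_SVM exists in the installed amdgpu_drm.h; the
wrapper name is made up, while union drm_amdgpu_vm and drmCommandWriteRead()
are the existing UAPI/libdrm pieces:

#include <string.h>
#include <xf86drm.h>
#include <drm/amdgpu_drm.h>

/* Hypothetical wrapper: ask the kernel to enable SVM for the VM behind
 * this fd, binding its PASID to the calling process. */
static int enable_svm_for_current_process(int fd)
{
	union drm_amdgpu_vm args;

	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_VM_OP_ENABLE_SVM;

	/* DRM_AMDGPU_VM is dispatched to amdgpu_vm_ioctl() in the kernel */
	return drmCommandWriteRead(fd, DRM_AMDGPU_VM, &args, sizeof(args));
}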
Regards,
Christian.
>
> One more comment inline ...
>
> On 2018-01-26 03:13 PM, Christian König wrote:
>> Add an IOCTL to enable SVM for the current process.
>>
>> One step further towards HMM support.
>>
>> Signed-off-by: Christian König <christian.koenig at amd.com>
>> ---
>> drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 4 +-
>> drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 94 +++++++++++++++++++++++++++++++--
>> include/uapi/drm/amdgpu_drm.h | 1 +
>> 3 files changed, 94 insertions(+), 5 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>> index b18920007624..2f424f8248a9 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>> @@ -897,6 +897,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
>> struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
>> struct amdgpu_bo_list *list;
>> struct amdgpu_bo *pd;
>> + struct pci_dev *pdev;
>> unsigned int pasid;
>> int handle;
>>
>> @@ -923,11 +924,12 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
>> }
>>
>> pasid = fpriv->vm.pasid;
>> + pdev = fpriv->vm.pte_support_ats ? adev->pdev : NULL;
>> pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);
>>
>> amdgpu_vm_fini(adev, &fpriv->vm);
>> if (pasid)
>> - amdgpu_pasid_free_delayed(pd->tbo.resv, NULL, pasid);
>> + amdgpu_pasid_free_delayed(pd->tbo.resv, pdev, pasid);
>> amdgpu_bo_unref(&pd);
>>
>> idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> index 5e53b7a2d4d5..84f41385677c 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> @@ -257,6 +257,24 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
>> return ready;
>> }
>>
>> +/**
>> + * amdgpu_vm_root_ats_entries - number of ATS entries in the root PD
>> + *
>> + * @adev: amdgpu_device pointer
>> + *
>> + * Returns number of entries in the root PD which should be initialized for ATS
>> + * use.
>> + */
>> +static unsigned amdgpu_vm_root_ats_entries(struct amdgpu_device *adev)
>> +{
>> + unsigned level = adev->vm_manager.root_level;
>> + unsigned shift;
>> +
>> + shift = amdgpu_vm_level_shift(adev, level);
>> + shift += AMDGPU_GPU_PAGE_SHIFT;
>> + return AMDGPU_VA_HOLE_START >> shift;
>> +}
>> +
>> /**
>> * amdgpu_vm_clear_bo - initially clear the PDs/PTs
>> *
>> @@ -283,9 +301,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
>>
>> if (pte_support_ats) {
>> if (level == adev->vm_manager.root_level) {
>> - ats_entries = amdgpu_vm_level_shift(adev, level);
>> - ats_entries += AMDGPU_GPU_PAGE_SHIFT;
>> - ats_entries = AMDGPU_VA_HOLE_START >> ats_entries;
>> + ats_entries = amdgpu_vm_root_ats_entries(adev);
>> ats_entries = min(ats_entries, entries);
>> entries -= ats_entries;
>> } else {
>> @@ -329,6 +345,9 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
>>
>> amdgpu_ring_pad_ib(ring, &job->ibs[0]);
>>
>> + amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
>> + AMDGPU_FENCE_OWNER_VM, false);
>> +
>> WARN_ON(job->ibs[0].length_dw > 64);
>> r = amdgpu_job_submit(job, ring, &vm->entity,
>> AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
>> @@ -2557,6 +2576,71 @@ bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
>> return true;
>> }
>>
>> +/**
>> + * amdgpu_vm_enable_svm - enable SVM
>> + *
>> + * @adev: amdgpu_device pointer
>> + * @vm: VM to enable SVM
>> + *
>> + * Initialize SVM.
>> + */
>> +int amdgpu_vm_enable_svm(struct amdgpu_device *adev, struct amdgpu_vm *vm)
>> +{
>> + int r;
>> +
>> + if (!vm->pasid)
>> + return -ENODEV;
>> +
>> + r = amdgpu_bo_reserve(vm->root.base.bo, false);
>> + if (r)
>> + return r;
>> +
>> + if (vm->pte_support_ats) {
>> + r = -EALREADY;
>> + goto error_unlock;
>> + }
>> +
>> + if (vm->root.entries) {
>> + unsigned i, entries;
>> +
>> + entries = amdgpu_vm_root_ats_entries(adev);
>> + for (i = 0; i < entries; ++i) {
>> + if (vm->root.entries[i].base.bo) {
>> + r = -EEXIST;
>> + goto error_unlock;
>> + }
>> + }
>> +
>> + entries = amdgpu_bo_size(vm->root.base.bo) / 8;
>> + spin_lock(&vm->status_lock);
>> + for (; i < entries; ++i) {
>> + struct amdgpu_vm_pt *pt = &vm->root.entries[i];
>> +
>> + if (pt->base.bo)
>> + list_move(&pt->base.vm_status, &vm->moved);
> I think this is only necessary because you clear the whole root PD BO
> with amdgpu_vm_clear_bo. But could that function be more selective and
> clear only the ATS entries? Maybe with an extra parameter? (See the
> sketch after the quoted patch.)
>
> Regards,
> Felix
>
>> + }
>> + spin_unlock(&vm->status_lock);
>> + }
>> +
>> + r = amdgpu_pasid_bind(adev->pdev, vm->pasid);
>> + if (r)
>> + goto error_unlock;
>> +
>> + r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
>> + adev->vm_manager.root_level,
>> + true);
>> + if (r) {
>> + amdgpu_pasid_unbind(adev->pdev, vm->pasid);
>> + goto error_unlock;
>> + }
>> +
>> + vm->pte_support_ats = true;
>> +
>> +error_unlock:
>> + amdgpu_bo_unreserve(vm->root.base.bo);
>> + return r;
>> +}
>> +
>> /**
>> * amdgpu_vm_manager_init - init the VM manager
>> *
>> @@ -2616,9 +2700,9 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
>>
>> int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
>> {
>> - union drm_amdgpu_vm *args = data;
>> struct amdgpu_device *adev = dev->dev_private;
>> struct amdgpu_fpriv *fpriv = filp->driver_priv;
>> + union drm_amdgpu_vm *args = data;
>> int r;
>>
>> switch (args->in.op) {
>> @@ -2631,6 +2715,8 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
>> case AMDGPU_VM_OP_UNRESERVE_VMID:
>> amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
>> break;
>> + case AMDGPU_VM_OP_ENABLE_SVM:
>> + return amdgpu_vm_enable_svm(adev, &fpriv->vm);
>> default:
>> return -EINVAL;
>> }
>> diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
>> index fe17b6785441..c5b13ebe8dfc 100644
>> --- a/include/uapi/drm/amdgpu_drm.h
>> +++ b/include/uapi/drm/amdgpu_drm.h
>> @@ -223,6 +223,7 @@ union drm_amdgpu_ctx {
>> /* vm ioctl */
>> #define AMDGPU_VM_OP_RESERVE_VMID 1
>> #define AMDGPU_VM_OP_UNRESERVE_VMID 2
>> +#define AMDGPU_VM_OP_ENABLE_SVM 3
>>
>> struct drm_amdgpu_vm_in {
>> /** AMDGPU_VM_OP_* */
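
On Felix's inline question above (clearing only the ATS entries instead of the
whole root PD): a rough sketch of how the entry counts could be computed with
an extra "ats_only" flag. The helper name, the flag and the else-branch detail
are assumptions, not part of the posted patch. amdgpu_vm_enable_svm() could
then initialize just the ATS range and drop the list_move() of the upper
entries to vm->moved. With the usual 48-bit VA layout (AMDGPU_VA_HOLE_START ==
1ULL << 47) the ATS range works out to the lower half of the 512-entry root PD.

/* Sketch only: split out the entry computation so amdgpu_vm_clear_bo()
 * could take an "ats_only" parameter. */
static void amdgpu_vm_clear_bo_count(struct amdgpu_device *adev,
				     struct amdgpu_bo *bo, unsigned level,
				     bool pte_support_ats, bool ats_only,
				     unsigned *ats_entries, unsigned *entries)
{
	*entries = amdgpu_bo_size(bo) / 8;
	*ats_entries = 0;

	if (!pte_support_ats)
		return;

	if (level == adev->vm_manager.root_level) {
		*ats_entries = min(amdgpu_vm_root_ats_entries(adev), *entries);
		*entries -= *ats_entries;
		if (ats_only)
			*entries = 0;	/* leave the non-ATS half untouched */
	} else {
		/* lower-level PDs/PTs below the VA hole are entirely ATS */
		*ats_entries = *entries;
		*entries = 0;
	}
}

The fill loops would then, as in the patch, write the first *ats_entries with
the ATS pattern and the following *entries with the normal invalid pattern.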