[PATCH v3 4/7] drm/xe: Move vm bind bo validation to a helper function
Nirmoy Das
nirmoy.das at linux.intel.com
Tue Apr 16 13:32:04 UTC 2024
Hi Matt,
On 4/16/2024 2:55 AM, Matthew Brost wrote:
> On Mon, Apr 15, 2024 at 04:52:11PM +0200, Nirmoy Das wrote:
>> Move vm bind bo validation to a helper function to make the
>> xe_vm_bind_ioctl() more readable.
>>
> Change logs are helpful for reviewers but not going to hold up this
> patch.
The cover letter contains all the changes. I can also copy them into the
individual patches starting from the next revision.
> With that:
> Reviewed-by: Matthew Brost <matthew.brost at intel.com>
Thanks,
Nirmoy
>
>> Signed-off-by: Nirmoy Das <nirmoy.das at intel.com>
>> ---
>> drivers/gpu/drm/xe/xe_vm.c | 77 +++++++++++++++++++++-----------------
>> 1 file changed, 43 insertions(+), 34 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
>> index b1dcaa35b6cc..8380f1d23074 100644
>> --- a/drivers/gpu/drm/xe/xe_vm.c
>> +++ b/drivers/gpu/drm/xe/xe_vm.c
>> @@ -2872,6 +2872,46 @@ static int vm_bind_ioctl_signal_fences(struct xe_vm *vm,
>> return err;
>> }
>>
>> +static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
>> + u64 addr, u64 range, u64 obj_offset,
>> + u16 pat_index)
>> +{
>> + u16 coh_mode;
>> +
>> + if (XE_IOCTL_DBG(xe, range > bo->size) ||
>> + XE_IOCTL_DBG(xe, obj_offset >
>> + bo->size - range)) {
>> + return -EINVAL;
>> + }
>> +
>> + if (bo->flags & XE_BO_FLAG_INTERNAL_64K) {
>> + if (XE_IOCTL_DBG(xe, obj_offset &
>> + XE_64K_PAGE_MASK) ||
>> + XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
>> + XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
>> + return -EINVAL;
>> + }
>> + }
>> +
>> + coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
>> + if (bo->cpu_caching) {
>> + if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
>> + bo->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) {
>> + return -EINVAL;
>> + }
>> + } else if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE)) {
>> + /*
>> + * Imported dma-buf from a different device should
>> + * require 1way or 2way coherency since we don't know
>> + * how it was mapped on the CPU. Just assume it is
>> + * potentially cached on CPU side.
>> + */
>> + return -EINVAL;
>> + }
>> +
>> + return 0;
>> +}
>> +
>> int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
>> {
>> struct xe_device *xe = to_xe_device(dev);
>> @@ -2955,7 +2995,6 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
>> u32 obj = bind_ops[i].obj;
>> u64 obj_offset = bind_ops[i].obj_offset;
>> u16 pat_index = bind_ops[i].pat_index;
>> - u16 coh_mode;
>>
>> if (!obj)
>> continue;
>> @@ -2967,40 +3006,10 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
>> }
>> bos[i] = gem_to_xe_bo(gem_obj);
>>
>> - if (XE_IOCTL_DBG(xe, range > bos[i]->size) ||
>> - XE_IOCTL_DBG(xe, obj_offset >
>> - bos[i]->size - range)) {
>> - err = -EINVAL;
>> - goto put_obj;
>> - }
>> -
>> - if (bos[i]->flags & XE_BO_FLAG_INTERNAL_64K) {
>> - if (XE_IOCTL_DBG(xe, obj_offset &
>> - XE_64K_PAGE_MASK) ||
>> - XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
>> - XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
>> - err = -EINVAL;
>> - goto put_obj;
>> - }
>> - }
>> -
>> - coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
>> - if (bos[i]->cpu_caching) {
>> - if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
>> - bos[i]->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) {
>> - err = -EINVAL;
>> - goto put_obj;
>> - }
>> - } else if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE)) {
>> - /*
>> - * Imported dma-buf from a different device should
>> - * require 1way or 2way coherency since we don't know
>> - * how it was mapped on the CPU. Just assume it is
>> - * potentially cached on CPU side.
>> - */
>> - err = -EINVAL;
>> + err = xe_vm_bind_ioctl_validate_bo(xe, bos[i], addr, range,
>> + obj_offset, pat_index);
>> + if (err)
>> goto put_obj;
>> - }
>> }
>>
>> if (args->num_syncs) {
>> --
>> 2.42.0
>>
More information about the Intel-xe
mailing list