[PATCH v4 13/30] drm/xe: Move ufence add to vm_bind_ioctl_ops_install_fences
Zeng, Oak
oak.zeng at intel.com
Tue Mar 26 20:59:13 UTC 2024
> -----Original Message-----
> From: Brost, Matthew <matthew.brost at intel.com>
> Sent: Tuesday, March 26, 2024 2:54 PM
> To: Zeng, Oak <oak.zeng at intel.com>
> Cc: intel-xe at lists.freedesktop.org
> Subject: Re: [PATCH v4 13/30] drm/xe: Move ufence add to
> vm_bind_ioctl_ops_install_fences
>
> On Mon, Mar 25, 2024 at 02:54:44PM -0600, Zeng, Oak wrote:
> > This patch makes sense to me. See two nit-picks inline.
> >
> > > -----Original Message-----
> > > From: Intel-xe <intel-xe-bounces at lists.freedesktop.org> On Behalf Of
> Matthew
> > > Brost
> > > Sent: Friday, March 8, 2024 12:08 AM
> > > To: intel-xe at lists.freedesktop.org
> > > Cc: Brost, Matthew <matthew.brost at intel.com>
> > > Subject: [PATCH v4 13/30] drm/xe: Move ufence add to
> > > vm_bind_ioctl_ops_install_fences
> > >
> > > Rather than adding a ufence to a VMA in the bind function, add the
> > > ufence to all VMAs in the IOCTL that require binds in
> > > vm_bind_ioctl_ops_install_fences. This will help with the transition to
> > > 1 job per VM bind IOCTL.
> > >
> > > Signed-off-by: Matthew Brost <matthew.brost at intel.com>
> > > ---
> > > drivers/gpu/drm/xe/xe_sync.c | 15 ++++++++++++
> > > drivers/gpu/drm/xe/xe_sync.h | 1 +
> > > drivers/gpu/drm/xe/xe_vm.c | 44 ++++++++++++++++++++++++++++++------
> > > 3 files changed, 53 insertions(+), 7 deletions(-)
> > >
> > > diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
> > > index 02c9577fe418..07aa65d9bcab 100644
> > > --- a/drivers/gpu/drm/xe/xe_sync.c
> > > +++ b/drivers/gpu/drm/xe/xe_sync.c
> > > @@ -343,6 +343,21 @@ xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync,
> > > return ERR_PTR(-ENOMEM);
> > > }
> > >
> > > +/**
> > > + * __xe_sync_ufence_get() - Get user fence from user fence
> > > + * @ufence: input user fence
> > > + *
> > > + * Get a user fence reference from user fence
> > > + *
> > > + * Return: xe_user_fence pointer with reference
> > > + */
> > > +struct xe_user_fence *__xe_sync_ufence_get(struct xe_user_fence *ufence)
> > > +{
> > > + user_fence_get(ufence);
> > > +
> > > + return ufence;
> > > +}
> >
> > I wonder why this is made part of xe_sync. Isn't it just a ufence get
> > function? Can we drop _sync_ from the function name?
> >
> >
>
> Typically exported functions should have a prefix matching the header
> file name.
>
> e.g.
>
> xe_sync.h -> all functions should start with xe_sync_*
>
> In this case struct xe_user_fence is a private data member of xe_sync.c
> (only defined in that C file) and just an opaque pointer to the rest of
> the driver.
That makes sense. Thanks for explaining.
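
For my own reference, a minimal sketch of the pattern you describe
(hypothetical foo_ names, not the actual Xe code): the struct is only
defined in the .c file, so the rest of the driver only ever holds an
opaque pointer, and every exported helper keeps the prefix of the header
it is declared in.

/* foo_sync.h */
struct foo_user_fence;	/* opaque: forward declaration only */

struct foo_user_fence *foo_sync_ufence_get(struct foo_user_fence *ufence);
void foo_sync_ufence_put(struct foo_user_fence *ufence);

/* foo_sync.c */
#include <linux/kref.h>
#include <linux/slab.h>
#include "foo_sync.h"

struct foo_user_fence {
	struct kref refcount;	/* private members stay hidden here */
};

struct foo_user_fence *foo_sync_ufence_get(struct foo_user_fence *ufence)
{
	kref_get(&ufence->refcount);
	return ufence;
}

static void foo_user_fence_release(struct kref *kref)
{
	kfree(container_of(kref, struct foo_user_fence, refcount));
}

void foo_sync_ufence_put(struct foo_user_fence *ufence)
{
	kref_put(&ufence->refcount, foo_user_fence_release);
}

So even though the helpers really just take and drop a reference, the
xe_sync_ prefix (and the __ variant for the raw get) matches the header
the symbols are exported from.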
Oak
>
> > > +
> > > /**
> > > * xe_sync_ufence_get() - Get user fence from sync
> > > * @sync: input sync
> > > diff --git a/drivers/gpu/drm/xe/xe_sync.h b/drivers/gpu/drm/xe/xe_sync.h
> > > index 0fd0d51208e6..26e9ec9de1a8 100644
> > > --- a/drivers/gpu/drm/xe/xe_sync.h
> > > +++ b/drivers/gpu/drm/xe/xe_sync.h
> > > @@ -38,6 +38,7 @@ static inline bool xe_sync_is_ufence(struct xe_sync_entry *sync)
> > > return !!sync->ufence;
> > > }
> > >
> > > +struct xe_user_fence *__xe_sync_ufence_get(struct xe_user_fence *ufence);
> > > struct xe_user_fence *xe_sync_ufence_get(struct xe_sync_entry *sync);
> > > void xe_sync_ufence_put(struct xe_user_fence *ufence);
> > > int xe_sync_ufence_get_status(struct xe_user_fence *ufence);
> > > diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> > > index 5767955529dd..5b93c71fc5e9 100644
> > > --- a/drivers/gpu/drm/xe/xe_vm.c
> > > +++ b/drivers/gpu/drm/xe/xe_vm.c
> > > @@ -1810,17 +1810,10 @@ xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
> > > {
> > > struct dma_fence *fence;
> > > struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
> > > - struct xe_user_fence *ufence;
> > >
> > > xe_vm_assert_held(vm);
> > > xe_bo_assert_held(bo);
> > >
> > > - ufence = find_ufence_get(syncs, num_syncs);
> > > - if (vma->ufence && ufence)
> > > - xe_sync_ufence_put(vma->ufence);
> > > -
> > > - vma->ufence = ufence ?: vma->ufence;
> > > -
> > > if (immediate) {
> > > fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, tile_mask,
> > > first_op, last_op);
> > > @@ -2822,21 +2815,58 @@ struct dma_fence *xe_vm_ops_execute(struct xe_vm *vm, struct xe_vma_ops *vops)
> > > return fence;
> > > }
> > >
> > > +static void vma_add_ufence(struct xe_vma *vma, struct xe_user_fence *ufence)
> > > +{
> > > + if (vma->ufence)
> > > + xe_sync_ufence_put(vma->ufence);
> >
> > Not sure where/when we introduced xe_sync_ufence_put; to me this could be
> > renamed to xe_ufence_put.
> >
>
> See above, I think the naming is correct. All of this is a matter of
> opinion; we don't have any official style guidelines for Xe, but we might
> want to think about writing some up / fixing Xe to conform while the
> driver is still relatively small.
>
> Matt
>
> > Oak
> >
> > > + vma->ufence = __xe_sync_ufence_get(ufence);
> > > +}
> > > +
> > > +static void op_add_ufence(struct xe_vm *vm, struct xe_vma_op *op,
> > > + struct xe_user_fence *ufence)
> > > +{
> > > + switch (op->base.op) {
> > > + case DRM_GPUVA_OP_MAP:
> > > + vma_add_ufence(op->map.vma, ufence);
> > > + break;
> > > + case DRM_GPUVA_OP_REMAP:
> > > + if (op->remap.prev)
> > > + vma_add_ufence(op->remap.prev, ufence);
> > > + if (op->remap.next)
> > > + vma_add_ufence(op->remap.next, ufence);
> > > + break;
> > > + case DRM_GPUVA_OP_UNMAP:
> > > + break;
> > > + case DRM_GPUVA_OP_PREFETCH:
> > > + vma_add_ufence(gpuva_to_vma(op->base.prefetch.va), ufence);
> > > + break;
> > > + default:
> > > + drm_warn(&vm->xe->drm, "NOT POSSIBLE");
> > > + }
> > > +}
> > > +
> > > static void vm_bind_ioctl_ops_install_fences(struct xe_vm *vm,
> > > struct xe_vma_ops *vops,
> > > struct dma_fence *fence)
> > > {
> > > struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm,
> > > vops->q);
> > > + struct xe_user_fence *ufence;
> > > struct xe_vma_op *op;
> > > int i;
> > >
> > > + ufence = find_ufence_get(vops->syncs, vops->num_syncs);
> > > list_for_each_entry(op, &vops->list, link) {
> > > + if (ufence)
> > > + op_add_ufence(vm, op, ufence);
> > > +
> > > if (op->base.op == DRM_GPUVA_OP_UNMAP)
> > > xe_vma_destroy(gpuva_to_vma(op->base.unmap.va),
> > > fence);
> > > else if (op->base.op == DRM_GPUVA_OP_REMAP)
> > > xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va),
> > > fence);
> > > }
> > > + if (ufence)
> > > + xe_sync_ufence_put(ufence);
> > > for (i = 0; i < vops->num_syncs; i++)
> > > xe_sync_entry_signal(vops->syncs + i, NULL, fence);
> > > xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
> > > --
> > > 2.34.1
> >