[PATCH 11/15] drm/xe: Convert xe_dma_buf.c for exhaustive eviction
Thomas Hellström
thomas.hellstrom at linux.intel.com
Fri Aug 15 15:05:27 UTC 2025
On Wed, 2025-08-13 at 14:37 -0700, Matthew Brost wrote:
> On Wed, Aug 13, 2025 at 12:51:17PM +0200, Thomas Hellström wrote:
> > Convert dma-buf migration to XE_PL_TT and dma-buf import to
> > support exhaustive eviction, using xe_validation_guard().
> > It seems unlikely that the import would result in an -ENOMEM,
> > but convert import anyway for completeness.
> >
> > The dma-buf map_attachment() functionality unfortunately doesn't
> > support passing a drm_exec, which means that foreign devices
> > validating a dma-buf that we exported will not participate in the
> > exhaustive eviction scheme unless they are xeKMD devices.
> >
> > Signed-off-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
> > ---
> > drivers/gpu/drm/xe/xe_dma_buf.c | 59 +++++++++++++++++++++++----------
> > 1 file changed, 42 insertions(+), 17 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c
> > index 78a827d4e726..56df1d84df21 100644
> > --- a/drivers/gpu/drm/xe/xe_dma_buf.c
> > +++ b/drivers/gpu/drm/xe/xe_dma_buf.c
> > @@ -163,16 +163,27 @@ static int xe_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
> > struct xe_bo *bo = gem_to_xe_bo(obj);
> > bool reads = (direction == DMA_BIDIRECTIONAL ||
> > direction == DMA_FROM_DEVICE);
> > - struct drm_exec *exec = XE_VALIDATION_UNIMPLEMENTED;
> > + struct xe_validation_ctx ctx;
> > + struct drm_exec exec;
> > + int ret = 0;
> >
> > if (!reads)
> > return 0;
> >
> > /* Can we do interruptible lock here? */
> > - xe_bo_lock(bo, false);
> > - (void)xe_bo_migrate(bo, XE_PL_TT, exec);
> > - xe_bo_unlock(bo);
> > -
> > + xe_validation_guard(&ctx, &xe_bo_device(bo)->val, &exec, 0, ret, false) {
> > + ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
> > + drm_exec_retry_on_contention(&exec);
> > + if (ret)
> > + goto out;
> > +
> > + ret = xe_bo_migrate(bo, XE_PL_TT, &exec);
> > + drm_exec_retry_on_contention(&exec);
> > + xe_validation_retry_on_oom(&ctx, &ret);
> > + }
> > +out:
> > + /* If we failed, cpu-access takes place in current placement. */
> > + (void)ret;
>
> Do you need the above line of code? I don't see this often in kernel
> code.
It's merely to annotate that we don't care about the returned value.
But I can remove it.
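For reference, the tail of the function would then look roughly like
below (same code as in the patch, only with the cast dropped; a sketch,
so whitespace is approximate):

	/* Retry the whole transaction on contention or validation OOM. */
	xe_validation_guard(&ctx, &xe_bo_device(bo)->val, &exec, 0, ret, false) {
		ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			goto out;

		ret = xe_bo_migrate(bo, XE_PL_TT, &exec);
		drm_exec_retry_on_contention(&exec);
		xe_validation_retry_on_oom(&ctx, &ret);
	}
out:
	/* If we failed, cpu-access takes place in current placement. */
	return 0;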
/Thomas
>
> Nit aside, patch LGTM.
>
> Matt
>
> > return 0;
> > }
> >
> > @@ -211,24 +222,38 @@ xe_dma_buf_init_obj(struct drm_device *dev, struct xe_bo *storage,
> > {
> > struct dma_resv *resv = dma_buf->resv;
> > struct xe_device *xe = to_xe_device(dev);
> > - struct drm_exec *exec = XE_VALIDATION_UNIMPLEMENTED;
> > + struct xe_validation_ctx ctx;
> > + struct drm_gem_object *dummy_obj;
> > + struct drm_exec exec;
> > struct xe_bo *bo;
> > - int ret;
> > -
> > - dma_resv_lock(resv, NULL);
> > - bo = ___xe_bo_create_locked(xe, storage, NULL, resv, NULL, dma_buf->size,
> > - 0, /* Will require 1way or 2way for vm_bind */
> > - ttm_bo_type_sg, XE_BO_FLAG_SYSTEM, exec);
> > - if (IS_ERR(bo)) {
> > - ret = PTR_ERR(bo);
> > - goto error;
> > + int ret = 0;
> > +
> > + dummy_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
> > + if (!dummy_obj)
> > + return ERR_PTR(-ENOMEM);
> > +
> > + dummy_obj->resv = resv;
> > + xe_validation_guard(&ctx, &xe->val, &exec, 0, ret, false) {
> > + ret = drm_exec_lock_obj(&exec, dummy_obj);
> > + drm_exec_retry_on_contention(&exec);
> > + if (ret)
> > + goto error;
> > +
> > + bo = ___xe_bo_create_locked(xe, storage, NULL, resv, NULL, dma_buf->size,
> > + 0, /* Will require 1way or 2way for vm_bind */
> > + ttm_bo_type_sg, XE_BO_FLAG_SYSTEM, &exec);
> > + drm_exec_retry_on_contention(&exec);
> > + if (IS_ERR(bo)) {
> > + ret = PTR_ERR(bo);
> > + xe_validation_retry_on_oom(&ctx, &ret);
> > + goto error;
> > + }
> > }
> > - dma_resv_unlock(resv);
> > + drm_gem_object_put(dummy_obj);
> >
> > return &bo->ttm.base;
> >
> > error:
> > - dma_resv_unlock(resv);
> > return ERR_PTR(ret);
> > }
> >
> > --
> > 2.50.1
> >