[PATCH v6 04/26] drm/gpuvm: Introduce DRM_GPUVM_SM_MAP_OPS_FLAG_SPLIT_MADVISE flag
Matthew Brost
matthew.brost at intel.com
Fri Aug 8 05:20:56 UTC 2025
On Thu, Aug 07, 2025 at 10:13:16PM +0530, Himal Prasad Ghimiray wrote:
> - DRM_GPUVM_SM_MAP_OPS_FLAG_SPLIT_MADVISE: This flag is used by
> drm_gpuvm_sm_map_ops_create to iterate over GPUVMA's in the
> user-provided range and split the existing non-GEM object VMA if the
> start or end of the input range lies within it. The operations can
> create up to 2 REMAPS and 2 MAPs. The purpose of this operation is to be
> used by the Xe driver to assign attributes to GPUVMA's within the
> user-defined range. Unlike drm_gpuvm_sm_map_ops_flags in default mode,
> the operation with this flag will never have UNMAPs and
> merges, and can be without any final operations.
>
> v2
> - use drm_gpuvm_sm_map_ops_create with flags instead of defining new
> ops_create (Danilo)
> - Add doc (Danilo)
>
> v3
> - Fix doc
> - Fix unmapping check
>
> v4
> - Fix mapping for non madvise ops
>
> v5
> - Fix mapping (Matthew Brost)
> - Rebase on top of struct changes
>
> v6
> - flag moved to map_req
>
I’ll give this an RB—it looks right to me, though it’s a bit hard to be
certain. Before merge, I’d like to see xe_exec_system_allocator add a
section (or sections) that calls madvise() on each newly allocated memory
region; that should create enough random fragmentation—particularly with
threaded sections—in the VMA state to be confident this is correct.
With that:
Reviewed-by: Matthew Brost matthew.brost at intel.com
> Cc: Danilo Krummrich <dakr at kernel.org>
> Cc: Matthew Brost <matthew.brost at intel.com>
> Cc: Boris Brezillon <bbrezillon at kernel.org>
> Cc: <dri-devel at lists.freedesktop.org>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
> ---
> drivers/gpu/drm/drm_gpuvm.c | 87 +++++++++++++++++++++++++++++++------
> include/drm/drm_gpuvm.h | 11 +++++
> 2 files changed, 84 insertions(+), 14 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
> index b3a01c40001b..d8f5f594a415 100644
> --- a/drivers/gpu/drm/drm_gpuvm.c
> +++ b/drivers/gpu/drm/drm_gpuvm.c
> @@ -2110,6 +2110,8 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
> {
> struct drm_gpuva *va, *next;
> u64 req_end = req->op_map.va.addr + req->op_map.va.range;
> + bool is_madvise_ops = (req->flags & DRM_GPUVM_SM_MAP_OPS_FLAG_SPLIT_MADVISE);
> + bool needs_map = !is_madvise_ops;
> int ret;
>
> if (unlikely(!drm_gpuvm_range_valid(gpuvm, req->op_map.va.addr, req->op_map.va.range)))
> @@ -2122,26 +2124,35 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
> u64 range = va->va.range;
> u64 end = addr + range;
> bool merge = !!va->gem.obj;
> + bool skip_madvise_ops = is_madvise_ops && merge;
>
> + needs_map = !is_madvise_ops;
> if (addr == req->op_map.va.addr) {
> merge &= obj == req->op_map.gem.obj &&
> offset == req->op_map.gem.offset;
>
> if (end == req_end) {
> - ret = op_unmap_cb(ops, priv, va, merge);
> - if (ret)
> - return ret;
> + if (!is_madvise_ops) {
> + ret = op_unmap_cb(ops, priv, va, merge);
> + if (ret)
> + return ret;
> + }
> break;
> }
>
> if (end < req_end) {
> - ret = op_unmap_cb(ops, priv, va, merge);
> - if (ret)
> - return ret;
> + if (!is_madvise_ops) {
> + ret = op_unmap_cb(ops, priv, va, merge);
> + if (ret)
> + return ret;
> + }
> continue;
> }
>
> if (end > req_end) {
> + if (skip_madvise_ops)
> + break;
> +
> struct drm_gpuva_op_map n = {
> .va.addr = req_end,
> .va.range = range - req->op_map.va.range,
> @@ -2156,6 +2167,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
> ret = op_remap_cb(ops, priv, NULL, &n, &u);
> if (ret)
> return ret;
> +
> + if (is_madvise_ops)
> + needs_map = true;
> break;
> }
> } else if (addr < req->op_map.va.addr) {
> @@ -2173,20 +2187,45 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
> u.keep = merge;
>
> if (end == req_end) {
> + if (skip_madvise_ops)
> + break;
> +
> ret = op_remap_cb(ops, priv, &p, NULL, &u);
> if (ret)
> return ret;
> +
> + if (is_madvise_ops)
> + needs_map = true;
> +
> break;
> }
>
> if (end < req_end) {
> + if (skip_madvise_ops)
> + continue;
> +
> ret = op_remap_cb(ops, priv, &p, NULL, &u);
> if (ret)
> return ret;
> +
> + if (is_madvise_ops) {
> + struct drm_gpuvm_map_req map_req = {
> + .op_map.va.addr = req->op_map.va.addr,
> + .op_map.va.range = end - req->op_map.va.addr,
> + };
> +
> + ret = op_map_cb(ops, priv, &map_req);
> + if (ret)
> + return ret;
> + }
> +
> continue;
> }
>
> if (end > req_end) {
> + if (skip_madvise_ops)
> + break;
> +
> struct drm_gpuva_op_map n = {
> .va.addr = req_end,
> .va.range = end - req_end,
> @@ -2198,6 +2237,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
> ret = op_remap_cb(ops, priv, &p, &n, &u);
> if (ret)
> return ret;
> +
> + if (is_madvise_ops)
> + needs_map = true;
> break;
> }
> } else if (addr > req->op_map.va.addr) {
> @@ -2206,20 +2248,29 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
> (addr - req->op_map.va.addr);
>
> if (end == req_end) {
> - ret = op_unmap_cb(ops, priv, va, merge);
> - if (ret)
> - return ret;
> + if (!is_madvise_ops) {
> + ret = op_unmap_cb(ops, priv, va, merge);
> + if (ret)
> + return ret;
> + }
> +
> break;
> }
>
> if (end < req_end) {
> - ret = op_unmap_cb(ops, priv, va, merge);
> - if (ret)
> - return ret;
> + if (!is_madvise_ops) {
> + ret = op_unmap_cb(ops, priv, va, merge);
> + if (ret)
> + return ret;
> + }
> +
> continue;
> }
>
> if (end > req_end) {
> + if (skip_madvise_ops)
> + break;
> +
> struct drm_gpuva_op_map n = {
> .va.addr = req_end,
> .va.range = end - req_end,
> @@ -2234,12 +2285,20 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
> ret = op_remap_cb(ops, priv, NULL, &n, &u);
> if (ret)
> return ret;
> +
> + if (is_madvise_ops) {
> + struct drm_gpuvm_map_req map_req = {
> + .op_map.va.addr = addr,
> + .op_map.va.range = req_end - addr,
> + };
> +
> + return op_map_cb(ops, priv, &map_req);
> + }
> break;
> }
> }
> }
> -
> - return op_map_cb(ops, priv, req);
> + return needs_map ? op_map_cb(ops, priv, req) : 0;
> }
>
> static int
> diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
> index 116f77abd570..fa2b74a54534 100644
> --- a/include/drm/drm_gpuvm.h
> +++ b/include/drm/drm_gpuvm.h
> @@ -1054,6 +1054,17 @@ enum drm_gpuvm_sm_map_ops_flags {
> * %DRM_GPUVM_SM_MAP_OPS_FLAG_NONE: DEFAULT sm_map ops
> */
> DRM_GPUVM_SM_MAP_OPS_FLAG_NONE = 0,
> +
> + /**
> + * @DRM_GPUVM_SM_MAP_OPS_FLAG_SPLIT_MADVISE: This flag is used by
> + * drm_gpuvm_sm_map_ops_create to iterate over GPUVMA's in the
> + * user-provided range and split the existing non-GEM object VMA if the
> + * start or end of the input range lies within it. The operations can
> + * create up to 2 REMAPS and 2 MAPs. Unlike drm_gpuvm_sm_map_ops_flags
> + * in default mode, the operation with this flag will never have UNMAPs
> + * and merges, and can be without any final operations.
> + */
> + DRM_GPUVM_SM_MAP_OPS_FLAG_SPLIT_MADVISE = BIT(0),
> };
>
> /**
> --
> 2.34.1
>
More information about the dri-devel
mailing list