[RFC 13/29] drm/gpuvm: Introduce MADVISE Operations
Ghimiray, Himal Prasad
himal.prasad.ghimiray at intel.com
Fri Mar 14 08:46:12 UTC 2025
On 14-03-2025 13:32, Himal Prasad Ghimiray wrote:
> Introduce MADVISE operations that do not unmap the GPU VMA. These
> operations split VMAs if the start or end address falls within an
> existing VMA, and can create up to two REMAP and two MAP operations.
>
> If the input range lies entirely within a single existing VMA, the
> result is REMAP:UNMAP, REMAP:PREV, REMAP:NEXT, and MAP operations for
> the input range.
> Example:
> Input Range: 0x00007f0a54000000 to 0x00007f0a54400000
> GPU VMA: 0x0000000000000000 to 0x0000800000000000
> Operations Result:
> - REMAP:UNMAP: addr=0x0000000000000000, range=0x0000800000000000
> - REMAP:PREV: addr=0x0000000000000000, range=0x00007f0a54000000
> - REMAP:NEXT: addr=0x00007f0a54400000, range=0x000000f5abc00000
> - MAP: addr=0x00007f0a54000000, range=0x0000000000400000
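For reference, the arithmetic behind these four results follows directly
from the VMA and request bounds. A quick standalone userspace check
(illustrative only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t vma_addr = 0x0000000000000000ULL;
	const uint64_t vma_end  = 0x0000800000000000ULL;
	const uint64_t req_addr = 0x00007f0a54000000ULL;
	const uint64_t req_end  = 0x00007f0a54400000ULL;

	/* REMAP:PREV keeps [vma_addr, req_addr) */
	printf("prev: 0x%016llx\n", (unsigned long long)(req_addr - vma_addr));
	/* REMAP:NEXT keeps [req_end, vma_end) -> 0x000000f5abc00000 */
	printf("next: 0x%016llx\n", (unsigned long long)(vma_end - req_end));
	/* MAP covers the request itself -> 0x0000000000400000 */
	printf("map:  0x%016llx\n", (unsigned long long)(req_end - req_addr));
	return 0;
}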
>
> If the input range starts at the beginning of one GPU VMA and ends at
> the end of another, covering multiple VMAs, no operations are created:
> every request boundary already coincides with a VMA boundary, so no
> split is needed.
> Example:
> Input Range: 0x00007fc898800000 to 0x00007fc899000000
> GPU VMAs:
> - 0x0000000000000000 to 0x00007fc898800000
> - 0x00007fc898800000 to 0x00007fc898a00000
> - 0x00007fc898a00000 to 0x00007fc898c00000
> - 0x00007fc898c00000 to 0x00007fc899000000
> - 0x00007fc899000000 to 0x00007fc899200000
> Operations Result: None
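The "None" result can be sanity-checked the same way: a split is only
needed when a VMA straddles one of the request boundaries. Illustrative
userspace snippet, not part of the patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t req_addr = 0x00007fc898800000ULL;
	const uint64_t req_end  = 0x00007fc899000000ULL;
	/* the VMAs overlapping the request, as {addr, end} pairs */
	const uint64_t vmas[][2] = {
		{ 0x00007fc898800000ULL, 0x00007fc898a00000ULL },
		{ 0x00007fc898a00000ULL, 0x00007fc898c00000ULL },
		{ 0x00007fc898c00000ULL, 0x00007fc899000000ULL },
	};
	int splits = 0;

	for (unsigned int i = 0; i < sizeof(vmas) / sizeof(vmas[0]); i++) {
		if (vmas[i][0] < req_addr && vmas[i][1] > req_addr)
			splits++;	/* VMA straddles the start */
		if (vmas[i][0] < req_end && vmas[i][1] > req_end)
			splits++;	/* VMA straddles the end */
	}
	printf("splits needed: %d\n", splits);	/* prints 0 */
	return 0;
}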
>
> Cc: Danilo Krummrich <dakr at redhat.com>
> Cc: Matthew Brost <matthew.brost at intel.com>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
> ---
> drivers/gpu/drm/drm_gpuvm.c | 175 +++++++++++++++++++++++++++++++++++-
> include/drm/drm_gpuvm.h | 6 ++
> 2 files changed, 180 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
> index f9eb56f24bef..904a26641b21 100644
> --- a/drivers/gpu/drm/drm_gpuvm.c
> +++ b/drivers/gpu/drm/drm_gpuvm.c
> @@ -2230,7 +2230,7 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
> ret = op_remap_cb(ops, priv, NULL, &n, &u);
> if (ret)
> return ret;
> - break;
> + return 0;
> - break;
> + return 0;
These duplicated lines were incorrectly left in the patch; please ignore them.
> }
> }
> }
> @@ -2240,6 +2240,143 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
> req_obj, req_offset);
> }
>
> +static int
> +__drm_gpuvm_skip_split_map(struct drm_gpuvm *gpuvm,
> + const struct drm_gpuvm_ops *ops, void *priv,
> + u64 req_addr, u64 req_range,
> + bool skip_gem_obj_va, u64 req_offset)
> +{
> + struct drm_gpuva *va, *next;
> + u64 req_end = req_addr + req_range;
> + int ret;
> +
> + if (unlikely(!drm_gpuvm_range_valid(gpuvm, req_addr, req_range)))
> + return -EINVAL;
> +
> + drm_gpuvm_for_each_va_range_safe(va, next, gpuvm, req_addr, req_end) {
> + struct drm_gem_object *obj = va->gem.obj;
> + u64 offset = va->gem.offset;
> + u64 addr = va->va.addr;
> + u64 range = va->va.range;
> + u64 end = addr + range;
> +
> + if (addr == req_addr) {
> + if (end == req_end)
> + return 0;
> +
> + if (end < req_end)
> + continue;
> +
> + if (end > req_end) {
> + if (skip_gem_obj_va && !!obj)
> + return 0;
> +
> + struct drm_gpuva_op_map n = {
> + .va.addr = req_end,
> + .va.range = range - req_range,
> + .gem.obj = obj,
> + .gem.offset = offset + req_range,
> + };
> + struct drm_gpuva_op_unmap u = {
> + .va = va,
> + .keep = false,
> + };
> +
> + ret = op_remap_cb(ops, priv, NULL, &n, &u);
> + if (ret)
> + return ret;
> +
> + break;
> + }
> + } else if (addr < req_addr) {
> + u64 ls_range = req_addr - addr;
> + struct drm_gpuva_op_map p = {
> + .va.addr = addr,
> + .va.range = ls_range,
> + .gem.obj = obj,
> + .gem.offset = offset,
> + };
> + struct drm_gpuva_op_unmap u = { .va = va, .keep = false, };
> +
> + if (end == req_end) {
> + if (skip_gem_obj_va && !!obj)
> + return 0;
> +
> + ret = op_remap_cb(ops, priv, &p, NULL, &u);
> + if (ret)
> + return ret;
> + break;
> + }
> +
> + if (end < req_end) {
> + if (skip_gem_obj_va && !!obj)
> + continue;
> +
> + ret = op_remap_cb(ops, priv, &p, NULL, &u);
> + if (ret)
> + return ret;
> +
> +				ret = op_map_cb(ops, priv, req_addr,
> +						end - req_addr,
> +						NULL, req_offset);
> + if (ret)
> + return ret;
> + continue;
> + }
> +
> + if (end > req_end) {
> + if (skip_gem_obj_va && !!obj)
> + return 0;
> +
> + struct drm_gpuva_op_map n = {
> + .va.addr = req_end,
> + .va.range = end - req_end,
> + .gem.obj = obj,
> + .gem.offset = offset + ls_range +
> + req_range,
> + };
> +
> + ret = op_remap_cb(ops, priv, &p, &n, &u);
> + if (ret)
> + return ret;
> + break;
> + }
> + } else if (addr > req_addr) {
> + if (end == req_end)
> + return 0;
> +
> + if (end < req_end)
> + continue;
> +
> + if (end > req_end) {
> + if (skip_gem_obj_va && !!obj)
> + return 0;
> +
> + struct drm_gpuva_op_map n = {
> + .va.addr = req_end,
> + .va.range = end - req_end,
> + .gem.obj = obj,
> + .gem.offset = offset + req_end - addr,
> + };
> + struct drm_gpuva_op_unmap u = {
> + .va = va,
> + .keep = false,
> + };
> +
> + ret = op_remap_cb(ops, priv, NULL, &n, &u);
> + if (ret)
> + return ret;
> + return op_map_cb(ops, priv, addr,
> + (req_end - addr), NULL, req_offset);
> + }
> + }
> + }
> +
> + return op_map_cb(ops, priv,
> + req_addr, req_range,
> + NULL, req_offset);
> +}
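To summarize the branch ladder above, here is a rough userspace mirror of
the per-VMA classification it performs (names and strings are made up for
this sketch; it is not kernel code):

#include <stdint.h>
#include <stdio.h>

static const char *classify(uint64_t addr, uint64_t end,
			    uint64_t req_addr, uint64_t req_end)
{
	if (addr == req_addr) {
		if (end == req_end)
			return "exact fit: no ops";
		if (end < req_end)
			return "aligned start, ends inside: left as-is";
		return "aligned start, extends past end: REMAP(next), then MAP";
	}
	if (addr < req_addr) {
		if (end == req_end)
			return "extends before start: REMAP(prev), then MAP";
		if (end < req_end)
			return "straddles start only: REMAP(prev) + MAP";
		return "spans whole request: REMAP(prev, next), then MAP";
	}
	/* addr > req_addr */
	if (end == req_end)
		return "starts inside, aligned end: no ops";
	if (end > req_end)
		return "straddles end only: REMAP(next) + MAP";
	return "fully inside request: left as-is";
}

int main(void)
{
	/* first example from the commit message: the VMA spans the request */
	puts(classify(0x0ULL, 0x800000000000ULL,
		      0x7f0a54000000ULL, 0x7f0a54400000ULL));
	return 0;
}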
> +
> static int
> __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
> const struct drm_gpuvm_ops *ops, void *priv,
> @@ -2548,6 +2685,42 @@ drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
> }
> EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_ops_create);
>
> +struct drm_gpuva_ops *
> +drm_gpuvm_madvise_ops_create(struct drm_gpuvm *gpuvm,
> + u64 req_addr, u64 req_range,
> + bool skip_gem_obj_va, u64 req_offset)
> +{
> + struct drm_gpuva_ops *ops;
> + struct {
> + struct drm_gpuvm *vm;
> + struct drm_gpuva_ops *ops;
> + } args;
> + int ret;
> +
> + ops = kzalloc(sizeof(*ops), GFP_KERNEL);
> + if (unlikely(!ops))
> + return ERR_PTR(-ENOMEM);
> +
> + INIT_LIST_HEAD(&ops->list);
> +
> + args.vm = gpuvm;
> + args.ops = ops;
> +
> + ret = __drm_gpuvm_skip_split_map(gpuvm, &gpuvm_list_ops, &args,
> + req_addr, req_range,
> + skip_gem_obj_va, req_offset);
> +
> + if (ret || list_empty(&ops->list))
> + goto err_free_ops;
> +
> + return ops;
> +
> +err_free_ops:
> + drm_gpuva_ops_free(gpuvm, ops);
> + return ERR_PTR(ret);
> +}
> +EXPORT_SYMBOL_GPL(drm_gpuvm_madvise_ops_create);
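A hypothetical driver-side caller might look roughly like the sketch below
(the function name, the skip flag value, and the per-op handling are
illustrative only). Note that, as written, an empty ops list makes the
function return ERR_PTR(0), i.e. NULL, which is why this sketch uses
IS_ERR_OR_NULL():

#include <drm/drm_gpuvm.h>

static int my_madvise(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
{
	struct drm_gpuva_ops *ops;
	struct drm_gpuva_op *op;

	ops = drm_gpuvm_madvise_ops_create(gpuvm, addr, range,
					   true /* skip_gem_obj_va */, 0);
	if (IS_ERR_OR_NULL(ops))
		return ops ? PTR_ERR(ops) : 0;

	drm_gpuva_for_each_op(op, ops) {
		switch (op->op) {
		case DRM_GPUVA_OP_MAP:
			/* new VMA covering (part of) the madvise range */
			break;
		case DRM_GPUVA_OP_REMAP:
			/* existing VMA split around a request boundary */
			break;
		default:
			break;
		}
	}

	drm_gpuva_ops_free(gpuvm, ops);
	return 0;
}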
> +
> /**
> * drm_gpuvm_sm_unmap_ops_create() - creates the &drm_gpuva_ops to split on
> * unmap
> diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
> index 2a9629377633..e521ebabab9e 100644
> --- a/include/drm/drm_gpuvm.h
> +++ b/include/drm/drm_gpuvm.h
> @@ -1062,6 +1062,12 @@ struct drm_gpuva_ops *
> drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
> u64 addr, u64 range,
> struct drm_gem_object *obj, u64 offset);
> +
> +struct drm_gpuva_ops *
> +drm_gpuvm_madvise_ops_create(struct drm_gpuvm *gpuvm,
> + u64 addr, u64 range,
> + bool skip_gem_obj_va, u64 offset);
> +
> struct drm_gpuva_ops *
> drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
> u64 addr, u64 range);