[PATCH 05/12] drm/xe: Encapsulate drm_mm_node inside xe_ggtt_node
Matthew Brost
matthew.brost at intel.com
Tue Jul 16 17:16:57 UTC 2024
On Thu, Jul 11, 2024 at 01:11:48PM -0400, Rodrigo Vivi wrote:
> The xe_ggtt component uses drm_mm to manage the GGTT.
> The drm_mm_node is just a node inside drm_mm, but in Xe we use that
> only in the GGTT context. So, this patch encapsulates the drm_mm_node
> into a xe_ggtt's new struct.
>
> This is the first step towards limiting all the drm_mm access
> through xe_ggtt. The ultimate goal is to have a better control of
> the node insertion and removal, so the removal can be delegated
> to a delayed workqueue.
>
> Cc: Matthew Brost <matthew.brost at intel.com>
Agree with Michal's nits, but the changes in this patch LGTM.
With Michal's comments addressed:
Reviewed-by: Matthew Brost <matthew.brost at intel.com>
> Signed-off-by: Rodrigo Vivi <rodrigo.vivi at intel.com>
> ---
> .../gpu/drm/xe/compat-i915-headers/i915_vma.h | 7 +-
> drivers/gpu/drm/xe/display/xe_fb_pin.c | 10 +--
> drivers/gpu/drm/xe/xe_bo.c | 2 +-
> drivers/gpu/drm/xe/xe_bo.h | 6 +-
> drivers/gpu/drm/xe/xe_bo_types.h | 5 +-
> drivers/gpu/drm/xe/xe_device_types.h | 2 +-
> drivers/gpu/drm/xe/xe_ggtt.c | 72 +++++++++----------
> drivers/gpu/drm/xe/xe_ggtt.h | 12 ++--
> drivers/gpu/drm/xe/xe_ggtt_types.h | 8 +++
> drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c | 39 +++++-----
> .../gpu/drm/xe/xe_gt_sriov_pf_config_types.h | 4 +-
> 11 files changed, 90 insertions(+), 77 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h
> index a20d2638ea7a..97193e660f6c 100644
> --- a/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h
> +++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h
> @@ -7,7 +7,8 @@
> #define I915_VMA_H
>
> #include <uapi/drm/i915_drm.h>
> -#include <drm/drm_mm.h>
> +
> +#include <xe_ggtt_types.h>
>
> /* We don't want these from i915_drm.h in case of Xe */
> #undef I915_TILING_X
> @@ -19,7 +20,7 @@ struct xe_bo;
>
> struct i915_vma {
> struct xe_bo *bo, *dpt;
> - struct drm_mm_node node;
> + struct xe_ggtt_node node;
> };
>
> #define i915_ggtt_clear_scanout(bo) do { } while (0)
> @@ -28,7 +29,7 @@ struct i915_vma {
>
> static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
> {
> - return vma->node.start;
> + return vma->node.base.start;
> }
>
> #endif
> diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c
> index 42d431ff14e7..a93923fb8721 100644
> --- a/drivers/gpu/drm/xe/display/xe_fb_pin.c
> +++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c
> @@ -204,7 +204,7 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
> if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
> align = max_t(u32, align, SZ_64K);
>
> - if (bo->ggtt_node.size && view->type == I915_GTT_VIEW_NORMAL) {
> + if (bo->ggtt_node.base.size && view->type == I915_GTT_VIEW_NORMAL) {
> vma->node = bo->ggtt_node;
> } else if (view->type == I915_GTT_VIEW_NORMAL) {
> u32 x, size = bo->ttm.base.size;
> @@ -218,7 +218,7 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
> u64 pte = ggtt->pt_ops->pte_encode_bo(bo, x,
> xe->pat.idx[XE_CACHE_NONE]);
>
> - ggtt->pt_ops->ggtt_set_pte(ggtt, vma->node.start + x, pte);
> + ggtt->pt_ops->ggtt_set_pte(ggtt, vma->node.base.start + x, pte);
> }
> } else {
> u32 i, ggtt_ofs;
> @@ -232,7 +232,7 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
> if (ret)
> goto out_unlock;
>
> - ggtt_ofs = vma->node.start;
> + ggtt_ofs = vma->node.base.start;
>
> for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++)
> write_ggtt_rotated(bo, ggtt, &ggtt_ofs,
> @@ -325,8 +325,8 @@ static void __xe_unpin_fb_vma(struct i915_vma *vma)
>
> if (vma->dpt)
> xe_bo_unpin_map_no_vm(vma->dpt);
> - else if (!drm_mm_node_allocated(&vma->bo->ggtt_node) ||
> - vma->bo->ggtt_node.start != vma->node.start)
> + else if (!drm_mm_node_allocated(&vma->bo->ggtt_node.base) ||
> + vma->bo->ggtt_node.base.start != vma->node.base.start)
> xe_ggtt_remove_node(ggtt, &vma->node, false);
>
> ttm_bo_reserve(&vma->bo->ttm, false, false, NULL);
> diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
> index 31192d983d9e..3501a5871069 100644
> --- a/drivers/gpu/drm/xe/xe_bo.c
> +++ b/drivers/gpu/drm/xe/xe_bo.c
> @@ -1090,7 +1090,7 @@ static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
>
> xe_assert(xe, list_empty(&ttm_bo->base.gpuva.list));
>
> - if (bo->ggtt_node.size)
> + if (bo->ggtt_node.base.size)
> xe_ggtt_remove_bo(bo->tile->mem.ggtt, bo);
>
> #ifdef CONFIG_PROC_FS
> diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
> index 6de894c728f5..7c95133cc32b 100644
> --- a/drivers/gpu/drm/xe/xe_bo.h
> +++ b/drivers/gpu/drm/xe/xe_bo.h
> @@ -194,9 +194,9 @@ xe_bo_main_addr(struct xe_bo *bo, size_t page_size)
> static inline u32
> xe_bo_ggtt_addr(struct xe_bo *bo)
> {
> - XE_WARN_ON(bo->ggtt_node.size > bo->size);
> - XE_WARN_ON(bo->ggtt_node.start + bo->ggtt_node.size > (1ull << 32));
> - return bo->ggtt_node.start;
> + XE_WARN_ON(bo->ggtt_node.base.size > bo->size);
> + XE_WARN_ON(bo->ggtt_node.base.start + bo->ggtt_node.base.size > (1ull << 32));
> + return bo->ggtt_node.base.start;
> }
>
> int xe_bo_vmap(struct xe_bo *bo);
> diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
> index ebc8abf7930a..3ba96a93623c 100644
> --- a/drivers/gpu/drm/xe/xe_bo_types.h
> +++ b/drivers/gpu/drm/xe/xe_bo_types.h
> @@ -8,12 +8,13 @@
>
> #include <linux/iosys-map.h>
>
> -#include <drm/drm_mm.h>
> #include <drm/ttm/ttm_bo.h>
> #include <drm/ttm/ttm_device.h>
> #include <drm/ttm/ttm_execbuf_util.h>
> #include <drm/ttm/ttm_placement.h>
>
> +#include <xe_ggtt_types.h>
> +
> struct xe_device;
> struct xe_vm;
>
> @@ -39,7 +40,7 @@ struct xe_bo {
> /** @placement: current placement for this BO */
> struct ttm_placement placement;
> /** @ggtt_node: GGTT node if this BO is mapped in the GGTT */
> - struct drm_mm_node ggtt_node;
> + struct xe_ggtt_node ggtt_node;
> /** @vmap: iosys map of this buffer */
> struct iosys_map vmap;
> /** @ttm_kmap: TTM bo kmap object for internal use only. Keep off. */
> diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
> index f0cf9020e463..30f9c58932bb 100644
> --- a/drivers/gpu/drm/xe/xe_device_types.h
> +++ b/drivers/gpu/drm/xe/xe_device_types.h
> @@ -203,7 +203,7 @@ struct xe_tile {
> struct xe_memirq memirq;
>
> /** @sriov.vf.ggtt_balloon: GGTT regions excluded from use. */
> - struct drm_mm_node ggtt_balloon[2];
> + struct xe_ggtt_node ggtt_balloon[2];
> } vf;
> } sriov;
>
> diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
> index 709ef48f2fdb..ea55c7eabee4 100644
> --- a/drivers/gpu/drm/xe/xe_ggtt.c
> +++ b/drivers/gpu/drm/xe/xe_ggtt.c
> @@ -351,61 +351,61 @@ static void xe_ggtt_dump_node(struct xe_ggtt *ggtt,
> * @ggtt: the &xe_ggtt where we want to make reservation
> * @start: the starting GGTT address of the reserved region
> * @end: then end GGTT address of the reserved region
> - * @node: the &drm_mm_node to hold reserved GGTT node
> + * @node: the &xe_ggtt_node to hold reserved GGTT node
> *
> * Use xe_ggtt_deballoon() to release a reserved GGTT node.
> *
> * Return: 0 on success or a negative error code on failure.
> */
> -int xe_ggtt_balloon(struct xe_ggtt *ggtt, u64 start, u64 end, struct drm_mm_node *node)
> +int xe_ggtt_balloon(struct xe_ggtt *ggtt, u64 start, u64 end, struct xe_ggtt_node *node)
> {
> int err;
>
> xe_tile_assert(ggtt->tile, start < end);
> xe_tile_assert(ggtt->tile, IS_ALIGNED(start, XE_PAGE_SIZE));
> xe_tile_assert(ggtt->tile, IS_ALIGNED(end, XE_PAGE_SIZE));
> - xe_tile_assert(ggtt->tile, !drm_mm_node_allocated(node));
> + xe_tile_assert(ggtt->tile, !drm_mm_node_allocated(&node->base));
>
> - node->color = 0;
> - node->start = start;
> - node->size = end - start;
> + node->base.color = 0;
> + node->base.start = start;
> + node->base.size = end - start;
>
> mutex_lock(&ggtt->lock);
> - err = drm_mm_reserve_node(&ggtt->mm, node);
> + err = drm_mm_reserve_node(&ggtt->mm, &node->base);
> mutex_unlock(&ggtt->lock);
>
> if (xe_gt_WARN(ggtt->tile->primary_gt, err,
> "Failed to balloon GGTT %#llx-%#llx (%pe)\n",
> - node->start, node->start + node->size, ERR_PTR(err)))
> + node->base.start, node->base.start + node->base.size, ERR_PTR(err)))
> return err;
>
> - xe_ggtt_dump_node(ggtt, node, "balloon");
> + xe_ggtt_dump_node(ggtt, &node->base, "balloon");
> return 0;
> }
>
> /**
> * xe_ggtt_deballoon - release a reserved GGTT region
> * @ggtt: the &xe_ggtt where reserved node belongs
> - * @node: the &drm_mm_node with reserved GGTT region
> + * @node: the &xe_ggtt_node with reserved GGTT region
> *
> * See xe_ggtt_balloon() for details.
> */
> -void xe_ggtt_deballoon(struct xe_ggtt *ggtt, struct drm_mm_node *node)
> +void xe_ggtt_deballoon(struct xe_ggtt *ggtt, struct xe_ggtt_node *node)
> {
> - if (!drm_mm_node_allocated(node))
> + if (!drm_mm_node_allocated(&node->base))
> return;
>
> - xe_ggtt_dump_node(ggtt, node, "deballoon");
> + xe_ggtt_dump_node(ggtt, &node->base, "deballoon");
>
> mutex_lock(&ggtt->lock);
> - drm_mm_remove_node(node);
> + drm_mm_remove_node(&node->base);
> mutex_unlock(&ggtt->lock);
> }
>
> /**
> - * xe_ggtt_insert_special_node_locked - Locked version to insert a &drm_mm_node into the GGTT
> + * xe_ggtt_insert_special_node_locked - Locked version to insert a &xe_ggtt_node into the GGTT
> * @ggtt: the &xe_ggtt where node will be inserted
> - * @node: the &drm_mm_node to be inserted
> + * @node: the &xe_ggtt_node to be inserted
> * @size: size of the node
> * @align: alignment constrain of the node
> * @mm_flags: flags to control the node behavior
> @@ -414,23 +414,23 @@ void xe_ggtt_deballoon(struct xe_ggtt *ggtt, struct drm_mm_node *node)
> *
> * Return: 0 on success or a negative error code on failure.
> */
> -int xe_ggtt_insert_special_node_locked(struct xe_ggtt *ggtt, struct drm_mm_node *node,
> +int xe_ggtt_insert_special_node_locked(struct xe_ggtt *ggtt, struct xe_ggtt_node *node,
> u32 size, u32 align, u32 mm_flags)
> {
> - return drm_mm_insert_node_generic(&ggtt->mm, node, size, align, 0,
> + return drm_mm_insert_node_generic(&ggtt->mm, &node->base, size, align, 0,
> mm_flags);
> }
>
> /**
> - * xe_ggtt_insert_special_node - Insert a &drm_mm_node into the GGTT
> + * xe_ggtt_insert_special_node - Insert a &xe_ggtt_node into the GGTT
> * @ggtt: the &xe_ggtt where node will be inserted
> - * @node: the &drm_mm_node to be inserted
> + * @node: the &xe_ggtt_node to be inserted
> * @size: size of the node
> * @align: alignment constrain of the node
> *
> * Return: 0 on success or a negative error code on failure.
> */
> -int xe_ggtt_insert_special_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
> +int xe_ggtt_insert_special_node(struct xe_ggtt *ggtt, struct xe_ggtt_node *node,
> u32 size, u32 align)
> {
> int ret;
> @@ -452,7 +452,7 @@ void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
> {
> u16 cache_mode = bo->flags & XE_BO_FLAG_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
> u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode];
> - u64 start = bo->ggtt_node.start;
> + u64 start = bo->ggtt_node.base.start;
> u64 offset, pte;
>
> for (offset = 0; offset < bo->size; offset += XE_PAGE_SIZE) {
> @@ -470,9 +470,9 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
> if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
> alignment = SZ_64K;
>
> - if (XE_WARN_ON(bo->ggtt_node.size)) {
> + if (XE_WARN_ON(bo->ggtt_node.base.size)) {
> /* Someone's already inserted this BO in the GGTT */
> - xe_tile_assert(ggtt->tile, bo->ggtt_node.size == bo->size);
> + xe_tile_assert(ggtt->tile, bo->ggtt_node.base.size == bo->size);
> return 0;
> }
>
> @@ -482,7 +482,7 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
>
> xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile));
> mutex_lock(&ggtt->lock);
> - err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node, bo->size,
> + err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node.base, bo->size,
> alignment, 0, start, end, 0);
> if (!err)
> xe_ggtt_map_bo(ggtt, bo);
> @@ -523,12 +523,12 @@ int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
> }
>
> /**
> - * xe_ggtt_remove_node - Remove a &drm_mm_node from the GGTT
> + * xe_ggtt_remove_node - Remove a &xe_ggtt_node from the GGTT
> * @ggtt: the &xe_ggtt where node will be removed
> - * @node: the &drm_mm_node to be removed
> + * @node: the &xe_ggtt_node to be removed
> * @invalidate: if node needs invalidation upon removal
> */
> -void xe_ggtt_remove_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
> +void xe_ggtt_remove_node(struct xe_ggtt *ggtt, struct xe_ggtt_node *node,
> bool invalidate)
> {
> struct xe_device *xe = tile_to_xe(ggtt->tile);
> @@ -541,9 +541,9 @@ void xe_ggtt_remove_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
>
> mutex_lock(&ggtt->lock);
> if (bound)
> - xe_ggtt_clear(ggtt, node->start, node->size);
> - drm_mm_remove_node(node);
> - node->size = 0;
> + xe_ggtt_clear(ggtt, node->base.start, node->base.size);
> + drm_mm_remove_node(&node->base);
> + node->base.size = 0;
> mutex_unlock(&ggtt->lock);
>
> if (!bound)
> @@ -563,11 +563,11 @@ void xe_ggtt_remove_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
> */
> void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
> {
> - if (XE_WARN_ON(!bo->ggtt_node.size))
> + if (XE_WARN_ON(!bo->ggtt_node.base.size))
> return;
>
> /* This BO is not currently in the GGTT */
> - xe_tile_assert(ggtt->tile, bo->ggtt_node.size == bo->size);
> + xe_tile_assert(ggtt->tile, bo->ggtt_node.base.size == bo->size);
>
> xe_ggtt_remove_node(ggtt, &bo->ggtt_node,
> bo->flags & XE_BO_FLAG_GGTT_INVALIDATE);
> @@ -602,17 +602,17 @@ static void xe_ggtt_assign_locked(struct xe_ggtt *ggtt, const struct drm_mm_node
> /**
> * xe_ggtt_assign - assign a GGTT region to the VF
> * @ggtt: the &xe_ggtt where the node belongs
> - * @node: the &drm_mm_node to update
> + * @node: the &xe_ggtt_node to update
> * @vfid: the VF identifier
> *
> * This function is used by the PF driver to assign a GGTT region to the VF.
> * In addition to PTE's VFID bits 11:2 also PRESENT bit 0 is set as on some
> * platforms VFs can't modify that either.
> */
> -void xe_ggtt_assign(struct xe_ggtt *ggtt, const struct drm_mm_node *node, u16 vfid)
> +void xe_ggtt_assign(struct xe_ggtt *ggtt, const struct xe_ggtt_node *node, u16 vfid)
> {
> mutex_lock(&ggtt->lock);
> - xe_ggtt_assign_locked(ggtt, node, vfid);
> + xe_ggtt_assign_locked(ggtt, &node->base, vfid);
> mutex_unlock(&ggtt->lock);
> }
> #endif
> diff --git a/drivers/gpu/drm/xe/xe_ggtt.h b/drivers/gpu/drm/xe/xe_ggtt.h
> index 2546bab97507..30a521f7b075 100644
> --- a/drivers/gpu/drm/xe/xe_ggtt.h
> +++ b/drivers/gpu/drm/xe/xe_ggtt.h
> @@ -13,15 +13,15 @@ struct drm_printer;
> int xe_ggtt_init_early(struct xe_ggtt *ggtt);
> int xe_ggtt_init(struct xe_ggtt *ggtt);
>
> -int xe_ggtt_balloon(struct xe_ggtt *ggtt, u64 start, u64 size, struct drm_mm_node *node);
> -void xe_ggtt_deballoon(struct xe_ggtt *ggtt, struct drm_mm_node *node);
> +int xe_ggtt_balloon(struct xe_ggtt *ggtt, u64 start, u64 size, struct xe_ggtt_node *node);
> +void xe_ggtt_deballoon(struct xe_ggtt *ggtt, struct xe_ggtt_node *node);
>
> -int xe_ggtt_insert_special_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
> +int xe_ggtt_insert_special_node(struct xe_ggtt *ggtt, struct xe_ggtt_node *node,
> u32 size, u32 align);
> int xe_ggtt_insert_special_node_locked(struct xe_ggtt *ggtt,
> - struct drm_mm_node *node,
> + struct xe_ggtt_node *node,
> u32 size, u32 align, u32 mm_flags);
> -void xe_ggtt_remove_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
> +void xe_ggtt_remove_node(struct xe_ggtt *ggtt, struct xe_ggtt_node *node,
> bool invalidate);
> void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
> int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
> @@ -32,7 +32,7 @@ void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
> int xe_ggtt_dump(struct xe_ggtt *ggtt, struct drm_printer *p);
>
> #ifdef CONFIG_PCI_IOV
> -void xe_ggtt_assign(struct xe_ggtt *ggtt, const struct drm_mm_node *node, u16 vfid);
> +void xe_ggtt_assign(struct xe_ggtt *ggtt, const struct xe_ggtt_node *node, u16 vfid);
> #endif
>
> #endif
> diff --git a/drivers/gpu/drm/xe/xe_ggtt_types.h b/drivers/gpu/drm/xe/xe_ggtt_types.h
> index 4e2114201b35..f3292e6c3873 100644
> --- a/drivers/gpu/drm/xe/xe_ggtt_types.h
> +++ b/drivers/gpu/drm/xe/xe_ggtt_types.h
> @@ -47,6 +47,14 @@ struct xe_ggtt {
> unsigned int access_count;
> };
>
> +/**
> + * struct xe_ggtt_node - A node in GGTT
> + */
> +struct xe_ggtt_node {
> + /** @base: A drm_mm_node */
> + struct drm_mm_node base;
> +};
> +
> /**
> * struct xe_ggtt_pt_ops - GGTT Page table operations
> * Which can vary from platform to platform.
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
> index db6c213da847..3600468da013 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
> @@ -6,6 +6,9 @@
> #include <linux/string_choices.h>
> #include <linux/wordpart.h>
>
> +/* FIXME: remove this after encapsulating all drm_mm_node access into xe_ggtt */
> +#include <drm/drm_mm.h>
> +
> #include "abi/guc_actions_sriov_abi.h"
> #include "abi/guc_klvs_abi.h"
>
> @@ -232,14 +235,14 @@ static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config)
> {
> u32 n = 0;
>
> - if (drm_mm_node_allocated(&config->ggtt_region)) {
> + if (drm_mm_node_allocated(&config->ggtt_region.base)) {
> cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
> - cfg[n++] = lower_32_bits(config->ggtt_region.start);
> - cfg[n++] = upper_32_bits(config->ggtt_region.start);
> + cfg[n++] = lower_32_bits(config->ggtt_region.base.start);
> + cfg[n++] = upper_32_bits(config->ggtt_region.base.start);
>
> cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE);
> - cfg[n++] = lower_32_bits(config->ggtt_region.size);
> - cfg[n++] = upper_32_bits(config->ggtt_region.size);
> + cfg[n++] = lower_32_bits(config->ggtt_region.base.size);
> + cfg[n++] = upper_32_bits(config->ggtt_region.base.size);
> }
>
> return n;
> @@ -369,11 +372,11 @@ static int pf_distribute_config_ggtt(struct xe_tile *tile, unsigned int vfid, u6
> return err ?: err2;
> }
>
> -static void pf_release_ggtt(struct xe_tile *tile, struct drm_mm_node *node)
> +static void pf_release_ggtt(struct xe_tile *tile, struct xe_ggtt_node *node)
> {
> struct xe_ggtt *ggtt = tile->mem.ggtt;
>
> - if (drm_mm_node_allocated(node)) {
> + if (drm_mm_node_allocated(&node->base)) {
> /*
> * explicit GGTT PTE assignment to the PF using xe_ggtt_assign()
> * is redundant, as PTE will be implicitly re-assigned to PF by
> @@ -391,7 +394,7 @@ static void pf_release_vf_config_ggtt(struct xe_gt *gt, struct xe_gt_sriov_confi
> static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
> {
> struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
> - struct drm_mm_node *node = &config->ggtt_region;
> + struct xe_ggtt_node *node = &config->ggtt_region;
> struct xe_tile *tile = gt_to_tile(gt);
> struct xe_ggtt *ggtt = tile->mem.ggtt;
> u64 alignment = pf_get_ggtt_alignment(gt);
> @@ -403,14 +406,14 @@ static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
>
> size = round_up(size, alignment);
>
> - if (drm_mm_node_allocated(node)) {
> + if (drm_mm_node_allocated(&node->base)) {
> err = pf_distribute_config_ggtt(tile, vfid, 0, 0);
> if (unlikely(err))
> return err;
>
> pf_release_ggtt(tile, node);
> }
> - xe_gt_assert(gt, !drm_mm_node_allocated(node));
> + xe_gt_assert(gt, !drm_mm_node_allocated(&node->base));
>
> if (!size)
> return 0;
> @@ -421,9 +424,9 @@ static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
>
> xe_ggtt_assign(ggtt, node, vfid);
> xe_gt_sriov_dbg_verbose(gt, "VF%u assigned GGTT %llx-%llx\n",
> - vfid, node->start, node->start + node->size - 1);
> + vfid, node->base.start, node->base.start + node->base.size - 1);
>
> - err = pf_distribute_config_ggtt(gt->tile, vfid, node->start, node->size);
> + err = pf_distribute_config_ggtt(gt->tile, vfid, node->base.start, node->base.size);
> if (unlikely(err))
> return err;
>
> @@ -433,10 +436,10 @@ static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
> static u64 pf_get_vf_config_ggtt(struct xe_gt *gt, unsigned int vfid)
> {
> struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
> - struct drm_mm_node *node = &config->ggtt_region;
> + struct xe_ggtt_node *node = &config->ggtt_region;
>
> xe_gt_assert(gt, !xe_gt_is_media_type(gt));
> - return drm_mm_node_allocated(node) ? node->size : 0;
> + return drm_mm_node_allocated(&node->base) ? node->base.size : 0;
> }
>
> /**
> @@ -2018,13 +2021,13 @@ int xe_gt_sriov_pf_config_print_ggtt(struct xe_gt *gt, struct drm_printer *p)
>
> for (n = 1; n <= total_vfs; n++) {
> config = >->sriov.pf.vfs[n].config;
> - if (!drm_mm_node_allocated(&config->ggtt_region))
> + if (!drm_mm_node_allocated(&config->ggtt_region.base))
> continue;
>
> - string_get_size(config->ggtt_region.size, 1, STRING_UNITS_2, buf, sizeof(buf));
> + string_get_size(config->ggtt_region.base.size, 1, STRING_UNITS_2, buf, sizeof(buf));
> drm_printf(p, "VF%u:\t%#0llx-%#llx\t(%s)\n",
> - n, config->ggtt_region.start,
> - config->ggtt_region.start + config->ggtt_region.size - 1, buf);
> + n, config->ggtt_region.base.start,
> + config->ggtt_region.base.start + config->ggtt_region.base.size - 1, buf);
> }
>
> return 0;
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h
> index 7bc66656fcc7..6d0d9299bafa 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h
> @@ -6,7 +6,7 @@
> #ifndef _XE_GT_SRIOV_PF_CONFIG_TYPES_H_
> #define _XE_GT_SRIOV_PF_CONFIG_TYPES_H_
>
> -#include <drm/drm_mm.h>
> +#include <xe_ggtt_types.h>
>
> #include "xe_guc_klv_thresholds_set_types.h"
>
> @@ -19,7 +19,7 @@ struct xe_bo;
> */
> struct xe_gt_sriov_config {
> /** @ggtt_region: GGTT region assigned to the VF. */
> - struct drm_mm_node ggtt_region;
> + struct xe_ggtt_node ggtt_region;
> /** @lmem_obj: LMEM allocation for use by the VF. */
> struct xe_bo *lmem_obj;
> /** @num_ctxs: number of GuC contexts IDs. */
> --
> 2.45.2
>
More information about the Intel-xe
mailing list