[Intel-xe] [PATCH 8/9] drm/xe: Use vfunc for ggtt pte encoding

Matt Roper matthew.d.roper at intel.com
Mon Sep 25 23:37:57 UTC 2023


On Mon, Sep 25, 2023 at 03:10:48PM -0700, Lucas De Marchi wrote:
> Use two different functions for encoding the ggtt's pte, assigning them
> during initialization. The main difference is that before Xe-LPG, the pte
> didn't have the cache bits.
> 
> Signed-off-by: Lucas De Marchi <lucas.demarchi at intel.com>
> ---
>  drivers/gpu/drm/xe/xe_ggtt.c       | 53 +++++++++++++++++++++++-------
>  drivers/gpu/drm/xe/xe_ggtt.h       |  1 -
>  drivers/gpu/drm/xe/xe_ggtt_types.h |  9 +++++
>  3 files changed, 50 insertions(+), 13 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
> index 54baaffc7235..09c6bd46f097 100644
> --- a/drivers/gpu/drm/xe/xe_ggtt.c
> +++ b/drivers/gpu/drm/xe/xe_ggtt.c
> @@ -20,16 +20,31 @@
>  #include "xe_mmio.h"
>  #include "xe_wopcm.h"
>  
> -/* FIXME: Common file, preferably auto-gen */
> -#define MTL_GGTT_PTE_PAT0	BIT_ULL(52)
> -#define MTL_GGTT_PTE_PAT1	BIT_ULL(53)
> +#define XELPG_GGTT_PTE_PAT0	BIT_ULL(52)
> +#define XELPG_GGTT_PTE_PAT1	BIT_ULL(53)
>  
>  /* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
>  #define GUC_GGTT_TOP	0xFEE00000
>  
> -u64 xe_ggtt_pte_encode(struct xe_bo *bo, u64 bo_offset)
> +static u64 xelp_ggtt_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
> +				   enum xe_cache_level cache)
> +{
> +	u64 pte;
> +
> +	pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
> +	pte |= XE_PAGE_PRESENT;
> +
> +	if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
> +		pte |= XE_GGTT_PTE_DM;
> +
> +	return pte;
> +}
> +
> +static u64 xelpg_ggtt_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
> +				    enum xe_cache_level cache)
>  {

Since the two functions are basically the same aside from PAT, should we
just implement this by calling the xelp version and then adding the
PAT-specific stuff at the end?

        pte = xelp_ggtt_pte_encode_bo(bo, bo_offset, cache);

        xe_assert(xe, pat_index <= 3);
        if (pat_index & BIT(0))
                pte |= XELPG_GGTT_PTE_PAT0;
        if (pat_index & BIT(1))
                pte |= XELPG_GGTT_PTE_PAT1;

        return pte;
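
Spelled out as a complete function, that refactor would look roughly like
the below (untested sketch; it keeps the xe/pat_index lookup your patch
already adds at the top of the Xe-LPG variant):

static u64 xelpg_ggtt_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
                                    enum xe_cache_level cache)
{
        struct xe_device *xe = xe_bo_device(bo);
        u32 pat_index = xe->pat.idx[cache];
        u64 pte;

        /* Address, PRESENT and DM bits are identical to Xe-LP */
        pte = xelp_ggtt_pte_encode_bo(bo, bo_offset, cache);

        /* Only the two GGTT PAT bits are Xe-LPG specific */
        xe_assert(xe, pat_index <= 3);
        if (pat_index & BIT(0))
                pte |= XELPG_GGTT_PTE_PAT0;
        if (pat_index & BIT(1))
                pte |= XELPG_GGTT_PTE_PAT1;

        return pte;
}

That way any future change to the common bits only has to happen in one
place.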

Either way,
Reviewed-by: Matt Roper <matthew.d.roper at intel.com>


Matt

>  	struct xe_device *xe = xe_bo_device(bo);
> +	u32 pat_index = xe->pat.idx[cache];
>  	u64 pte;
>  
>  	pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
> @@ -38,11 +53,11 @@ u64 xe_ggtt_pte_encode(struct xe_bo *bo, u64 bo_offset)
>  	if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
>  		pte |= XE_GGTT_PTE_DM;
>  
> -	/* FIXME: vfunc + pass in caching rules */
> -	if (xe->info.platform == XE_METEORLAKE) {
> -		pte |= MTL_GGTT_PTE_PAT0;
> -		pte |= MTL_GGTT_PTE_PAT1;
> -	}
> +	if (pat_index & BIT(0))
> +		pte |= XELPG_GGTT_PTE_PAT0;
> +
> +	if (pat_index & BIT(1))
> +		pte |= XELPG_GGTT_PTE_PAT1;
>  
>  	return pte;
>  }
> @@ -72,7 +87,8 @@ static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size)
>  	xe_tile_assert(ggtt->tile, start < end);
>  
>  	if (ggtt->scratch)
> -		scratch_pte = xe_ggtt_pte_encode(ggtt->scratch, 0);
> +		scratch_pte = ggtt->pt_ops->pte_encode_bo(ggtt->scratch, 0,
> +							  XE_CACHE_WB);
>  	else
>  		scratch_pte = 0;
>  
> @@ -102,6 +118,14 @@ static void primelockdep(struct xe_ggtt *ggtt)
>  	fs_reclaim_release(GFP_KERNEL);
>  }
>  
> +static const struct xe_ggtt_pt_ops xelp_pt_ops = {
> +	.pte_encode_bo = xelp_ggtt_pte_encode_bo,
> +};
> +
> +static const struct xe_ggtt_pt_ops xelpg_pt_ops = {
> +	.pte_encode_bo = xelpg_ggtt_pte_encode_bo,
> +};
> +
>  int xe_ggtt_init_noalloc(struct xe_ggtt *ggtt)
>  {
>  	struct xe_device *xe = tile_to_xe(ggtt->tile);
> @@ -146,6 +170,11 @@ int xe_ggtt_init_noalloc(struct xe_ggtt *ggtt)
>  	if (ggtt->size > GUC_GGTT_TOP)
>  		ggtt->size = GUC_GGTT_TOP;
>  
> +	if (GRAPHICS_VERx100(xe) >= 1270)
> +		ggtt->pt_ops = &xelpg_pt_ops;
> +	else
> +		ggtt->pt_ops = &xelp_pt_ops;
> +
>  	drm_mm_init(&ggtt->mm, xe_wopcm_size(xe),
>  		    ggtt->size - xe_wopcm_size(xe));
>  	mutex_init(&ggtt->lock);
> @@ -260,7 +289,7 @@ void xe_ggtt_printk(struct xe_ggtt *ggtt, const char *prefix)
>  {
>  	u64 addr, scratch_pte;
>  
> -	scratch_pte = xe_ggtt_pte_encode(ggtt->scratch, 0);
> +	scratch_pte = ggtt->pt_ops->pte_encode_bo(ggtt->scratch, 0, XE_CACHE_WB);
>  
>  	printk("%sGlobal GTT:", prefix);
>  	for (addr = 0; addr < ggtt->size; addr += XE_PAGE_SIZE) {
> @@ -301,7 +330,7 @@ void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
>  	u64 offset, pte;
>  
>  	for (offset = 0; offset < bo->size; offset += XE_PAGE_SIZE) {
> -		pte = xe_ggtt_pte_encode(bo, offset);
> +		pte = ggtt->pt_ops->pte_encode_bo(bo, offset, XE_CACHE_WB);
>  		xe_ggtt_set_pte(ggtt, start + offset, pte);
>  	}
>  
> diff --git a/drivers/gpu/drm/xe/xe_ggtt.h b/drivers/gpu/drm/xe/xe_ggtt.h
> index 205a6d058bbd..3faa3c6d0375 100644
> --- a/drivers/gpu/drm/xe/xe_ggtt.h
> +++ b/drivers/gpu/drm/xe/xe_ggtt.h
> @@ -10,7 +10,6 @@
>  
>  struct drm_printer;
>  
> -u64 xe_ggtt_pte_encode(struct xe_bo *bo, u64 bo_offset);
>  void xe_ggtt_set_pte(struct xe_ggtt *ggtt, u64 addr, u64 pte);
>  void xe_ggtt_invalidate(struct xe_ggtt *ggtt);
>  int xe_ggtt_init_noalloc(struct xe_ggtt *ggtt);
> diff --git a/drivers/gpu/drm/xe/xe_ggtt_types.h b/drivers/gpu/drm/xe/xe_ggtt_types.h
> index d34b3e733945..486016ea5b67 100644
> --- a/drivers/gpu/drm/xe/xe_ggtt_types.h
> +++ b/drivers/gpu/drm/xe/xe_ggtt_types.h
> @@ -8,9 +8,16 @@
>  
>  #include <drm/drm_mm.h>
>  
> +#include "xe_pt_types.h"
> +
>  struct xe_bo;
>  struct xe_gt;
>  
> +struct xe_ggtt_pt_ops {
> +	u64 (*pte_encode_bo)(struct xe_bo *bo, u64 bo_offset,
> +			     enum xe_cache_level cache);
> +};
> +
>  struct xe_ggtt {
>  	struct xe_tile *tile;
>  
> @@ -25,6 +32,8 @@ struct xe_ggtt {
>  
>  	u64 __iomem *gsm;
>  
> +	const struct xe_ggtt_pt_ops *pt_ops;
> +
>  	struct drm_mm mm;
>  };
>  
> -- 
> 2.40.1
> 

-- 
Matt Roper
Graphics Software Engineer
Linux GPU Platform Enablement
Intel Corporation

