[Intel-xe] [PATCH v2 07/11] drm/xe/pat: Prefer the arch/IP names
Matt Roper
matthew.d.roper at intel.com
Tue Sep 26 23:24:42 UTC 2023
On Tue, Sep 26, 2023 at 03:36:27PM -0700, Lucas De Marchi wrote:
> Both DG2 and PVC are derived from XeHP, but DG2 should not really
> re-use something introduced by PVC, so it's odd to have DG2 pick up
> the PVC-named PAT programming. Let's prefer the architecture and/or
> IP names instead.
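
Just to confirm I'm reading the end result correctly, the mapping this
lands on (table/ops names taken from the hunks below; the IP/version
annotations are my own, so double-check them against the bspec):

    /*
     * TGL/RKL/DG1/ADL (Xe_LP,  gfx 12.00-12.10) -> xelp_pat_ops  + xelp_pat_table
     * DG2             (Xe_HPG, gfx 12.55)       -> xehp_pat_ops  + xelp_pat_table
     * PVC             (Xe_HPC, gfx 12.60)       -> xehp_pat_ops  + xehpc_pat_table
     * MTL             (Xe_LPG, gfx 12.70)       -> xelpg_pat_ops + xelpg_pat_table
     */

i.e. DG2 keeps the Xe_LP table contents but uses the MCR programming it
shares with PVC, which is exactly what the new names convey.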
>
> Signed-off-by: Lucas De Marchi <lucas.demarchi at intel.com>
> ---
> drivers/gpu/drm/xe/xe_pat.c | 114 ++++++++++++++++++------------------
> 1 file changed, 57 insertions(+), 57 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_pat.c b/drivers/gpu/drm/xe/xe_pat.c
> index 8418de44c646..234d0ccd97c9 100644
> --- a/drivers/gpu/drm/xe/xe_pat.c
> +++ b/drivers/gpu/drm/xe/xe_pat.c
> @@ -14,57 +14,57 @@
> 0x4800, 0x4804, \
> 0x4848, 0x484c)
>
> -#define MTL_L4_POLICY_MASK REG_GENMASK(3, 2)
> -#define MTL_PAT_3_UC REG_FIELD_PREP(MTL_L4_POLICY_MASK, 3)
> -#define MTL_PAT_1_WT REG_FIELD_PREP(MTL_L4_POLICY_MASK, 1)
> -#define MTL_PAT_0_WB REG_FIELD_PREP(MTL_L4_POLICY_MASK, 0)
> -#define MTL_INDEX_COH_MODE_MASK REG_GENMASK(1, 0)
> -#define MTL_3_COH_2W REG_FIELD_PREP(MTL_INDEX_COH_MODE_MASK, 3)
> -#define MTL_2_COH_1W REG_FIELD_PREP(MTL_INDEX_COH_MODE_MASK, 2)
> -#define MTL_0_COH_NON REG_FIELD_PREP(MTL_INDEX_COH_MODE_MASK, 0)
> -
> -#define PVC_CLOS_LEVEL_MASK REG_GENMASK(3, 2)
> -#define PVC_PAT_CLOS(x) REG_FIELD_PREP(PVC_CLOS_LEVEL_MASK, x)
> -
> -#define TGL_MEM_TYPE_MASK REG_GENMASK(1, 0)
> -#define TGL_PAT_WB REG_FIELD_PREP(TGL_MEM_TYPE_MASK, 3)
> -#define TGL_PAT_WT REG_FIELD_PREP(TGL_MEM_TYPE_MASK, 2)
> -#define TGL_PAT_WC REG_FIELD_PREP(TGL_MEM_TYPE_MASK, 1)
> -#define TGL_PAT_UC REG_FIELD_PREP(TGL_MEM_TYPE_MASK, 0)
> +#define XELPG_L4_POLICY_MASK REG_GENMASK(3, 2)
> +#define XELPG_PAT_3_UC REG_FIELD_PREP(XELPG_L4_POLICY_MASK, 3)
> +#define XELPG_PAT_1_WT REG_FIELD_PREP(XELPG_L4_POLICY_MASK, 1)
> +#define XELPG_PAT_0_WB REG_FIELD_PREP(XELPG_L4_POLICY_MASK, 0)
> +#define XELPG_INDEX_COH_MODE_MASK REG_GENMASK(1, 0)
> +#define XELPG_3_COH_2W REG_FIELD_PREP(XELPG_INDEX_COH_MODE_MASK, 3)
> +#define XELPG_2_COH_1W REG_FIELD_PREP(XELPG_INDEX_COH_MODE_MASK, 2)
> +#define XELPG_0_COH_NON REG_FIELD_PREP(XELPG_INDEX_COH_MODE_MASK, 0)
> +
> +#define XEHPC_CLOS_LEVEL_MASK REG_GENMASK(3, 2)
> +#define XEHPC_PAT_CLOS(x) REG_FIELD_PREP(XEHPC_CLOS_LEVEL_MASK, x)
> +
> +#define XELP_MEM_TYPE_MASK REG_GENMASK(1, 0)
> +#define XELP_PAT_WB REG_FIELD_PREP(XELP_MEM_TYPE_MASK, 3)
> +#define XELP_PAT_WT REG_FIELD_PREP(XELP_MEM_TYPE_MASK, 2)
> +#define XELP_PAT_WC REG_FIELD_PREP(XELP_MEM_TYPE_MASK, 1)
> +#define XELP_PAT_UC REG_FIELD_PREP(XELP_MEM_TYPE_MASK, 0)
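
Unrelated to the rename itself, but for anyone sanity-checking the
values these macros produce (this follows directly from the
REG_GENMASK/REG_FIELD_PREP definitions above):

    XELPG_PAT_0_WB | XELPG_2_COH_1W   ==  (0 << 2) | (2 << 0)  ==  0x2
    XELPG_PAT_3_UC                    ==  (3 << 2)             ==  0xc
    XEHPC_PAT_CLOS(2) | XELP_PAT_WB   ==  (2 << 2) | (3 << 0)  ==  0xb

so bits 3:2 carry the L4 policy (Xe_LPG) or CLOS level (Xe_HPC), and
bits 1:0 carry the coherency mode or the legacy memory type.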
>
> struct xe_pat_ops {
> void (*program_graphics)(struct xe_gt *gt, const u32 table[], int n_entries);
> void (*program_media)(struct xe_gt *gt, const u32 table[], int n_entries);
> };
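
For readers not looking at the full file: these ops get selected per
device in xe_pat_init_early() below and then dispatched per GT, roughly
like the existing xe_pat_init() (paraphrased from memory, not part of
this diff, so the exact shape may differ):

    void xe_pat_init(struct xe_gt *gt)
    {
            struct xe_device *xe = gt_to_xe(gt);

            if (!xe->pat.ops)
                    return;

            /* MTL's standalone media GT takes the media hook,
             * everything else goes through program_graphics */
            if (xe_gt_is_media_type(gt))
                    xe->pat.ops->program_media(gt, xe->pat.table,
                                               xe->pat.n_entries);
            else
                    xe->pat.ops->program_graphics(gt, xe->pat.table,
                                                  xe->pat.n_entries);
    }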
>
> -static const u32 tgl_pat_table[] = {
> - [0] = TGL_PAT_WB,
> - [1] = TGL_PAT_WC,
> - [2] = TGL_PAT_WT,
> - [3] = TGL_PAT_UC,
> - [4] = TGL_PAT_WB,
> - [5] = TGL_PAT_WB,
> - [6] = TGL_PAT_WB,
> - [7] = TGL_PAT_WB,
> +static const u32 xelp_pat_table[] = {
> + [0] = XELP_PAT_WB,
> + [1] = XELP_PAT_WC,
> + [2] = XELP_PAT_WT,
> + [3] = XELP_PAT_UC,
> + [4] = XELP_PAT_WB,
> + [5] = XELP_PAT_WB,
> + [6] = XELP_PAT_WB,
> + [7] = XELP_PAT_WB,
> };
>
> -static const u32 pvc_pat_table[] = {
> - [0] = TGL_PAT_UC,
> - [1] = TGL_PAT_WC,
> - [2] = TGL_PAT_WT,
> - [3] = TGL_PAT_WB,
> - [4] = PVC_PAT_CLOS(1) | TGL_PAT_WT,
> - [5] = PVC_PAT_CLOS(1) | TGL_PAT_WB,
> - [6] = PVC_PAT_CLOS(2) | TGL_PAT_WT,
> - [7] = PVC_PAT_CLOS(2) | TGL_PAT_WB,
> +static const u32 xehpc_pat_table[] = {
> + [0] = XELP_PAT_UC,
> + [1] = XELP_PAT_WC,
> + [2] = XELP_PAT_WT,
> + [3] = XELP_PAT_WB,
> + [4] = XEHPC_PAT_CLOS(1) | XELP_PAT_WT,
> + [5] = XEHPC_PAT_CLOS(1) | XELP_PAT_WB,
> + [6] = XEHPC_PAT_CLOS(2) | XELP_PAT_WT,
> + [7] = XEHPC_PAT_CLOS(2) | XELP_PAT_WB,
> };
>
> -static const u32 mtl_pat_table[] = {
> - [0] = MTL_PAT_0_WB,
> - [1] = MTL_PAT_1_WT,
> - [2] = MTL_PAT_3_UC,
> - [3] = MTL_PAT_0_WB | MTL_2_COH_1W,
> - [4] = MTL_PAT_0_WB | MTL_3_COH_2W,
> +static const u32 xelpg_pat_table[] = {
> + [0] = XELPG_PAT_0_WB,
> + [1] = XELPG_PAT_1_WT,
> + [2] = XELPG_PAT_3_UC,
> + [3] = XELPG_PAT_0_WB | XELPG_2_COH_1W,
> + [4] = XELPG_PAT_0_WB | XELPG_3_COH_2W,
> };
>
> static void program_pat(struct xe_gt *gt, const u32 table[], int n_entries)
> @@ -85,11 +85,11 @@ static void program_pat_mcr(struct xe_gt *gt, const u32 table[], int n_entries)
> }
> }
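
(For context on the two hooks assigned below: program_pat() does plain
MMIO writes of the PAT index registers, while program_pat_mcr() goes
through the steering/multicast helpers. Roughly, again paraphrasing the
existing code rather than quoting it:

    static void program_pat(struct xe_gt *gt, const u32 table[], int n_entries)
    {
            for (int i = 0; i < n_entries; i++) {
                    struct xe_reg reg = XE_REG(_PAT_INDEX(i));

                    xe_mmio_write32(gt, reg, table[i]);
            }
    }

    static void program_pat_mcr(struct xe_gt *gt, const u32 table[], int n_entries)
    {
            for (int i = 0; i < n_entries; i++) {
                    struct xe_reg_mcr reg_mcr = XE_REG_MCR(_PAT_INDEX(i));

                    xe_gt_mcr_multicast_write(gt, reg_mcr, table[i]);
            }
    }

so "xehp_pat_ops" really just means "program the PAT indexes via MCR".)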
>
> -static const struct xe_pat_ops tgl_pat_ops = {
> +static const struct xe_pat_ops xelp_pat_ops = {
> .program_graphics = program_pat,
> };
>
> -static const struct xe_pat_ops pvc_pat_ops = {
> +static const struct xe_pat_ops xehp_pat_ops = {
> .program_graphics = program_pat_mcr,
> };
>
> @@ -97,7 +97,7 @@ static const struct xe_pat_ops pvc_pat_ops = {
> * SAMedia register offsets are adjusted by the write methods and they target
> * registers that are not MCR, while for normal GT they are MCR
> */
> -static const struct xe_pat_ops mtl_pat_ops = {
> +static const struct xe_pat_ops xelpg_pat_ops = {
> .program_graphics = program_pat,
> .program_media = program_pat_mcr,
> };
> @@ -105,24 +105,24 @@ static const struct xe_pat_ops mtl_pat_ops = {
> void xe_pat_init_early(struct xe_device *xe)
> {
> if (xe->info.platform == XE_METEORLAKE) {
> - xe->pat.ops = &mtl_pat_ops;
> - xe->pat.table = mtl_pat_table;
> - xe->pat.n_entries = ARRAY_SIZE(mtl_pat_table);
> + xe->pat.ops = &xelpg_pat_ops;
> + xe->pat.table = xelpg_pat_table;
> + xe->pat.n_entries = ARRAY_SIZE(xelpg_pat_table);
> } else if (xe->info.platform == XE_PVC) {
> - xe->pat.ops = &pvc_pat_ops;
> - xe->pat.table = pvc_pat_table;
> - xe->pat.n_entries = ARRAY_SIZE(pvc_pat_table);
> + xe->pat.ops = &xehp_pat_ops;
> + xe->pat.table = xehpc_pat_table;
> + xe->pat.n_entries = ARRAY_SIZE(xehpc_pat_table);
> } else if (xe->info.platform == XE_DG2) {
> /*
> - * Programming is like PVC, but same table as previous platforms
> + * Programming like XeHP, but same table as previous platforms
This comment is even more confusing now that we're actually in the Xe_HP
block. :-)  But assuming this comment is already going to get changed by
updates to the previous patches anyway:

Reviewed-by: Matt Roper <matthew.d.roper at intel.com>
> */
> - xe->pat.ops = &pvc_pat_ops;
> - xe->pat.table = tgl_pat_table;
> - xe->pat.n_entries = ARRAY_SIZE(tgl_pat_table);
> + xe->pat.ops = &xehp_pat_ops;
> + xe->pat.table = xelp_pat_table;
> + xe->pat.n_entries = ARRAY_SIZE(xelp_pat_table);
> } else if (GRAPHICS_VERx100(xe) <= 1210) {
> - xe->pat.ops = &tgl_pat_ops;
> - xe->pat.table = tgl_pat_table;
> - xe->pat.n_entries = ARRAY_SIZE(tgl_pat_table);
> + xe->pat.ops = &xelp_pat_ops;
> + xe->pat.table = xelp_pat_table;
> + xe->pat.n_entries = ARRAY_SIZE(xelp_pat_table);
> } else {
> /*
> * Going forward we expect to need new PAT settings for most
> --
> 2.40.1
>
--
Matt Roper
Graphics Software Engineer
Linux GPU Platform Enablement
Intel Corporation