[Intel-xe] [PATCH 2/7] drm/xe/guc: Handle RCU_MODE as masked from definition
Rodrigo Vivi
rodrigo.vivi at kernel.org
Fri May 5 16:55:28 UTC 2023
On Fri, Apr 28, 2023 at 11:23:27PM -0700, Lucas De Marchi wrote:
> guc_mmio_regset_write() had a flags field for the registers to be added to the
> GuC's regset list. The only register actually using that was RCU_MODE,
> but it was setting the flags to a bogus value. From
> xe_guc_fwif.h,
>
> #define GUC_REGSET_MASKED BIT(0)
> #define GUC_REGSET_MASKED_WITH_VALUE BIT(2)
> #define GUC_REGSET_RESTORE_ONLY BIT(3)
>
> Cross checking with i915, the only flag to set in RCU_MODE is
> GUC_REGSET_MASKED. That can be done automatically from the register, as
> long as the definition is correct.
>
> Add the XE_REG_OPTION_MASKED annotation to RCU_MODE and kill the "flags"
> field in guc_mmio_regset_write(): guc_mmio_regset_write_one() can decide
> that based on the register being passed.
>
> Signed-off-by: Lucas De Marchi <lucas.demarchi at intel.com>
I'm still trying to get familiarized with XE_REG_OPTION_MASKED
but this patch looks right to me:
Reviewed-by: Rodrigo Vivi <rodrigo.vivi at intel.com>
> ---
> drivers/gpu/drm/xe/regs/xe_gt_regs.h | 2 +-
> drivers/gpu/drm/xe/xe_guc_ads.c | 31 +++++++++++-----------------
> 2 files changed, 13 insertions(+), 20 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
> index 68e89d71cd1c..4d87f1fe010d 100644
> --- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
> +++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
> @@ -325,7 +325,7 @@
> #define SARB_CHICKEN1 XE_REG_MCR(0xe90c)
> #define COMP_CKN_IN REG_GENMASK(30, 29)
>
> -#define RCU_MODE XE_REG(0x14800)
> +#define RCU_MODE XE_REG(0x14800, XE_REG_OPTION_MASKED)
> #define RCU_MODE_CCS_ENABLE REG_BIT(0)
>
> #define FORCEWAKE_ACK_GT XE_REG(0x130044)
> diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
> index 676137dcb510..84c2d7c624c6 100644
> --- a/drivers/gpu/drm/xe/xe_guc_ads.c
> +++ b/drivers/gpu/drm/xe/xe_guc_ads.c
> @@ -422,12 +422,12 @@ static void guc_capture_list_init(struct xe_guc_ads *ads)
>
> static void guc_mmio_regset_write_one(struct xe_guc_ads *ads,
> struct iosys_map *regset_map,
> - u32 reg, u32 flags,
> + struct xe_reg reg,
> unsigned int n_entry)
> {
> struct guc_mmio_reg entry = {
> - .offset = reg,
> - .flags = flags,
> + .offset = reg.reg,
> + .flags = reg.masked ? GUC_REGSET_MASKED : 0,
> /* TODO: steering */
> };
>
> @@ -446,40 +446,33 @@ static unsigned int guc_mmio_regset_write(struct xe_guc_ads *ads,
> unsigned long idx;
> unsigned count = 0;
> const struct {
> - u32 reg;
> - u32 flags;
> + struct xe_reg reg;
> bool skip;
> } *e, extra_regs[] = {
> - { .reg = RING_MODE(hwe->mmio_base).reg, },
> - { .reg = RING_HWS_PGA(hwe->mmio_base).reg, },
> - { .reg = RING_IMR(hwe->mmio_base).reg, },
> - { .reg = RCU_MODE.reg, .flags = 0x3,
> - .skip = hwe != hwe_rcs_reset_domain },
> + { .reg = RING_MODE(hwe->mmio_base), },
> + { .reg = RING_HWS_PGA(hwe->mmio_base), },
> + { .reg = RING_IMR(hwe->mmio_base), },
> + { .reg = RCU_MODE, .skip = hwe != hwe_rcs_reset_domain },
> };
> u32 i;
>
> BUILD_BUG_ON(ARRAY_SIZE(extra_regs) > ADS_REGSET_EXTRA_MAX);
>
> - xa_for_each(&hwe->reg_sr.xa, idx, entry) {
> - u32 flags = entry->reg.masked ? GUC_REGSET_MASKED : 0;
> -
> - guc_mmio_regset_write_one(ads, regset_map, idx, flags, count++);
> - }
> + xa_for_each(&hwe->reg_sr.xa, idx, entry)
> + guc_mmio_regset_write_one(ads, regset_map, entry->reg, count++);
>
> for (e = extra_regs; e < extra_regs + ARRAY_SIZE(extra_regs); e++) {
> if (e->skip)
> continue;
>
> - guc_mmio_regset_write_one(ads, regset_map,
> - e->reg, e->flags, count++);
> + guc_mmio_regset_write_one(ads, regset_map, e->reg, count++);
> }
>
> /* Wa_1607983814 */
> if (needs_wa_1607983814(xe) && hwe->class == XE_ENGINE_CLASS_RENDER) {
> for (i = 0; i < LNCFCMOCS_REG_COUNT; i++) {
> guc_mmio_regset_write_one(ads, regset_map,
> - LNCFCMOCS(i).reg, 0,
> - count++);
> + LNCFCMOCS(i), count++);
> }
> }
>
> --
> 2.40.1
>
More information about the Intel-xe
mailing list