[PATCH 1/2] drm/xe/lnl: Apply Wa_22019338487
Vinay Belgaumkar
vinay.belgaumkar at intel.com
Fri Apr 19 20:13:43 UTC 2024
This WA requires us to limit media GT frequency requests to a cap value
(MERT_FREQ_CAP) during driver load as well as after driver unload. The
freq limits are restored once driver load completes, so perf is not
affected during normal operations.

Signed-off-by: Vinay Belgaumkar <vinay.belgaumkar at intel.com>
---
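Note for reviewers (below the ---, so not part of the commit message):
the intended sequencing across the hunks is: xe_guc_pc_init_early()
caps the requested freq to min(mert_freq_cap, rp0),
pc_adjust_freq_bounds() pins min to RPn and max to
min(rp0, mert_freq_cap) for the duration of load, gsc_work() restores
max/min to rp0/rpe once GSC load is done, and xe_driver_flr()
re-applies the cap (against rpe) before FLR on unload.

As a standalone illustration of the clamping math only (a userspace C
sketch with hypothetical frequency values, not kernel code):

/* clamp_sketch.c - illustrates the min() capping used in this patch;
 * the rp0/rpe values below are hypothetical, not real fuse reads.
 */
#include <stdio.h>

#define LNL_MERT_FREQ_CAP 800u	/* matches the cap added in xe_guc_pc.c */

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int rp0_freq = 1950;	/* hypothetical fused max freq (MHz) */
	unsigned int rpe_freq = 1200;	/* hypothetical efficient freq (MHz) */

	/* During driver load: the request is clamped to the MERT cap */
	printf("load request: %u MHz\n", min_u32(LNL_MERT_FREQ_CAP, rp0_freq));

	/* Before Driver-FLR at unload: clamp against RPe instead */
	printf("pre-FLR request: %u MHz\n", min_u32(LNL_MERT_FREQ_CAP, rpe_freq));

	return 0;
}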
drivers/gpu/drm/xe/xe_device.c | 6 +++++
drivers/gpu/drm/xe/xe_gsc.c | 8 +++++++
drivers/gpu/drm/xe/xe_guc_pc.c | 40 ++++++++++++++++++++++++++++++++--
drivers/gpu/drm/xe/xe_guc_pc.h | 3 +++
4 files changed, 55 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index d85a2ba0a057..e29c152a6c4e 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -30,6 +30,7 @@
#include "xe_gsc_proxy.h"
#include "xe_gt.h"
#include "xe_gt_mcr.h"
+#include "xe_guc_pc.h"
#include "xe_hwmon.h"
#include "xe_irq.h"
#include "xe_memirq.h"
@@ -336,6 +337,7 @@ static void xe_driver_flr(struct xe_device *xe)
 {
 	const unsigned int flr_timeout = 3 * MICRO; /* specs recommend a 3s wait */
 	struct xe_gt *gt = xe_root_mmio_gt(xe);
+	struct xe_guc_pc *pc = &gt->uc.guc.pc;
 	int ret;
 
 	if (xe_mmio_read32(gt, GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS) {
@@ -343,6 +345,10 @@ static void xe_driver_flr(struct xe_device *xe)
 		return;
 	}
 
+	/* Set requested freq to mert_freq_cap before FLR */
+	if (xe_guc_pc_needs_wa_22019338487(pc))
+		pc_set_cur_freq(pc, min(xe_guc_pc_mert_freq_cap(pc), pc->rpe_freq));
+
 	drm_dbg(&xe->drm, "Triggering Driver-FLR\n");
 
 	/*
diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c
index 60202b903687..556e73fca813 100644
--- a/drivers/gpu/drm/xe/xe_gsc.c
+++ b/drivers/gpu/drm/xe/xe_gsc.c
@@ -19,6 +19,7 @@
#include "xe_gt.h"
#include "xe_gt_mcr.h"
#include "xe_gt_printk.h"
+#include "xe_guc_pc.h"
#include "xe_huc.h"
#include "xe_map.h"
#include "xe_mmio.h"
@@ -339,6 +340,7 @@ static void gsc_work(struct work_struct *work)
 	struct xe_gsc *gsc = container_of(work, typeof(*gsc), work);
 	struct xe_gt *gt = gsc_to_gt(gsc);
 	struct xe_device *xe = gt_to_xe(gt);
+	struct xe_guc_pc *pc = &gt->uc.guc.pc;
 	u32 actions;
 	int ret;
 
@@ -367,6 +369,12 @@ static void gsc_work(struct work_struct *work)
 	if (actions & GSC_ACTION_SW_PROXY)
 		xe_gsc_proxy_request_handler(gsc);
 
+	/* Revert the min/max freq limits as we're done with GSC/driver load */
+	if (xe_guc_pc_needs_wa_22019338487(pc)) {
+		xe_guc_pc_set_max_freq(pc, pc->rp0_freq);
+		xe_guc_pc_set_min_freq(pc, pc->rpe_freq);
+	}
+
 out:
 	xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
 	xe_pm_runtime_put(xe);
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index 521ae24f2314..7f82b6c2ad3c 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -40,6 +40,8 @@
 #define GT_FREQUENCY_MULTIPLIER 50
 #define GT_FREQUENCY_SCALER 3
 
+#define LNL_MERT_FREQ_CAP 800
+
 /**
  * DOC: GuC Power Conservation (PC)
  *
@@ -237,7 +239,7 @@ static void pc_set_manual_rp_ctrl(struct xe_guc_pc *pc, bool enable)
 	xe_mmio_write32(gt, RP_CONTROL, state);
 }
 
-static void pc_set_cur_freq(struct xe_guc_pc *pc, u32 freq)
+void pc_set_cur_freq(struct xe_guc_pc *pc, u32 freq)
 {
 	struct xe_gt *gt = pc_to_gt(pc);
 	u32 rpnswreq;
@@ -673,6 +675,25 @@ static void pc_init_fused_rp_values(struct xe_guc_pc *pc)
 	tgl_init_fused_rp_values(pc);
 }
 
+bool xe_guc_pc_needs_wa_22019338487(struct xe_guc_pc *pc)
+{
+	struct xe_gt *gt = pc_to_gt(pc);
+	struct xe_device *xe = gt_to_xe(gt);
+
+	if (MEDIA_VERx100(xe) == 2000 && xe_gt_is_media_type(gt))
+		return true;
+
+	return false;
+}
+
+u32 xe_guc_pc_mert_freq_cap(struct xe_guc_pc *pc)
+{
+	if (MEDIA_VERx100(pc_to_xe(pc)) == 2000)
+		return LNL_MERT_FREQ_CAP;
+	else
+		return 0;
+}
+
 /**
  * xe_guc_pc_init_early - Initialize RPx values and request a higher GT
  * frequency to allow faster GuC load times
@@ -684,7 +705,11 @@ void xe_guc_pc_init_early(struct xe_guc_pc *pc)
 
 	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
 	pc_init_fused_rp_values(pc);
-	pc_set_cur_freq(pc, pc->rp0_freq);
+
+	if (xe_guc_pc_needs_wa_22019338487(pc))
+		pc_set_cur_freq(pc, min(xe_guc_pc_mert_freq_cap(pc), pc->rp0_freq));
+	else
+		pc_set_cur_freq(pc, pc->rp0_freq);
 }
 
 static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
@@ -715,6 +740,17 @@ static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
 	if (pc_get_min_freq(pc) > pc->rp0_freq)
 		ret = pc_set_min_freq(pc, pc->rp0_freq);
 
+	if (!ret && xe_guc_pc_needs_wa_22019338487(pc)) {
+		/*
+		 * Setting min to RPn disables use of efficient freq
+		 * which could otherwise interfere with this WA for media GT.
+		 * We will also bind max to MERT_FREQ_CAP until driver loads.
+		 */
+		ret = pc_set_min_freq(pc, pc->rpn_freq);
+		if (!ret)
+			ret = pc_set_max_freq(pc, min(pc->rp0_freq, xe_guc_pc_mert_freq_cap(pc)));
+	}
+
 out:
 	return ret;
 }
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.h b/drivers/gpu/drm/xe/xe_guc_pc.h
index d3680d89490e..25fe693c7ee3 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.h
+++ b/drivers/gpu/drm/xe/xe_guc_pc.h
@@ -27,4 +27,7 @@ enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc);
 u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc);
 u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc);
 void xe_guc_pc_init_early(struct xe_guc_pc *pc);
+void pc_set_cur_freq(struct xe_guc_pc *pc, u32 freq);
+bool xe_guc_pc_needs_wa_22019338487(struct xe_guc_pc *pc);
+u32 xe_guc_pc_mert_freq_cap(struct xe_guc_pc *pc);
 #endif /* _XE_GUC_PC_H_ */
--
2.38.1