[PATCH v3] drm/i915/slpc: Optimize waitboost for SLPC
Vinay Belgaumkar
vinay.belgaumkar at intel.com
Thu Oct 20 00:29:44 UTC 2022
Waitboost (when SLPC is enabled) results in a H2G message. This can result
in thousands of messages during a stress test and fill up an already full
CTB. There is no need to request RP0 if GuC is already requesting the
same.
v2: Add the tracing back, and check requested freq
in the worker thread (Tvrtko)
v3: Check requested freq in dec_waiters as well
Signed-off-by: Vinay Belgaumkar <vinay.belgaumkar at intel.com>
---
drivers/gpu/drm/i915/gt/intel_rps.c | 3 +++
drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c | 14 +++++++++++---
2 files changed, 14 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index fc23c562d9b2..18b75cf08d1b 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -1016,6 +1016,9 @@ void intel_rps_boost(struct i915_request *rq)
if (rps_uses_slpc(rps)) {
slpc = rps_to_slpc(rps);
+ GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
+ rq->fence.context, rq->fence.seqno);
+
/* Return if old value is non zero */
if (!atomic_fetch_inc(&slpc->num_waiters))
schedule_work(&slpc->boost_work);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index b7cdeec44bd3..9dbdbab1515a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -227,14 +227,19 @@ static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
static void slpc_boost_work(struct work_struct *work)
{
struct intel_guc_slpc *slpc = container_of(work, typeof(*slpc), boost_work);
+ struct intel_rps *rps = &slpc_to_gt(slpc)->rps;
int err;
/*
* Raise min freq to boost. It's possible that
* this is greater than current max. But it will
* certainly be limited by RP0. An error setting
- * the min param is not fatal.
+ * the min param is not fatal. No need to boost
+ * if we are already requesting it.
*/
+ if (intel_rps_get_requested_frequency(rps) == slpc->boost_freq)
+ return;
+
mutex_lock(&slpc->lock);
if (atomic_read(&slpc->num_waiters)) {
err = slpc_force_min_freq(slpc, slpc->boost_freq);
@@ -728,6 +733,7 @@ int intel_guc_slpc_set_boost_freq(struct intel_guc_slpc *slpc, u32 val)
void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc)
{
+ struct intel_rps *rps = &slpc_to_gt(slpc)->rps;
/*
* Return min back to the softlimit.
* This is called during request retire,
@@ -735,8 +741,10 @@ void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc)
* set_param fails.
*/
mutex_lock(&slpc->lock);
- if (atomic_dec_and_test(&slpc->num_waiters))
- slpc_force_min_freq(slpc, slpc->min_freq_softlimit);
+ if (atomic_dec_and_test(&slpc->num_waiters)) {
+ if (intel_rps_get_requested_frequency(rps) != slpc->min_freq_softlimit)
+ slpc_force_min_freq(slpc, slpc->min_freq_softlimit);
+ }
mutex_unlock(&slpc->lock);
}
--
2.35.1
More information about the dri-devel
mailing list