[Intel-gfx] [PATCH v6 12/23] drm/i915/slpc: Send RESET event to enable SLPC

Sagar Arun Kamble sagar.a.kamble at intel.com
Thu Mar 16 18:28:16 UTC 2017


Send the host2guc SLPC RESET event to GuC after GuC load. After that,
i915 can ascertain through the shared data whether SLPC has started
running successfully. This check is done during intel_init_gt_powersave.
It lets i915 pick up the initial configuration set up by SLPC and, if
needed, fall back to host RPS should SLPC run into issues.
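
For reference, the flow this patch adds looks roughly like the sketch
below (illustrative only; the names are those introduced in this
series):

	/* around GuC load */
	intel_slpc_enable(dev_priv);	/* set NOT_RUNNING, send RESET */

	/* later, in intel_init_gt_powersave() */
	if (i915.enable_slpc) {
		if (!intel_slpc_get_status(dev_priv)) {
			/* SLPC did not start; fall back to host RPS */
			i915.enable_slpc = 0;
			intel_sanitize_gt_powersave(dev_priv);
		} else {
			/* SLPC owns RPS; ignore host PM interrupt events */
			dev_priv->pm_rps_events = 0;
		}
	}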

v1: Extracted host2guc_slpc to handle the SLPC status code and
    made coding style changes. (Paulo)
    Removed WARN_ON checking the MSB of the GTT address of the
    shared GEM object. (ChrisW)
    Changed host2guc_action to i915_guc_action. (Sagar)
    Updated the SLPC enabled status. (Sagar)

v2: Commit message update. (David)

v3: Rebase.

v4: Added DRM_INFO message when SLPC is enabled.

v5: Updated the patch as host2guc_slpc moved to an earlier patch.
    The SLPC activation status message is printed after checking the
    state from shared data during intel_init_gt_powersave.

v6: Added definition of host2guc_slpc and clflush the shared data only
    for the required size. Set the state to NOT_RUNNING before sending
    the RESET event. Output data for SLPC actions is to be retrieved
    during intel_guc_send with lock protection, so a wrapper
    __intel_guc_send was created that returns GuC output data if needed
    (see the sketch below). Cleared pm_rps_events on confirming SLPC
    RUNNING status so that even if the host touches any of the PM
    registers by mistake it has no effect. (Sagar)
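
For reference, a minimal sketch of how the new wrapper relates to the
existing entry point (illustrative only; the request contents and the
offset variable are hypothetical):

	u32 request[] = {
		INTEL_GUC_ACTION_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
		offset,	/* GGTT offset of the shared data (hypothetical) */
		0,
	};
	u32 output[SLPC_EVENT_MAX_OUTPUT_ARGS];
	int ret;

	/* callers that need GuC output data use the new wrapper... */
	ret = __intel_guc_send(guc, request, ARRAY_SIZE(request), output);

	/* ...while existing callers are unchanged (output is NULL) */
	ret = intel_guc_send(guc, request, ARRAY_SIZE(request));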

Signed-off-by: Tom O'Rourke <Tom.O'Rourke at intel.com>
Signed-off-by: Sagar Arun Kamble <sagar.a.kamble at intel.com>
---
 drivers/gpu/drm/i915/intel_pm.c   |   9 +++
 drivers/gpu/drm/i915/intel_slpc.c | 137 ++++++++++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/intel_slpc.h |   4 ++
 drivers/gpu/drm/i915/intel_uc.c   |  21 +++++-
 drivers/gpu/drm/i915/intel_uc.h   |   2 +
 5 files changed, 172 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 0225003..73c4cd3 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -6941,6 +6941,15 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
 		intel_runtime_pm_get(dev_priv);
 	}
 
+	if (i915.enable_slpc) {
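+		/*
+		 * If SLPC did not reach the RUNNING state, fall back to
+		 * host RPS; otherwise clear pm_rps_events so that stray
+		 * host accesses to the PM registers have no effect.
+		 */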
+		dev_priv->guc.slpc.active = intel_slpc_get_status(dev_priv);
+		if (!dev_priv->guc.slpc.active) {
+			i915.enable_slpc = 0;
+			intel_sanitize_gt_powersave(dev_priv);
+		} else {
+			dev_priv->pm_rps_events = 0;
+		}
+	}
+
 	mutex_lock(&dev_priv->drm.struct_mutex);
 	mutex_lock(&dev_priv->rps.hw_lock);
 
diff --git a/drivers/gpu/drm/i915/intel_slpc.c b/drivers/gpu/drm/i915/intel_slpc.c
index 4003bdb1..b1900e7 100644
--- a/drivers/gpu/drm/i915/intel_slpc.c
+++ b/drivers/gpu/drm/i915/intel_slpc.c
@@ -82,6 +82,133 @@ static void slpc_shared_data_init(struct drm_i915_private *dev_priv)
 	kunmap_atomic(data);
 }
 
+static void host2guc_slpc(struct drm_i915_private *dev_priv,
+			  struct slpc_event_input *input, u32 len)
+{
+	u32 *data;
+	u32 output[SLPC_EVENT_MAX_OUTPUT_ARGS] = {0};
+	int ret;
+
+	/*
+	 * We have only 15 scratch registers for communication.
+	 * The first is used for the event ID in both the input and
+	 * the output data. The event processing status is reported
+	 * in the SOFT_SCRATCH(1) register.
+	 */
+	BUILD_BUG_ON(SLPC_EVENT_MAX_INPUT_ARGS > 14);
+	BUILD_BUG_ON(SLPC_EVENT_MAX_OUTPUT_ARGS < 1);
+	BUILD_BUG_ON(SLPC_EVENT_MAX_OUTPUT_ARGS > 14);
+
+	data = (u32 *)input;
+	data[0] = INTEL_GUC_ACTION_SLPC_REQUEST;
+
+	ret = __intel_guc_send(&dev_priv->guc, data, len, output);
+	if (ret)
+		DRM_ERROR("SLPC event 0x%x failed, status %d\n",
+			  (output[0] & 0xFF00) >> 8, ret);
+}
+
+static void host2guc_slpc_reset(struct drm_i915_private *dev_priv)
+{
+	struct slpc_event_input data = {0};
+	u32 shared_data_gtt_offset = i915_ggtt_offset(dev_priv->guc.slpc.vma);
+
+	data.header.value = SLPC_EVENT(SLPC_EVENT_RESET, 2);
+	data.args[0] = shared_data_gtt_offset;
+	data.args[1] = 0;
+
+	host2guc_slpc(dev_priv, &data, 4);
+}
+
+static void host2guc_slpc_query_task_state(struct drm_i915_private *dev_priv)
+{
+	struct slpc_event_input data = {0};
+	u32 shared_data_gtt_offset = i915_ggtt_offset(dev_priv->guc.slpc.vma);
+
+	data.header.value = SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2);
+	data.args[0] = shared_data_gtt_offset;
+	data.args[1] = 0;
+
+	host2guc_slpc(dev_priv, &data, 4);
+}
+
+void intel_slpc_query_task_state(struct drm_i915_private *dev_priv)
+{
+	if (dev_priv->guc.slpc.active)
+		host2guc_slpc_query_task_state(dev_priv);
+}
+
+/*
+ * Read the state updates from GuC SLPC into the shared data by
+ * invoking the QUERY_TASK_STATE H2G action, then copy a snapshot
+ * of the shared data into *data.
+ */
+void intel_slpc_read_shared_data(struct drm_i915_private *dev_priv,
+				 struct slpc_shared_data *data)
+{
+	struct page *page;
+	void *pv = NULL;
+
+	intel_slpc_query_task_state(dev_priv);
+
+	page = i915_vma_first_page(dev_priv->guc.slpc.vma);
+	pv = kmap_atomic(page);
+
+	drm_clflush_virt_range(pv, sizeof(struct slpc_shared_data));
+	memcpy(data, pv, sizeof(struct slpc_shared_data));
+
+	kunmap_atomic(pv);
+}
+
+const char *intel_slpc_get_state_str(enum slpc_global_state state)
+{
+	switch (state) {
+	case SLPC_GLOBAL_STATE_NOT_RUNNING:
+		return "not running";
+	case SLPC_GLOBAL_STATE_INITIALIZING:
+		return "initializing";
+	case SLPC_GLOBAL_STATE_RESETTING:
+		return "resetting";
+	case SLPC_GLOBAL_STATE_RUNNING:
+		return "running";
+	case SLPC_GLOBAL_STATE_SHUTTING_DOWN:
+		return "shutting down";
+	case SLPC_GLOBAL_STATE_ERROR:
+		return "error";
+	default:
+		return "unknown";
+	}
+}
+
+bool intel_slpc_get_status(struct drm_i915_private *dev_priv)
+{
+	struct slpc_shared_data data;
+	bool ret = false;
+
+	intel_slpc_read_shared_data(dev_priv, &data);
+	DRM_INFO("SLPC state: %s\n",
+		 intel_slpc_get_state_str(data.global_state));
+
+	switch (data.global_state) {
+	case SLPC_GLOBAL_STATE_RUNNING:
+		/* Capture required state from SLPC here */
+		ret = true;
+		break;
+	case SLPC_GLOBAL_STATE_ERROR:
+		DRM_ERROR("SLPC in error state.\n");
+		break;
+	case SLPC_GLOBAL_STATE_RESETTING:
+		/*
+		 * SLPC enabling in GuC should complete quickly, and it is
+		 * unlikely that SLPC is still initializing here, as the
+		 * RESET event is sent during GuC load itself.
+		 * TODO: wait until the state changes to RUNNING.
+		 */
+		ret = true;
+		DRM_ERROR("SLPC not running yet\n");
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
 void intel_slpc_init(struct drm_i915_private *dev_priv)
 {
 	struct intel_guc *guc = &dev_priv->guc;
@@ -118,6 +245,16 @@ void intel_slpc_cleanup(struct drm_i915_private *dev_priv)
 
 void intel_slpc_enable(struct drm_i915_private *dev_priv)
 {
+	struct page *page;
+	struct slpc_shared_data *data;
+
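+	/* Mark SLPC as not running before asking GuC to reset it */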
+	page = i915_vma_first_page(dev_priv->guc.slpc.vma);
+	data = kmap_atomic(page);
+	data->global_state = SLPC_GLOBAL_STATE_NOT_RUNNING;
+	kunmap_atomic(data);
+
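+	/*
+	 * Send the RESET event; whether SLPC actually reached the
+	 * RUNNING state is confirmed later from the shared data, in
+	 * intel_init_gt_powersave().
+	 */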
+	host2guc_slpc_reset(dev_priv);
+	dev_priv->guc.slpc.active = true;
 }
 
 void intel_slpc_suspend(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_slpc.h b/drivers/gpu/drm/i915/intel_slpc.h
index f7819ed..567b9a3 100644
--- a/drivers/gpu/drm/i915/intel_slpc.h
+++ b/drivers/gpu/drm/i915/intel_slpc.h
@@ -195,6 +195,10 @@ enum slpc_status {
 };
 
 /* intel_slpc.c */
+void intel_slpc_read_shared_data(struct drm_i915_private *dev_priv,
+				struct slpc_shared_data *data);
+const char *intel_slpc_get_state_str(enum slpc_global_state state);
+bool intel_slpc_get_status(struct drm_i915_private *dev_priv);
 void intel_slpc_init(struct drm_i915_private *dev_priv);
 void intel_slpc_cleanup(struct drm_i915_private *dev_priv);
 void intel_slpc_enable(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 5e5131f..bbb6635 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -228,9 +228,11 @@ static bool intel_guc_recv(struct intel_guc *guc, u32 *status)
 	return INTEL_GUC_RECV_IS_RESPONSE(val);
 }
 
-int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
+int __intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len,
+		     u32 *output)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	union slpc_event_output_header header;
 	u32 status;
 	int i;
 	int ret;
@@ -277,12 +279,29 @@ int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
 	}
 	dev_priv->guc.action_status = status;
 
+	/*
+	 * Output data from host-to-GuC SLPC actions is populated in the
+	 * scratch registers SOFT_SCRATCH(1) to SOFT_SCRATCH(14), depending
+	 * on the event. Currently only the SLPC action status is meaningful,
+	 * as the host can query only overridden parameters and those are
+	 * fetched from the host-GuC SLPC shared data.
+	 */
+	if (output && !ret) {
+		output[0] = header.value = I915_READ(SOFT_SCRATCH(1));
+		ret = header.status;
+	}
+
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 	mutex_unlock(&guc->send_mutex);
 
 	return ret;
 }
 
+int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
+{
+	return __intel_guc_send(guc, action, len, NULL);
+}
+
 int intel_guc_sample_forcewake(struct intel_guc *guc)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index fdd4f90..4cd1152 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -193,6 +193,8 @@ struct intel_huc {
 int intel_uc_init_hw(struct drm_i915_private *dev_priv);
 void intel_uc_prepare_fw(struct drm_i915_private *dev_priv,
 			 struct intel_uc_fw *uc_fw);
+int __intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len,
+		     u32 *output);
 int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len);
 int intel_guc_sample_forcewake(struct intel_guc *guc);
 
-- 
1.9.1


