[PATCH v6 12/23] drm/i915/slpc: Send RESET event to enable SLPC

Sagar Arun Kamble sagar.a.kamble at intel.com
Thu Jan 5 10:40:42 UTC 2017


From: Tom O'Rourke <Tom.O'Rourke at intel.com>

Send the host2guc SLPC RESET event to GuC after the GuC load completes.
After that, i915 can ascertain through the shared data whether SLPC has
started running successfully; this check is done during
intel_init_gt_powersave. This lets SLPC set up the initial configuration,
and lets i915 fall back to Host RPS if SLPC runs into issues.
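
For reference, the wire format used by the new host2guc_slpc() path is
roughly the following; this is a sketch inferred from the patch below,
not a separately defined API:

	u32 data[4];	/* action + event header + two event args */

	data[0] = INTEL_GUC_ACTION_SLPC_REQUEST;
	data[1] = SLPC_EVENT(SLPC_EVENT_RESET, 2);	/* event id + arg count */
	data[2] = i915_ggtt_offset(guc->slpc.vma);	/* shared data GGTT offset */
	data[3] = 0;
	intel_guc_send(guc, data, 4);
	/* GuC reports the event processing status in SOFT_SCRATCH(1) */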

v1: Extract host2guc_slpc to handle slpc status code
    coding style changes (Paulo)
    Removed WARN_ON for checking msb of gtt address of
    shared gem obj. (ChrisW)
    host2guc_action to i915_guc_action change.(Sagar)
    Updating SLPC enabled status. (Sagar)

v2: Commit message update. (David)

v3: Rebase.

v4: Added DRM_INFO message when SLPC is enabled.

v5: Updated patch as host2guc_slpc is moved to earlier patch.
    SLPC activation status message put after checking the
    state from shared data during intel_init_gt_powersave.

v6: Added definition of host2guc_slpc and clflush the shared data only
    for required size. Setting state to NOT_RUNNING before sending RESET
    event.

Signed-off-by: Tom O'Rourke <Tom.O'Rourke at intel.com>
Signed-off-by: Sagar Arun Kamble <sagar.a.kamble at intel.com>
---
 drivers/gpu/drm/i915/intel_pm.c   |   8 +++
 drivers/gpu/drm/i915/intel_slpc.c | 142 ++++++++++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/intel_slpc.h |   4 ++
 3 files changed, 154 insertions(+)
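
Note: a hypothetical consumer of the new helpers, e.g. a debugfs dump
of the SLPC state, would look roughly like this (the seq_file usage is
illustrative only and not part of this patch):

	struct slpc_shared_data data;

	/* refresh shared data via H2G QUERY_TASK_STATE, then copy it out */
	intel_slpc_read_shared_data(dev_priv, &data);
	seq_printf(m, "SLPC state: %s\n",
		   intel_slpc_get_state_str(data.global_state));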

diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 9bb9547..e35c564 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -6681,6 +6681,14 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
 		intel_runtime_pm_get(dev_priv);
 	}
 
+	if (i915.enable_slpc) {
+		dev_priv->guc.slpc.active = intel_slpc_get_status(dev_priv);
+		if (!dev_priv->guc.slpc.active) {
+			i915.enable_slpc = 0;
+			intel_sanitize_gt_powersave(dev_priv);
+		}
+	}
+
 	mutex_lock(&dev_priv->drm.struct_mutex);
 	mutex_lock(&dev_priv->rps.hw_lock);
 
diff --git a/drivers/gpu/drm/i915/intel_slpc.c b/drivers/gpu/drm/i915/intel_slpc.c
index 4003bdb1..be44776 100644
--- a/drivers/gpu/drm/i915/intel_slpc.c
+++ b/drivers/gpu/drm/i915/intel_slpc.c
@@ -82,6 +82,138 @@ static void slpc_shared_data_init(struct drm_i915_private *dev_priv)
 	kunmap_atomic(data);
 }
 
+static void host2guc_slpc(struct drm_i915_private *dev_priv,
+			  struct slpc_event_input *input, u32 len)
+{
+	union slpc_event_output_header header;
+	u32 *data;
+	int ret = 0;
+
+	/*
+	 * We have only 15 scratch registers for communication.
+	 * The first one is used for the event ID in both the input
+	 * and the output data. The event processing status is
+	 * reported in the SOFT_SCRATCH(1) register.
+	 */
+	BUILD_BUG_ON(SLPC_EVENT_MAX_INPUT_ARGS > 14);
+	BUILD_BUG_ON(SLPC_EVENT_MAX_OUTPUT_ARGS < 1);
+	BUILD_BUG_ON(SLPC_EVENT_MAX_OUTPUT_ARGS > 14);
+
+	data = (u32 *) input;
+	data[0] = INTEL_GUC_ACTION_SLPC_REQUEST;
+	ret = intel_guc_send(&dev_priv->guc, data, len);
+
+	if (!ret) {
+		header.value = I915_READ(SOFT_SCRATCH(1));
+		ret = header.status;
+	}
+
+	if (ret)
+		DRM_ERROR("event 0x%x status %d\n", (data[1] >> 8), ret);
+}
+
+static void host2guc_slpc_reset(struct drm_i915_private *dev_priv)
+{
+	struct slpc_event_input data = {0};
+	u32 shared_data_gtt_offset = i915_ggtt_offset(dev_priv->guc.slpc.vma);
+
+	data.header.value = SLPC_EVENT(SLPC_EVENT_RESET, 2);
+	data.args[0] = shared_data_gtt_offset;
+	data.args[1] = 0;
+
+	host2guc_slpc(dev_priv, &data, 4);
+}
+
+static void host2guc_slpc_query_task_state(struct drm_i915_private *dev_priv)
+{
+	struct slpc_event_input data = {0};
+	u32 shared_data_gtt_offset = i915_ggtt_offset(dev_priv->guc.slpc.vma);
+
+	data.header.value = SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2);
+	data.args[0] = shared_data_gtt_offset;
+	data.args[1] = 0;
+
+	host2guc_slpc(dev_priv, &data, 4);
+}
+
+void intel_slpc_query_task_state(struct drm_i915_private *dev_priv)
+{
+	if (dev_priv->guc.slpc.active)
+		host2guc_slpc_query_task_state(dev_priv);
+}
+
+/*
+ * Read the state updates from GuC SLPC into the caller's buffer by
+ * first invoking the H2G action that refreshes the shared data.
+ */
+void intel_slpc_read_shared_data(struct drm_i915_private *dev_priv,
+				 struct slpc_shared_data *data)
+{
+	struct page *page;
+	void *pv = NULL;
+
+	intel_slpc_query_task_state(dev_priv);
+
+	page = i915_vma_first_page(dev_priv->guc.slpc.vma);
+	pv = kmap_atomic(page);
+
+	drm_clflush_virt_range(pv, sizeof(struct slpc_shared_data));
+	memcpy(data, pv, sizeof(struct slpc_shared_data));
+
+	kunmap_atomic(pv);
+}
+
+const char *intel_slpc_get_state_str(enum slpc_global_state state)
+{
+	if (state == SLPC_GLOBAL_STATE_NOT_RUNNING)
+		return "not running";
+	else if (state == SLPC_GLOBAL_STATE_INITIALIZING)
+		return "initializing";
+	else if (state == SLPC_GLOBAL_STATE_RESETTING)
+		return "resetting";
+	else if (state == SLPC_GLOBAL_STATE_RUNNING)
+		return "running";
+	else if (state == SLPC_GLOBAL_STATE_SHUTTING_DOWN)
+		return "shutting down";
+	else if (state == SLPC_GLOBAL_STATE_ERROR)
+		return "error";
+	else
+		return "unknown";
+}
+
+bool intel_slpc_get_status(struct drm_i915_private *dev_priv)
+{
+	struct slpc_shared_data data;
+	bool ret = false;
+
+	intel_slpc_read_shared_data(dev_priv, &data);
+	DRM_INFO("SLPC state: %s\n",
+		 intel_slpc_get_state_str(data.global_state));
+
+	switch (data.global_state) {
+	case SLPC_GLOBAL_STATE_RUNNING:
+		/* Capture required state from SLPC here */
+		ret = true;
+		break;
+	case SLPC_GLOBAL_STATE_ERROR:
+		DRM_ERROR("SLPC in error state.\n");
+		break;
+	case SLPC_GLOBAL_STATE_RESETTING:
+		/*
+		 * SLPC enabling in GuC should complete quickly; since we
+		 * send the RESET event during GuC load itself, hitting
+		 * this state here is unlikely.
+		 * TODO: Wait until the state changes to RUNNING.
+		 */
+		ret = true;
+		DRM_ERROR("SLPC not running yet\n");
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
 void intel_slpc_init(struct drm_i915_private *dev_priv)
 {
 	struct intel_guc *guc = &dev_priv->guc;
@@ -118,6 +250,16 @@ void intel_slpc_cleanup(struct drm_i915_private *dev_priv)
 
 void intel_slpc_enable(struct drm_i915_private *dev_priv)
 {
+	struct page *page;
+	struct slpc_shared_data *data;
+
+	page = i915_vma_first_page(dev_priv->guc.slpc.vma);
+	data = kmap_atomic(page);
+	data->global_state = SLPC_GLOBAL_STATE_NOT_RUNNING;
+	kunmap_atomic(data);
+
+	host2guc_slpc_reset(dev_priv);
+	dev_priv->guc.slpc.active = true;
 }
 
 void intel_slpc_suspend(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_slpc.h b/drivers/gpu/drm/i915/intel_slpc.h
index f7819ed..567b9a3 100644
--- a/drivers/gpu/drm/i915/intel_slpc.h
+++ b/drivers/gpu/drm/i915/intel_slpc.h
@@ -195,6 +195,10 @@ enum slpc_status {
 };
 
 /* intel_slpc.c */
+void intel_slpc_read_shared_data(struct drm_i915_private *dev_priv,
+				struct slpc_shared_data *data);
+const char *intel_slpc_get_state_str(enum slpc_global_state state);
+bool intel_slpc_get_status(struct drm_i915_private *dev_priv);
 void intel_slpc_init(struct drm_i915_private *dev_priv);
 void intel_slpc_cleanup(struct drm_i915_private *dev_priv);
 void intel_slpc_enable(struct drm_i915_private *dev_priv);
-- 
1.9.1


