[Intel-gfx] [PATCH 17/17] drm/i915: Use rt priority kthread to do GuC log buffer sampling

akash.goel at intel.com
Sun Jul 10 13:41:24 UTC 2016


From: Akash Goel <akash.goel at intel.com>

In cases where the GuC firmware generates logs at a very high rate, the
driver too needs to be fast enough to sample the log buffer in time for
the half-full draining protocol to be effective and to prevent the log
buffer from overflowing. The scheduling latency of the bottom half,
where the sampling is done, should therefore be kept as low as possible,
and for that a dedicated high priority rt kthread is a better option
than a dedicated high priority workqueue. The behaviour is also more
deterministic with a dedicated rt kthread than with worker threads,
which may have to cater to work items from other workqueues as well.
Note that optimization on the driver side alone will not ensure lossless
capture of logs; under high speed logging, userspace should be equally
fast in collecting the logs from the relay buffer.
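
For reference, a minimal sketch of such a userspace collector is shown
below. It assumes the relay channel is exposed through debugfs (the
"/sys/kernel/debug/dri/0/guc_log0" path used here is only illustrative,
not something this patch defines) and simply drains whatever data is
available each time poll() reports the relay file as readable:

#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

int main(void)
{
	char buf[8 << 10];
	ssize_t len;
	/* Hypothetical relay file path, adjust to the actual debugfs layout */
	int relay_fd = open("/sys/kernel/debug/dri/0/guc_log0", O_RDONLY);
	int out_fd = open("guc_log.bin", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	struct pollfd pfd = { .fd = relay_fd, .events = POLLIN };

	if (relay_fd < 0 || out_fd < 0)
		return 1;

	/* Block until relay data is ready, then drain it all before sleeping */
	while (poll(&pfd, 1, -1) > 0) {
		while ((len = read(relay_fd, buf, sizeof(buf))) > 0) {
			if (write(out_fd, buf, len) < 0)
				break;
		}
	}

	close(out_fd);
	close(relay_fd);
	return 0;
}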

Suggested-by: Chris Wilson <chris at chris-wilson.co.uk>
Signed-off-by: Akash Goel <akash.goel at intel.com>
---
 drivers/gpu/drm/i915/i915_drv.c            | 13 -----
 drivers/gpu/drm/i915/i915_guc_submission.c | 76 ++++++++++++++++++++++++++++--
 drivers/gpu/drm/i915/i915_irq.c            | 24 ++--------
 drivers/gpu/drm/i915/intel_guc.h           |  5 +-
 4 files changed, 80 insertions(+), 38 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 43c9900..ed00192 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -791,20 +791,8 @@ static int i915_workqueues_init(struct drm_i915_private *dev_priv)
 	if (dev_priv->hotplug.dp_wq == NULL)
 		goto out_free_wq;
 
-	if (HAS_GUC_SCHED(dev_priv)) {
-		/* Need a dedicated wq to process log buffer flush interrupts
-		 * from GuC without much delay so as to avoid any loss of logs.
-		 */
-		dev_priv->guc.log.wq =
-			alloc_ordered_workqueue("i915-guc_log", 0);
-		if (dev_priv->guc.log.wq == NULL)
-			goto out_free_hotplug_dp_wq;
-	}
-
 	return 0;
 
-out_free_hotplug_dp_wq:
-	destroy_workqueue(dev_priv->hotplug.dp_wq);
 out_free_wq:
 	destroy_workqueue(dev_priv->wq);
 out_err:
@@ -815,7 +803,6 @@ out_err:
 
 static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
 {
-	destroy_workqueue(dev_priv->guc.log.wq);
 	destroy_workqueue(dev_priv->hotplug.dp_wq);
 	destroy_workqueue(dev_priv->wq);
 }
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 067eb52..0093240 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -21,6 +21,7 @@
  * IN THE SOFTWARE.
  *
  */
+#include <linux/kthread.h>
 #include <linux/firmware.h>
 #include <linux/circ_buf.h>
 #include <linux/debugfs.h>
@@ -1096,6 +1097,48 @@ static int guc_create_log_relay_file(struct intel_guc *guc)
 	return 0;
 }
 
+void guc_cancel_log_flush_work_sync(struct drm_i915_private *dev_priv)
+{
+	spin_lock_irq(&dev_priv->irq_lock);
+	dev_priv->guc.log.flush_signal = false;
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	if (dev_priv->guc.log.flush_task)
+		wait_for_completion(&dev_priv->guc.log.flush_completion);
+}
+
+static int guc_log_flush_worker(void *arg)
+{
+	struct drm_i915_private *dev_priv = arg;
+	struct intel_guc *guc = &dev_priv->guc;
+
+	/* Install ourselves with high priority to reduce signalling latency */
+	struct sched_param param = { .sched_priority = 1 };
+	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
+
+	do {
+		set_current_state(TASK_INTERRUPTIBLE);
+
+		spin_lock_irq(&dev_priv->irq_lock);
+		if (guc->log.flush_signal) {
+			guc->log.flush_signal = false;
+			reinit_completion(&guc->log.flush_completion);
+			spin_unlock_irq(&dev_priv->irq_lock);
+			i915_guc_capture_logs(&dev_priv->drm);
+			complete_all(&guc->log.flush_completion);
+		} else {
+			spin_unlock_irq(&dev_priv->irq_lock);
+			if (kthread_should_stop())
+				break;
+
+			schedule();
+		}
+	} while (1);
+	__set_current_state(TASK_RUNNING);
+
+	return 0;
+}
+
 static void guc_log_cleanup(struct drm_i915_private *dev_priv)
 {
 	struct intel_guc *guc = &dev_priv->guc;
@@ -1108,6 +1151,11 @@ static void guc_log_cleanup(struct drm_i915_private *dev_priv)
 	/* First disable the flush interrupt */
 	gen9_disable_guc_interrupts(dev_priv);
 
+	if (guc->log.flush_task)
+		kthread_stop(guc->log.flush_task);
+
+	guc->log.flush_task = NULL;
+
 	guc_remove_log_relay_file(guc);
 	guc->log.relay_chan = NULL;
 
@@ -1120,6 +1168,7 @@ static void guc_log_cleanup(struct drm_i915_private *dev_priv)
 static int guc_create_log_extras(struct intel_guc *guc)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct task_struct *tsk;
 	void *vaddr;
 	int ret;
 
@@ -1144,6 +1193,23 @@ static int guc_create_log_extras(struct intel_guc *guc)
 		guc->log.buf_addr = vaddr;
 	}
 
+	if (!guc->log.flush_task) {
+		init_completion(&guc->log.flush_completion);
+		/* Spawn a thread to provide a fast bottom-half for handling log
+		 * buffer flush interrupts.
+		 */
+		tsk = kthread_run(guc_log_flush_worker, dev_priv,
+					"i915/guc_log_flushd");
+		if (IS_ERR(tsk)) {
+			ret = PTR_ERR(tsk);
+			DRM_ERROR("creation of log flush task failed %d\n", ret);
+			guc_log_cleanup(dev_priv);
+			return ret;
+		}
+
+		guc->log.flush_task = tsk;
+	}
+
 	return 0;
 }
 
@@ -1206,8 +1272,9 @@ static int guc_log_late_setup(struct drm_device *dev)
 	if (WARN_ON(guc->log.relay_chan))
 		return -EINVAL;
 
-	/* If log_level was set as -1 at boot time, then vmalloc mapping would
-	 * not have been created for the log buffer, so create one now.
+	/* If log_level was set as -1 at boot time, then vmalloc mapping and
+	 * flush daemon would not have been created for the log buffer, so
+	 * create them now.
 	 */
 	ret = guc_create_log_extras(guc);
 	if (ret)
@@ -1474,6 +1541,9 @@ void i915_guc_capture_logs_on_reset(struct drm_device *dev)
 	/* First disable the interrupts, will be renabled after reset */
 	gen9_disable_guc_interrupts(dev_priv);
 
+	/* Wait for the ongoing flush, if any, to complete */
+	guc_cancel_log_flush_work_sync(dev_priv);
+
 	/* Ask GuC to update the log buffer state */
 	host2guc_force_logbuffer_flush(&dev_priv->guc);
 
@@ -1544,7 +1614,7 @@ int i915_guc_log_control(struct drm_device *dev, uint64_t control_val)
 		 * buffer state and send the flush interrupt so that Host can
 		 * collect the left over logs also.
 		 */
-		flush_work(&dev_priv->guc.events_work);
+		guc_cancel_log_flush_work_sync(dev_priv);
 		host2guc_force_logbuffer_flush(&dev_priv->guc);
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index c3fb67e..0b6991e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -435,13 +435,14 @@ void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
 {
 	spin_lock_irq(&dev_priv->irq_lock);
 	dev_priv->guc.interrupts_enabled = false;
+	/* Speed up cancellation after disabling interrupts */
+	dev_priv->guc.log.flush_signal = false;
 
 	gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);
 
 	spin_unlock_irq(&dev_priv->irq_lock);
 	synchronize_irq(dev_priv->drm.irq);
 
-	cancel_work_sync(&dev_priv->guc.events_work);
 	gen9_reset_guc_interrupts(dev_priv);
 }
 
@@ -1208,22 +1209,6 @@ static void gen6_pm_rps_work(struct work_struct *work)
 	mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
-static void gen9_guc2host_events_work(struct work_struct *work)
-{
-	struct drm_i915_private *dev_priv =
-		container_of(work, struct drm_i915_private, guc.events_work);
-
-	spin_lock_irq(&dev_priv->irq_lock);
-	/* Speed up work cancellation during disabling guc interrupts. */
-	if (!dev_priv->guc.interrupts_enabled) {
-		spin_unlock_irq(&dev_priv->irq_lock);
-		return;
-	}
-	spin_unlock_irq(&dev_priv->irq_lock);
-
-	i915_guc_capture_logs(&dev_priv->drm);
-}
-
 /**
  * ivybridge_parity_work - Workqueue called when a parity error interrupt
  * occurred.
@@ -1707,8 +1692,8 @@ static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
 					I915_READ(SOFT_SCRATCH(15)) & ~msg);
 
 				/* Handle flush interrupt event in bottom half */
-				queue_work(dev_priv->guc.log.wq,
-						&dev_priv->guc.events_work);
+				smp_store_mb(dev_priv->guc.log.flush_signal, 1);
+				wake_up_process(dev_priv->guc.log.flush_task);
 			}
 		}
 		dev_priv->guc.log.flush_interrupt_count++;
@@ -4629,7 +4614,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 
 	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
-	INIT_WORK(&dev_priv->guc.events_work, gen9_guc2host_events_work);
 
 	if (HAS_GUC_UCODE(dev))
 		dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index e911a32..70c2241 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -125,9 +125,11 @@ struct intel_guc_fw {
 struct intel_guc_log {
 	uint32_t flags;
 	struct drm_i915_gem_object *obj;
-	struct workqueue_struct *wq;
 	void *buf_addr;
 	struct rchan *relay_chan;
+	struct task_struct *flush_task;
+	struct completion flush_completion;
+	bool flush_signal;
 
 	/* logging related stats */
 	u32 flush_interrupt_count;
@@ -141,7 +143,6 @@ struct intel_guc {
 	struct intel_guc_log log;
 
 	/* GuC2Host interrupt related state */
-	struct work_struct events_work;
 	bool interrupts_enabled;
 
 	struct drm_i915_gem_object *ads_obj;
-- 
1.9.2
