[Intel-gfx] [PATCH v5 12/35] drm/i915: Added deferred work handler for scheduler
John.C.Harrison at Intel.com
Thu Feb 18 14:27:00 UTC 2016
From: John Harrison <John.C.Harrison at Intel.com>
The scheduler needs to do interrupt triggered work that is too complex
to do in the interrupt handler. Thus it requires a deferred work
handler to process such tasks asynchronously.
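
For readers unfamiliar with the mechanism being wired up here, the following is a minimal, self-contained sketch of the standard kernel workqueue pattern this patch follows. The demo_* names are purely illustrative and not part of this series; the real hook points are in the hunks below.

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_private {
	struct work_struct deferred_work;
	/* ... state shared between the interrupt path and the worker ... */
};

/* Runs later, in process context, on a kernel worker thread. */
static void demo_work_handler(struct work_struct *work)
{
	struct demo_private *priv =
		container_of(work, struct demo_private, deferred_work);

	/* Heavy processing that must not run at interrupt time. */
	(void)priv;
}

static void demo_init(struct demo_private *priv)
{
	INIT_WORK(&priv->deferred_work, demo_work_handler);
}

/* Called from the interrupt handler: only schedules, never blocks. */
static void demo_irq_notify(struct demo_private *priv)
{
	queue_work(system_wq, &priv->deferred_work);
}
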
v2: Updated to reduce mutex lock usage. The lock is now only held for
the minimum time within the remove function rather than for the whole
of the worker thread's operation.
v5: Removed objectionable white space and added some documentation.
[Joonas Lahtinen]
For: VIZ-1587
Signed-off-by: John Harrison <John.C.Harrison at Intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen at linux.intel.com>
---
drivers/gpu/drm/i915/i915_dma.c | 3 +++
drivers/gpu/drm/i915/i915_drv.h | 10 ++++++++++
drivers/gpu/drm/i915/i915_gem.c | 2 ++
drivers/gpu/drm/i915/i915_scheduler.c | 29 +++++++++++++++++++++++++++--
drivers/gpu/drm/i915/i915_scheduler.h | 1 +
5 files changed, 43 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 678adc7..c3d382d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1158,6 +1158,9 @@ int i915_driver_unload(struct drm_device *dev)
WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
unregister_shrinker(&dev_priv->mm.shrinker);
+ /* Cancel the scheduler work handler, which should be idle now. */
+ cancel_work_sync(&dev_priv->mm.scheduler_work);
+
io_mapping_free(dev_priv->gtt.mappable);
arch_phys_wc_del(dev_priv->gtt.mtrr);
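
As a hedged illustration of the unload ordering relied on above (reusing the hypothetical demo_private sketch from the commit message): cancel_work_sync() cancels a pending instance of the handler and waits for a running one to finish, so it must be called before the state the handler dereferences is torn down.

static void demo_fini(struct demo_private *priv)
{
	/*
	 * Nothing should be queueing new work by this point; wait for
	 * any pending or executing handler instance to finish before
	 * the data it uses goes away.
	 */
	cancel_work_sync(&priv->deferred_work);

	/* ... now safe to free the state the handler touches ... */
}
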
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 03add1a..4d544f1 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1291,6 +1291,16 @@ struct i915_gem_mm {
struct delayed_work retire_work;
/**
+ * The new scheme is to get an interrupt after every work packet
+ * in order to allow the low latency scheduling of pending
+ * packets. The idea behind adding new packets to a pending
+ * queue rather than directly into the hardware ring buffer
+ * is to allow high priority packets to overtake low priority
+ * ones.
+ */
+ struct work_struct scheduler_work;
+
+ /**
* When we detect an idle GPU, we want to turn on
* powersaving features. So once we see that there
* are no more requests outstanding and no more
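
The comment added above describes why packets go onto a software pending queue before reaching the hardware ring. Below is a rough sketch of the "overtake" behaviour it refers to; the scheduler's real queue structures are defined elsewhere in this series, and demo_packet/demo_queue_packet are illustrative only.

#include <linux/list.h>

struct demo_packet {
	struct list_head link;
	int priority;		/* higher value = more urgent */
};

/*
 * Insert so the list stays sorted by descending priority: a new
 * high priority packet lands ahead of lower priority packets that
 * were queued earlier, i.e. it overtakes them before submission.
 */
static void demo_queue_packet(struct list_head *pending,
			      struct demo_packet *pkt)
{
	struct demo_packet *pos;

	list_for_each_entry(pos, pending, link) {
		if (pkt->priority > pos->priority) {
			list_add_tail(&pkt->link, &pos->link);
			return;
		}
	}

	list_add_tail(&pkt->link, pending);
}
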
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c3b7def..1ab7256 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -5427,6 +5427,8 @@ i915_gem_load(struct drm_device *dev)
i915_gem_retire_work_handler);
INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
i915_gem_idle_work_handler);
+ INIT_WORK(&dev_priv->mm.scheduler_work,
+ i915_scheduler_work_handler);
init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index ab5007a..3986890 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -697,7 +697,9 @@ static int i915_scheduler_remove_dependent(struct i915_scheduler *scheduler,
*/
void i915_scheduler_wakeup(struct drm_device *dev)
{
- /* XXX: Need to call i915_scheduler_remove() via work handler. */
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ queue_work(dev_priv->wq, &dev_priv->mm.scheduler_work);
}
/**
@@ -827,7 +829,7 @@ static bool i915_scheduler_remove(struct i915_scheduler *scheduler,
return do_submit;
}
-void i915_scheduler_process_work(struct intel_engine_cs *ring)
+static void i915_scheduler_process_work(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct i915_scheduler *scheduler = dev_priv->scheduler;
@@ -874,6 +876,29 @@ void i915_scheduler_process_work(struct intel_engine_cs *ring)
}
/**
+ * i915_scheduler_work_handler - scheduler's work handler callback.
+ * @work: Work structure
+ * A lot of the scheduler's work must be done asynchronously in response to
+ * an interrupt or other event. However, that work cannot be done at
+ * interrupt time or in the context of the event signaller (which might in
+ * fact be an interrupt). Thus a worker thread is required. This function
+ * is that worker's callback and does the deferred per-ring processing.
+ */
+void i915_scheduler_work_handler(struct work_struct *work)
+{
+ struct intel_engine_cs *ring;
+ struct drm_i915_private *dev_priv;
+ struct drm_device *dev;
+ int i;
+
+ dev_priv = container_of(work, struct drm_i915_private, mm.scheduler_work);
+ dev = dev_priv->dev;
+
+ for_each_ring(ring, dev_priv, i)
+ i915_scheduler_process_work(ring);
+}
+
+/**
* i915_scheduler_closefile - notify the scheduler that a DRM file handle
* has been closed.
* @dev: DRM device
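
To show how the new entry points are intended to fit together, here is a hedged sketch of an interrupt-time completion path. Only i915_scheduler_notify_request() and i915_scheduler_wakeup() come from this series; the demo_notify_seqno() wrapper is hypothetical and assumes the bool return value means further, deferred processing is wanted.

static void demo_notify_seqno(struct drm_device *dev,
			      struct drm_i915_gem_request *req)
{
	/* Cheap bookkeeping is acceptable at interrupt time... */
	if (i915_scheduler_notify_request(req))
		/* ...but heavier work is punted to the work handler. */
		i915_scheduler_wakeup(dev);
}
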
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 0e8b6a9..180d75f 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -93,5 +93,6 @@ void i915_scheduler_clean_node(struct i915_scheduler_queue_entry *node);
int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe);
bool i915_scheduler_notify_request(struct drm_i915_gem_request *req);
void i915_scheduler_wakeup(struct drm_device *dev);
+void i915_scheduler_work_handler(struct work_struct *work);
#endif /* _I915_SCHEDULER_H_ */
--
1.9.1