[Intel-gfx] [PATCH 08/15] drm/i915: fairness
Ben Widawsky
ben at bwidawsk.net
Sat Nov 19 03:24:25 CET 2011
We now have enough information to block clients which are getting greedy.

We have no HW support for preempting batches, so the finest granularity
at which we can schedule is the batch buffer. We can therefore block
clients at submission time if they have gotten too many batches ahead.
Signed-off-by: Ben Widawsky <ben at bwidawsk.net>
---
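A note on the policy, to make the diff easier to read: a client blocks at
submit once it has more than scheduler.high_watermark requests outstanding;
each blocked pass waits for the client's (low_watermark + 1)-th oldest
request to retire, then re-checks. The following is a minimal, illustrative
userspace model of that loop, not driver code; everything in it except the
50/5 defaults is invented for the example:

  #include <stdio.h>

  /* Invented stand-ins; only the 50/5 defaults mirror what
   * i915_gem_load() sets below. */
  enum { HIGH_WATERMARK = 50, LOW_WATERMARK = 5 };

  static int outstanding = 64; /* cf. file_priv->outstanding_requests */

  /* Stand-in for i915_wait_request() on the request found at index
   * low_watermark in the client's request list: once it has retired,
   * so have the LOW_WATERMARK requests queued before it. */
  static void wait_for_nth_oldest(void)
  {
          outstanding -= LOW_WATERMARK + 1;
  }

  int main(void)
  {
          int waits = 0;

          /* the BLOCK_CONDITION / I915_RESCHED loop of i915_schedule() */
          while (outstanding > HIGH_WATERMARK) {
                  wait_for_nth_oldest();
                  waits++;
          }
          printf("unblocked after %d waits, %d requests outstanding\n",
                 waits, outstanding);
          return 0;
  }

The throttle is off by default, and since the parameter is registered 0444
it can only be enabled at load time, e.g. i915.scheduler=1 on the kernel
command line.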
drivers/gpu/drm/i915/i915_dma.c | 1 +
drivers/gpu/drm/i915/i915_drv.c | 5 ++
drivers/gpu/drm/i915/i915_drv.h | 16 ++++
drivers/gpu/drm/i915/i915_gem.c | 4 +
drivers/gpu/drm/i915/i915_gem_execbuffer.c | 109 +++++++++++++++++++++++++++-
5 files changed, 131 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index d1c9a6f..942c5c2 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -2206,6 +2206,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file)
spin_lock_init(&file_priv->lock);
INIT_LIST_HEAD(&file_priv->request_list);
file_priv->outstanding_requests = 0;
+ file_priv->forced_throttles = 0;
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 548e04b..79653b6 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -102,6 +102,11 @@ MODULE_PARM_DESC(enable_hangcheck,
"WARNING: Disabling this can cause system wide hangs. "
"(default: true)");
+int i915_scheduler __read_mostly = 0;
+module_param_named(scheduler, i915_scheduler, int, 0444);
+MODULE_PARM_DESC(scheduler,
+ "GPU workload scheduling. (default: off)");
+
static struct drm_driver driver;
extern int intel_agp_enabled;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 406279c..2917e54 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -734,6 +734,17 @@ typedef struct drm_i915_private {
struct drm_property *force_audio_property;
atomic_t forcewake_count;
+
+ /* For the foreseeable future, we will only have 2 real scheduler
+ * types, and both are really similar. If the count grows past that,
+ * we'd want to abstract this better.
+ */
+ struct {
+ /* Point at which we consider blocking */
+ unsigned high_watermark;
+ /* Point when we consider unblocking */
+ unsigned low_watermark;
+ } scheduler;
} drm_i915_private_t;
enum i915_cache_level {
@@ -922,6 +933,7 @@ struct drm_i915_file_private {
struct list_head request_list;
int outstanding_requests;
+ int forced_throttles;
};
#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
@@ -1006,6 +1018,10 @@ extern int i915_vbt_sdvo_panel_type __read_mostly;
extern unsigned int i915_enable_rc6 __read_mostly;
extern unsigned int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
+extern int i915_scheduler __read_mostly;
+extern unsigned int i915_sched_hog_threshold;
+extern unsigned int i915_sched_low_watermark;
+
extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ee9a77a..2a2f0cd 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3942,6 +3942,10 @@ i915_gem_load(struct drm_device *dev)
dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
register_shrinker(&dev_priv->mm.inactive_shrinker);
+
+ /* Set some decent defaults for the scheduler */
+ dev_priv->scheduler.high_watermark = 50;
+ dev_priv->scheduler.low_watermark = 5;
}
/*
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 1d66c24..744ff9b 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -953,6 +953,87 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
}
}
+enum {
+ I915_RESCHED, /* the wait finished; re-run the scheduler checks */
+ I915_OUT, /* bail out: fd closed, wait failed, or GPU wedged/suspended */
+ I915_DONE, /* client may submit; carry on with execbuffer */
+ I915_ERROR /* defensive; should be unreachable */
+};
+
+static int
+i915_schedule(struct drm_device *dev, struct intel_ring_buffer *ring, struct drm_file *file)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct drm_i915_gem_request *request = NULL;
+ u32 seqno = 0;
+ int ret, i = 0;
+
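+/* Submission policy, scoped to this function: BLOCK_CONDITION is the
+ * high-watermark trigger for throttling, OUT_CONDITION aborts the wait
+ * on a wedged or suspended GPU, and RUNNABLE is the low-watermark
+ * release check. */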
+#define BLOCK_CONDITION ( \
+ i915_scheduler && \
+ (file_priv->outstanding_requests > dev_priv->scheduler.high_watermark))
+#define OUT_CONDITION ( \
+ (atomic_read(&dev_priv->mm.wedged)) || \
+ (dev_priv->mm.suspended))
+#define RUNNABLE ( \
+ file_priv->outstanding_requests < dev_priv->scheduler.low_watermark)
+
+ spin_lock(&file_priv->lock);
+ if (!BLOCK_CONDITION) {
+ spin_unlock(&file_priv->lock);
+ return I915_DONE;
+ }
+
+ /* Check if our fd was released while spinning */
+ if (file_priv->outstanding_requests == -1) {
+ spin_unlock(&file_priv->lock);
+ return I915_OUT;
+ }
+
+ /* Check if we're now able to run */
+ if (RUNNABLE) {
+ spin_unlock(&file_priv->lock);
+ return I915_DONE;
+ }
+
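+ /* Find the (low_watermark + 1)-th oldest request from this client;
+ * waiting for it to retire also retires everything queued before it. */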
+ list_for_each_entry(request, &file_priv->request_list, client_list) {
+ if (i == dev_priv->scheduler.low_watermark) {
+ seqno = request->seqno;
+ break;
+ }
+ i++;
+ }
+ spin_unlock(&file_priv->lock);
+
+ BUG_ON(seqno == 0 || i < dev_priv->scheduler.low_watermark);
+
+ ret = i915_wait_request(ring, seqno, true);
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ /* Check if our fd was released while spinning */
+ spin_lock(&file_priv->lock);
+ if (file_priv->outstanding_requests == -1) {
+ DRM_INFO("File was closed while waiting\n");
+ spin_unlock(&file_priv->lock);
+ return I915_OUT;
+ } else {
+ file_priv->forced_throttles++;
+ spin_unlock(&file_priv->lock);
+ }
+
+ if (ret == -ERESTARTSYS || (ret == 0 && OUT_CONDITION))
+ return I915_OUT;
+ else if (ret)
+ return I915_OUT;
+ else
+ return I915_RESCHED;
+
+#undef RUNNABLE
+#undef BLOCK_CONDITION
+#undef OUT_CONDITION
+ return I915_ERROR;
+}
+
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file,
@@ -961,7 +1042,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head objects;
- struct eb_objects *eb;
+ struct eb_objects *eb = NULL;
struct drm_i915_gem_object *batch_obj;
struct drm_clip_rect *cliprects = NULL;
struct intel_ring_buffer *ring;
@@ -1034,17 +1115,37 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
goto pre_mutex_err;
+again:
if (dev_priv->mm.suspended) {
mutex_unlock(&dev->struct_mutex);
ret = -EBUSY;
goto pre_mutex_err;
}
- eb = eb_create(args->buffer_count);
- if (eb == NULL) {
+ if (!eb) {
+ eb = eb_create(args->buffer_count);
+ if (eb == NULL) {
+ mutex_unlock(&dev->struct_mutex);
+ ret = -ENOMEM;
+ goto pre_mutex_err;
+ }
+ }
+
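+ /* Throttle greedy clients: I915_RESCHED loops back to re-evaluate
+ * the watermarks after a wait; I915_OUT abandons the submission. */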
+ switch (i915_schedule(dev, ring, file)) {
+ case I915_RESCHED:
+ goto again;
+ break;
+ case I915_OUT:
+ eb_destroy(eb);
mutex_unlock(&dev->struct_mutex);
- ret = -ENOMEM;
+ ret = -EINVAL;
goto pre_mutex_err;
+ break;
+ case I915_DONE:
+ break;
+ case I915_ERROR:
+ default:
+ BUG();
}
/* Look up object handles */
--
1.7.7.3