[Intel-gfx] [PATCH 3/3] drm/i915: fairness

Ben Widawsky ben at bwidawsk.net
Sat Oct 29 07:55:29 CEST 2011


We now have enough information to block apps that are getting greedy.

We don't have HW support for preempting batches, so the finest
granularity at which we can schedule is the batch buffer. We can block
clients at submission time if they've gotten too many batches ahead.
The code does a very basic adaptive-style synchronization (a bounded
busy-wait before sleeping), since a few tests suggested that the cost
of putting the process to sleep was too high for smaller limits. Until
we get good values for the various use cases this will stay, but I can
easily envision it being gone from the final code (that's why the new
wait APIs are in this patch).
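
For reference, the throttling added to i915_gem_do_execbuffer() boils
down to the two-stage wait sketched below. This is simplified from the
hunk in this patch and is illustrative only: it omits the
wedged/suspended checks, the eb bookkeeping and the relock-and-retry.
All names used are the ones introduced by this series.

    if (atomic_read(&file_priv->mm.outstanding_requests) >
        i915_sched_hog_threshold) {
            /* Cheap path: busy-wait briefly for requests to retire. */
            ret = wait_for_us(atomic_read(&file_priv->mm.outstanding_requests)
                              <= i915_sched_low_watermark, 200);
            if (ret == -ETIMEDOUT)
                    /* Slow path: sleep until a retired request wakes us. */
                    ret = wait_event_interruptible(file_priv->mm.request_waitq,
                            atomic_read(&file_priv->mm.outstanding_requests)
                            <= i915_sched_low_watermark);
    }

Both thresholds come from the new hog_thresh and low module parameters;
since they are registered with 0600 permissions they can also be tuned
at runtime under /sys/module/i915/parameters/ while experimenting with
values.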

Signed-off-by: Ben Widawsky <ben at bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_drv.c            |   16 +++++++--
 drivers/gpu/drm/i915/i915_gem.c            |    7 ++--
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |   52 ++++++++++++++++++++++++---
 drivers/gpu/drm/i915/intel_drv.h           |   13 ++++---
 4 files changed, 70 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 34289ab..41e418f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -102,9 +102,19 @@ MODULE_PARM_DESC(enable_hangcheck,
 		"WARNING: Disabling this can cause system wide hangs. "
 		"(default: true)");
 
-/* Default sched options to off */
-unsigned int i915_sched_hog_threshold __read_mostly = -1;
-unsigned int i915_sched_low_watermark __read_mostly = -1;
+unsigned int i915_sched_hog_threshold __read_mostly = 0xffffffff;
+module_param_named(hog_thresh, i915_sched_hog_threshold, int, 0600);
+MODULE_PARM_DESC(hog_thresh,
+		"Number of consecutive batches after which the driver "
+		"will block the DRM client. (default: 0xffffffff)");
+
+unsigned int i915_sched_low_watermark __read_mostly = 0xffffffff;
+module_param_named(low, i915_sched_low_watermark, int, 0600);
+MODULE_PARM_DESC(low,
+		"Number of batches remaining before unblocking a DRM "
+		"client that had been blocked because of too many "
+		"consecutive submissions. (default: 0xffffffff)");
+
 static struct drm_driver driver;
 extern int intel_agp_enabled;
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 26b4b81..5b7daa1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4120,8 +4120,10 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 	 * later retire_requests won't dereference our soon-to-be-gone
 	 * file_priv.
 	 */
-	spin_lock(&file_priv->mm.lock);
 	wake_up_all(&file_priv->mm.request_waitq);
+	wait_for(atomic_read(&file_priv->mm.outstanding_requests) == 0, 1000);
+
+	spin_lock(&file_priv->mm.lock);
 	while (!list_empty(&file_priv->mm.request_list)) {
 		struct drm_i915_gem_request *request;
 
@@ -4129,9 +4131,6 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 					   struct drm_i915_gem_request,
 					   client_list);
 		list_del(&request->client_list);
-		atomic_dec(&file_priv->mm.outstanding_requests);
-		if (WARN_ON(atomic_read(&file_priv->mm.outstanding_requests)))
-			atomic_set(&file_priv->mm.outstanding_requests, 0);
 		request->file_priv = NULL;
 	}
 	spin_unlock(&file_priv->mm.lock);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3693e83..5f5a63a 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -960,8 +960,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		       struct drm_i915_gem_exec_object2 *exec)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_file_private *file_priv = file->driver_priv;
 	struct list_head objects;
-	struct eb_objects *eb;
+	struct eb_objects *eb = NULL;
 	struct drm_i915_gem_object *batch_obj;
 	struct drm_clip_rect *cliprects = NULL;
 	struct intel_ring_buffer *ring;
@@ -1063,6 +1064,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		}
 	}
 
+again:
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		goto pre_mutex_err;
@@ -1073,15 +1075,53 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto pre_mutex_err;
 	}
 
-	eb = eb_create(args->buffer_count);
-	if (eb == NULL) {
-		mutex_unlock(&dev->struct_mutex);
-		ret = -ENOMEM;
-		goto pre_mutex_err;
+	if (!eb) {
+		eb = eb_create(args->buffer_count);
+		if (eb == NULL) {
+			mutex_unlock(&dev->struct_mutex);
+			ret = -ENOMEM;
+			goto pre_mutex_err;
+		}
 	}
 
 	/* Look up object handles */
 	INIT_LIST_HEAD(&objects);
+
+#define BLOCK_CONDITION ( \
+	atomic_read(&file_priv->mm.outstanding_requests) > i915_sched_hog_threshold)
+#define DONE_CONDITION ( \
+	atomic_read(&file_priv->mm.outstanding_requests) <= i915_sched_low_watermark)
+#define OUT_CONDITION ( \
+	DONE_CONDITION || \
+	(atomic_read(&dev_priv->mm.wedged)) || \
+	(dev_priv->mm.suspended))
+
+	if (BLOCK_CONDITION) {
+		mutex_unlock(&dev->struct_mutex);
+		ret = wait_for_us(OUT_CONDITION, 200);
+		if (!ret && DONE_CONDITION) {
+			eb_reset(eb);
+			goto again;
+		} else if (ret != -ETIMEDOUT) {
+			eb_destroy(eb);
+			goto pre_mutex_err;
+		}
+
+		ret = wait_event_interruptible(file_priv->mm.request_waitq,
+					       OUT_CONDITION);
+		if (!ret && DONE_CONDITION) {
+			eb_reset(eb);
+			goto again;
+		} else {
+			eb_destroy(eb);
+			goto pre_mutex_err;
+		}
+	}
+
+#undef BLOCK_CONDITION
+#undef DONE_CONDITION
+#undef OUT_CONDITION
+
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_i915_gem_object *obj;
 
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index bd9a604..6c5dad9 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -31,21 +31,24 @@
 #include "drm_crtc_helper.h"
 #include "drm_fb_helper.h"
 
-#define _wait_for(COND, MS, W) ({ \
-	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
+#define _wait_for(COND, XS, W, gran, func) ({ \
+	unsigned long timeout__ = jiffies + gran##_to_jiffies(XS);	\
 	int ret__ = 0;							\
 	while (!(COND)) {						\
 		if (time_after(jiffies, timeout__)) {			\
 			ret__ = -ETIMEDOUT;				\
 			break;						\
 		}							\
-		if (W && !(in_atomic() || in_dbg_master())) msleep(W);	\
+		if (W && !(in_atomic() || in_dbg_master())) func(W);	\
 	}								\
 	ret__;								\
 })
 
-#define wait_for(COND, MS) _wait_for(COND, MS, 1)
-#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
+#define wait_for_us(COND, US) _wait_for(COND, US, 1, usecs, udelay)
+#define wait_for_ms(COND, MS) _wait_for(COND, MS, 1, msecs, msleep)
+#define wait_for_atomic_ms(COND, MS) _wait_for(COND, MS, 0, msecs, msleep)
+#define wait_for(COND, MS) wait_for_ms(COND, MS)
+#define wait_for_atomic(COND, MS) wait_for_atomic_ms(COND, MS)
 
 #define MSLEEP(x) do { \
 	if (in_dbg_master()) \
-- 
1.7.7.1



