[Intel-gfx] [PATCH 03/15] drm/i915: move flushing list processing to i915_gem_flush

Daniel Vetter <daniel.vetter@ffwll.ch>
Thu Mar 11 16:58:48 CET 2010


Now that we can move objects to the active list without already having
emitted a request, move the flushing list handling into i915_gem_flush.
This makes more sense and allows us to drop a few i915_add_request calls
that are not strictly necessary.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
---
 drivers/gpu/drm/i915/i915_gem.c |   23 +++++++++++++++--------
 1 files changed, 15 insertions(+), 8 deletions(-)
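
A self-contained sketch may help make the new ordering concrete. The snippet
below is illustrative user-space C only, not driver code: every fake_*
identifier and FAKE_GPU_DOMAINS is a made-up stand-in, and it assumes (per the
commit message) that objects flushed without a request outstanding are tagged
with seqno 0, meaning "the next request", which is presumably filled in once a
request is actually emitted.

/*
 * Illustrative sketch only (not i915 code): all fake_* identifiers and
 * FAKE_GPU_DOMAINS are invented for this note.  The idea being modelled:
 * i915_gem_flush() processes the flushing list immediately, tagging dirty
 * objects with seqno 0 ("next request"), and a later request emission
 * hands out the real seqno.
 */
#include <stdint.h>
#include <stdio.h>

struct fake_obj {
	uint32_t write_domain;	/* pending GPU write domains */
	uint32_t seqno;		/* 0 = waiting for the next request */
	int active;
};

#define FAKE_GPU_DOMAINS 0x3	/* stand-in for I915_GEM_GPU_DOMAINS */

static struct fake_obj objs[2] = {
	{ .write_domain = 0x1 },	/* dirty in a GPU write domain */
	{ .write_domain = 0x0 },	/* clean */
};

static uint32_t next_seqno = 1;

/* Rough analogue of i915_gem_process_flushing_list(): every object whose
 * write domain is covered by the flush becomes active, tagged with seqno
 * (0 meaning "whatever request comes next"). */
static void fake_process_flushing_list(uint32_t flush_domains, uint32_t seqno)
{
	for (int i = 0; i < 2; i++) {
		if (objs[i].write_domain & flush_domains) {
			objs[i].write_domain = 0;
			objs[i].seqno = seqno;
			objs[i].active = 1;
		}
	}
}

/* Rough analogue of i915_gem_flush() after this patch: the flushing list is
 * processed right here, without requiring a request to have been emitted. */
static void fake_flush(uint32_t invalidate_domains, uint32_t flush_domains)
{
	(void)invalidate_domains;	/* not needed for this sketch */
	/* ... the real function emits flush commands to the ring here ... */
	if (flush_domains)
		fake_process_flushing_list(flush_domains, 0);
}

/* Rough analogue of emitting a request: hand out a real seqno and attach
 * every object still waiting on "the next request" (seqno == 0) to it. */
static uint32_t fake_add_request(void)
{
	uint32_t seqno = next_seqno++;

	for (int i = 0; i < 2; i++)
		if (objs[i].active && objs[i].seqno == 0)
			objs[i].seqno = seqno;
	return seqno;
}

int main(void)
{
	fake_flush(FAKE_GPU_DOMAINS, FAKE_GPU_DOMAINS);
	uint32_t seqno = fake_add_request();

	printf("request %u: obj0 seqno=%u, obj1 active=%d\n",
	       (unsigned)seqno, (unsigned)objs[0].seqno, objs[1].active);
	return 0;
}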

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4bacee6..4c84b87 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1664,7 +1664,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 	}
 
 	/* Associate any objects on the flushing list matching the write
-	 * domain we're flushing with our flush.
+	 * domain we're flushing with our request.
 	 */
 	if (flush_domains != 0) 
 		i915_gem_process_flushing_list(dev, flush_domains, seqno);
@@ -1886,8 +1886,9 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
 		ret = -EIO;
 
 	if (ret && ret != -ERESTARTSYS)
-		DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
-			  __func__, ret, seqno, i915_get_gem_seqno(dev));
+		DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
+			  __func__, ret, seqno, i915_get_gem_seqno(dev),
+			  dev_priv->mm.next_gem_seqno);
 
 	/* Directly dispatch request retiring.  While we have the work queue
 	 * to handle this, the waiter on a request often wants an associated
@@ -1981,6 +1982,13 @@ i915_gem_flush(struct drm_device *dev,
 		OUT_RING(MI_NOOP);
 		ADVANCE_LP_RING();
 	}
+
+	/* Associate any objects on the flushing list matching the write
+	 * domain we're flushing with the next request.
+	 */
+	if (flush_domains != 0) 
+		i915_gem_process_flushing_list(dev, flush_domains, 0);
+
 }
 
 /**
@@ -2135,7 +2143,7 @@ i915_gpu_idle(struct drm_device *dev)
 
 	/* Flush everything onto the inactive list. */
 	i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-	seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
+	seqno = i915_add_request(dev, NULL, 0);
 	if (seqno == 0)
 		return -ENOMEM;
 
@@ -2248,7 +2256,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
 				i915_gem_flush(dev,
 					       obj->write_domain,
 					       obj->write_domain);
-				seqno = i915_add_request(dev, NULL, obj->write_domain);
+				seqno = i915_add_request(dev, NULL, 0);
 				if (seqno == 0)
 					return -ENOMEM;
 
@@ -2784,7 +2792,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
 	/* Queue the GPU write cache flushing we need. */
 	old_write_domain = obj->write_domain;
 	i915_gem_flush(dev, 0, obj->write_domain);
-	(void) i915_add_request(dev, NULL, obj->write_domain);
+	(void) i915_add_request(dev, NULL, 0);
 	BUG_ON(obj->write_domain);
 
 	trace_i915_gem_object_change_domain(obj,
@@ -3939,8 +3947,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 			       dev->invalidate_domains,
 			       dev->flush_domains);
 		if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
-			(void)i915_add_request(dev, file_priv,
-					       dev->flush_domains);
+			(void)i915_add_request(dev, file_priv, 0);
 	}
 
 	for (i = 0; i < args->buffer_count; i++) {
-- 
1.6.6.1