[Intel-gfx] [PATCH 03/13] drm/i915: move flushing list processing to i915_gem_flush

Daniel Vetter daniel.vetter at ffwll.ch
Thu Feb 4 22:05:03 CET 2010


Now that we can move objects to the active list without already having
emitted a request, move the flushing list handling into i915_gem_flush.
This makes more sense and allows us to drop a few i915_add_request calls
that are not strictly necessary.

Signed-off-by: Daniel Vetter <daniel.vetter at ffwll.ch>
---
 drivers/gpu/drm/i915/i915_gem.c |   24 ++++++++++++++++--------
 1 files changed, 16 insertions(+), 8 deletions(-)
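
Not part of the patch: a minimal userspace sketch of the reordered
responsibility, using made-up stand-in names (gem_flush, add_request,
process_flushing_list) rather than the real driver entry points. It only
illustrates the idea that the flushing-list walk now happens in the flush
path, keyed to seqno 0 meaning "the next request to be emitted", so callers
can pass 0 as flush_domains to add_request.

#include <stdio.h>

/* Hypothetical stand-in for i915_gem_process_flushing_list(): move objects
 * whose write domain is being flushed onto the active list, tagged with the
 * given seqno (0 = the next request to be emitted). */
static void process_flushing_list(unsigned flush_domains, unsigned seqno)
{
	printf("  process flushing list: domains 0x%x, seqno %u%s\n",
	       flush_domains, seqno, seqno == 0 ? " (next request)" : "");
}

/* After the patch: the flush path owns the flushing-list handling. */
static void gem_flush(unsigned invalidate_domains, unsigned flush_domains)
{
	printf("  emit flush: invalidate 0x%x, flush 0x%x\n",
	       invalidate_domains, flush_domains);
	if (flush_domains != 0)
		process_flushing_list(flush_domains, 0);
}

/* Before the patch this also walked the flushing list whenever
 * flush_domains != 0; callers that already flushed now just pass 0. */
static unsigned add_request(unsigned flush_domains)
{
	static unsigned next_seqno = 1;

	if (flush_domains != 0)
		process_flushing_list(flush_domains, next_seqno);
	return next_seqno++;
}

int main(void)
{
	unsigned domains = 0x3e;	/* illustrative GPU-domain mask */

	printf("after the patch:\n");
	gem_flush(domains, domains);
	printf("  request %u\n", add_request(0));
	return 0;
}
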

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3c0bf2c..b78b0e5 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1674,7 +1674,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 	}
 
 	/* Associate any objects on the flushing list matching the write
-	 * domain we're flushing with our flush.
+	 * domain we're flushing with our request.
 	 */
 	if (flush_domains != 0) 
 		i915_gem_process_flushing_list(dev, flush_domains, seqno);
@@ -1852,6 +1852,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
 	int ret = 0;
 
 	BUG_ON(seqno == 0);
+	BUG_ON(seqno == dev_priv->mm.next_gem_seqno);
 
 	if (atomic_read(&dev_priv->mm.wedged))
 		return -EIO;
@@ -1890,8 +1891,9 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
 		ret = -EIO;
 
 	if (ret && ret != -ERESTARTSYS)
-		DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
-			  __func__, ret, seqno, i915_get_gem_seqno(dev));
+		DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
+			  __func__, ret, seqno, i915_get_gem_seqno(dev),
+			  dev_priv->mm.next_gem_seqno);
 
 	/* Directly dispatch request retiring.  While we have the work queue
 	 * to handle this, the waiter on a request often wants an associated
@@ -1985,6 +1987,13 @@ i915_gem_flush(struct drm_device *dev,
 		OUT_RING(MI_NOOP);
 		ADVANCE_LP_RING();
 	}
+
+	/* Associate any objects on the flushing list matching the write
+	 * domain we're flushing with the next request.
+	 */
+	if (flush_domains != 0)
+		i915_gem_process_flushing_list(dev, flush_domains, 0);
+
 }
 
 /**
@@ -2142,7 +2151,7 @@ i915_gpu_idle(struct drm_device *dev)
 
 	/* Flush everything onto the inactive list. */
 	i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-	seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
+	seqno = i915_add_request(dev, NULL, 0);
 	if (seqno == 0)
 		return -ENOMEM;
 
@@ -2255,7 +2264,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
 				i915_gem_flush(dev,
 					       obj->write_domain,
 					       obj->write_domain);
-				seqno = i915_add_request(dev, NULL, obj->write_domain);
+				seqno = i915_add_request(dev, NULL, 0);
 				if (seqno == 0)
 					return -ENOMEM;
 
@@ -2768,7 +2777,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
 	/* Queue the GPU write cache flushing we need. */
 	old_write_domain = obj->write_domain;
 	i915_gem_flush(dev, 0, obj->write_domain);
-	(void) i915_add_request(dev, NULL, obj->write_domain);
+	(void) i915_add_request(dev, NULL, 0);
 	BUG_ON(obj->write_domain);
 
 	trace_i915_gem_object_change_domain(obj,
@@ -3918,8 +3927,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 			       dev->invalidate_domains,
 			       dev->flush_domains);
 		if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
-			(void)i915_add_request(dev, file_priv,
-					       dev->flush_domains);
+			(void)i915_add_request(dev, file_priv, 0);
 	}
 
 	for (i = 0; i < args->buffer_count; i++) {
-- 
1.6.6.1



