[Intel-gfx] [PATCH v2 3b/3] drm/i915: Kill i915_gem_execbuffer_wait_for_flips()

ville.syrjala at linux.intel.com
Thu Nov 1 19:06:03 CET 2012


From: Ville Syrjälä <ville.syrjala at linux.intel.com>

As per Chris Wilson's suggestion make
i915_gem_execbuffer_wait_for_flips() go away.

This was used to stall the GPU ring while there were pending
page flips involving the relevant BO, i.e. while the BO was still
being scanned out by the display controller.

The recommended alternative is to use page flip events to wait
for the flips to fully complete before reusing the BO of the old
front buffer. Or use more buffers.
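
For reference, a rough userspace sketch of the event-based approach
(assuming the libdrm drmModePageFlip()/drmHandleEvent() interface;
the fd setup, CRTC/FB ids and handler name are illustrative only):

#include <errno.h>
#include <poll.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int flip_pending;

static void page_flip_handler(int fd, unsigned int frame,
			      unsigned int sec, unsigned int usec,
			      void *data)
{
	/* Old front buffer is no longer being scanned out; safe to reuse. */
	flip_pending = 0;
}

/* Queue a flip to new_fb_id and block until the completion event
 * arrives before the old front buffer BO is touched again. */
static int flip_and_wait(int fd, uint32_t crtc_id, uint32_t new_fb_id)
{
	drmEventContext evctx = {
		.version = DRM_EVENT_CONTEXT_VERSION,
		.page_flip_handler = page_flip_handler,
	};
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	int ret;

	ret = drmModePageFlip(fd, crtc_id, new_fb_id,
			      DRM_MODE_PAGE_FLIP_EVENT, NULL);
	if (ret)
		return ret;

	flip_pending = 1;
	while (flip_pending) {
		if (poll(&pfd, 1, -1) < 0)
			return -errno;
		drmHandleEvent(fd, &evctx);
	}

	return 0;
}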

Signed-off-by: Ville Syrjälä <ville.syrjala at linux.intel.com>
---
v2: Fixed the wait_event() logic mixup of v1

 drivers/gpu/drm/i915/i915_drv.h            |    7 ----
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |   42 ----------------------------
 drivers/gpu/drm/i915/intel_display.c       |   13 --------
 3 files changed, 0 insertions(+), 62 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a2c5e89..facfd2e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1055,13 +1055,6 @@ struct drm_i915_gem_object {
 
 	/** for phy allocated objects */
 	struct drm_i915_gem_phys_object *phys_obj;
-
-	/**
-	 * Number of crtcs where this object is currently the fb, but
-	 * will be page flipped away on the next vblank.  When it
-	 * reaches 0, dev_priv->pending_flip_queue will be woken up.
-	 */
-	atomic_t pending_flip;
 };
 
 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 91d43d5..2e527be 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -611,44 +611,11 @@ err:
 }
 
 static int
-i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
-{
-	u32 plane, flip_mask;
-	int ret;
-
-	/* Check for any pending flips. As we only maintain a flip queue depth
-	 * of 1, we can simply insert a WAIT for the next display flip prior
-	 * to executing the batch and avoid stalling the CPU.
-	 */
-
-	for (plane = 0; flips >> plane; plane++) {
-		if (((flips >> plane) & 1) == 0)
-			continue;
-
-		if (plane)
-			flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
-		else
-			flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-
-		ret = intel_ring_begin(ring, 2);
-		if (ret)
-			return ret;
-
-		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_advance(ring);
-	}
-
-	return 0;
-}
-
-static int
 i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 				struct list_head *objects)
 {
 	struct drm_i915_gem_object *obj;
 	uint32_t flush_domains = 0;
-	uint32_t flips = 0;
 	int ret;
 
 	list_for_each_entry(obj, objects, exec_list) {
@@ -659,18 +626,9 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
 			i915_gem_clflush_object(obj);
 
-		if (obj->base.pending_write_domain)
-			flips |= atomic_read(&obj->pending_flip);
-
 		flush_domains |= obj->base.write_domain;
 	}
 
-	if (flips) {
-		ret = i915_gem_execbuffer_wait_for_flips(ring, flips);
-		if (ret)
-			return ret;
-	}
-
 	if (flush_domains & I915_GEM_DOMAIN_CPU)
 		intel_gtt_chipset_flush();
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 7bf4749..68bea06 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2182,10 +2182,6 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
 	bool was_interruptible = dev_priv->mm.interruptible;
 	int ret;
 
-	wait_event(dev_priv->pending_flip_queue,
-		   atomic_read(&dev_priv->mm.wedged) ||
-		   atomic_read(&obj->pending_flip) == 0);
-
 	/* Big Hammer, we also need to ensure that any pending
 	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
 	 * current scanout is retired before unpinning the old
@@ -6897,9 +6893,6 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
 
 	obj = work->old_fb_obj;
 
-	atomic_clear_mask(1 << intel_crtc->plane,
-			  &obj->pending_flip.counter);
-
 	wake_up(&dev_priv->pending_flip_queue);
 	schedule_work(&work->work);
 
@@ -7241,11 +7234,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 	work->enable_stall_check = true;
 
-	/* Block clients from rendering to the new back buffer until
-	 * the flip occurs and the object is no longer visible.
-	 */
-	atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
-
 	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
 	if (ret)
 		goto cleanup_pending;
@@ -7259,7 +7247,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	return 0;
 
 cleanup_pending:
-	atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
 	drm_gem_object_unreference(&work->old_fb_obj->base);
 	drm_gem_object_unreference(&obj->base);
 	mutex_unlock(&dev->struct_mutex);
-- 
1.7.8.6
