[PATCH 23/26] drm/i915: Allow global modeset work to run async.

Maarten Lankhorst maarten.lankhorst at linux.intel.com
Thu May 12 14:34:06 UTC 2016


When the cdclk changes, every crtc gets a flip_work whose completion is
held back until the cdclk update has finished. This is required to
prevent a crtc from being enabled while the cdclk has not been updated
yet.

Global cdclk updates now run asynchronously from a work item for
nonblocking commits; blocking commits still perform them inline.
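
Roughly, a nonblocking commit defers the global cdclk/modeset step to a
kernel work item embedded in the atomic state, while a blocking commit
calls the same function directly. A minimal sketch of that pattern,
using hypothetical example_* names in place of the driver structures
(INIT_WORK/schedule_work/container_of are the real kernel APIs):

  #include <linux/kernel.h>
  #include <linux/workqueue.h>

  struct example_state {
          struct work_struct global_work;
          /* ... data needed by the deferred commit step ... */
  };

  /* The deferred step: commit the cdclk, then flush the per-crtc works. */
  static void example_commit_global(struct example_state *state, bool async)
  {
          /* ... */
  }

  static void example_global_work_fn(struct work_struct *w)
  {
          struct example_state *state =
                  container_of(w, struct example_state, global_work);

          example_commit_global(state, true);
  }

  static void example_commit(struct example_state *state, bool nonblock)
  {
          if (nonblock) {
                  /* Run the global step asynchronously from a worker. */
                  INIT_WORK(&state->global_work, example_global_work_fn);
                  schedule_work(&state->global_work);
          } else {
                  /* Blocking commit: run it inline. */
                  example_commit_global(state, false);
          }
  }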

Signed-off-by: Maarten Lankhorst <maarten.lankhorst at linux.intel.com>
---
 drivers/gpu/drm/i915/intel_display.c | 227 ++++++++++++++++++++++-------------
 drivers/gpu/drm/i915/intel_drv.h     |   1 +
 2 files changed, 144 insertions(+), 84 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 6b4966714cc3..e6d187ff02a4 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -118,6 +118,7 @@ static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
 static void intel_modeset_verify_crtc(struct drm_crtc *crtc,
 				      struct drm_crtc_state *old_state,
 				      struct drm_crtc_state *new_state);
+static void intel_modeset_verify_disabled(struct drm_device *dev);
 
 typedef struct {
 	int	min, max;
@@ -11132,6 +11133,94 @@ static void intel_mmio_flip_work_func(struct work_struct *w)
 	intel_pipe_update_end(intel_crtc, work);
 }
 
+static void intel_schedule_unpin(struct drm_crtc *crtc,
+				 struct intel_atomic_state *state,
+				 struct intel_flip_work *work);
+static void intel_schedule_flip(struct drm_crtc *crtc,
+				struct intel_atomic_state *state,
+				struct intel_flip_work *work,
+				bool async);
+
+static void intel_atomic_global_ms(struct intel_atomic_state *state, bool async)
+{
+	int i;
+	struct drm_device *dev = state->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	DRM_DEBUG_ATOMIC("Performing global cleanup\n");
+
+	for (i = 0; i < ARRAY_SIZE(state->work); i++) {
+		struct intel_flip_work *work = state->work[i];
+		struct drm_crtc *crtc;
+
+		if (!work)
+			continue;
+
+		crtc = work->new_crtc_state->base.crtc;
+
+		if (needs_modeset(&work->new_crtc_state->base))
+			work->put_power_domains =
+				modeset_get_crtc_power_domains(crtc, work->new_crtc_state);
+	}
+
+	drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
+
+	if (dev_priv->display.modeset_commit_cdclk &&
+	    state->cdclk != dev_priv->cdclk_freq)
+		dev_priv->display.modeset_commit_cdclk(&state->base);
+
+	intel_modeset_verify_disabled(dev);
+
+	for (i = 0; i < ARRAY_SIZE(state->work); i++) {
+		struct intel_flip_work *work = state->work[i];
+		struct drm_crtc *crtc;
+
+		if (!work)
+			continue;
+
+		state->work[i] = NULL;
+		WARN_ON(work->can_async_unpin);
+
+		crtc = work->new_crtc_state->base.crtc;
+
+		DRM_DEBUG_ATOMIC("Forcing update on [CRTC:%i]\n", crtc->base.id);
+
+		if (work->new_crtc_state->base.active)
+			intel_schedule_flip(crtc, state, work, async);
+		else
+			intel_schedule_unpin(crtc, state, work);
+	}
+
+	drm_atomic_state_free(&state->base);
+}
+
+static void intel_atomic_global_ms_work(struct work_struct *w)
+{
+	struct intel_atomic_state *state =
+		container_of(w, struct intel_atomic_state, global_work);
+
+	intel_atomic_global_ms(state, true);
+}
+
+static void intel_update_legacy_crtc_state(struct drm_crtc *crtc,
+					   struct intel_flip_work *work,
+					   struct intel_crtc_state *crtc_state)
+{
+	to_intel_crtc(crtc)->config = crtc_state;
+
+	/* Update hwmode for vblank functions */
+	if (crtc_state->base.active)
+		crtc->hwmode = crtc_state->base.adjusted_mode;
+	else
+		crtc->hwmode.crtc_clock = 0;
+
+	crtc->mode = crtc_state->base.mode;
+	crtc->enabled = crtc_state->base.enable;
+
+	if (crtc_state->base.enable)
+		drm_calc_timestamping_constants(crtc, &crtc_state->base.adjusted_mode);
+}
+
 /**
  * intel_wm_need_update - Check whether watermarks need updating
  * @plane: drm plane
@@ -11834,37 +11923,6 @@ fail:
 	return ret;
 }
 
-static void
-intel_modeset_update_crtc_state(struct drm_atomic_state *state)
-{
-	struct drm_crtc *crtc;
-	struct drm_crtc_state *crtc_state;
-	int i;
-
-	/* Double check state. */
-	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
-
-		/* Update hwmode for vblank functions */
-		if (crtc->state->active)
-			crtc->hwmode = crtc->state->adjusted_mode;
-		else
-			crtc->hwmode.crtc_clock = 0;
-
-		/*
-		 * Update legacy state to satisfy fbc code. This can
-		 * be removed when fbc uses the atomic state.
-		 */
-		if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
-			struct drm_plane_state *plane_state = crtc->primary->state;
-
-			crtc->primary->fb = plane_state->fb;
-			crtc->x = plane_state->src_x >> 16;
-			crtc->y = plane_state->src_y >> 16;
-		}
-	}
-}
-
 static bool intel_fuzzy_clock_check(int clock1, int clock2)
 {
 	int diff;
@@ -12586,6 +12644,10 @@ static int intel_modeset_all_pipes(struct drm_atomic_state *state)
 		if (needs_modeset(crtc_state))
 			continue;
 
+		/*
+		 * Set this even for disabled crtcs, to force the event to
+		 * complete only after the clock change has taken effect.
+		 */
 		crtc_state->mode_changed = true;
 
 		ret = drm_atomic_add_affected_connectors(state, crtc);
@@ -12998,7 +13060,9 @@ static void intel_schedule_update(struct drm_crtc *crtc,
 				  bool nonblock)
 {
 	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc_state *pipe_config = work->new_crtc_state;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
 	if (!pipe_config->base.active && work->can_async_unpin) {
 		INIT_LIST_HEAD(&work->head);
@@ -13007,9 +13071,36 @@ static void intel_schedule_update(struct drm_crtc *crtc,
 	}
 
 	spin_lock_irq(&dev->event_lock);
-	list_add_tail(&work->head, &to_intel_crtc(crtc)->flip_work);
+	list_add_tail(&work->head, &intel_crtc->flip_work);
 	spin_unlock_irq(&dev->event_lock);
 
+	if (needs_modeset(&pipe_config->base)) {
+		WARN_ON(nonblock);
+
+		if (work->old_crtc_state->base.active) {
+			intel_crtc_disable_planes(crtc, work->old_crtc_state->base.plane_mask);
+			dev_priv->display.crtc_disable(crtc);
+			intel_crtc->active = false;
+			intel_fbc_disable(intel_crtc);
+			intel_disable_shared_dpll(intel_crtc);
+
+			/*
+			 * Underruns don't always raise
+			 * interrupts, so check manually.
+			 */
+			intel_check_cpu_fifo_underruns(dev_priv);
+			intel_check_pch_fifo_underruns(dev_priv);
+
+			if (!work->new_crtc_state->base.active)
+				intel_update_watermarks(crtc);
+		}
+
+		intel_update_legacy_crtc_state(crtc, work, pipe_config);
+		return;
+	}
+
+	intel_update_legacy_crtc_state(crtc, work, pipe_config);
+
 	if (!pipe_config->base.active)
 		intel_schedule_unpin(crtc, state, work);
 	else
@@ -13061,57 +13152,7 @@ static int intel_atomic_commit(struct drm_device *dev,
 
 	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-		if (!needs_modeset(crtc->state))
-			continue;
-
-		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
-
-		intel_state->work[i]->put_power_domains =
-			modeset_get_crtc_power_domains(crtc,
-				to_intel_crtc_state(crtc->state));
-
-		if (old_crtc_state->active) {
-			intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
-			dev_priv->display.crtc_disable(crtc);
-			intel_crtc->active = false;
-			intel_fbc_disable(intel_crtc);
-			intel_disable_shared_dpll(intel_crtc);
-
-			/*
-			 * Underruns don't always raise
-			 * interrupts, so check manually.
-			 */
-			intel_check_cpu_fifo_underruns(dev_priv);
-			intel_check_pch_fifo_underruns(dev_priv);
-
-			if (!crtc->state->active)
-				intel_update_watermarks(crtc);
-		}
-	}
-
-	/* Only after disabling all output pipelines that will be changed can we
-	 * update the the output configuration. */
-	intel_modeset_update_crtc_state(state);
-
-	if (intel_state->modeset) {
-		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
-
-		if (dev_priv->display.modeset_commit_cdclk &&
-		    intel_state->dev_cdclk != dev_priv->cdclk_freq)
-			dev_priv->display.modeset_commit_cdclk(state);
-
-		intel_modeset_verify_disabled(dev);
-	}
-
-	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
-	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
 		struct intel_flip_work *work = intel_state->work[i];
-		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-		bool modeset = needs_modeset(crtc->state);
-
-		if (!modeset)
-			intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
 
 		if (!work) {
 			if (!list_empty_careful(&intel_crtc->flip_work)) {
@@ -13127,17 +13168,35 @@ static int intel_atomic_commit(struct drm_device *dev,
 				}
 				spin_unlock_irq(&dev->event_lock);
 			}
+			intel_crtc->config = to_intel_crtc_state(crtc->state);
 			continue;
 		}
 
-		intel_state->work[i] = NULL;
+		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
+
+		if (!needs_modeset(crtc->state))
+			intel_state->work[i] = NULL;
+
 		intel_prepare_work(crtc, work, state, old_crtc_state);
 		intel_schedule_update(crtc, intel_state, work, nonblock);
 	}
 
 	/* FIXME: add subpixel order */
+	if (intel_state->modeset) {
+		if (nonblock) {
+			INIT_WORK(&intel_state->global_work, intel_atomic_global_ms_work);
+			schedule_work(&intel_state->global_work);
+		} else
+			intel_atomic_global_ms(intel_state, false);
+	} else {
+		for (i = 0; i < ARRAY_SIZE(intel_state->work); i++) {
+			WARN_ON(intel_state->work[i]);
+		}
 
-	drm_atomic_state_free(state);
+		memset(intel_state->work, 0, sizeof(intel_state->work));
+
+		drm_atomic_state_free(state);
+	}
 
 	/* As one of the primary mmio accessors, KMS has a high likelihood
 	 * of triggering bugs in unclaimed access. After we finish
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index dc96f511cf6c..b351fced2d1a 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -295,6 +295,7 @@ struct intel_atomic_state {
 	unsigned int min_pixclk[I915_MAX_PIPES];
 
 	struct intel_flip_work *work[I915_MAX_PIPES];
+	struct work_struct global_work;
 
 	struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
 	struct intel_wm_config wm_config;
-- 
2.5.5


