[Intel-gfx] [06/10] drm/i915: Replace pcu_lock with sb_lock

Hans de Goede hdegoede at redhat.com
Thu Mar 8 08:06:53 UTC 2018


Hi Chris,

Thank you for your work on this. I have two small remarks inline.

On 07-03-18 20:41, Chris Wilson wrote:
> We now have two locks for sideband access: the general one covering
> sideband access across all generations, sb_lock, and a specific one
> covering sideband access via the punit on vlv/chv. After lifting the
> sb_lock around the punit into the callers, the pcu_lock is now redundant
> and can be separated from its other use to regulate RPS (essentially
> giving RPS a lock all of its own).
> 
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/i915_debugfs.c     |  55 +++++---------
>   drivers/gpu/drm/i915/i915_drv.h         |  10 +--
>   drivers/gpu/drm/i915/i915_irq.c         |   4 +-
>   drivers/gpu/drm/i915/i915_sysfs.c       |  40 +++++-----
>   drivers/gpu/drm/i915/intel_cdclk.c      |  28 -------
>   drivers/gpu/drm/i915/intel_display.c    |   6 --
>   drivers/gpu/drm/i915/intel_hdcp.c       |   2 -
>   drivers/gpu/drm/i915/intel_pm.c         | 127 +++++++++++++++-----------------
>   drivers/gpu/drm/i915/intel_runtime_pm.c |   8 --
>   drivers/gpu/drm/i915/intel_sideband.c   |   4 -
>   10 files changed, 104 insertions(+), 180 deletions(-)
> 
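
Just to check that I am reading the end result correctly (a rough sketch
pieced together from the hunks below, not from the actual tree): punit
access on vlv/chv is now bracketed by the callers themselves, e.g.

    vlv_punit_get(dev_priv);
    val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
    ...
    vlv_punit_put(dev_priv);

the pcode mailbox helpers take sb_lock internally, e.g.

    ret = sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
                                  cdclk_state->voltage_level);

and the RPS state is protected by the new rps->lock, which is what makes
pcu_lock redundant.
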
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index 20a6c00a7365..82b8bfe2940c 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -1074,8 +1074,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
>   	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
>   		u32 rpmodectl, freq_sts;
>   
> -		mutex_lock(&dev_priv->pcu_lock);
> -
>   		rpmodectl = I915_READ(GEN6_RP_CONTROL);
>   		seq_printf(m, "Video Turbo Mode: %s\n",
>   			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
> @@ -1110,7 +1108,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
>   		seq_printf(m,
>   			   "efficient (RPe) frequency: %d MHz\n",
>   			   intel_gpu_freq(dev_priv, rps->efficient_freq));
> -		mutex_unlock(&dev_priv->pcu_lock);
>   	} else if (INTEL_GEN(dev_priv) >= 6) {
>   		u32 rp_state_limits;
>   		u32 gt_perf_status;
> @@ -1525,12 +1522,9 @@ static int gen6_drpc_info(struct seq_file *m)
>   		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
>   	}
>   
> -	if (INTEL_GEN(dev_priv) <= 7) {
> -		mutex_lock(&dev_priv->pcu_lock);
> +	if (INTEL_GEN(dev_priv) <= 7)
>   		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
>   				       &rc6vids);
> -		mutex_unlock(&dev_priv->pcu_lock);
> -	}
>   
>   	seq_printf(m, "RC1e Enabled: %s\n",
>   		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
> @@ -1799,30 +1793,24 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
>   {
>   	struct drm_i915_private *dev_priv = node_to_i915(m->private);
>   	struct intel_rps *rps = &dev_priv->gt_pm.rps;
> -	int ret = 0;
>   	int gpu_freq, ia_freq;
>   	unsigned int max_gpu_freq, min_gpu_freq;
>   
>   	if (!HAS_LLC(dev_priv))
>   		return -ENODEV;
>   
> -	intel_runtime_pm_get(dev_priv);
> -
> -	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
> -	if (ret)
> -		goto out;
> +	min_gpu_freq = rps->min_freq;
> +	max_gpu_freq = rps->max_freq;
>   
>   	if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
>   		/* Convert GT frequency to 50 HZ units */
> -		min_gpu_freq = rps->min_freq_softlimit / GEN9_FREQ_SCALER;
> -		max_gpu_freq = rps->max_freq_softlimit / GEN9_FREQ_SCALER;
> -	} else {
> -		min_gpu_freq = rps->min_freq_softlimit;
> -		max_gpu_freq = rps->max_freq_softlimit;
> +		min_gpu_freq /= GEN9_FREQ_SCALER;
> +		max_gpu_freq /= GEN9_FREQ_SCALER;
>   	}
>   

You are replacing rps->min_freq_softlimit with rps->min_freq here (and the same for max).
If this is intentional, I think it should be split out into a separate patch.
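
To be explicit about the functional difference (just restating the hunk
above, names as in the patch):

    /* before: the table is bounded by the user-adjustable softlimits */
    min_gpu_freq = rps->min_freq_softlimit;
    max_gpu_freq = rps->max_freq_softlimit;

    /* with this patch: bounded by the hardware limits instead, so
     * lowering the softlimits via sysfs no longer narrows the table
     */
    min_gpu_freq = rps->min_freq;
    max_gpu_freq = rps->max_freq;

That behavior change deserves its own commit message.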

>   	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
>   
> +	intel_runtime_pm_get(dev_priv);
>   	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
>   		ia_freq = gpu_freq;
>   		sandybridge_pcode_read(dev_priv,
> @@ -1836,12 +1824,9 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
>   			   ((ia_freq >> 0) & 0xff) * 100,
>   			   ((ia_freq >> 8) & 0xff) * 100);
>   	}
> -
> -	mutex_unlock(&dev_priv->pcu_lock);
> -
> -out:
>   	intel_runtime_pm_put(dev_priv);
> -	return ret;
> +
> +	return 0;
>   }
>   
>   static int i915_opregion(struct seq_file *m, void *unused)
> @@ -4169,7 +4154,7 @@ i915_max_freq_set(void *data, u64 val)
>   
>   	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
>   
> -	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
> +	ret = mutex_lock_interruptible(&rps->lock);
>   	if (ret)
>   		return ret;
>   
> @@ -4182,8 +4167,8 @@ i915_max_freq_set(void *data, u64 val)
>   	hw_min = rps->min_freq;
>   
>   	if (val < hw_min || val > hw_max || val < rps->min_freq_softlimit) {
> -		mutex_unlock(&dev_priv->pcu_lock);
> -		return -EINVAL;
> +		ret = -EINVAL;
> +		goto unlock;
>   	}
>   
>   	rps->max_freq_softlimit = val;
> @@ -4191,9 +4176,9 @@ i915_max_freq_set(void *data, u64 val)
>   	if (intel_set_rps(dev_priv, val))
>   		DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");
>   
> -	mutex_unlock(&dev_priv->pcu_lock);
> -
> -	return 0;
> +unlock:
> +	mutex_unlock(&rps->lock);
> +	return ret;
>   }
>   
>   DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
> @@ -4225,7 +4210,7 @@ i915_min_freq_set(void *data, u64 val)
>   
>   	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
>   
> -	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
> +	ret = mutex_lock_interruptible(&rps->lock);
>   	if (ret)
>   		return ret;
>   
> @@ -4239,8 +4224,8 @@ i915_min_freq_set(void *data, u64 val)
>   
>   	if (val < hw_min ||
>   	    val > hw_max || val > rps->max_freq_softlimit) {
> -		mutex_unlock(&dev_priv->pcu_lock);
> -		return -EINVAL;
> +		ret = -EINVAL;
> +		goto unlock;
>   	}
>   
>   	rps->min_freq_softlimit = val;
> @@ -4248,9 +4233,9 @@ i915_min_freq_set(void *data, u64 val)
>   	if (intel_set_rps(dev_priv, val))
>   		DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");
>   
> -	mutex_unlock(&dev_priv->pcu_lock);
> -
> -	return 0;
> +unlock:
> +	mutex_unlock(&rps->lock);
> +	return ret;
>   }
>   
>   DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index afc89a8f43e8..61df8c338e20 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -900,6 +900,8 @@ struct intel_rps_ei {
>   };
>   
>   struct intel_rps {
> +	struct mutex lock;
> +
>   	/*
>   	 * work, interrupts_enabled and pm_iir are protected by
>   	 * dev_priv->irq_lock
> @@ -2054,14 +2056,6 @@ struct drm_i915_private {
>   	/* Cannot be determined by PCIID. You must always read a register. */
>   	u32 edram_cap;
>   
> -	/*
> -	 * Protects RPS/RC6 register access and PCU communication.
> -	 * Must be taken after struct_mutex if nested. Note that
> -	 * this lock may be held for long periods of time when
> -	 * talking to hw - so only take it when talking to hw!
> -	 */
> -	struct mutex pcu_lock;
> -
>   	/* gen6+ GT PM state */
>   	struct intel_gen6_power_mgmt gt_pm;
>   
> diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
> index 633c18785c1e..c291a8edfcb9 100644
> --- a/drivers/gpu/drm/i915/i915_irq.c
> +++ b/drivers/gpu/drm/i915/i915_irq.c
> @@ -1204,7 +1204,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
>   	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
>   		goto out;
>   
> -	mutex_lock(&dev_priv->pcu_lock);
> +	mutex_lock(&rps->lock);
>   
>   	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
>   
> @@ -1258,7 +1258,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
>   		rps->last_adj = 0;
>   	}
>   
> -	mutex_unlock(&dev_priv->pcu_lock);
> +	mutex_unlock(&rps->lock);
>   
>   out:
>   	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
> diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
> index 17b20a34e99f..035b2fc72bd1 100644
> --- a/drivers/gpu/drm/i915/i915_sysfs.c
> +++ b/drivers/gpu/drm/i915/i915_sysfs.c
> @@ -262,7 +262,6 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
>   
>   	intel_runtime_pm_get(dev_priv);
>   
> -	mutex_lock(&dev_priv->pcu_lock);
>   	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
>   		vlv_punit_get(dev_priv);
>   		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
> @@ -272,7 +271,6 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
>   	} else {
>   		freq = intel_get_cagf(dev_priv, I915_READ(GEN6_RPSTAT1));
>   	}
> -	mutex_unlock(&dev_priv->pcu_lock);
>   
>   	intel_runtime_pm_put(dev_priv);
>   
> @@ -304,6 +302,7 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
>   {
>   	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
>   	struct intel_rps *rps = &dev_priv->gt_pm.rps;
> +	bool boost = false;
>   	u32 val;
>   	ssize_t ret;
>   
> @@ -316,9 +315,14 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
>   	if (val < rps->min_freq || val > rps->max_freq)
>   		return -EINVAL;
>   
> -	mutex_lock(&dev_priv->pcu_lock);
> -	rps->boost_freq = val;
> -	mutex_unlock(&dev_priv->pcu_lock);
> +	mutex_lock(&rps->lock);
> +	if (val != rps->boost_freq) {
> +		rps->boost_freq = val;
> +		boost = atomic_read(&rps->num_waiters);
> +	}
> +	mutex_unlock(&rps->lock);
> +	if (boost)
> +		schedule_work(&rps->work);

This schedule_work() call is new here; please put it in a separate patch
with a commit message explaining why it is added.
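
If the goal of this patch is purely the lock conversion, the minimal
version of this hunk would be just (untested sketch):

    mutex_lock(&rps->lock);
    rps->boost_freq = val;
    mutex_unlock(&rps->lock);

and the num_waiters check plus the schedule_work(&rps->work) kick can then
come as a follow-up with its own rationale.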

>   	return count;
>   }
> @@ -356,17 +360,14 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
>   		return ret;
>   
>   	intel_runtime_pm_get(dev_priv);
> -
> -	mutex_lock(&dev_priv->pcu_lock);
> +	mutex_lock(&rps->lock);
>   
>   	val = intel_freq_opcode(dev_priv, val);
> -
>   	if (val < rps->min_freq ||
>   	    val > rps->max_freq ||
>   	    val < rps->min_freq_softlimit) {
> -		mutex_unlock(&dev_priv->pcu_lock);
> -		intel_runtime_pm_put(dev_priv);
> -		return -EINVAL;
> +		ret = -EINVAL;
> +		goto unlock;
>   	}
>   
>   	if (val > rps->rp0_freq)
> @@ -384,8 +385,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
>   	 * frequency request may be unchanged. */
>   	ret = intel_set_rps(dev_priv, val);
>   
> -	mutex_unlock(&dev_priv->pcu_lock);
> -
> +unlock:
> +	mutex_unlock(&rps->lock);
>   	intel_runtime_pm_put(dev_priv);
>   
>   	return ret ?: count;
> @@ -414,17 +415,14 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
>   		return ret;
>   
>   	intel_runtime_pm_get(dev_priv);
> -
> -	mutex_lock(&dev_priv->pcu_lock);
> +	mutex_lock(&rps->lock);
>   
>   	val = intel_freq_opcode(dev_priv, val);
> -
>   	if (val < rps->min_freq ||
>   	    val > rps->max_freq ||
>   	    val > rps->max_freq_softlimit) {
> -		mutex_unlock(&dev_priv->pcu_lock);
> -		intel_runtime_pm_put(dev_priv);
> -		return -EINVAL;
> +		ret = -EINVAL;
> +		goto unlock;
>   	}
>   
>   	rps->min_freq_softlimit = val;
> @@ -438,8 +436,8 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
>   	 * frequency request may be unchanged. */
>   	ret = intel_set_rps(dev_priv, val);
>   
> -	mutex_unlock(&dev_priv->pcu_lock);
> -
> +unlock:
> +	mutex_unlock(&rps->lock);
>   	intel_runtime_pm_put(dev_priv);
>   
>   	return ret ?: count;
> diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
> index e53e8c36a591..b77589d9fb2c 100644
> --- a/drivers/gpu/drm/i915/intel_cdclk.c
> +++ b/drivers/gpu/drm/i915/intel_cdclk.c
> @@ -461,7 +461,6 @@ static void vlv_get_cdclk(struct drm_i915_private *dev_priv,
>   {
>   	u32 val;
>   
> -	mutex_lock(&dev_priv->pcu_lock);
>   	vlv_iosf_sb_get(dev_priv,
>   			BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
>   
> @@ -474,7 +473,6 @@ static void vlv_get_cdclk(struct drm_i915_private *dev_priv,
>   
>   	vlv_iosf_sb_put(dev_priv,
>   			BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
> -	mutex_unlock(&dev_priv->pcu_lock);
>   
>   	if (IS_VALLEYVIEW(dev_priv))
>   		cdclk_state->voltage_level = (val & DSPFREQGUAR_MASK) >>
> @@ -551,7 +549,6 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
>   			BIT(VLV_IOSF_SB_BUNIT) |
>   			BIT(VLV_IOSF_SB_PUNIT));
>   
> -	mutex_lock(&dev_priv->pcu_lock);
>   	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
>   	val &= ~DSPFREQGUAR_MASK;
>   	val |= (cmd << DSPFREQGUAR_SHIFT);
> @@ -561,7 +558,6 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
>   		     50)) {
>   		DRM_ERROR("timed out waiting for CDclk change\n");
>   	}
> -	mutex_unlock(&dev_priv->pcu_lock);
>   
>   	if (cdclk == 400000) {
>   		u32 divider;
> @@ -632,7 +628,6 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
>   	 */
>   	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
>   
> -	mutex_lock(&dev_priv->pcu_lock);
>   	vlv_punit_get(dev_priv);
>   
>   	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
> @@ -646,7 +641,6 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
>   	}
>   
>   	vlv_punit_put(dev_priv);
> -	mutex_unlock(&dev_priv->pcu_lock);
>   
>   	intel_update_cdclk(dev_priv);
>   
> @@ -724,10 +718,8 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
>   		 "trying to change cdclk frequency with cdclk not enabled\n"))
>   		return;
>   
> -	mutex_lock(&dev_priv->pcu_lock);
>   	ret = sandybridge_pcode_write(dev_priv,
>   				      BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
> -	mutex_unlock(&dev_priv->pcu_lock);
>   	if (ret) {
>   		DRM_ERROR("failed to inform pcode about cdclk change\n");
>   		return;
> @@ -776,10 +768,8 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
>   			LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
>   		DRM_ERROR("Switching back to LCPLL failed\n");
>   
> -	mutex_lock(&dev_priv->pcu_lock);
>   	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
>   				cdclk_state->voltage_level);
> -	mutex_unlock(&dev_priv->pcu_lock);
>   
>   	I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
>   
> @@ -1007,12 +997,10 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
>   	u32 freq_select, cdclk_ctl;
>   	int ret;
>   
> -	mutex_lock(&dev_priv->pcu_lock);
>   	ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
>   				SKL_CDCLK_PREPARE_FOR_CHANGE,
>   				SKL_CDCLK_READY_FOR_CHANGE,
>   				SKL_CDCLK_READY_FOR_CHANGE, 3);
> -	mutex_unlock(&dev_priv->pcu_lock);
>   	if (ret) {
>   		DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
>   			  ret);
> @@ -1076,10 +1064,8 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
>   	POSTING_READ(CDCLK_CTL);
>   
>   	/* inform PCU of the change */
> -	mutex_lock(&dev_priv->pcu_lock);
>   	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
>   				cdclk_state->voltage_level);
> -	mutex_unlock(&dev_priv->pcu_lock);
>   
>   	intel_update_cdclk(dev_priv);
>   }
> @@ -1391,12 +1377,9 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
>   	 * requires us to wait up to 150usec, but that leads to timeouts;
>   	 * the 2ms used here is based on experiment.
>   	 */
> -	mutex_lock(&dev_priv->pcu_lock);
>   	ret = sandybridge_pcode_write_timeout(dev_priv,
>   					      HSW_PCODE_DE_WRITE_FREQ_REQ,
>   					      0x80000000, 150, 2);
> -	mutex_unlock(&dev_priv->pcu_lock);
> -
>   	if (ret) {
>   		DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
>   			  ret, cdclk);
> @@ -1424,7 +1407,6 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
>   		val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
>   	I915_WRITE(CDCLK_CTL, val);
>   
> -	mutex_lock(&dev_priv->pcu_lock);
>   	/*
>   	 * The timeout isn't specified, the 2ms used here is based on
>   	 * experiment.
> @@ -1434,8 +1416,6 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
>   	ret = sandybridge_pcode_write_timeout(dev_priv,
>   					      HSW_PCODE_DE_WRITE_FREQ_REQ,
>   					      cdclk_state->voltage_level, 150, 2);
> -	mutex_unlock(&dev_priv->pcu_lock);
> -
>   	if (ret) {
>   		DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
>   			  ret, cdclk);
> @@ -1673,12 +1653,10 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
>   	u32 val, divider;
>   	int ret;
>   
> -	mutex_lock(&dev_priv->pcu_lock);
>   	ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
>   				SKL_CDCLK_PREPARE_FOR_CHANGE,
>   				SKL_CDCLK_READY_FOR_CHANGE,
>   				SKL_CDCLK_READY_FOR_CHANGE, 3);
> -	mutex_unlock(&dev_priv->pcu_lock);
>   	if (ret) {
>   		DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
>   			  ret);
> @@ -1715,10 +1693,8 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
>   	I915_WRITE(CDCLK_CTL, val);
>   
>   	/* inform PCU of the change */
> -	mutex_lock(&dev_priv->pcu_lock);
>   	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
>   				cdclk_state->voltage_level);
> -	mutex_unlock(&dev_priv->pcu_lock);
>   
>   	intel_update_cdclk(dev_priv);
>   
> @@ -1854,12 +1830,10 @@ static void icl_set_cdclk(struct drm_i915_private *dev_priv,
>   	unsigned int vco = cdclk_state->vco;
>   	int ret;
>   
> -	mutex_lock(&dev_priv->pcu_lock);
>   	ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
>   				SKL_CDCLK_PREPARE_FOR_CHANGE,
>   				SKL_CDCLK_READY_FOR_CHANGE,
>   				SKL_CDCLK_READY_FOR_CHANGE, 3);
> -	mutex_unlock(&dev_priv->pcu_lock);
>   	if (ret) {
>   		DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
>   			  ret);
> @@ -1876,10 +1850,8 @@ static void icl_set_cdclk(struct drm_i915_private *dev_priv,
>   	I915_WRITE(CDCLK_CTL, ICL_CDCLK_CD2X_PIPE_NONE |
>   			      skl_cdclk_decimal(cdclk));
>   
> -	mutex_lock(&dev_priv->pcu_lock);
>   	/* TODO: add proper DVFS support. */
>   	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, 2);
> -	mutex_unlock(&dev_priv->pcu_lock);
>   
>   	intel_update_cdclk(dev_priv);
>   }
> diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
> index b3cbea18283f..c27bbb517f55 100644
> --- a/drivers/gpu/drm/i915/intel_display.c
> +++ b/drivers/gpu/drm/i915/intel_display.c
> @@ -4934,10 +4934,8 @@ void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
>   	WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
>   
>   	if (IS_BROADWELL(dev_priv)) {
> -		mutex_lock(&dev_priv->pcu_lock);
>   		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
>   						IPS_ENABLE | IPS_PCODE_CONTROL));
> -		mutex_unlock(&dev_priv->pcu_lock);
>   		/* Quoting Art Runyan: "its not safe to expect any particular
>   		 * value in IPS_CTL bit 31 after enabling IPS through the
>   		 * mailbox." Moreover, the mailbox may return a bogus state,
> @@ -4967,9 +4965,7 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
>   		return;
>   
>   	if (IS_BROADWELL(dev_priv)) {
> -		mutex_lock(&dev_priv->pcu_lock);
>   		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
> -		mutex_unlock(&dev_priv->pcu_lock);
>   		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
>   		if (intel_wait_for_register(dev_priv,
>   					    IPS_CTL, IPS_ENABLE, 0,
> @@ -8841,11 +8837,9 @@ static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
>   static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
>   {
>   	if (IS_HASWELL(dev_priv)) {
> -		mutex_lock(&dev_priv->pcu_lock);
>   		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
>   					    val))
>   			DRM_DEBUG_KMS("Failed to write to D_COMP\n");
> -		mutex_unlock(&dev_priv->pcu_lock);
>   	} else {
>   		I915_WRITE(D_COMP_BDW, val);
>   		POSTING_READ(D_COMP_BDW);
> diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c
> index 14ca5d3057a7..81259a4fbdfd 100644
> --- a/drivers/gpu/drm/i915/intel_hdcp.c
> +++ b/drivers/gpu/drm/i915/intel_hdcp.c
> @@ -68,10 +68,8 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
>   	 * differ in the key load trigger process from other platforms.
>   	 */
>   	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
> -		mutex_lock(&dev_priv->pcu_lock);
>   		ret = sandybridge_pcode_write(dev_priv,
>   					      SKL_PCODE_LOAD_HDCP_KEYS, 1);
> -		mutex_unlock(&dev_priv->pcu_lock);
>   		if (ret) {
>   			DRM_ERROR("Failed to initiate HDCP key load (%d)\n",
>   			          ret);
> diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
> index 1d52bbf1860a..376c7d4ff972 100644
> --- a/drivers/gpu/drm/i915/intel_pm.c
> +++ b/drivers/gpu/drm/i915/intel_pm.c
> @@ -310,7 +310,6 @@ static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
>   {
>   	u32 val;
>   
> -	mutex_lock(&dev_priv->pcu_lock);
>   	vlv_punit_get(dev_priv);
>   
>   	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
> @@ -327,14 +326,12 @@ static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
>   		DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");
>   
>   	vlv_punit_put(dev_priv);
> -	mutex_unlock(&dev_priv->pcu_lock);
>   }
>   
>   static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
>   {
>   	u32 val;
>   
> -	mutex_lock(&dev_priv->pcu_lock);
>   	vlv_punit_get(dev_priv);
>   
>   	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
> @@ -345,7 +342,6 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
>   	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
>   
>   	vlv_punit_put(dev_priv);
> -	mutex_unlock(&dev_priv->pcu_lock);
>   }
>   
>   #define FW_WM(value, plane) \
> @@ -2810,11 +2806,9 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
>   
>   		/* read the first set of memory latencies[0:3] */
>   		val = 0; /* data0 to be programmed to 0 for first set */
> -		mutex_lock(&dev_priv->pcu_lock);
>   		ret = sandybridge_pcode_read(dev_priv,
>   					     GEN9_PCODE_READ_MEM_LATENCY,
>   					     &val);
> -		mutex_unlock(&dev_priv->pcu_lock);
>   
>   		if (ret) {
>   			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
> @@ -2831,11 +2825,9 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
>   
>   		/* read the second set of memory latencies[4:7] */
>   		val = 1; /* data0 to be programmed to 1 for second set */
> -		mutex_lock(&dev_priv->pcu_lock);
>   		ret = sandybridge_pcode_read(dev_priv,
>   					     GEN9_PCODE_READ_MEM_LATENCY,
>   					     &val);
> -		mutex_unlock(&dev_priv->pcu_lock);
>   		if (ret) {
>   			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
>   			return;
> @@ -3622,13 +3614,10 @@ intel_enable_sagv(struct drm_i915_private *dev_priv)
>   		return 0;
>   
>   	DRM_DEBUG_KMS("Enabling the SAGV\n");
> -	mutex_lock(&dev_priv->pcu_lock);
> -
>   	ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
>   				      GEN9_SAGV_ENABLE);
>   
>   	/* We don't need to wait for the SAGV when enabling */
> -	mutex_unlock(&dev_priv->pcu_lock);
>   
>   	/*
>   	 * Some skl systems, pre-release machines in particular,
> @@ -3659,15 +3648,11 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
>   		return 0;
>   
>   	DRM_DEBUG_KMS("Disabling the SAGV\n");
> -	mutex_lock(&dev_priv->pcu_lock);
> -
>   	/* bspec says to keep retrying for at least 1 ms */
>   	ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
>   				GEN9_SAGV_DISABLE,
>   				GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
>   				1);
> -	mutex_unlock(&dev_priv->pcu_lock);
> -
>   	/*
>   	 * Some skl systems, pre-release machines in particular,
>   	 * don't actually have an SAGV.
> @@ -5684,7 +5669,6 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
>   	wm->level = VLV_WM_LEVEL_PM2;
>   
>   	if (IS_CHERRYVIEW(dev_priv)) {
> -		mutex_lock(&dev_priv->pcu_lock);
>   		vlv_punit_get(dev_priv);
>   
>   		val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
> @@ -5716,7 +5700,6 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
>   		}
>   
>   		vlv_punit_put(dev_priv);
> -		mutex_unlock(&dev_priv->pcu_lock);
>   	}
>   
>   	for_each_intel_crtc(dev, crtc) {
> @@ -6324,7 +6307,7 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
>   {
>   	struct intel_rps *rps = &dev_priv->gt_pm.rps;
>   
> -	mutex_lock(&dev_priv->pcu_lock);
> +	mutex_lock(&rps->lock);
>   	if (rps->enabled) {
>   		u8 freq;
>   
> @@ -6347,7 +6330,7 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
>   					rps->max_freq_softlimit)))
>   			DRM_DEBUG_DRIVER("Failed to set idle frequency\n");
>   	}
> -	mutex_unlock(&dev_priv->pcu_lock);
> +	mutex_unlock(&rps->lock);
>   }
>   
>   void gen6_rps_idle(struct drm_i915_private *dev_priv)
> @@ -6361,7 +6344,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
>   	 */
>   	gen6_disable_rps_interrupts(dev_priv);
>   
> -	mutex_lock(&dev_priv->pcu_lock);
> +	mutex_lock(&rps->lock);
>   	if (rps->enabled) {
>   		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
>   			vlv_set_rps_idle(dev_priv);
> @@ -6371,7 +6354,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
>   		I915_WRITE(GEN6_PMINTRMSK,
>   			   gen6_sanitize_rps_pm_mask(dev_priv, ~0));
>   	}
> -	mutex_unlock(&dev_priv->pcu_lock);
> +	mutex_unlock(&rps->lock);
>   }
>   
>   void gen6_rps_boost(struct i915_request *rq,
> @@ -6412,7 +6395,7 @@ int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
>   	struct intel_rps *rps = &dev_priv->gt_pm.rps;
>   	int err;
>   
> -	lockdep_assert_held(&dev_priv->pcu_lock);
> +	lockdep_assert_held(&rps->lock);
>   	GEM_BUG_ON(val > rps->max_freq);
>   	GEM_BUG_ON(val < rps->min_freq);
>   
> @@ -6911,7 +6894,7 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
>   	int scaling_factor = 180;
>   	struct cpufreq_policy *policy;
>   
> -	WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
> +	lockdep_assert_held(&rps->lock);
>   
>   	policy = cpufreq_cpu_get(0);
>   	if (policy) {
> @@ -7988,7 +7971,7 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
>   		intel_runtime_pm_get(dev_priv);
>   	}
>   
> -	mutex_lock(&dev_priv->pcu_lock);
> +	mutex_lock(&rps->lock);
>   
>   	/* Initialize RPS limits (for userspace) */
>   	if (IS_CHERRYVIEW(dev_priv))
> @@ -8028,7 +8011,7 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
>   	/* Finally allow us to boost to max by default */
>   	rps->boost_freq = rps->max_freq;
>   
> -	mutex_unlock(&dev_priv->pcu_lock);
> +	mutex_unlock(&rps->lock);
>   }
>   
>   void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
> @@ -8070,7 +8053,7 @@ void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
>   
>   static inline void intel_disable_llc_pstate(struct drm_i915_private *i915)
>   {
> -	lockdep_assert_held(&i915->pcu_lock);
> +	lockdep_assert_held(&i915->gt_pm.rps.lock);
>   
>   	if (!i915->gt_pm.llc_pstate.enabled)
>   		return;
> @@ -8082,7 +8065,7 @@ static inline void intel_disable_llc_pstate(struct drm_i915_private *i915)
>   
>   static void intel_disable_rc6(struct drm_i915_private *dev_priv)
>   {
> -	lockdep_assert_held(&dev_priv->pcu_lock);
> +	lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
>   
>   	if (!dev_priv->gt_pm.rc6.enabled)
>   		return;
> @@ -8101,7 +8084,7 @@ static void intel_disable_rc6(struct drm_i915_private *dev_priv)
>   
>   static void intel_disable_rps(struct drm_i915_private *dev_priv)
>   {
> -	lockdep_assert_held(&dev_priv->pcu_lock);
> +	lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
>   
>   	if (!dev_priv->gt_pm.rps.enabled)
>   		return;
> @@ -8122,19 +8105,19 @@ static void intel_disable_rps(struct drm_i915_private *dev_priv)
>   
>   void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
>   {
> -	mutex_lock(&dev_priv->pcu_lock);
> +	mutex_lock(&dev_priv->gt_pm.rps.lock);
>   
>   	intel_disable_rc6(dev_priv);
>   	intel_disable_rps(dev_priv);
>   	if (HAS_LLC(dev_priv))
>   		intel_disable_llc_pstate(dev_priv);
>   
> -	mutex_unlock(&dev_priv->pcu_lock);
> +	mutex_unlock(&dev_priv->gt_pm.rps.lock);
>   }
>   
>   static inline void intel_enable_llc_pstate(struct drm_i915_private *i915)
>   {
> -	lockdep_assert_held(&i915->pcu_lock);
> +	lockdep_assert_held(&i915->gt_pm.rps.lock);
>   
>   	if (i915->gt_pm.llc_pstate.enabled)
>   		return;
> @@ -8146,7 +8129,7 @@ static inline void intel_enable_llc_pstate(struct drm_i915_private *i915)
>   
>   static void intel_enable_rc6(struct drm_i915_private *dev_priv)
>   {
> -	lockdep_assert_held(&dev_priv->pcu_lock);
> +	lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
>   
>   	if (dev_priv->gt_pm.rc6.enabled)
>   		return;
> @@ -8169,7 +8152,7 @@ static void intel_enable_rps(struct drm_i915_private *dev_priv)
>   {
>   	struct intel_rps *rps = &dev_priv->gt_pm.rps;
>   
> -	lockdep_assert_held(&dev_priv->pcu_lock);
> +	lockdep_assert_held(&rps->lock);
>   
>   	if (rps->enabled)
>   		return;
> @@ -8206,7 +8189,7 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
>   	if (intel_vgpu_active(dev_priv))
>   		return;
>   
> -	mutex_lock(&dev_priv->pcu_lock);
> +	mutex_lock(&dev_priv->gt_pm.rps.lock);
>   
>   	if (HAS_RC6(dev_priv))
>   		intel_enable_rc6(dev_priv);
> @@ -8214,7 +8197,7 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
>   	if (HAS_LLC(dev_priv))
>   		intel_enable_llc_pstate(dev_priv);
>   
> -	mutex_unlock(&dev_priv->pcu_lock);
> +	mutex_unlock(&dev_priv->gt_pm.rps.lock);
>   }
>   
>   static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
> @@ -9214,22 +9197,19 @@ static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv)
>   	}
>   }
>   
> -int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
> +static int __sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
>   {
>   	int status;
>   
> -	WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
> +	lockdep_assert_held(&dev_priv->sb_lock);
>   
>   	/* GEN6_PCODE_* are outside of the forcewake domain, we can
>   	 * use te fw I915_READ variants to reduce the amount of work
>   	 * required when reading/writing.
>   	 */
>   
> -	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
> -		DRM_DEBUG_DRIVER("warning: pcode (read from mbox %x) mailbox access failed for %ps\n",
> -				 mbox, __builtin_return_address(0));
> +	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
>   		return -EAGAIN;
> -	}
>   
>   	I915_WRITE_FW(GEN6_PCODE_DATA, *val);
>   	I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
> @@ -9237,11 +9217,8 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
>   
>   	if (__intel_wait_for_register_fw(dev_priv,
>   					 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
> -					 500, 0, NULL)) {
> -		DRM_ERROR("timeout waiting for pcode read (from mbox %x) to finish for %ps\n",
> -			  mbox, __builtin_return_address(0));
> +					 500, 0, NULL))
>   		return -ETIMEDOUT;
> -	}
>   
>   	*val = I915_READ_FW(GEN6_PCODE_DATA);
>   	I915_WRITE_FW(GEN6_PCODE_DATA, 0);
> @@ -9251,33 +9228,39 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
>   	else
>   		status = gen6_check_mailbox_status(dev_priv);
>   
> +	return status;
> +}
> +
> +int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
> +{
> +	int status;
> +
> +	mutex_lock(&dev_priv->sb_lock);
> +	status = __sandybridge_pcode_read(dev_priv, mbox, val);
> +	mutex_unlock(&dev_priv->sb_lock);
> +
>   	if (status) {
>   		DRM_DEBUG_DRIVER("warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
>   				 mbox, __builtin_return_address(0), status);
> -		return status;
>   	}
>   
> -	return 0;
> +	return status;
>   }
>   
> -int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv,
> -				    u32 mbox, u32 val,
> -				    int fast_timeout_us, int slow_timeout_ms)
> +static int __sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv,
> +					     u32 mbox, u32 val,
> +					     int fast_timeout_us,
> +					     int slow_timeout_ms)
>   {
>   	int status;
>   
> -	WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
> -
>   	/* GEN6_PCODE_* are outside of the forcewake domain, we can
>   	 * use te fw I915_READ variants to reduce the amount of work
>   	 * required when reading/writing.
>   	 */
>   
> -	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
> -		DRM_DEBUG_DRIVER("warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps\n",
> -				 val, mbox, __builtin_return_address(0));
> +	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
>   		return -EAGAIN;
> -	}
>   
>   	I915_WRITE_FW(GEN6_PCODE_DATA, val);
>   	I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
> @@ -9286,11 +9269,8 @@ int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv,
>   	if (__intel_wait_for_register_fw(dev_priv,
>   					 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
>   					 fast_timeout_us, slow_timeout_ms,
> -					 NULL)) {
> -		DRM_ERROR("timeout waiting for pcode write of 0x%08x to mbox %x to finish for %ps\n",
> -			  val, mbox, __builtin_return_address(0));
> +					 NULL))
>   		return -ETIMEDOUT;
> -	}
>   
>   	I915_WRITE_FW(GEN6_PCODE_DATA, 0);
>   
> @@ -9299,13 +9279,28 @@ int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv,
>   	else
>   		status = gen6_check_mailbox_status(dev_priv);
>   
> +	return status;
> +}
> +
> +int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv,
> +				    u32 mbox, u32 val,
> +				    int fast_timeout_us,
> +				    int slow_timeout_ms)
> +{
> +	int status;
> +
> +	mutex_lock(&dev_priv->sb_lock);
> +	status = __sandybridge_pcode_write_timeout(dev_priv, mbox, val,
> +						   fast_timeout_us,
> +						   slow_timeout_ms);
> +	mutex_unlock(&dev_priv->sb_lock);
> +
>   	if (status) {
>   		DRM_DEBUG_DRIVER("warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
>   				 val, mbox, __builtin_return_address(0), status);
> -		return status;
>   	}
>   
> -	return 0;
> +	return status;
>   }
>   
>   static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
> @@ -9314,7 +9309,7 @@ static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
>   {
>   	u32 val = request;
>   
> -	*status = sandybridge_pcode_read(dev_priv, mbox, &val);
> +	*status = __sandybridge_pcode_read(dev_priv, mbox, &val);
>   
>   	return *status || ((val & reply_mask) == reply);
>   }
> @@ -9344,7 +9339,7 @@ int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
>   	u32 status;
>   	int ret;
>   
> -	WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
> +	mutex_lock(&dev_priv->sb_lock);
>   
>   #define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \
>   				   &status)
> @@ -9380,6 +9375,7 @@ int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
>   	preempt_enable();
>   
>   out:
> +	mutex_unlock(&dev_priv->sb_lock);
>   	return ret ? ret : status;
>   #undef COND
>   }
> @@ -9449,8 +9445,7 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
>   
>   void intel_pm_setup(struct drm_i915_private *dev_priv)
>   {
> -	mutex_init(&dev_priv->pcu_lock);
> -
> +	mutex_init(&dev_priv->gt_pm.rps.lock);
>   	atomic_set(&dev_priv->gt_pm.rps.num_waiters, 0);
>   
>   	dev_priv->runtime_pm.suspended = false;
> diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
> index 069b6a30468f..2cc64f0fda57 100644
> --- a/drivers/gpu/drm/i915/intel_runtime_pm.c
> +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
> @@ -815,7 +815,6 @@ static void vlv_set_power_well(struct drm_i915_private *dev_priv,
>   	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
>   			 PUNIT_PWRGT_PWR_GATE(power_well_id);
>   
> -	mutex_lock(&dev_priv->pcu_lock);
>   	vlv_punit_get(dev_priv);
>   
>   #define COND \
> @@ -838,7 +837,6 @@ static void vlv_set_power_well(struct drm_i915_private *dev_priv,
>   
>   out:
>   	vlv_punit_put(dev_priv);
> -	mutex_unlock(&dev_priv->pcu_lock);
>   }
>   
>   static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
> @@ -865,7 +863,6 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
>   	mask = PUNIT_PWRGT_MASK(power_well_id);
>   	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
>   
> -	mutex_lock(&dev_priv->pcu_lock);
>   	vlv_punit_get(dev_priv);
>   
>   	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
> @@ -886,7 +883,6 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
>   	WARN_ON(ctrl != state);
>   
>   	vlv_punit_put(dev_priv);
> -	mutex_unlock(&dev_priv->pcu_lock);
>   
>   	return enabled;
>   }
> @@ -1398,7 +1394,6 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
>   	bool enabled;
>   	u32 state, ctrl;
>   
> -	mutex_lock(&dev_priv->pcu_lock);
>   	vlv_punit_get(dev_priv);
>   
>   	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
> @@ -1417,7 +1412,6 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
>   	WARN_ON(ctrl << 16 != state);
>   
>   	vlv_punit_put(dev_priv);
> -	mutex_unlock(&dev_priv->pcu_lock);
>   
>   	return enabled;
>   }
> @@ -1432,7 +1426,6 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
>   
>   	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
>   
> -	mutex_lock(&dev_priv->pcu_lock);
>   	vlv_punit_get(dev_priv);
>   
>   #define COND \
> @@ -1455,7 +1448,6 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
>   
>   out:
>   	vlv_punit_put(dev_priv);
> -	mutex_unlock(&dev_priv->pcu_lock);
>   }
>   
>   static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
> diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
> index dc3b491b4d00..2d4e48e9e1d5 100644
> --- a/drivers/gpu/drm/i915/intel_sideband.c
> +++ b/drivers/gpu/drm/i915/intel_sideband.c
> @@ -142,8 +142,6 @@ u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr)
>   {
>   	u32 val = 0;
>   
> -	lockdep_assert_held(&dev_priv->pcu_lock);
> -
>   	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
>   			SB_CRRDDA_NP, addr, &val);
>   
> @@ -152,8 +150,6 @@ u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr)
>   
>   int vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val)
>   {
> -	lockdep_assert_held(&dev_priv->pcu_lock);
> -
>   	return vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
>   			       SB_CRWRDA_NP, addr, &val);
>   }
> 

Regards,

Hans
