[Intel-gfx] [PATCH v2 10/11] drm/i915: Move sandybridge pcode access to intel_sideband.c

Mika Kuoppala mika.kuoppala at linux.intel.com
Fri Jan 19 08:21:31 UTC 2018


Chris Wilson <chris at chris-wilson.co.uk> writes:

> sandybridge_pcode is another sideband, so move it to its new home.
>

Tested up to this patch, omitting the waitboosting one: 41 min of
uptime until a system hang on the j1900.

With the waitboosting patch applied it survived for 24h+ without
system hangs, but the frequency seemed to remain constant.
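A quick way to cross-check that constant-frequency observation from
userspace (a hypothetical sketch, not part of this series, assuming the
usual i915 sysfs nodes gt_cur_freq_mhz and gt_act_freq_mhz are exposed
for card0):

#include <stdio.h>
#include <unistd.h>

static int read_mhz(const char *path)
{
	FILE *f = fopen(path, "r");
	int mhz = -1;

	if (!f)
		return -1;
	if (fscanf(f, "%d", &mhz) != 1)
		mhz = -1;
	fclose(f);
	return mhz;
}

int main(void)
{
	for (;;) {
		/* requested (cur) vs. actual (act) GT frequency in MHz */
		printf("cur=%d MHz  act=%d MHz\n",
		       read_mhz("/sys/class/drm/card0/gt_cur_freq_mhz"),
		       read_mhz("/sys/class/drm/card0/gt_act_freq_mhz"));
		sleep(1);
	}
	return 0;
}

If the actual frequency never moves under a GPU-bound workload, the
boost/RPS path rather than the pcode move itself is the likely suspect.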

-Mika


> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/i915/i915_drv.h       |   5 -
>  drivers/gpu/drm/i915/intel_hdcp.c     |   3 +-
>  drivers/gpu/drm/i915/intel_pm.c       | 190 ----------------------------------
>  drivers/gpu/drm/i915/intel_sideband.c | 190 ++++++++++++++++++++++++++++++++++
>  drivers/gpu/drm/i915/intel_sideband.h |   5 +
>  5 files changed, 197 insertions(+), 196 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 4d9e9741ed8f..a0bb0f018a76 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -3706,11 +3706,6 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv);
>  extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
>  					    struct intel_display_error_state *error);
>  
> -int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
> -int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
> -int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
> -		      u32 reply_mask, u32 reply, int timeout_base_ms);
> -
>  /* intel_dpio_phy.c */
>  void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
>  			     enum dpio_phy *phy, enum dpio_channel *ch);
> diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c
> index d0e97172b8ed..1afae93707a1 100644
> --- a/drivers/gpu/drm/i915/intel_hdcp.c
> +++ b/drivers/gpu/drm/i915/intel_hdcp.c
> @@ -11,8 +11,9 @@
>  #include <linux/i2c.h>
>  #include <linux/random.h>
>  
> -#include "intel_drv.h"
>  #include "i915_reg.h"
> +#include "intel_drv.h"
> +#include "intel_sideband.h"
>  
>  #define KEY_LOAD_TRIES	5
>  
> diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
> index 18945264b835..a69b742147f7 100644
> --- a/drivers/gpu/drm/i915/intel_pm.c
> +++ b/drivers/gpu/drm/i915/intel_pm.c
> @@ -9080,196 +9080,6 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
>  	}
>  }
>  
> -static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
> -{
> -	uint32_t flags =
> -		I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
> -
> -	switch (flags) {
> -	case GEN6_PCODE_SUCCESS:
> -		return 0;
> -	case GEN6_PCODE_UNIMPLEMENTED_CMD:
> -		return -ENODEV;
> -	case GEN6_PCODE_ILLEGAL_CMD:
> -		return -ENXIO;
> -	case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
> -	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
> -		return -EOVERFLOW;
> -	case GEN6_PCODE_TIMEOUT:
> -		return -ETIMEDOUT;
> -	default:
> -		MISSING_CASE(flags);
> -		return 0;
> -	}
> -}
> -
> -static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv)
> -{
> -	uint32_t flags =
> -		I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
> -
> -	switch (flags) {
> -	case GEN6_PCODE_SUCCESS:
> -		return 0;
> -	case GEN6_PCODE_ILLEGAL_CMD:
> -		return -ENXIO;
> -	case GEN7_PCODE_TIMEOUT:
> -		return -ETIMEDOUT;
> -	case GEN7_PCODE_ILLEGAL_DATA:
> -		return -EINVAL;
> -	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
> -		return -EOVERFLOW;
> -	default:
> -		MISSING_CASE(flags);
> -		return 0;
> -	}
> -}
> -
> -static int __sandybridge_pcode_rw(struct drm_i915_private *dev_priv,
> -				  u32 mbox, u32 *val, bool is_read)
> -{
> -	int status;
> -
> -	lockdep_assert_held(&dev_priv->sb_lock);
> -
> -	/*
> -	 * GEN6_PCODE_* are outside of the forcewake domain, we can
> -	 * use te fw I915_READ variants to reduce the amount of work
> -	 * required when reading/writing.
> -	 */
> -
> -	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
> -		return -EAGAIN;
> -
> -	I915_WRITE_FW(GEN6_PCODE_DATA, *val);
> -	I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
> -	I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
> -
> -	if (__intel_wait_for_register_fw(dev_priv,
> -					 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
> -					 500, 0, NULL))
> -		return -ETIMEDOUT;
> -
> -	if (is_read)
> -		*val = I915_READ_FW(GEN6_PCODE_DATA);
> -	I915_WRITE_FW(GEN6_PCODE_DATA, 0);
> -
> -	if (INTEL_GEN(dev_priv) > 6)
> -		status = gen7_check_mailbox_status(dev_priv);
> -	else
> -		status = gen6_check_mailbox_status(dev_priv);
> -
> -	return status;
> -}
> -
> -int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
> -{
> -	int status;
> -
> -	mutex_lock(&dev_priv->sb_lock);
> -	status = __sandybridge_pcode_rw(dev_priv, mbox, val, true);
> -	mutex_unlock(&dev_priv->sb_lock);
> -
> -	if (status) {
> -		DRM_DEBUG_DRIVER("warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
> -				 mbox, __builtin_return_address(0), status);
> -	}
> -
> -	return status;
> -}
> -
> -int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
> -			    u32 mbox, u32 val)
> -{
> -	int status;
> -
> -	mutex_lock(&dev_priv->sb_lock);
> -	status = __sandybridge_pcode_rw(dev_priv, mbox, &val, false);
> -	mutex_unlock(&dev_priv->sb_lock);
> -
> -	if (status) {
> -		DRM_DEBUG_DRIVER("warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
> -				 val, mbox, __builtin_return_address(0), status);
> -	}
> -
> -	return status;
> -}
> -
> -static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
> -				  u32 request, u32 reply_mask, u32 reply,
> -				  u32 *status)
> -{
> -	*status = __sandybridge_pcode_rw(dev_priv, mbox, &request, true);
> -
> -	return *status || ((request & reply_mask) == reply);
> -}
> -
> -/**
> - * skl_pcode_request - send PCODE request until acknowledgment
> - * @dev_priv: device private
> - * @mbox: PCODE mailbox ID the request is targeted for
> - * @request: request ID
> - * @reply_mask: mask used to check for request acknowledgment
> - * @reply: value used to check for request acknowledgment
> - * @timeout_base_ms: timeout for polling with preemption enabled
> - *
> - * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
> - * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
> - * The request is acknowledged once the PCODE reply dword equals @reply after
> - * applying @reply_mask. Polling is first attempted with preemption enabled
> - * for @timeout_base_ms and if this times out for another 50 ms with
> - * preemption disabled.
> - *
> - * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
> - * other error as reported by PCODE.
> - */
> -int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
> -		      u32 reply_mask, u32 reply, int timeout_base_ms)
> -{
> -	u32 status;
> -	int ret;
> -
> -	mutex_lock(&dev_priv->sb_lock);
> -
> -#define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \
> -				   &status)
> -
> -	/*
> -	 * Prime the PCODE by doing a request first. Normally it guarantees
> -	 * that a subsequent request, at most @timeout_base_ms later, succeeds.
> -	 * _wait_for() doesn't guarantee when its passed condition is evaluated
> -	 * first, so send the first request explicitly.
> -	 */
> -	if (COND) {
> -		ret = 0;
> -		goto out;
> -	}
> -	ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10);
> -	if (!ret)
> -		goto out;
> -
> -	/*
> -	 * The above can time out if the number of requests was low (2 in the
> -	 * worst case) _and_ PCODE was busy for some reason even after a
> -	 * (queued) request and @timeout_base_ms delay. As a workaround retry
> -	 * the poll with preemption disabled to maximize the number of
> -	 * requests. Increase the timeout from @timeout_base_ms to 50ms to
> -	 * account for interrupts that could reduce the number of these
> -	 * requests, and for any quirks of the PCODE firmware that delays
> -	 * the request completion.
> -	 */
> -	DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
> -	WARN_ON_ONCE(timeout_base_ms > 3);
> -	preempt_disable();
> -	ret = wait_for_atomic(COND, 50);
> -	preempt_enable();
> -
> -out:
> -	mutex_unlock(&dev_priv->sb_lock);
> -	return ret ? ret : status;
> -#undef COND
> -}
> -
>  static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
>  {
>  	struct intel_rps *rps = &dev_priv->gt_pm.rps;
> diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
> index 6815be9e5b7c..a9ba378bf9fc 100644
> --- a/drivers/gpu/drm/i915/intel_sideband.c
> +++ b/drivers/gpu/drm/i915/intel_sideband.c
> @@ -383,3 +383,193 @@ void vlv_flisdsi_put(struct drm_i915_private *dev_priv)
>  {
>  	vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_FLISDSI));
>  }
> +
> +static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
> +{
> +	uint32_t flags =
> +		I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
> +
> +	switch (flags) {
> +	case GEN6_PCODE_SUCCESS:
> +		return 0;
> +	case GEN6_PCODE_UNIMPLEMENTED_CMD:
> +		return -ENODEV;
> +	case GEN6_PCODE_ILLEGAL_CMD:
> +		return -ENXIO;
> +	case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
> +	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
> +		return -EOVERFLOW;
> +	case GEN6_PCODE_TIMEOUT:
> +		return -ETIMEDOUT;
> +	default:
> +		MISSING_CASE(flags);
> +		return 0;
> +	}
> +}
> +
> +static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv)
> +{
> +	uint32_t flags =
> +		I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
> +
> +	switch (flags) {
> +	case GEN6_PCODE_SUCCESS:
> +		return 0;
> +	case GEN6_PCODE_ILLEGAL_CMD:
> +		return -ENXIO;
> +	case GEN7_PCODE_TIMEOUT:
> +		return -ETIMEDOUT;
> +	case GEN7_PCODE_ILLEGAL_DATA:
> +		return -EINVAL;
> +	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
> +		return -EOVERFLOW;
> +	default:
> +		MISSING_CASE(flags);
> +		return 0;
> +	}
> +}
> +
> +static int __sandybridge_pcode_rw(struct drm_i915_private *dev_priv,
> +				  u32 mbox, u32 *val, bool is_read)
> +{
> +	int status;
> +
> +	lockdep_assert_held(&dev_priv->sb_lock);
> +
> +	/*
> +	 * GEN6_PCODE_* are outside of the forcewake domain, we can
> +	 * use the I915_READ_FW variants to reduce the amount of work
> +	 * required when reading/writing.
> +	 */
> +
> +	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
> +		return -EAGAIN;
> +
> +	I915_WRITE_FW(GEN6_PCODE_DATA, *val);
> +	I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
> +	I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
> +
> +	if (__intel_wait_for_register_fw(dev_priv,
> +					 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
> +					 500, 0, NULL))
> +		return -ETIMEDOUT;
> +
> +	if (is_read)
> +		*val = I915_READ_FW(GEN6_PCODE_DATA);
> +	I915_WRITE_FW(GEN6_PCODE_DATA, 0);
> +
> +	if (INTEL_GEN(dev_priv) > 6)
> +		status = gen7_check_mailbox_status(dev_priv);
> +	else
> +		status = gen6_check_mailbox_status(dev_priv);
> +
> +	return status;
> +}
> +
> +int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
> +{
> +	int status;
> +
> +	mutex_lock(&dev_priv->sb_lock);
> +	status = __sandybridge_pcode_rw(dev_priv, mbox, val, true);
> +	mutex_unlock(&dev_priv->sb_lock);
> +
> +	if (status) {
> +		DRM_DEBUG_DRIVER("warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
> +				 mbox, __builtin_return_address(0), status);
> +	}
> +
> +	return status;
> +}
> +
> +int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
> +			    u32 mbox, u32 val)
> +{
> +	int status;
> +
> +	mutex_lock(&dev_priv->sb_lock);
> +	status = __sandybridge_pcode_rw(dev_priv, mbox, &val, false);
> +	mutex_unlock(&dev_priv->sb_lock);
> +
> +	if (status) {
> +		DRM_DEBUG_DRIVER("warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
> +				 val, mbox, __builtin_return_address(0), status);
> +	}
> +
> +	return status;
> +}
> +
> +static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
> +				  u32 request, u32 reply_mask, u32 reply,
> +				  u32 *status)
> +{
> +	*status = __sandybridge_pcode_rw(dev_priv, mbox, &request, true);
> +
> +	return *status || ((request & reply_mask) == reply);
> +}
> +
> +/**
> + * skl_pcode_request - send PCODE request until acknowledgment
> + * @dev_priv: device private
> + * @mbox: PCODE mailbox ID the request is targeted for
> + * @request: request ID
> + * @reply_mask: mask used to check for request acknowledgment
> + * @reply: value used to check for request acknowledgment
> + * @timeout_base_ms: timeout for polling with preemption enabled
> + *
> + * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
> + * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
> + * The request is acknowledged once the PCODE reply dword equals @reply after
> + * applying @reply_mask. Polling is first attempted with preemption enabled
> + * for @timeout_base_ms and if this times out for another 50 ms with
> + * preemption disabled.
> + *
> + * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
> + * other error as reported by PCODE.
> + */
> +int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
> +		      u32 reply_mask, u32 reply, int timeout_base_ms)
> +{
> +	u32 status;
> +	int ret;
> +
> +	mutex_lock(&dev_priv->sb_lock);
> +
> +#define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \
> +				   &status)
> +
> +	/*
> +	 * Prime the PCODE by doing a request first. Normally it guarantees
> +	 * that a subsequent request, at most @timeout_base_ms later, succeeds.
> +	 * _wait_for() doesn't guarantee when its passed condition is evaluated
> +	 * first, so send the first request explicitly.
> +	 */
> +	if (COND) {
> +		ret = 0;
> +		goto out;
> +	}
> +	ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10);
> +	if (!ret)
> +		goto out;
> +
> +	/*
> +	 * The above can time out if the number of requests was low (2 in the
> +	 * worst case) _and_ PCODE was busy for some reason even after a
> +	 * (queued) request and @timeout_base_ms delay. As a workaround retry
> +	 * the poll with preemption disabled to maximize the number of
> +	 * requests. Increase the timeout from @timeout_base_ms to 50ms to
> +	 * account for interrupts that could reduce the number of these
> +	 * requests, and for any quirks of the PCODE firmware that delays
> +	 * the request completion.
> +	 */
> +	DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
> +	WARN_ON_ONCE(timeout_base_ms > 3);
> +	preempt_disable();
> +	ret = wait_for_atomic(COND, 50);
> +	preempt_enable();
> +
> +out:
> +	mutex_unlock(&dev_priv->sb_lock);
> +	return ret ? ret : status;
> +#undef COND
> +}
> diff --git a/drivers/gpu/drm/i915/intel_sideband.h b/drivers/gpu/drm/i915/intel_sideband.h
> index 46e917dd3973..999c1da2cd93 100644
> --- a/drivers/gpu/drm/i915/intel_sideband.h
> +++ b/drivers/gpu/drm/i915/intel_sideband.h
> @@ -68,4 +68,9 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
>  void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
>  		     enum intel_sbi_destination destination);
>  
> +int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
> +int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
> +int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
> +		      u32 reply_mask, u32 reply, int timeout_base_ms);
> +
>  #endif /* _INTEL_SIDEBAND_H */
> -- 
> 2.15.1

