[Intel-gfx] [RFC 01/10] drm/i915: do not pass dev_priv to low-level forcewake functions
Paulo Zanoni
paulo.r.zanoni at intel.com
Fri Mar 15 20:07:33 UTC 2019
On Wed, 2019-03-13 at 16:13 -0700, Daniele Ceraolo Spurio wrote:
> The only usage we have for it is the regs pointer. Save pointers to
> the set and ack registers instead of the register offsets to remove this
> requirement.
Reviewed-by: Paulo Zanoni <paulo.r.zanoni at intel.com>
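
To spell out the idea for the archives: instead of keeping i915_reg_t
offsets and resolving them against dev_priv->regs on every access, the
domain now caches the resolved MMIO addresses once at init time, so the
hot-path helpers only need the domain pointer. A rough userspace analogue
of that pattern, with a plain array standing in for the MMIO BAR and
made-up names (fake_uncore, domain_init, fw_set_reg, fw_ack_reg), not the
actual kernel API:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct fw_domain {
	/* Resolved once at init time, like d->reg_set / d->reg_ack... */
	volatile uint32_t *reg_set;
	volatile uint32_t *reg_ack;
};

struct fake_uncore {
	uint32_t mmio[256];		/* stand-in for dev_priv->regs */
	struct fw_domain domain;
};

static void domain_init(struct fake_uncore *u,
			unsigned int set_off, unsigned int ack_off)
{
	/* ...so the accessors below no longer need the device pointer. */
	u->domain.reg_set = &u->mmio[set_off / sizeof(uint32_t)];
	u->domain.reg_ack = &u->mmio[ack_off / sizeof(uint32_t)];
}

/* Userspace analogues of the new fw_set()/fw_ack() helpers. */
static void fw_set_reg(const struct fw_domain *d, uint32_t val)
{
	*d->reg_set = val;
}

static uint32_t fw_ack_reg(const struct fw_domain *d)
{
	return *d->reg_ack;
}

int main(void)
{
	struct fake_uncore u = { .mmio = { 0 } };

	domain_init(&u, 0x40, 0x44);	/* arbitrary offsets for the demo */
	fw_set_reg(&u.domain, 0xffff0001);
	printf("set=0x%08" PRIx32 " ack=0x%08" PRIx32 "\n",
	       u.mmio[0x40 / 4], fw_ack_reg(&u.domain));
	return 0;
}
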
>
> Cc: Paulo Zanoni <paulo.r.zanoni at intel.com>
> Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio at intel.com>
> ---
> drivers/gpu/drm/i915/intel_uncore.c | 100 +++++++++++++---------------
> drivers/gpu/drm/i915/intel_uncore.h | 9 ++-
> 2 files changed, 52 insertions(+), 57 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
> index 75646a1e0051..cb78dcddc9cb 100644
> --- a/drivers/gpu/drm/i915/intel_uncore.c
> +++ b/drivers/gpu/drm/i915/intel_uncore.c
> @@ -58,16 +58,18 @@ intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
> return "unknown";
> }
>
> +#define fw_ack(d) readl((d)->reg_ack)
> +#define fw_set(d, val) writel((val), (d)->reg_set)
> +
> static inline void
> -fw_domain_reset(struct drm_i915_private *i915,
> - const struct intel_uncore_forcewake_domain *d)
> +fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
> {
> /*
> * We don't really know if the powerwell for the forcewake domain we are
> * trying to reset here does exist at this point (engines could be fused
> * off in ICL+), so no waiting for acks
> */
> - __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_reset);
> + fw_set(d, forcewake_domain_to_uncore(d)->fw_reset);
> }
>
> static inline void
> @@ -81,36 +83,32 @@ fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
> }
>
> static inline int
> -__wait_for_ack(const struct drm_i915_private *i915,
> - const struct intel_uncore_forcewake_domain *d,
> +__wait_for_ack(const struct intel_uncore_forcewake_domain *d,
> const u32 ack,
> const u32 value)
> {
> - return wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) & ack) == value,
> + return wait_for_atomic((fw_ack(d) & ack) == value,
> FORCEWAKE_ACK_TIMEOUT_MS);
> }
>
> static inline int
> -wait_ack_clear(const struct drm_i915_private *i915,
> - const struct intel_uncore_forcewake_domain *d,
> +wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
> const u32 ack)
> {
> - return __wait_for_ack(i915, d, ack, 0);
> + return __wait_for_ack(d, ack, 0);
> }
>
> static inline int
> -wait_ack_set(const struct drm_i915_private *i915,
> - const struct intel_uncore_forcewake_domain *d,
> +wait_ack_set(const struct intel_uncore_forcewake_domain *d,
> const u32 ack)
> {
> - return __wait_for_ack(i915, d, ack, ack);
> + return __wait_for_ack(d, ack, ack);
> }
>
> static inline void
> -fw_domain_wait_ack_clear(const struct drm_i915_private *i915,
> - const struct intel_uncore_forcewake_domain *d)
> +fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
> {
> - if (wait_ack_clear(i915, d, FORCEWAKE_KERNEL))
> + if (wait_ack_clear(d, FORCEWAKE_KERNEL))
> DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
> intel_uncore_forcewake_domain_to_str(d->id));
> }
> @@ -121,8 +119,7 @@ enum ack_type {
> };
>
> static int
> -fw_domain_wait_ack_with_fallback(const struct drm_i915_private *i915,
> - const struct intel_uncore_forcewake_domain *d,
> +fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
> const enum ack_type type)
> {
> const u32 ack_bit = FORCEWAKE_KERNEL;
> @@ -146,72 +143,65 @@ fw_domain_wait_ack_with_fallback(const struct drm_i915_private *i915,
>
> pass = 1;
> do {
> - wait_ack_clear(i915, d, FORCEWAKE_KERNEL_FALLBACK);
> + wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);
>
> - __raw_i915_write32(i915, d->reg_set,
> - _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL_FALLBACK));
> + fw_set(d, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL_FALLBACK));
> /* Give gt some time to relax before the polling frenzy */
> udelay(10 * pass);
> - wait_ack_set(i915, d, FORCEWAKE_KERNEL_FALLBACK);
> + wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);
>
> - ack_detected = (__raw_i915_read32(i915, d->reg_ack) & ack_bit) == value;
> + ack_detected = (fw_ack(d) & ack_bit) == value;
>
> - __raw_i915_write32(i915, d->reg_set,
> - _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL_FALLBACK));
> + fw_set(d, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL_FALLBACK));
> } while (!ack_detected && pass++ < 10);
>
> DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
> intel_uncore_forcewake_domain_to_str(d->id),
> type == ACK_SET ? "set" : "clear",
> - __raw_i915_read32(i915, d->reg_ack),
> + fw_ack(d),
> pass);
>
> return ack_detected ? 0 : -ETIMEDOUT;
> }
>
> static inline void
> -fw_domain_wait_ack_clear_fallback(const struct drm_i915_private *i915,
> - const struct intel_uncore_forcewake_domain *d)
> +fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
> {
> - if (likely(!wait_ack_clear(i915, d, FORCEWAKE_KERNEL)))
> + if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
> return;
>
> - if (fw_domain_wait_ack_with_fallback(i915, d, ACK_CLEAR))
> - fw_domain_wait_ack_clear(i915, d);
> + if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
> + fw_domain_wait_ack_clear(d);
> }
>
> static inline void
> -fw_domain_get(struct drm_i915_private *i915,
> - const struct intel_uncore_forcewake_domain *d)
> +fw_domain_get(const struct intel_uncore_forcewake_domain *d)
> {
> - __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_set);
> + fw_set(d, forcewake_domain_to_uncore(d)->fw_set);
> }
>
> static inline void
> -fw_domain_wait_ack_set(const struct drm_i915_private *i915,
> - const struct intel_uncore_forcewake_domain *d)
> +fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
> {
> - if (wait_ack_set(i915, d, FORCEWAKE_KERNEL))
> + if (wait_ack_set(d, FORCEWAKE_KERNEL))
> DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
> intel_uncore_forcewake_domain_to_str(d->id));
> }
>
> static inline void
> -fw_domain_wait_ack_set_fallback(const struct drm_i915_private *i915,
> - const struct intel_uncore_forcewake_domain *d)
> +fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
> {
> - if (likely(!wait_ack_set(i915, d, FORCEWAKE_KERNEL)))
> + if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
> return;
>
> - if (fw_domain_wait_ack_with_fallback(i915, d, ACK_SET))
> - fw_domain_wait_ack_set(i915, d);
> + if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
> + fw_domain_wait_ack_set(d);
> }
>
> static inline void
> -fw_domain_put(const struct drm_i915_private *i915,
> - const struct intel_uncore_forcewake_domain *d)
> +fw_domain_put(const struct intel_uncore_forcewake_domain *d)
> {
> - __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_clear);
> + fw_set(d, forcewake_domain_to_uncore(d)->fw_clear);
> }
>
> static void
> @@ -223,12 +213,12 @@ fw_domains_get(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
> GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
>
> for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
> - fw_domain_wait_ack_clear(i915, d);
> - fw_domain_get(i915, d);
> + fw_domain_wait_ack_clear(d);
> + fw_domain_get(d);
> }
>
> for_each_fw_domain_masked(d, fw_domains, i915, tmp)
> - fw_domain_wait_ack_set(i915, d);
> + fw_domain_wait_ack_set(d);
>
> i915->uncore.fw_domains_active |= fw_domains;
> }
> @@ -243,12 +233,12 @@ fw_domains_get_with_fallback(struct drm_i915_private *i915,
> GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
>
> for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
> - fw_domain_wait_ack_clear_fallback(i915, d);
> - fw_domain_get(i915, d);
> + fw_domain_wait_ack_clear_fallback(d);
> + fw_domain_get(d);
> }
>
> for_each_fw_domain_masked(d, fw_domains, i915, tmp)
> - fw_domain_wait_ack_set_fallback(i915, d);
> + fw_domain_wait_ack_set_fallback(d);
>
> i915->uncore.fw_domains_active |= fw_domains;
> }
> @@ -262,7 +252,7 @@ fw_domains_put(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
> GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
>
> for_each_fw_domain_masked(d, fw_domains, i915, tmp)
> - fw_domain_put(i915, d);
> + fw_domain_put(d);
>
> i915->uncore.fw_domains_active &= ~fw_domains;
> }
> @@ -280,7 +270,7 @@ fw_domains_reset(struct drm_i915_private *i915,
> GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
>
> for_each_fw_domain_masked(d, fw_domains, i915, tmp)
> - fw_domain_reset(i915, d);
> + fw_domain_reset(d);
> }
>
> static inline u32 gt_thread_status(struct drm_i915_private *dev_priv)
> @@ -1350,8 +1340,8 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
> WARN_ON(!i915_mmio_reg_valid(reg_ack));
>
> d->wake_count = 0;
> - d->reg_set = reg_set;
> - d->reg_ack = reg_ack;
> + d->reg_set = dev_priv->regs + i915_mmio_reg_offset(reg_set);
> + d->reg_ack = dev_priv->regs + i915_mmio_reg_offset(reg_ack);
>
> d->id = domain_id;
>
> @@ -1373,7 +1363,7 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
>
> dev_priv->uncore.fw_domains |= BIT(domain_id);
>
> - fw_domain_reset(dev_priv, d);
> + fw_domain_reset(d);
> }
>
> static void fw_domain_fini(struct drm_i915_private *dev_priv,
> diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
> index e5e157d288de..b0a95469babf 100644
> --- a/drivers/gpu/drm/i915/intel_uncore.h
> +++ b/drivers/gpu/drm/i915/intel_uncore.h
> @@ -116,8 +116,8 @@ struct intel_uncore {
> unsigned int wake_count;
> bool active;
> struct hrtimer timer;
> - i915_reg_t reg_set;
> - i915_reg_t reg_ack;
> + u32 __iomem *reg_set;
> + u32 __iomem *reg_ack;
> } fw_domain[FW_DOMAIN_ID_COUNT];
>
> struct {
> @@ -138,6 +138,11 @@ struct intel_uncore {
> #define for_each_fw_domain(domain__, dev_priv__, tmp__) \
> for_each_fw_domain_masked(domain__, (dev_priv__)->uncore.fw_domains, dev_priv__, tmp__)
>
> +static inline struct intel_uncore *
> +forcewake_domain_to_uncore(const struct intel_uncore_forcewake_domain *d)
> +{
> + return container_of(d, struct intel_uncore, fw_domain[d->id]);
> +}
>
> void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
> void intel_uncore_init(struct drm_i915_private *dev_priv);
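
One remark for anyone reading along: the new forcewake_domain_to_uncore()
helper recovers the owning intel_uncore purely from the struct layout,
since fw_domain[] is embedded in struct intel_uncore. A minimal standalone
sketch of the same pattern (simplified struct names, not the real i915
definitions):

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins mirroring the "array member inside the owning
 * struct" layout used by intel_uncore. */
struct domain {
	int id;		/* index of this element within uncore.fw_domain[] */
};

struct uncore {
	unsigned int fw_set;
	struct domain fw_domain[4];
};

static struct uncore *domain_to_uncore(struct domain *d)
{
	/*
	 * Step back d->id elements to reach fw_domain[0], then subtract
	 * the array's offset inside struct uncore. The patch's
	 * forcewake_domain_to_uncore() folds both steps into a single
	 * container_of(d, struct intel_uncore, fw_domain[d->id]), which
	 * leans on the GCC/Clang extension that lets offsetof() take a
	 * runtime array index.
	 */
	return (struct uncore *)((char *)(d - d->id) -
				 offsetof(struct uncore, fw_domain));
}

int main(void)
{
	struct uncore u = { .fw_set = 0x1234 };
	int i;

	for (i = 0; i < 4; i++)
		u.fw_domain[i].id = i;

	/* Recover the owning uncore from a bare domain pointer. */
	printf("0x%x\n", domain_to_uncore(&u.fw_domain[2])->fw_set);
	return 0;
}
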