[Intel-xe] [PATCH] drm/xe: Invert mask and val in xe_mmio_wait32.

Matthew Brost matthew.brost at intel.com
Thu Jul 27 00:14:35 UTC 2023


On Wed, Jul 26, 2023 at 05:03:52PM -0400, Rodrigo Vivi wrote:
> The order: 'offset, mask, val' is more common in other
> drivers, and especially in i915, where any dev could copy
> a sequence and end up with unexpected behavior.
> 
> Done with coccinelle:
> @rule1@
> expression gt, reg, val, mask, timeout, out, atomic;
> @@
> - xe_mmio_wait32(gt, reg, val, mask, timeout, out, atomic)
> + xe_mmio_wait32(gt, reg, mask, val, timeout, out, atomic)
> 
> spatch -sp_file mmio.cocci *.c *.h compat-i915-headers/intel_uncore.h \
>        --in-place
> 
> Signed-off-by: Rodrigo Vivi <rodrigo.vivi at intel.com>

Reviewed-by: Matthew Brost <matthew.brost at intel.com>

Just a heads up: Daniele has a patch [1] in flight that will conflict with
this one, so I'd coordinate the merges together.

Matt

[1] https://patchwork.freedesktop.org/series/121390/

> ---
>  .../drm/xe/compat-i915-headers/intel_uncore.h |  6 ++---
>  drivers/gpu/drm/xe/xe_force_wake.c            |  2 +-
>  drivers/gpu/drm/xe/xe_gt.c                    |  3 +--
>  drivers/gpu/drm/xe/xe_gt_mcr.c                |  2 +-
>  drivers/gpu/drm/xe/xe_guc.c                   | 25 ++++++++-----------
>  drivers/gpu/drm/xe/xe_huc.c                   |  3 +--
>  drivers/gpu/drm/xe/xe_mmio.h                  |  4 +--
>  drivers/gpu/drm/xe/xe_pcode.c                 |  2 +-
>  drivers/gpu/drm/xe/xe_uc_fw.c                 |  2 +-
>  9 files changed, 21 insertions(+), 28 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
> index ec2d4885bff0..cd26ddc0f69e 100644
> --- a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
> +++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
> @@ -90,7 +90,7 @@ static inline int intel_wait_for_register(struct intel_uncore *uncore,
>  {
>  	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
>  
> -	return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, value, mask,
> +	return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value,
>  			      timeout * USEC_PER_MSEC, NULL, false);
>  }
>  
> @@ -100,7 +100,7 @@ static inline int intel_wait_for_register_fw(struct intel_uncore *uncore,
>  {
>  	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
>  
> -	return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, value, mask,
> +	return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value,
>  			      timeout * USEC_PER_MSEC, NULL, false);
>  }
>  
> @@ -111,7 +111,7 @@ __intel_wait_for_register(struct intel_uncore *uncore, i915_reg_t i915_reg,
>  {
>  	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
>  
> -	return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, value, mask,
> +	return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value,
>  			      fast_timeout_us + 1000 * slow_timeout_ms,
>  			      out_value, false);
>  }
> diff --git a/drivers/gpu/drm/xe/xe_force_wake.c b/drivers/gpu/drm/xe/xe_force_wake.c
> index 7403673d532d..aba0784b608e 100644
> --- a/drivers/gpu/drm/xe/xe_force_wake.c
> +++ b/drivers/gpu/drm/xe/xe_force_wake.c
> @@ -120,7 +120,7 @@ static void domain_sleep(struct xe_gt *gt, struct xe_force_wake_domain *domain)
>  static int domain_sleep_wait(struct xe_gt *gt,
>  			     struct xe_force_wake_domain *domain)
>  {
> -	return xe_mmio_wait32(gt, domain->reg_ack, 0, domain->val,
> +	return xe_mmio_wait32(gt, domain->reg_ack, domain->val, 0,
>  			      XE_FORCE_WAKE_ACK_TIMEOUT_MS * USEC_PER_MSEC,
>  			      NULL, false);
>  }
> diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
> index 3e32d38aeeea..bb7794cf2c1a 100644
> --- a/drivers/gpu/drm/xe/xe_gt.c
> +++ b/drivers/gpu/drm/xe/xe_gt.c
> @@ -456,8 +456,7 @@ static int do_gt_reset(struct xe_gt *gt)
>  	int err;
>  
>  	xe_mmio_write32(gt, GDRST, GRDOM_FULL);
> -	err = xe_mmio_wait32(gt, GDRST, 0, GRDOM_FULL, 5000,
> -			     NULL, false);
> +	err = xe_mmio_wait32(gt, GDRST, GRDOM_FULL, 0, 5000, NULL, false);
>  	if (err)
>  		xe_gt_err(gt, "failed to clear GEN11_GRDOM_FULL (%pe)\n",
>  			  ERR_PTR(err));
> diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c
> index c56815af0686..bb906cace7bd 100644
> --- a/drivers/gpu/drm/xe/xe_gt_mcr.c
> +++ b/drivers/gpu/drm/xe/xe_gt_mcr.c
> @@ -423,7 +423,7 @@ static void mcr_lock(struct xe_gt *gt)
>  	 * shares the same steering control register.
>  	 */
>  	if (GRAPHICS_VERx100(xe) >= 1270)
> -		ret = xe_mmio_wait32(gt, STEER_SEMAPHORE, 0, 0x1, 10, NULL,
> +		ret = xe_mmio_wait32(gt, STEER_SEMAPHORE, 0x1, 0, 10, NULL,
>  				     true);
>  
>  	drm_WARN_ON_ONCE(&xe->drm, ret == -ETIMEDOUT);
> diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
> index 8ae026838702..2530b6243661 100644
> --- a/drivers/gpu/drm/xe/xe_guc.c
> +++ b/drivers/gpu/drm/xe/xe_guc.c
> @@ -290,8 +290,7 @@ int xe_guc_reset(struct xe_guc *guc)
>  
>  	xe_mmio_write32(gt, GDRST, GRDOM_GUC);
>  
> -	ret = xe_mmio_wait32(gt, GDRST, 0, GRDOM_GUC, 5000,
> -			     &gdrst, false);
> +	ret = xe_mmio_wait32(gt, GDRST, GRDOM_GUC, 0, 5000, &gdrst, false);
>  	if (ret) {
>  		drm_err(&xe->drm, "GuC reset timed out, GEN6_GDRST=0x%8x\n",
>  			gdrst);
> @@ -386,10 +385,9 @@ static int guc_wait_ucode(struct xe_guc *guc)
>  	 * 200ms. Even at slowest clock, this should be sufficient. And
>  	 * in the working case, a larger timeout makes no difference.
>  	 */
> -	ret = xe_mmio_wait32(guc_to_gt(guc), GUC_STATUS,
> -			     FIELD_PREP(GS_UKERNEL_MASK,
> -					XE_GUC_LOAD_STATUS_READY),
> -			     GS_UKERNEL_MASK, 200000, &status, false);
> +	ret = xe_mmio_wait32(guc_to_gt(guc), GUC_STATUS, GS_UKERNEL_MASK,
> +			     FIELD_PREP(GS_UKERNEL_MASK, XE_GUC_LOAD_STATUS_READY),
> +			     200000, &status, false);
>  
>  	if (ret) {
>  		struct drm_device *drm = &xe->drm;
> @@ -639,10 +637,9 @@ int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request,
>  
>  	xe_guc_notify(guc);
>  
> -	ret = xe_mmio_wait32(gt, reply_reg,
> -			     FIELD_PREP(GUC_HXG_MSG_0_ORIGIN,
> -					GUC_HXG_ORIGIN_GUC),
> -			     GUC_HXG_MSG_0_ORIGIN, 50000, &reply, false);
> +	ret = xe_mmio_wait32(gt, reply_reg, GUC_HXG_MSG_0_ORIGIN,
> +			     FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC),
> +			     50000, &reply, false);
>  	if (ret) {
>  timeout:
>  		drm_err(&xe->drm, "mmio request %#x: no reply %#x\n",
> @@ -654,11 +651,9 @@ int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request,
>  	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
>  	    GUC_HXG_TYPE_NO_RESPONSE_BUSY) {
>  
> -		ret = xe_mmio_wait32(gt, reply_reg,
> -				     FIELD_PREP(GUC_HXG_MSG_0_TYPE,
> -						GUC_HXG_TYPE_RESPONSE_SUCCESS),
> -				     GUC_HXG_MSG_0_TYPE, 1000000, &header,
> -				     false);
> +		ret = xe_mmio_wait32(gt, reply_reg, GUC_HXG_MSG_0_TYPE,
> +				     FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_SUCCESS),
> +				     1000000, &header, false);
>  
>  		if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) !=
>  			     GUC_HXG_ORIGIN_GUC))
> diff --git a/drivers/gpu/drm/xe/xe_huc.c b/drivers/gpu/drm/xe/xe_huc.c
> index 373a65c77946..dc1708b4e94a 100644
> --- a/drivers/gpu/drm/xe/xe_huc.c
> +++ b/drivers/gpu/drm/xe/xe_huc.c
> @@ -85,8 +85,7 @@ int xe_huc_auth(struct xe_huc *huc)
>  		goto fail;
>  	}
>  
> -	ret = xe_mmio_wait32(gt, HUC_KERNEL_LOAD_INFO,
> -			     HUC_LOAD_SUCCESSFUL,
> +	ret = xe_mmio_wait32(gt, HUC_KERNEL_LOAD_INFO, HUC_LOAD_SUCCESSFUL,
>  			     HUC_LOAD_SUCCESSFUL, 100000, NULL, false);
>  	if (ret) {
>  		drm_err(&xe->drm, "HuC: Firmware not verified %d\n", ret);
> diff --git a/drivers/gpu/drm/xe/xe_mmio.h b/drivers/gpu/drm/xe/xe_mmio.h
> index 4953a9a3f1fb..d24badca8677 100644
> --- a/drivers/gpu/drm/xe/xe_mmio.h
> +++ b/drivers/gpu/drm/xe/xe_mmio.h
> @@ -107,8 +107,8 @@ static inline int xe_mmio_write32_and_verify(struct xe_gt *gt,
>  	return (reg_val & mask) != eval ? -EINVAL : 0;
>  }
>  
> -static inline int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 val,
> -				 u32 mask, u32 timeout_us, u32 *out_val,
> +static inline int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask,
> +				 u32 val, u32 timeout_us, u32 *out_val,
>  				 bool atomic)
>  {
>  	ktime_t cur = ktime_get_raw();
> diff --git a/drivers/gpu/drm/xe/xe_pcode.c b/drivers/gpu/drm/xe/xe_pcode.c
> index e3ab1d3a367f..7f1bf2297f51 100644
> --- a/drivers/gpu/drm/xe/xe_pcode.c
> +++ b/drivers/gpu/drm/xe/xe_pcode.c
> @@ -68,7 +68,7 @@ static int pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1,
>  	xe_mmio_write32(gt, PCODE_DATA1, data1 ? *data1 : 0);
>  	xe_mmio_write32(gt, PCODE_MAILBOX, PCODE_READY | mbox);
>  
> -	err = xe_mmio_wait32(gt, PCODE_MAILBOX, 0, PCODE_READY,
> +	err = xe_mmio_wait32(gt, PCODE_MAILBOX, PCODE_READY, 0,
>  			     timeout_ms * 1000, NULL, atomic);
>  	if (err)
>  		return err;
> diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c
> index def19e64f2c1..a65ddae6808c 100644
> --- a/drivers/gpu/drm/xe/xe_uc_fw.c
> +++ b/drivers/gpu/drm/xe/xe_uc_fw.c
> @@ -484,7 +484,7 @@ static int uc_fw_xfer(struct xe_uc_fw *uc_fw, u32 offset, u32 dma_flags)
>  			_MASKED_BIT_ENABLE(dma_flags | START_DMA));
>  
>  	/* Wait for DMA to finish */
> -	ret = xe_mmio_wait32(gt, DMA_CTRL, 0, START_DMA, 100000, &dma_ctrl,
> +	ret = xe_mmio_wait32(gt, DMA_CTRL, START_DMA, 0, 100000, &dma_ctrl,
>  			     false);
>  	if (ret)
>  		drm_err(&xe->drm, "DMA for %s fw failed, DMA_CTRL=%u\n",
> -- 
> 2.41.0
> 


More information about the Intel-xe mailing list