[PATCH] drm/xe/mmio: Use single logic for waiting functions
Ghimiray, Himal Prasad
himal.prasad.ghimiray at intel.com
Tue Jul 23 15:44:15 UTC 2024
On 23-07-2024 17:31, Gustavo Sousa wrote:
> The implementations for xe_mmio_wait32() and xe_mmio_wait32_not() are
> almost identical. Let us avoid duplication of logic by having them
seems typo ^
> call a common __xe_mmio_wait32() function.
>
> Signed-off-by: Gustavo Sousa <gustavo.sousa at intel.com>
> ---
> drivers/gpu/drm/xe/xe_mmio.c | 103 +++++++++++++----------------------
> 1 file changed, 39 insertions(+), 64 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
> index ea3c37d3e13f..955ca6940161 100644
> --- a/drivers/gpu/drm/xe/xe_mmio.c
> +++ b/drivers/gpu/drm/xe/xe_mmio.c
> @@ -278,37 +278,24 @@ u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg)
> return (u64)udw << 32 | ldw;
> }
>
> -/**
> - * xe_mmio_wait32() - Wait for a register to match the desired masked value
> - * @gt: MMIO target GT
> - * @reg: register to read value from
> - * @mask: mask to be applied to the value read from the register
> - * @val: desired value after applying the mask
> - * @timeout_us: time out after this period of time. Wait logic tries to be
> - * smart, applying an exponential backoff until @timeout_us is reached.
> - * @out_val: if not NULL, points where to store the last unmasked value
> - * @atomic: needs to be true if calling from an atomic context
> - *
> - * This function polls for the desired masked value and returns zero on success
> - * or -ETIMEDOUT if timed out.
> - *
> - * Note that @timeout_us represents the minimum amount of time to wait before
> - * giving up. The actual time taken by this function can be a little more than
> - * @timeout_us for different reasons, specially in non-atomic contexts. Thus,
> - * it is possible that this function succeeds even after @timeout_us has passed.
> - */
> -int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
> - u32 *out_val, bool atomic)
> +static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
> + u32 *out_val, bool atomic, bool expect_match)
> {
> ktime_t cur = ktime_get_raw();
> const ktime_t end = ktime_add_us(cur, timeout_us);
> int ret = -ETIMEDOUT;
> s64 wait = 10;
> u32 read;
> + bool check;
>
> for (;;) {
> read = xe_mmio_read32(gt, reg);
> - if ((read & mask) == val) {
> +
> + check = (read & mask) == val;
> + if (!expect_match)
> + check = !check;
> +
> + if (check) {
> ret = 0;
> break;
> }
> @@ -329,7 +316,12 @@ int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 t
>
> if (ret != 0) {
> read = xe_mmio_read32(gt, reg);
> - if ((read & mask) == val)
> +
> + check = (read & mask) == val;
> + if (!expect_match)
> + check = !check;
> +
> + if (check)
> ret = 0;
> }
>
> @@ -340,62 +332,45 @@ int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 t
> }
>
> /**
> - * xe_mmio_wait32_not() - Wait for a register to return anything other than the given masked value
> + * xe_mmio_wait32() - Wait for a register to match the desired masked value
> * @gt: MMIO target GT
> * @reg: register to read value from
> * @mask: mask to be applied to the value read from the register
> - * @val: value to match after applying the mask
> + * @val: desired value after applying the mask
> * @timeout_us: time out after this period of time. Wait logic tries to be
> * smart, applying an exponential backoff until @timeout_us is reached.
> * @out_val: if not NULL, points where to store the last unmasked value
> * @atomic: needs to be true if calling from an atomic context
> *
> - * This function polls for a masked value to change from a given value and
> - * returns zero on success or -ETIMEDOUT if timed out.
> + * This function polls for the desired masked value and returns zero on success
> + * or -ETIMEDOUT if timed out.
> *
> * Note that @timeout_us represents the minimum amount of time to wait before
> * giving up. The actual time taken by this function can be a little more than
> * @timeout_us for different reasons, specially in non-atomic contexts. Thus,
> * it is possible that this function succeeds even after @timeout_us has passed.
> */
> +int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
> + u32 *out_val, bool atomic)
> +{
> + return __xe_mmio_wait32(gt, reg, mask, val, timeout_us, out_val, atomic, true);
> +}
> +
> +/**
> + * xe_mmio_wait32_not() - Wait for a register to return anything other than the given masked value
> + * @gt: MMIO target GT
> + * @reg: register to read value from
> + * @mask: mask to be applied to the value read from the register
> + * @val: value not to be matched after applying the mask
value not to be matched
> + * @timeout_us: time out after this period of time
> + * @out_val: if not NULL, points where to store the last unmasked value
> + * @atomic: needs to be true if calling from an atomic context
> + *
> + * This function works exactly like xe_mmio_wait32() with the exception that
> + * @val is expected not to be matched.
> + */
> int xe_mmio_wait32_not(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
> u32 *out_val, bool atomic)
> {
> - ktime_t cur = ktime_get_raw();
> - const ktime_t end = ktime_add_us(cur, timeout_us);
> - int ret = -ETIMEDOUT;
> - s64 wait = 10;
> - u32 read;
> -
> - for (;;) {
> - read = xe_mmio_read32(gt, reg);
> - if ((read & mask) != val) {
> - ret = 0;
> - break;
> - }
> -
> - cur = ktime_get_raw();
> - if (!ktime_before(cur, end))
> - break;
> -
> - if (ktime_after(ktime_add_us(cur, wait), end))
> - wait = ktime_us_delta(end, cur);
> -
> - if (atomic)
> - udelay(wait);
> - else
> - usleep_range(wait, wait << 1);
> - wait <<= 1;
> - }
> -
> - if (ret != 0) {
> - read = xe_mmio_read32(gt, reg);
> - if ((read & mask) != val)
> - ret = 0;
> - }
> -
> - if (out_val)
> - *out_val = read;
> -
> - return ret;
> + return __xe_mmio_wait32(gt, reg, mask, val, timeout_us, out_val, atomic, false);
With nits addressed. Patch LGTM.
Reviewed-by: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
> }
More information about the Intel-xe
mailing list