[PATCH v2] drm/amdkfd: handle IH ring1 overflow

Christian König christian.koenig at amd.com
Mon Nov 22 11:34:10 UTC 2021


On 21.11.21 at 03:13, philip yang wrote:
>
> On 2021-11-19 5:59 p.m., Felix Kuehling wrote:
>
>> On 2021-11-19 3:22 p.m., Philip Yang wrote:
>>> IH ring1 is used to process GPU retry faults. Overflow is enabled so
>>> that we can keep receiving other interrupts while handling retry faults
>>> to recover the range, but no overflow flag is set when wptr passes rptr.
>>> Use the timestamps of rptr and wptr to handle overflow and to drain
>>> retry faults.
>>>
>>> Add helper function amdgpu_ih_decode_iv_ts to get the 48-bit timestamp
>>> from an IV entry. To drain retry faults, check that the timestamp of
>>> rptr is larger than the timestamp of (checkpoint_wptr - 32).
>>>
>>> Add function amdgpu_ih_process1 to process IH ring1 until the timestamp
>>> of rptr is larger than the timestamp of (rptr + 32).
>>>
>>> Signed-off-by: Philip Yang <Philip.Yang at amd.com>
>>> ---
>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c  | 98 +++++++++++++++++++------
>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h  |  6 +-
>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c |  2 +-
>>>   drivers/gpu/drm/amd/amdkfd/kfd_svm.c    |  2 +-
>>>   4 files changed, 80 insertions(+), 28 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
>>> index f3d62e196901..ad12f9d5d86a 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
>>> @@ -165,51 +165,41 @@ void amdgpu_ih_ring_write(struct amdgpu_ih_ring *ih, const uint32_t *iv,
>>>   }
>>>   /* Waiter helper that checks current rptr matches or passes checkpoint wptr */
>>> -static bool amdgpu_ih_has_checkpoint_processed(struct amdgpu_device *adev,
>>> +static bool amdgpu_ih_has_checkpoint_processed_ts(struct amdgpu_device *adev,
>>>                       struct amdgpu_ih_ring *ih,
>>> -                    uint32_t checkpoint_wptr,
>>> -                    uint32_t *prev_rptr)
>>> +                    uint64_t checkpoint_ts)
>>>   {
>>> -    uint32_t cur_rptr = ih->rptr | (*prev_rptr & ~ih->ptr_mask);
>>> -
>>> -    /* rptr has wrapped. */
>>> -    if (cur_rptr < *prev_rptr)
>>> -        cur_rptr += ih->ptr_mask + 1;
>>> -    *prev_rptr = cur_rptr;
>>> -
>>> -    /* check ring is empty to workaround missing wptr overflow flag */
>>> -    return cur_rptr >= checkpoint_wptr ||
>>> -           (cur_rptr & ih->ptr_mask) == amdgpu_ih_get_wptr(adev, ih);
>>> +    /* After wakeup, ih->rptr is the entry which is being processed, check
>>> +     * the timestamp of previous entry which is processed.
>>> +     */
>>> +    return checkpoint_ts <= amdgpu_ih_decode_iv_ts(ih, ih->rptr - 32);
>>
>> This assumes an IV size of 32 bytes, which is not true for all ASICs.
>> On GFXv8 and older GPUs it's only 16. OTOH, those chips don't have a
>> ring1 and may not have a timestamp in the IV at all.
>>
> Ring1 processing is enabled for vega10/20 and navi10, not for older GPUs,
> as it is scheduled from the self-irq.

I think we should try to avoid accessing the IH ring buffer outside of 
the handler anyway.

So instead of that here we should probably just always update the last 
decoded timestamp after a call to amdgpu_ih_decode_iv_helper().
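
Very roughly, what I have in mind is something like the following; just a
sketch, the field name is an example and not a final interface:

    /* in struct amdgpu_ih_ring: timestamp of the last decoded IV */
    uint64_t processed_timestamp;

    /* at the end of amdgpu_ih_decode_iv_helper(), after the entry has been
     * filled in and before rptr is advanced, remember the timestamp:
     */
    ih->processed_timestamp = entry->timestamp;

    /* the waiter then only looks at the cached value instead of reading
     * the ring buffer again (the comparison still needs the wrap around
     * handling discussed below):
     */
    static bool amdgpu_ih_has_checkpoint_processed_ts(struct amdgpu_ih_ring *ih,
                                                      uint64_t checkpoint_ts)
    {
            return checkpoint_ts <= READ_ONCE(ih->processed_timestamp);
    }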

>> And I think you need to be better at handling when the time stamps 
>> wrap. Keep in mind that the number of valid bits may vary between ASICs.
>>
> Yes, the 48-bit timestamp will wrap around after 1 year. I will add a
> function amdgpu_ih_ts_after to compare timestamps with the wrap-around
> case handled.

Yeah, I think we need to handle that gracefully.
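
For the comparison itself something along these lines should work; again
only a sketch, assuming the timestamp occupies the low 48 bits of the u64:

    /* Returns true if t2 is after t1, with 48 bit wrap around handled.
     * Shifting both values into the upper 48 bits of a signed 64 bit
     * integer lets the signed subtraction do the modulo comparison.
     */
    static inline bool amdgpu_ih_ts_after(uint64_t t1, uint64_t t2)
    {
            return (int64_t)(t2 << 16) - (int64_t)(t1 << 16) > 0;
    }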

Regards,
Christian.

>>
>>>   }
>>>     /**
>>> - * amdgpu_ih_wait_on_checkpoint_process - wait to process IVs up to checkpoint
>>> + * amdgpu_ih_wait_on_checkpoint_process_ts - wait to process IVs up to checkpoint
>>>    *
>>>    * @adev: amdgpu_device pointer
>>>    * @ih: ih ring to process
>>>    *
>>>    * Used to ensure ring has processed IVs up to the checkpoint write pointer.
>>>    */
>>> -int amdgpu_ih_wait_on_checkpoint_process(struct amdgpu_device *adev,
>>> +int amdgpu_ih_wait_on_checkpoint_process_ts(struct amdgpu_device *adev,
>>>                       struct amdgpu_ih_ring *ih)
>>
>> If this function is only meant to work on ring1 now, we should 
>> probably ensure that by checking that ih is really ring1.
>>
> Add dev_WARN_ONCE(adev->dev, ih != &adev->irq.ih1, "not ring1") to
> ensure this is only used for ring1.
>
>> Do we need to keep the old solution for Vega20, which doesn't reroute 
>> interrupts to ring1?
>>
> Vega20 is not changed, because Vega20 delegates retry faults to ih_soft,
> and wptr overflow is not enabled on ih_soft, so there is no stale retry
> fault issue; the old interrupt handler is kept for ih_soft.
>>
>>>   {
>>> -    uint32_t checkpoint_wptr, rptr;
>>> +    uint32_t checkpoint_wptr;
>>> +    uint64_t checkpoint_ts;
>>>         if (!ih->enabled || adev->shutdown)
>>>           return -ENODEV;
>>>         checkpoint_wptr = amdgpu_ih_get_wptr(adev, ih);
>>> -    /* Order wptr with rptr. */
>>> +    /* Order wptr with ring data. */
>>>       rmb();
>>> -    rptr = READ_ONCE(ih->rptr);
>>> -
>>> -    /* wptr has wrapped. */
>>> -    if (rptr > checkpoint_wptr)
>>> -        checkpoint_wptr += ih->ptr_mask + 1;
>>> +    checkpoint_ts = amdgpu_ih_decode_iv_ts(ih, checkpoint_wptr - 32);
>>
>> Same as above.
> done in v3
>>
>>
>>>         return wait_event_interruptible(ih->wait_process,
>>> -                amdgpu_ih_has_checkpoint_processed(adev, ih,
>>> -                        checkpoint_wptr, &rptr));
>>> +                amdgpu_ih_has_checkpoint_processed_ts(adev, ih,
>>> +                        checkpoint_ts));
>>>   }
>>>     /**
>>> @@ -253,6 +243,56 @@ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
>>>       return IRQ_HANDLED;
>>>   }
>>>   +/**
>>> + * amdgpu_ih_process1 - interrupt handler work for IH ring1
>>> + *
>>> + * @adev: amdgpu_device pointer
>>> + * @ih: ih ring to process
>>> + *
>>> + * Interrupt handler of IH ring1, walk the IH ring1.
>>> + * Returns irq process return code.
>>> + */
>>> +int amdgpu_ih_process1(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
>>> +{
>>> +    uint64_t ts, ts_next;
>>> +    unsigned int count;
>>> +    u32 wptr;
>>> +
>>> +    if (!ih->enabled || adev->shutdown)
>>> +        return IRQ_NONE;
>>> +
>>> +    wptr = amdgpu_ih_get_wptr(adev, ih);
>>> +    if (ih->rptr == wptr)
>>> +        return 0;
>>> +
>>> +restart_ih:
>>> +    count = AMDGPU_IH_MAX_NUM_IVS;
>>> +
>>> +    ts = amdgpu_ih_decode_iv_ts(ih, ih->rptr);
>>> +    ts_next = amdgpu_ih_decode_iv_ts(ih, ih->rptr + 32);
>>
>> Same as above.
>>
> Done in v3
>>
>>> +    while (ts < ts_next && --count) {
>>> +        amdgpu_irq_dispatch(adev, ih);
>>> +        ih->rptr &= ih->ptr_mask;
>>> +        ts = ts_next;
>>> +        ts_next = amdgpu_ih_decode_iv_ts(ih, ih->rptr + 32);
>>> +    }
>>> +    /*
>>> +     * Process the last timestamp updated entry or one more entry
>>> +     * if count = 0, ts is timestamp of the entry.
>>> +     */
>>> +    amdgpu_irq_dispatch(adev, ih);
>>> +    amdgpu_ih_set_rptr(adev, ih);
>>> +    wake_up_all(&ih->wait_process);
>>> +
>>> +    wptr = amdgpu_ih_get_wptr(adev, ih);
>>> +    /* Order reading of wptr vs. reading of IH ring data */
>>> +    rmb();
>>> +    if (ts < amdgpu_ih_decode_iv_ts(ih, wptr - 32))
>>> +        goto restart_ih;
>>> +
>>> +    return IRQ_HANDLED;
>>> +}
>>> +
>>>   /**
>>>    * amdgpu_ih_decode_iv_helper - decode an interrupt vector
>>>    *
>>> @@ -298,3 +338,13 @@ void amdgpu_ih_decode_iv_helper(struct amdgpu_device *adev,
>>>       /* wptr/rptr are in bytes! */
>>>       ih->rptr += 32;
>>>   }
>>> +
>>> +uint64_t amdgpu_ih_decode_iv_ts(struct amdgpu_ih_ring *ih, u32 rptr)
>>
>> This function needs to be in IP-version-specific code. Maybe add an 
>> offset parameter, that way you can handle different IV sizes in 
>> different ASIC generations.
>>
> Add a decode_iv_ts function interface to amdgpu_ih_funcs; it will be
> implemented per ASIC to handle different IV sizes and timestamp offsets.
> The vega* and navi* ASICs will use this as a common helper function.
>
> Regards,
>
> Philip
>
>>
>>> +{
>>> +    uint32_t index = (rptr & ih->ptr_mask) >> 2;
>>> +    uint32_t dw1, dw2;
>>> +
>>> +    dw1 = ih->ring[index + 1];
>>> +    dw2 = ih->ring[index + 2];
>>> +    return dw1 | ((u64)(dw2 & 0xffff) << 32);
>>> +}
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
>>> index 0649b59830a5..15e8fe0e5e40 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
>>> @@ -89,10 +89,12 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
>>>   void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
>>>   void amdgpu_ih_ring_write(struct amdgpu_ih_ring *ih, const uint32_t *iv,
>>>                 unsigned int num_dw);
>>> -int amdgpu_ih_wait_on_checkpoint_process(struct amdgpu_device *adev,
>>> -                    struct amdgpu_ih_ring *ih);
>>> +int amdgpu_ih_wait_on_checkpoint_process_ts(struct amdgpu_device *adev,
>>> +                        struct amdgpu_ih_ring *ih);
>>>   int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
>>> +int amdgpu_ih_process1(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
>>>   void amdgpu_ih_decode_iv_helper(struct amdgpu_device *adev,
>>>                   struct amdgpu_ih_ring *ih,
>>>                   struct amdgpu_iv_entry *entry);
>>> +uint64_t amdgpu_ih_decode_iv_ts(struct amdgpu_ih_ring *ih, u32 rptr);
>>>   #endif
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
>>> index e9023687dc9a..891486cca94b 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
>>> @@ -224,7 +224,7 @@ static void amdgpu_irq_handle_ih1(struct work_struct *work)
>>>       struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
>>>                             irq.ih1_work);
>>>   -    amdgpu_ih_process(adev, &adev->irq.ih1);
>>> +    amdgpu_ih_process1(adev, &adev->irq.ih1);
>>>   }
>>>     /**
>>> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
>>> index 88360f23eb61..9e566ec54cf5 100644
>>> --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
>>> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
>>> @@ -1968,7 +1968,7 @@ static void svm_range_drain_retry_fault(struct svm_range_list *svms)
>>>             pr_debug("drain retry fault gpu %d svms %p\n", i, svms);
>>> -        amdgpu_ih_wait_on_checkpoint_process(pdd->dev->adev,
>>> +        amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
>>>                                               &pdd->dev->adev->irq.ih1);
>>>           pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, 
>>> svms);
>>>       }
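
Coming back to the IV size / decode_iv_ts question above: with a
decode_iv_ts callback in amdgpu_ih_funcs the common helper could take an
offset in IVs, roughly like this (sketch only; the names and the 32 byte
IV size for vega/navi are assumptions, not a final interface):

    /* Decode the timestamp of the IV at rptr + offset IVs, for ASICs with
     * 32 byte IVs and the 48 bit timestamp in dwords 1 and 2.
     */
    uint64_t amdgpu_ih_decode_iv_ts_helper(struct amdgpu_ih_ring *ih, u32 rptr,
                                           signed int offset)
    {
            uint32_t iv_size = 32;
            uint32_t index, dw1, dw2;

            rptr += iv_size * offset;
            index = (rptr & ih->ptr_mask) >> 2;

            dw1 = le32_to_cpu(ih->ring[index + 1]);
            dw2 = le32_to_cpu(ih->ring[index + 2]);
            return dw1 | ((u64)(dw2 & 0xffff) << 32);
    }

    /* hooked up per IP, e.g. for vega10, other callbacks unchanged: */
    static const struct amdgpu_ih_funcs vega10_ih_funcs = {
            .process = vega10_ih_irq_process,
            .decode_iv = amdgpu_ih_decode_iv_helper,
            .decode_iv_ts = amdgpu_ih_decode_iv_ts_helper,
            .set_rptr = vega10_ih_set_rptr
    };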


