[PATCH v6 1/2] drm/amdgpu: add debugfs for reset registers list

Lazar, Lijo lijo.lazar at amd.com
Thu Feb 17 10:09:30 UTC 2022



On 2/17/2022 3:34 PM, Christian König wrote:
> 
> 
> Am 17.02.22 um 11:01 schrieb Lazar, Lijo:
>>
>>
>> On 2/17/2022 3:18 PM, Christian König wrote:
>>> Am 17.02.22 um 10:44 schrieb Lazar, Lijo:
>>>>
>>>>
>>>> On 2/17/2022 1:30 PM, Christian König wrote:
>>>>>
>>>>>
>>>>> Am 17.02.22 um 08:54 schrieb Somalapuram, Amaranath:
>>>>>>
>>>>>>
>>>>>> On 2/16/2022 8:26 PM, Christian König wrote:
>>>>>>> Am 16.02.22 um 14:11 schrieb Somalapuram, Amaranath:
>>>>>>>>
>>>>>>>> On 2/16/2022 3:41 PM, Christian König wrote:
>>>>>>>>
>>>>>>>>> Am 16.02.22 um 10:49 schrieb Somalapuram Amaranath:
>>>>>>>>>> List of registers populated for dump collection during the GPU 
>>>>>>>>>> reset.
>>>>>>>>>>
>>>>>>>>>> Signed-off-by: Somalapuram Amaranath 
>>>>>>>>>> <Amaranath.Somalapuram at amd.com>
>>>>>>>>>> ---
>>>>>>>>>>   drivers/gpu/drm/amd/amdgpu/amdgpu.h         |  5 ++
>>>>>>>>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 95 
>>>>>>>>>> +++++++++++++++++++++
>>>>>>>>>>   2 files changed, 100 insertions(+)
>>>>>>>>>>
>>>>>>>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
>>>>>>>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>>>>>>>>> index b85b67a88a3d..57965316873b 100644
>>>>>>>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>>>>>>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>>>>>>>>> @@ -1097,6 +1097,11 @@ struct amdgpu_device {
>>>>>>>>>>         struct amdgpu_reset_control *reset_cntl;
>>>>>>>>>>       uint32_t ip_versions[HW_ID_MAX][HWIP_MAX_INSTANCE];
>>>>>>>>>> +
>>>>>>>>>> +    /* reset dump register */
>>>>>>>>>> +    uint32_t            *reset_dump_reg_list;
>>>>>>>>>> +    int                             n_regs;
>>>>>>>>>> +    struct mutex            reset_dump_mutex;
>>>>>>>>>
>>>>>>>>> I think we should rather use the reset lock for this instead of 
>>>>>>>>> introducing just another mutex.
>>>>>>>>>
>>>>>>>>>>   };
>>>>>>>>>>     static inline struct amdgpu_device *drm_to_adev(struct 
>>>>>>>>>> drm_device *ddev)
>>>>>>>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 
>>>>>>>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
>>>>>>>>>> index 164d6a9e9fbb..faf985c7cb93 100644
>>>>>>>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
>>>>>>>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
>>>>>>>>>> @@ -1609,6 +1609,98 @@ 
>>>>>>>>>> DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
>>>>>>>>>>   DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
>>>>>>>>>>               amdgpu_debugfs_sclk_set, "%llu\n");
>>>>>>>>>>   +static ssize_t amdgpu_reset_dump_register_list_read(struct 
>>>>>>>>>> file *f,
>>>>>>>>>> +                char __user *buf, size_t size, loff_t *pos)
>>>>>>>>>> +{
>>>>>>>>>> +    struct amdgpu_device *adev = (struct amdgpu_device 
>>>>>>>>>> *)file_inode(f)->i_private;
>>>>>>>>>> +    char reg_offset[11];
>>>>>>>>>> +    int i, r, len = 0;
>>>>>>>>>> +
>>>>>>>>>> +    if (*pos)
>>>>>>>>>> +        return 0;
>>>>>>>>>> +
>>>>>>>>>> +    if (adev->n_regs == 0)
>>>>>>>>>> +        return 0;
>>>>>>>>>> +
>>>>>>>>>> +    for (i = 0; i < adev->n_regs; i++) {
>>>>>>>>>> +        sprintf(reg_offset, "0x%x ", 
>>>>>>>>>> adev->reset_dump_reg_list[i]);
>>>>>>>>>> +        r = copy_to_user(buf + len, reg_offset, 
>>>>>>>>>> strlen(reg_offset));
>>>>>>>>>> +
>>>>>>>>>> +        if (r)
>>>>>>>>>> +            return -EFAULT;
>>>>>>>>>> +
>>>>>>>>>> +        len += strlen(reg_offset);
>>>>>>>>>> +    }
>>>>>>>>>
>>>>>>>>> You need to hold the lock protecting adev->reset_dump_reg_list 
>>>>>>>>> and adev->n_regs while accessing those.
>>>>>>>>>
>>>>>>>>> (BTW: num_regs instead of n_regs would better match what we use 
>>>>>>>>> elsewhere, but it is not a must have).
>>>>>>>>>
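As an illustration only, here is a minimal sketch of one way the read side could honor that locking requirement: allocate the output buffer up front, format a snapshot of the list while holding the proposed lock, and only touch user space after dropping it. The AMDGPU_RESET_DUMP_MAX_REGS bound and the overall layout are assumptions for this sketch, not part of the posted patch.

    /* hypothetical upper bound on the list size, just for this sketch */
    #define AMDGPU_RESET_DUMP_MAX_REGS 128

    static ssize_t amdgpu_reset_dump_register_list_read(struct file *f,
                    char __user *buf, size_t size, loff_t *pos)
    {
        struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
        char *kbuf;
        ssize_t ret;
        int i, len = 0;

        /* "0x%08x " is at most 11 bytes per register, plus newline and NUL */
        kbuf = kmalloc(AMDGPU_RESET_DUMP_MAX_REGS * 11 + 2, GFP_KERNEL);
        if (!kbuf)
                return -ENOMEM;

        /* snapshot the list under the lock; no allocation or user access here */
        mutex_lock(&adev->reset_dump_mutex);
        for (i = 0; i < adev->n_regs && i < AMDGPU_RESET_DUMP_MAX_REGS; i++)
                len += scnprintf(kbuf + len, 12, "0x%08x ",
                                 adev->reset_dump_reg_list[i]);
        mutex_unlock(&adev->reset_dump_mutex);

        len += scnprintf(kbuf + len, 2, "\n");
        ret = simple_read_from_buffer(buf, size, pos, kbuf, len);
        kfree(kbuf);
        return ret;
    }

simple_read_from_buffer() also takes care of the *pos handling that the posted version does by hand.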
>>>>>>>> This is the read function for user space and it only returns the 
>>>>>>>> list of reg offsets; I did not understand correctly!
>>>>>>>>>> +
>>>>>>>>>> +    r = copy_to_user(buf + len, "\n", 1);
>>>>>>>>>> +
>>>>>>>>>> +    if (r)
>>>>>>>>>> +        return -EFAULT;
>>>>>>>>>> +
>>>>>>>>>> +    len++;
>>>>>>>>>> +    *pos += len;
>>>>>>>>>> +
>>>>>>>>>> +    return len;
>>>>>>>>>> +}
>>>>>>>>>> +
>>>>>>>>>> +static ssize_t amdgpu_reset_dump_register_list_write(struct 
>>>>>>>>>> file *f,
>>>>>>>>>> +            const char __user *buf, size_t size, loff_t *pos)
>>>>>>>>>> +{
>>>>>>>>>> +    struct amdgpu_device *adev = (struct amdgpu_device 
>>>>>>>>>> *)file_inode(f)->i_private;
>>>>>>>>>> +    char *reg_offset, *reg, reg_temp[11];
>>>>>>>>>> +    static int alloc_count;
>>>>>>>>>> +    int ret, i = 0, len = 0;
>>>>>>>>>> +
>>>>>>>>>> +    do {
>>>>>>>>>> +        reg_offset = reg_temp;
>>>>>>>>>> +        memset(reg_offset,  0, 11);
>>>>>>>>>> +        ret = copy_from_user(reg_offset, buf + len, min(11, 
>>>>>>>>>> ((int)size-len)));
>>>>>>>>>> +
>>>>>>>>>> +        if (ret)
>>>>>>>>>> +            goto failed;
>>>>>>>>>> +
>>>>>>>>>> +        reg = strsep(&reg_offset, " ");
>>>>>>>>>> +
>>>>>>>>>> +        if (alloc_count <= i) {
>>>>>>>>>> +            adev->reset_dump_reg_list = krealloc_array(
>>>>>>>>>> +                        adev->reset_dump_reg_list, 1,
>>>>>>>>>> +                        sizeof(uint32_t), GFP_KERNEL);
>>>>>>>>>> +            alloc_count++;
>>>>>>>>>> +        }
>>>>>>>>>> +
>>>>>>>>>> +        ret = kstrtouint(reg, 16, 
>>>>>>>>>> &adev->reset_dump_reg_list[i]);
>>>>>>>>>
>>>>>>>>> This here is modifying adev->reset_dump_reg_list as well and so 
>>>>>>>>> must be protected by a lock as well.
>>>>>>>>>
>>>>>>>>> The tricky part is that we can't allocate memory while holding 
>>>>>>>>> this lock (because we need it during reset as well).
>>>>>>>>>
>>>>>>>>> One solution for this is to read the register list into a local 
>>>>>>>>> array first and when that's done swap the local array with the 
>>>>>>>>> one in adev->reset_dump_reg_list while holding the lock.
>>>>>>>>>
>>>>>> Should krealloc_array be inside the lock or outside the lock? This 
>>>>>> may be a problem.
>>>>>>
>>>>>
>>>>> This *must* be outside the lock because we need to take the lock 
>>>>> during GPU reset, which must neither allocate memory nor wait for 
>>>>> locks under which memory is allocated.
>>>>>
>>>>> That's why I said you need an approach which first parses the 
>>>>> string from userspace, builds up the register list and then swaps 
>>>>> that with the existing one while holding the lock.
>>>>>
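Purely as an illustration of that parse-then-swap suggestion, a sketch of a write handler that builds the new list in local memory (all allocations happen outside the lock) and only exchanges pointers under the lock. Helper choices such as memdup_user_nul() and keeping the patch's reset_dump_mutex are assumptions, not part of the posted patch.

    static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
                    const char __user *buf, size_t size, loff_t *pos)
    {
        struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
        char *kbuf, *cur, *token;
        uint32_t *new_list = NULL, *old_list;
        int num = 0, ret;

        kbuf = memdup_user_nul(buf, size);      /* copy and NUL-terminate the input */
        if (IS_ERR(kbuf))
                return PTR_ERR(kbuf);

        cur = kbuf;
        while ((token = strsep(&cur, " \n")) != NULL) {
                uint32_t *tmp, offset;

                if (!*token)
                        continue;
                ret = kstrtou32(token, 16, &offset);
                if (ret)
                        goto err;

                tmp = krealloc_array(new_list, num + 1, sizeof(uint32_t),
                                     GFP_KERNEL);
                if (!tmp) {
                        ret = -ENOMEM;
                        goto err;
                }
                new_list = tmp;
                new_list[num++] = offset;
        }
        kfree(kbuf);

        /* swap in the finished list; nothing is allocated under the lock */
        mutex_lock(&adev->reset_dump_mutex);
        old_list = adev->reset_dump_reg_list;
        adev->reset_dump_reg_list = new_list;
        adev->n_regs = num;
        mutex_unlock(&adev->reset_dump_mutex);
        kfree(old_list);

        return size;

    err:
        kfree(new_list);
        kfree(kbuf);
        return ret;
    }

With this shape the reset path can walk the list under the same lock without ever depending on memory allocation.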
>>>>
>>>> Another approach would be to just protect the debugfs write with 
>>>> down_read(&adev->reset_sem) or the reset domain semaphore.
>>>
>>> No, exactly that doesn't work.
>>>
>>> See, the down_write(&adev->reset_sem) would then wait for this reader, 
>>> the reader is allocating memory, and allocating memory might wait for 
>>> the reset to finish => deadlock.
>>
>> I didn't get this part - how allocating memory might wait for the reset 
>> to finish.
>>
>> down_write() is called as one of the first steps during device reset, 
>> and at that point device reset hasn't started. When you say "reset to 
>> finish", do you mean device reset or something else?
> 
> I mean device reset. Holding the reset lock prevents device reset from 
> starting because of the down_write(), and core memory management might 
> wait for this before it continues allocating memory.
> 
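Spelled out, the circular dependency being described is roughly:
 1. the debugfs writer takes down_read(&adev->reset_sem) and then allocates memory;
 2. that allocation can enter direct reclaim, which may wait on work or fences that only complete once a pending GPU reset has finished;
 3. the GPU reset cannot start because its down_write(&adev->reset_sem) is waiting on the reader from step 1 => deadlock.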

Is this dependency passed to core memory management by a component like 
drm or ttm?

I thought reset was mainly internal/contained to the device level, as we 
reset for multiple reasons and the system as a whole shouldn't be affected.

Thanks,
Lijo

> Regards,
> Christian.
> 
>>
>> Thanks,
>> Lijo
>>
>>>
>>> Regards,
>>> Christian.
>>>
>>>>
>>>> Other than that, if apps are trying to read and modify the list at 
>>>> the same time, we should probably leave that to user mode since this 
>>>> is mainly a debug feature.
>>>>
>>>> Thanks,
>>>> Lijo
>>>>
>>>>> Regards,
>>>>> Christian.
>>>>>
>>>>>> Regards,
>>>>>>
>>>>>> S.Amarnath
>>>>>>
>>>>>>>>> Regards,
>>>>>>>>> Christian.
>>>>>>>>>
>>>>>>>> There are 2 situations:
>>>>>>>> 1st: when the list is created for the first time, n_regs will be 0 
>>>>>>>> and the trace event will not be triggered.
>>>>>>>> 2nd: while updating the list, n_regs is already set and 
>>>>>>>> adev->reset_dump_reg_list will already hold some offset addresses 
>>>>>>>> (hypothetically speaking, *during reset + update* the values read 
>>>>>>>> via RREG32 may mix up the old list and the new list).
>>>>>>>> It is only critical when the list is freed and n_regs is not 0.
>>>>>>>
>>>>>>> No, that won't work like this. See you *must* always hold a lock 
>>>>>>> when reading or writing the array.
>>>>>>>
>>>>>>> Otherwise it is perfectly possible that one thread sees only half 
>>>>>>> of the updates of another thread.
>>>>>>>
>>>>>>> The only alternative would be RCU, atomic replace and manual 
>>>>>>> barrier handling, but that would be complete overkill for that 
>>>>>>> feature.
>>>>>>>
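For completeness only, since it is called overkill above: the RCU variant would bundle the list and its length into one struct and replace the pointer atomically. Everything below (the struct, the helpers, an __rcu-annotated adev field) is hypothetical and not part of the posted patch.

    struct amdgpu_reset_reg_list {
        int num_regs;
        uint32_t regs[];                /* flexible array holding the offsets */
    };

    /* writer: publish a fully built list, free the old one after a grace period */
    static void publish_reg_list(struct amdgpu_device *adev,
                                 struct amdgpu_reset_reg_list *new_list)
    {
        struct amdgpu_reset_reg_list *old_list;

        old_list = rcu_replace_pointer(adev->reset_dump_reg_list, new_list, true);
        synchronize_rcu();
        kfree(old_list);
    }

    /* reader (reset path): consistent snapshot without taking a sleeping lock */
    static void dump_regs(struct amdgpu_device *adev)
    {
        struct amdgpu_reset_reg_list *list;
        int i;

        rcu_read_lock();
        list = rcu_dereference(adev->reset_dump_reg_list);
        for (i = 0; list && i < list->num_regs; i++)
                RREG32(list->regs[i]);  /* value would feed the trace event */
        rcu_read_unlock();
    }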
>>>>>>> Regards,
>>>>>>> Christian.
>>>>>>>
>>>>>>>>
>>>>>>>> Regards,
>>>>>>>> S.Amarnath
>>>>>>>>>> +
>>>>>>>>>> +        if (ret)
>>>>>>>>>> +            goto failed;
>>>>>>>>>> +
>>>>>>>>>> +        len += strlen(reg) + 1;
>>>>>>>>>> +        i++;
>>>>>>>>>> +
>>>>>>>>>> +    } while (len < size);
>>>>>>>>>> +
>>>>>>>>>> +    adev->n_regs = i;
>>>>>>>>>> +
>>>>>>>>>> +    return size;
>>>>>>>>>> +
>>>>>>>>>> +failed:
>>>>>>>>>> +    mutex_lock(&adev->reset_dump_mutex);
>>>>>>>>>> +    kfree(adev->reset_dump_reg_list);
>>>>>>>>>> +    adev->reset_dump_reg_list = NULL;
>>>>>>>>>> +    alloc_count = 0;
>>>>>>>>>> +    adev->n_regs = 0;
>>>>>>>>>> +    mutex_unlock(&adev->reset_dump_mutex);
>>>>>>>>>> +    return -EFAULT;
>>>>>>>>>> +}
>>>>>>>>>> +
>>>>>>>>>> +
>>>>>>>>>> +
>>>>>>>>>> +static const struct file_operations 
>>>>>>>>>> amdgpu_reset_dump_register_list = {
>>>>>>>>>> +    .owner = THIS_MODULE,
>>>>>>>>>> +    .read = amdgpu_reset_dump_register_list_read,
>>>>>>>>>> +    .write = amdgpu_reset_dump_register_list_write,
>>>>>>>>>> +    .llseek = default_llseek
>>>>>>>>>> +};
>>>>>>>>>> +
>>>>>>>>>>   int amdgpu_debugfs_init(struct amdgpu_device *adev)
>>>>>>>>>>   {
>>>>>>>>>>       struct dentry *root = 
>>>>>>>>>> adev_to_drm(adev)->primary->debugfs_root;
>>>>>>>>>> @@ -1618,6 +1710,7 @@ int amdgpu_debugfs_init(struct 
>>>>>>>>>> amdgpu_device *adev)
>>>>>>>>>>       if (!debugfs_initialized())
>>>>>>>>>>           return 0;
>>>>>>>>>>   +    mutex_init(&adev->reset_dump_mutex);
>>>>>>>>>>       ent = debugfs_create_file("amdgpu_preempt_ib", 0600, 
>>>>>>>>>> root, adev,
>>>>>>>>>>                     &fops_ib_preempt);
>>>>>>>>>>       if (IS_ERR(ent)) {
>>>>>>>>>> @@ -1672,6 +1765,8 @@ int amdgpu_debugfs_init(struct 
>>>>>>>>>> amdgpu_device *adev)
>>>>>>>>>> &amdgpu_debugfs_test_ib_fops);
>>>>>>>>>>       debugfs_create_file("amdgpu_vm_info", 0444, root, adev,
>>>>>>>>>> &amdgpu_debugfs_vm_info_fops);
>>>>>>>>>> +    debugfs_create_file("amdgpu_reset_dump_register_list", 0644, root, adev,
>>>>>>>>>> +                &amdgpu_reset_dump_register_list);
>>>>>>>>>>         adev->debugfs_vbios_blob.data = adev->bios;
>>>>>>>>>>       adev->debugfs_vbios_blob.size = adev->bios_size;
>>>>>>>>>
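For context on how the new file would be exercised once merged: assuming debugfs is mounted at /sys/kernel/debug, the GPU is DRM minor 0, and using arbitrary example offsets,

    echo "0x98f8 0x9804" > /sys/kernel/debug/dri/0/amdgpu_reset_dump_register_list
    cat /sys/kernel/debug/dri/0/amdgpu_reset_dump_register_list

writes a space-separated list of hex register offsets and reads back the currently configured list.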
>>>>>>>
>>>>>
>>>
> 

