[PATCH v5 2/3] drm/amdgpu: Reset the clear flag in buddy during resume

Arunpravin Paneer Selvam arunpravin.paneerselvam at amd.com
Wed Jul 16 11:00:52 UTC 2025


Hi Christian,

On 7/16/2025 4:17 PM, Christian König wrote:
> On 16.07.25 12:28, Arunpravin Paneer Selvam wrote:
>> Hi Dave,
>>
>> I am trying to push this series into drm-misc-fixes, but I get the error below when running dim push-branch drm-misc-fixes.
>>
>> dim:ERROR:e24c180b4300("drm/amdgpu:Reset the clear flag in buddy during resume"): Mandatory Maintainer Acked-by missing., aborting
>>
>> Looks like a maintainer's Acked-by is required to push the patches into drm-misc-fixes?
> Strange, it should work with my Acked-by. Let me give it a try.

Sure, thanks!

Regards,

Arun.

>
> Regards,
> Christian.
>
>> Regards,
>>
>> Arun.
>>
>> On 7/16/2025 1:21 PM, Arunpravin Paneer Selvam wrote:
>>> - Added a handler in the DRM buddy manager to reset the cleared
>>>     flag for the blocks in the freelist.
>>>
>>> - This is necessary because, upon resuming, the VRAM becomes
>>>     cluttered with BIOS data, yet the VRAM backend manager
>>>     believes that everything has been cleared.
>>>
>>> v2:
>>>     - Add lock before accessing drm_buddy_clear_reset_blocks(). (Matthew Auld)
>>>     - Force merge the two dirty blocks. (Matthew Auld)
>>>     - Add a new unit test case for this issue. (Matthew Auld)
>>>     - Having this function be able to flip the state either way would be
>>>       good. (Matthew Brost)
>>>
>>> v3 (Matthew Auld):
>>>     - Do the merge step first to avoid the need for an extra reset flag.
>>>
>>> Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam at amd.com>
>>> Suggested-by: Christian König <christian.koenig at amd.com>
>>> Acked-by: Christian König <christian.koenig at amd.com>
>>> Reviewed-by: Matthew Auld <matthew.auld at intel.com>
>>> Cc: stable at vger.kernel.org
>>> Fixes: a68c7eaa7a8f ("drm/amdgpu: Enable clear page functionality")
>>> Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/3812
>>> ---
>>>    drivers/gpu/drm/amd/amdgpu/amdgpu_device.c   |  2 +
>>>    drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h      |  1 +
>>>    drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 17 ++++++++
>>>    drivers/gpu/drm/drm_buddy.c                  | 43 ++++++++++++++++++++
>>>    include/drm/drm_buddy.h                      |  2 +
>>>    5 files changed, 65 insertions(+)
>>>
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>> index 723ab95d8c48..ac92220f9fc3 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>> @@ -5327,6 +5327,8 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
>>>            dev->dev->power.disable_depth--;
>>>    #endif
>>>        }
>>> +
>>> +    amdgpu_vram_mgr_clear_reset_blocks(adev);
>>>        adev->in_suspend = false;
>>>
>>>        if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
>>> index 215c198e4aff..2309df3f68a9 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
>>> @@ -155,6 +155,7 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
>>>                      uint64_t start, uint64_t size);
>>>    int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
>>>                          uint64_t start);
>>> +void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev);
>>>
>>>    bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
>>>                    struct ttm_resource *res);
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
>>> index abdc52b0895a..07c936e90d8e 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
>>> @@ -782,6 +782,23 @@ uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr)
>>>        return atomic64_read(&mgr->vis_usage);
>>>    }
>>>
>>> +/**
>>> + * amdgpu_vram_mgr_clear_reset_blocks - reset clear blocks
>>> + *
>>> + * @adev: amdgpu device pointer
>>> + *
>>> + * Reset the cleared drm buddy blocks.
>>> + */
>>> +void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev)
>>> +{
>>> +    struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
>>> +    struct drm_buddy *mm = &mgr->mm;
>>> +
>>> +    mutex_lock(&mgr->lock);
>>> +    drm_buddy_reset_clear(mm, false);
>>> +    mutex_unlock(&mgr->lock);
>>> +}
>>> +
>>>    /**
>>>     * amdgpu_vram_mgr_intersects - test each drm buddy block for intersection
>>>     *
>>> diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
>>> index a1e652b7631d..a94061f373de 100644
>>> --- a/drivers/gpu/drm/drm_buddy.c
>>> +++ b/drivers/gpu/drm/drm_buddy.c
>>> @@ -405,6 +405,49 @@ drm_get_buddy(struct drm_buddy_block *block)
>>>    }
>>>    EXPORT_SYMBOL(drm_get_buddy);
>>>
>>> +/**
>>> + * drm_buddy_reset_clear - reset blocks clear state
>>> + *
>>> + * @mm: DRM buddy manager
>>> + * @is_clear: blocks clear state
>>> + *
>>> + * Reset the clear state based on @is_clear value for each block
>>> + * in the freelist.
>>> + */
>>> +void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear)
>>> +{
>>> +    u64 root_size, size, start;
>>> +    unsigned int order;
>>> +    int i;
>>> +
>>> +    size = mm->size;
>>> +    for (i = 0; i < mm->n_roots; ++i) {
>>> +        order = ilog2(size) - ilog2(mm->chunk_size);
>>> +        start = drm_buddy_block_offset(mm->roots[i]);
>>> +        __force_merge(mm, start, start + size, order);
>>> +
>>> +        root_size = mm->chunk_size << order;
>>> +        size -= root_size;
>>> +    }
>>> +
>>> +    for (i = 0; i <= mm->max_order; ++i) {
>>> +        struct drm_buddy_block *block;
>>> +
>>> +        list_for_each_entry_reverse(block, &mm->free_list[i], link) {
>>> +            if (is_clear != drm_buddy_block_is_clear(block)) {
>>> +                if (is_clear) {
>>> +                    mark_cleared(block);
>>> +                    mm->clear_avail += drm_buddy_block_size(mm, block);
>>> +                } else {
>>> +                    clear_reset(block);
>>> +                    mm->clear_avail -= drm_buddy_block_size(mm, block);
>>> +                }
>>> +            }
>>> +        }
>>> +    }
>>> +}
>>> +EXPORT_SYMBOL(drm_buddy_reset_clear);
>>> +
>>>    /**
>>>     * drm_buddy_free_block - free a block
>>>     *
>>> diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h
>>> index 9689a7c5dd36..513837632b7d 100644
>>> --- a/include/drm/drm_buddy.h
>>> +++ b/include/drm/drm_buddy.h
>>> @@ -160,6 +160,8 @@ int drm_buddy_block_trim(struct drm_buddy *mm,
>>>                 u64 new_size,
>>>                 struct list_head *blocks);
>>>
>>> +void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear);
>>> +
>>>    void drm_buddy_free_block(struct drm_buddy *mm, struct drm_buddy_block *block);
>>>
>>>    void drm_buddy_free_list(struct drm_buddy *mm,
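
For anyone who wants to poke at the new export in isolation, here is a rough KUnit-style sketch of the behaviour the fix relies on. It is an illustration only; the test name and sizes are made up, and the actual unit test mentioned in the v2 notes ships elsewhere in the series rather than in this patch:

#include <kunit/test.h>
#include <linux/sizes.h>

#include <drm/drm_buddy.h>

/*
 * Sketch: free a cleared allocation so the freelist is marked clear,
 * then verify drm_buddy_reset_clear(mm, false) drops clear_avail back
 * to zero, which is what amdgpu relies on at resume time.
 */
static void drm_test_buddy_reset_clear_sketch(struct kunit *test)
{
	struct drm_buddy mm;
	LIST_HEAD(allocated);
	const u64 size = SZ_4M, chunk = SZ_4K;

	KUNIT_ASSERT_FALSE(test, drm_buddy_init(&mm, size, chunk));

	/* Allocate half of the space and hand it back as cleared memory. */
	KUNIT_ASSERT_FALSE(test,
			   drm_buddy_alloc_blocks(&mm, 0, size, size / 2,
						  chunk, &allocated, 0));
	drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);
	KUNIT_EXPECT_EQ(test, mm.clear_avail, size / 2);

	/* Simulate resume: everything in the freelist is dirty again. */
	drm_buddy_reset_clear(&mm, false);
	KUNIT_EXPECT_EQ(test, mm.clear_avail, 0ULL);

	drm_buddy_fini(&mm);
}

The ordering inside drm_buddy_reset_clear() matters for this to hold: as noted in the v3 changelog, the force-merge pass runs before the freelist walk, so buddies whose clear state differs are coalesced first and no extra per-block reset flag is needed.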


More information about the dri-devel mailing list