[PATCH v11 5/5] drm/amdgpu: add drm buddy support to amdgpu
Matthew Auld
matthew.william.auld at gmail.com
Fri Jan 28 14:18:38 UTC 2022
On Thu, 27 Jan 2022 at 14:11, Arunpravin
<Arunpravin.PaneerSelvam at amd.com> wrote:
>
> - Remove drm_mm references and replace with drm buddy functionalities
> - Add res cursor support for drm buddy
>
> v2(Matthew Auld):
> - replace spinlock with mutex as we call kmem_cache_zalloc
> (..., GFP_KERNEL) in drm_buddy_alloc() function
>
> - lock drm_buddy_block_trim() function as the mark_free/mark_split
> helpers it calls are all globally visible
>
> v3(Matthew Auld):
> - remove trim method error handling as we address the failure case
> at drm_buddy_block_trim() function
>
> v4:
> - fix warnings reported by kernel test robot <lkp at intel.com>
>
> v5:
> - fix merge conflict issue
>
> v6:
> - fix warnings reported by kernel test robot <lkp at intel.com>
>
> Signed-off-by: Arunpravin <Arunpravin.PaneerSelvam at amd.com>
> ---
> drivers/gpu/drm/Kconfig | 1 +
> .../gpu/drm/amd/amdgpu/amdgpu_res_cursor.h | 97 +++++--
> drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 7 +-
> drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 259 ++++++++++--------
> 4 files changed, 231 insertions(+), 133 deletions(-)
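A note for readers on the v2 locking change above: drm_buddy_alloc_blocks()
allocates its block metadata with kmem_cache_zalloc(..., GFP_KERNEL), which
may sleep, and sleeping is not allowed while holding a spinlock. A minimal
sketch of the before/after pattern (illustrative only, not taken from the
patch):

    /* v1: invalid -- a GFP_KERNEL allocation may sleep under a spinlock */
    spin_lock(&mgr->lock);
    r = drm_buddy_alloc_blocks(mm, start, end, size,
                               min_page_size, &node->blocks, flags);
    spin_unlock(&mgr->lock);

    /* v2: valid -- a mutex holder is allowed to sleep */
    mutex_lock(&mgr->lock);
    r = drm_buddy_alloc_blocks(mm, start, end, size,
                               min_page_size, &node->blocks, flags);
    mutex_unlock(&mgr->lock);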
<snip>
>
> -/**
> - * amdgpu_vram_mgr_virt_start - update virtual start address
> - *
> - * @mem: ttm_resource to update
> - * @node: just allocated node
> - *
> - * Calculate a virtual BO start address to easily check if everything is CPU
> - * accessible.
> - */
> -static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem,
> - struct drm_mm_node *node)
> -{
> - unsigned long start;
> -
> - start = node->start + node->size;
> - if (start > mem->num_pages)
> - start -= mem->num_pages;
> - else
> - start = 0;
> - mem->start = max(mem->start, start);
> -}
> -
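(Worth keeping in mind what this removed helper did: by raising mem->start
until mem->start + mem->num_pages covers the highest node end, it made the
common res->start + res->num_pages <= visible_size check hold only when
every node sits inside the CPU-visible window. This becomes relevant in my
comment at the bottom.)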
> /**
> * amdgpu_vram_mgr_new - allocate new ranges
> *
> @@ -366,13 +357,13 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
> const struct ttm_place *place,
> struct ttm_resource **res)
> {
> - unsigned long lpfn, num_nodes, pages_per_node, pages_left, pages;
> + unsigned long lpfn, pages_per_node, pages_left, pages, n_pages;
> + u64 vis_usage = 0, mem_bytes, max_bytes, min_page_size;
> struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
> struct amdgpu_device *adev = to_amdgpu_device(mgr);
> - uint64_t vis_usage = 0, mem_bytes, max_bytes;
> - struct ttm_range_mgr_node *node;
> - struct drm_mm *mm = &mgr->mm;
> - enum drm_mm_insert_mode mode;
> + struct amdgpu_vram_mgr_node *node;
> + struct drm_buddy *mm = &mgr->mm;
> + struct drm_buddy_block *block;
> unsigned i;
> int r;
>
> @@ -391,10 +382,9 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
> goto error_sub;
> }
>
> - if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
> + if (place->flags & TTM_PL_FLAG_CONTIGUOUS)
> pages_per_node = ~0ul;
> - num_nodes = 1;
> - } else {
> + else {
> #ifdef CONFIG_TRANSPARENT_HUGEPAGE
> pages_per_node = HPAGE_PMD_NR;
> #else
> @@ -403,11 +393,9 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
> #endif
> pages_per_node = max_t(uint32_t, pages_per_node,
> tbo->page_alignment);
> - num_nodes = DIV_ROUND_UP_ULL(PFN_UP(mem_bytes), pages_per_node);
> }
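(For scale: on x86-64 with 4 KiB pages, HPAGE_PMD_NR is 2 MiB / 4 KiB = 512,
so non-contiguous requests get carved into huge-page-sized pieces unless
tbo->page_alignment is larger.)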
>
> - node = kvmalloc(struct_size(node, mm_nodes, num_nodes),
> - GFP_KERNEL | __GFP_ZERO);
> + node = kzalloc(sizeof(*node), GFP_KERNEL);
> if (!node) {
> r = -ENOMEM;
> goto error_sub;
> @@ -415,9 +403,17 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
>
> ttm_resource_init(tbo, place, &node->base);
>
> - mode = DRM_MM_INSERT_BEST;
> + INIT_LIST_HEAD(&node->blocks);
> +
> if (place->flags & TTM_PL_FLAG_TOPDOWN)
> - mode = DRM_MM_INSERT_HIGH;
> + node->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;
> +
> + if (place->fpfn || lpfn != man->size)
> + /* Allocate blocks in desired range */
> + node->flags |= DRM_BUDDY_RANGE_ALLOCATION;
> +
> + min_page_size = mgr->default_page_size;
> + BUG_ON(min_page_size < mm->chunk_size);
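(Context: mm->chunk_size, fixed when drm_buddy_init() is called, is the
smallest block the allocator can hand out, so a min_page_size below it could
never be satisfied -- hence the assertion.)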
>
> pages_left = node->base.num_pages;
>
> @@ -425,36 +421,61 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
> pages = min(pages_left, 2UL << (30 - PAGE_SHIFT));
>
> i = 0;
> - spin_lock(&mgr->lock);
> while (pages_left) {
> - uint32_t alignment = tbo->page_alignment;
> -
> if (pages >= pages_per_node)
> - alignment = pages_per_node;
> -
> - r = drm_mm_insert_node_in_range(mm, &node->mm_nodes[i], pages,
> - alignment, 0, place->fpfn,
> - lpfn, mode);
> - if (unlikely(r)) {
> - if (pages > pages_per_node) {
> - if (is_power_of_2(pages))
> - pages = pages / 2;
> - else
> - pages = rounddown_pow_of_two(pages);
> - continue;
> - }
> - goto error_free;
> + pages = pages_per_node;
> +
> + n_pages = pages;
> +
> + if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
> + n_pages = roundup_pow_of_two(n_pages);
> + min_page_size = (u64)n_pages << PAGE_SHIFT;
> +
> + if (n_pages > lpfn)
> + lpfn = n_pages;
> }
>
> - vis_usage += amdgpu_vram_mgr_vis_size(adev, &node->mm_nodes[i]);
> - amdgpu_vram_mgr_virt_start(&node->base, &node->mm_nodes[i]);
> + mutex_lock(&mgr->lock);
> + r = drm_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT,
> + (u64)lpfn << PAGE_SHIFT,
> + (u64)n_pages << PAGE_SHIFT,
> + min_page_size,
> + &node->blocks,
> + node->flags);
> + mutex_unlock(&mgr->lock);
> + if (unlikely(r))
> + goto error_free_blocks;
> +
> pages_left -= pages;
> ++i;
>
> if (pages > pages_left)
> pages = pages_left;
> }
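A hedged walk-through of this loop with hypothetical numbers (4 KiB pages):
a 6 MiB non-contiguous request starts with pages_left = 1536 and
pages_per_node = 512, so the loop issues three 512-page (2 MiB)
drm_buddy_alloc_blocks() calls, each appending its blocks to node->blocks:

    iteration 1: pages = 512, pages_left 1536 -> 1024
    iteration 2: pages = 512, pages_left 1024 -> 512
    iteration 3: pages = 512, pages_left  512 -> 0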
> - spin_unlock(&mgr->lock);
> +
> + /* Free unused pages for contiguous allocation */
> + if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
> + u64 actual_size = (u64)node->base.num_pages << PAGE_SHIFT;
> +
> + mutex_lock(&mgr->lock);
> + drm_buddy_block_trim(mm,
> + actual_size,
> + &node->blocks);
> + mutex_unlock(&mgr->lock);
> + }
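Worked example of the contiguous path (hypothetical sizes, 4 KiB pages): a
300-page contiguous request rounds n_pages up to 512 and sets min_page_size
to 512 pages = 2 MiB, which forces the buddy to come back with a single
2 MiB block; drm_buddy_block_trim() then cuts that block back to the
requested size (actual_size = 300 pages' worth of bytes), returning the
surplus 212 pages to the allocator.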
> +
> + list_for_each_entry(block, &node->blocks, link)
> + vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
> +
> + block = list_first_entry_or_null(&node->blocks,
> + struct drm_buddy_block,
> + link);
> + if (!block) {
> + r = -ENOENT;
> + goto error_free_res;
> + }
> +
> + node->base.start = amdgpu_node_start(block) >> PAGE_SHIFT;
Hmm, does this work? Various places check that res->start +
res->num_pages <= visible_size, and IIUC that check should only pass
when the entire object is placed in the mappable portion; i915 does
something similar. ttm_resource_compat() also looks like it relies on
this, e.g. when moving something from non-mappable -> mappable in
amdgpu_bo_fault_reserve_notify()?
Perhaps something like:
    if (vis_usage == num_pages)
            base.start = 0;
    else
            base.start = visible_size;
Otherwise I guess just keep amdgpu_vram_mgr_virt_start()?
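Spelling that out with units made explicit (a rough sketch, untested;
vis_usage is accumulated in bytes while res->start/num_pages are in pages,
and I'm assuming the usual adev->gmc.visible_vram_size field):

    if (vis_usage == (u64)node->base.num_pages << PAGE_SHIFT)
            /* every block landed in the CPU-visible window */
            node->base.start = 0;
    else
            /* make start + num_pages <= visible_size never pass */
            node->base.start = adev->gmc.visible_vram_size >> PAGE_SHIFT;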