[Intel-gfx] [PATCH] drm: Alloc high address for drm buddy topdown flag

Alex Deucher alexdeucher at gmail.com
Mon Jan 9 15:03:56 UTC 2023


On Mon, Jan 9, 2023 at 5:13 AM Christian König
<ckoenig.leichtzumerken at gmail.com> wrote:
>
> Am 07.01.23 um 16:15 schrieb Arunpravin Paneer Selvam:
> > As we are observing low numbers in the viewperf graphics benchmark,
> > we strictly do not allow allocations with the top-down flag enabled
> > to steal memory space from the CPU-visible region.
> >
> > The approach is: we sort the entries of each order list in
> > ascending order, then compare the last entry of each order
> > list in the freelist and return the max block.
> >
> > This patch improves the viewperf 3D benchmark scores.
> >
> > Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam at amd.com>
>
> Acked-by: Christian König <christian.koenig at amd.com>, but somebody with more insight of the drm buddy allocator should take a closer look at this.

I'm not a drm_buddy expert either, but this patch fixes a lot of
issues on both dGPUs and APUs:
Acked-by: Alex Deucher <alexander.deucher at amd.com>

>
>
> > ---
> >   drivers/gpu/drm/drm_buddy.c | 81 ++++++++++++++++++++++++-------------
> >   1 file changed, 54 insertions(+), 27 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
> > index 11bb59399471..50916b2f2fc5 100644
> > --- a/drivers/gpu/drm/drm_buddy.c
> > +++ b/drivers/gpu/drm/drm_buddy.c
> > @@ -38,6 +38,25 @@ static void drm_block_free(struct drm_buddy *mm,
> >       kmem_cache_free(slab_blocks, block);
> >   }
> >
> > +static void list_insert_sorted(struct drm_buddy *mm,
> > +                            struct drm_buddy_block *block)
> > +{
> > +     struct drm_buddy_block *node;
> > +     struct list_head *head;
> > +
> > +     head = &mm->free_list[drm_buddy_block_order(block)];
> > +     if (list_empty(head)) {
> > +             list_add(&block->link, head);
> > +             return;
> > +     }
> > +
> > +     list_for_each_entry(node, head, link)
> > +             if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node))
> > +                     break;
> > +
> > +     __list_add(&block->link, node->link.prev, &node->link);
> > +}
> > +
> >   static void mark_allocated(struct drm_buddy_block *block)
> >   {
> >       block->header &= ~DRM_BUDDY_HEADER_STATE;
> > @@ -52,8 +71,7 @@ static void mark_free(struct drm_buddy *mm,
> >       block->header &= ~DRM_BUDDY_HEADER_STATE;
> >       block->header |= DRM_BUDDY_FREE;
> >
> > -     list_add(&block->link,
> > -              &mm->free_list[drm_buddy_block_order(block)]);
> > +     list_insert_sorted(mm, block);
> >   }
> >
> >   static void mark_split(struct drm_buddy_block *block)
> > @@ -387,20 +405,26 @@ alloc_range_bias(struct drm_buddy *mm,
> >   }
> >
> >   static struct drm_buddy_block *
> > -get_maxblock(struct list_head *head)
> > +get_maxblock(struct drm_buddy *mm, unsigned int order)
> >   {
> >       struct drm_buddy_block *max_block = NULL, *node;
> > +     unsigned int i;
> >
> > -     max_block = list_first_entry_or_null(head,
> > -                                          struct drm_buddy_block,
> > -                                          link);
> > -     if (!max_block)
> > -             return NULL;
> > +     for (i = order; i <= mm->max_order; ++i) {
> > +             if (!list_empty(&mm->free_list[i])) {
> > +                     node = list_last_entry(&mm->free_list[i],
> > +                                            struct drm_buddy_block,
> > +                                            link);
> > +                     if (!max_block) {
> > +                             max_block = node;
> > +                             continue;
> > +                     }
> >
> > -     list_for_each_entry(node, head, link) {
> > -             if (drm_buddy_block_offset(node) >
> > -                 drm_buddy_block_offset(max_block))
> > -                     max_block = node;
> > +                     if (drm_buddy_block_offset(node) >
> > +                             drm_buddy_block_offset(max_block)) {
> > +                             max_block = node;
> > +                     }
> > +             }
> >       }
> >
> >       return max_block;
> > @@ -412,20 +436,23 @@ alloc_from_freelist(struct drm_buddy *mm,
> >                   unsigned long flags)
> >   {
> >       struct drm_buddy_block *block = NULL;
> > -     unsigned int i;
> > +     unsigned int tmp;
> >       int err;
> >
> > -     for (i = order; i <= mm->max_order; ++i) {
> > -             if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
> > -                     block = get_maxblock(&mm->free_list[i]);
> > -                     if (block)
> > -                             break;
> > -             } else {
> > -                     block = list_first_entry_or_null(&mm->free_list[i],
> > -                                                      struct drm_buddy_block,
> > -                                                      link);
> > -                     if (block)
> > -                             break;
> > +     if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
> > +             block = get_maxblock(mm, order);
> > +             if (block)
> > +                     /* Store the obtained block order */
> > +                     tmp = drm_buddy_block_order(block);
> > +     } else {
> > +             for (tmp = order; tmp <= mm->max_order; ++tmp) {
> > +                     if (!list_empty(&mm->free_list[tmp])) {
> > +                             block = list_last_entry(&mm->free_list[tmp],
> > +                                                     struct drm_buddy_block,
> > +                                                     link);
> > +                             if (block)
> > +                                     break;
> > +                     }
> >               }
> >       }
> >
> > @@ -434,18 +461,18 @@ alloc_from_freelist(struct drm_buddy *mm,
> >
> >       BUG_ON(!drm_buddy_block_is_free(block));
> >
> > -     while (i != order) {
> > +     while (tmp != order) {
> >               err = split_block(mm, block);
> >               if (unlikely(err))
> >                       goto err_undo;
> >
> >               block = block->right;
> > -             i--;
> > +             tmp--;
> >       }
> >       return block;
> >
> >   err_undo:
> > -     if (i != order)
> > +     if (tmp != order)
> >               __drm_buddy_free(mm, block);
> >       return ERR_PTR(err);
> >   }
>


More information about the Intel-gfx mailing list