[PATCH] drm/ttm: optimize pool allocations a bit

Felix Kuehling felix.kuehling at amd.com
Tue Nov 8 05:57:12 UTC 2022


Am 2022-11-07 um 14:58 schrieb Christian König:
> If we got a page pool use it as much as possible.
>
> If we can't get more pages from the pool allocate as much as possible.
>
> Only if that still doesn't work reduce the order and try again.
>
> Signed-off-by: Christian König <christian.koenig at amd.com>
> ---
>   drivers/gpu/drm/ttm/ttm_pool.c | 81 ++++++++++++++++++++++++----------
>   1 file changed, 57 insertions(+), 24 deletions(-)
>
> diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
> index 21b61631f73a..cf15874cf380 100644
> --- a/drivers/gpu/drm/ttm/ttm_pool.c
> +++ b/drivers/gpu/drm/ttm/ttm_pool.c
> @@ -344,6 +344,27 @@ static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
>   	return p->private;
>   }
>   
> +/* Called when we got a page, either from a pool or newly allocated */
> +int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,

This function should be static, since it is only called from within
ttm_pool.c and is not part of the pool's public interface.


> +			    struct page *p, dma_addr_t **dma_addr,
> +			    unsigned long *num_pages, struct page ***pages)
> +{
> +	unsigned int i;
> +	int r;
> +
> +	if (*dma_addr) {
> +		r = ttm_pool_map(pool, order, p, dma_addr);
> +		if (r)
> +			return r;
> +	}
> +
> +	*num_pages -= 1 << order;
> +	for (i = 1 << order; i; --i)
> +		*((*pages)++) = p++;
> +
> +	return 0;
> +}
> +
>   /**
>    * ttm_pool_alloc - Fill a ttm_tt object
>    *
> @@ -385,45 +406,57 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
>   	for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages));
>   	     num_pages;
>   	     order = min_t(unsigned int, order, __fls(num_pages))) {
> -		bool apply_caching = false;
>   		struct ttm_pool_type *pt;
>   
>   		pt = ttm_pool_select_type(pool, tt->caching, order);
>   		p = pt ? ttm_pool_type_take(pt) : NULL;
>   		if (p) {
> -			apply_caching = true;
> -		} else {
> -			p = ttm_pool_alloc_page(pool, gfp_flags, order);
> -			if (p && PageHighMem(p))
> -				apply_caching = true;
> -		}
> -
> -		if (!p) {
> -			if (order) {
> -				--order;
> -				continue;
> -			}
> -			r = -ENOMEM;
> -			goto error_free_all;
> -		}
> -
> -		if (apply_caching) {
>   			r = ttm_pool_apply_caching(caching, pages,
>   						   tt->caching);
>   			if (r)
>   				goto error_free_page;
> -			caching = pages + (1 << order);
> +
> +			while (p) {

This looks like it should be a do-while loop: if you get here, p is
already known to be non-NULL, so there will always be at least one
iteration.

With those two nit-picks fixed, this patch is

Reviewed-by: Felix Kuehling <Felix.Kuehling at amd.com>


> +				r = ttm_pool_page_allocated(pool, order, p,
> +							    &dma_addr,
> +							    &num_pages,
> +							    &pages);
> +				if (r)
> +					goto error_free_page;
> +
> +				if (num_pages < (1 << order))
> +					break;
> +
> +				p = ttm_pool_type_take(pt);
> +			}
> +			caching = pages;
>   		}
>   
> -		if (dma_addr) {
> -			r = ttm_pool_map(pool, order, p, &dma_addr);
> +		while (num_pages >= (1 << order) &&
> +		       (p = ttm_pool_alloc_page(pool, gfp_flags, order))) {
> +
> +			if (PageHighMem(p)) {
> +				r = ttm_pool_apply_caching(caching, pages,
> +							   tt->caching);
> +				if (r)
> +					goto error_free_page;
> +			}
> +			r = ttm_pool_page_allocated(pool, order, p, &dma_addr,
> +						    &num_pages, &pages);
>   			if (r)
>   				goto error_free_page;
> +			if (PageHighMem(p))
> +				caching = pages;
>   		}
>   
> -		num_pages -= 1 << order;
> -		for (i = 1 << order; i; --i)
> -			*(pages++) = p++;
> +		if (!p) {
> +			if (order) {
> +				--order;
> +				continue;
> +			}
> +			r = -ENOMEM;
> +			goto error_free_all;
> +		}
>   	}
>   
>   	r = ttm_pool_apply_caching(caching, pages, tt->caching);


More information about the dri-devel mailing list