[PATCH] drm/ttm: Downgrade pr_err to pr_debug for memory allocation failures

Christian König ckoenig.leichtzumerken at gmail.com
Thu Nov 2 17:37:41 UTC 2017


On 02.11.2017 at 18:34, Michel Dänzer wrote:
> From: Michel Dänzer <michel.daenzer at amd.com>
>
> Memory allocation failure should generally be handled gracefully by
> callers. In particular, with transparent hugepage support, attempts
> to allocate huge pages can fail under memory pressure, but the callers
> fall back to allocating individual pages instead. In that case, there
> would be spurious
>
>   [TTM] Unable to get page %u
>
> error messages in dmesg.
>
> Signed-off-by: Michel Dänzer <michel.daenzer at amd.com>

Reminds me that I wanted to test this code path better, so how did you 
trigger that?
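
For reference, the fallback the commit message describes boils down to
the usual try-large-then-degrade pattern. A rough sketch, not the
actual TTM code (the function name is made up; HPAGE_PMD_ORDER and
HPAGE_PMD_NR are the real THP constants, and __GFP_NOWARN just keeps
the page allocator's own warning quiet):

	/* Sketch only: allocate huge pages while they are available,
	 * then degrade to individual order-0 pages once a high-order
	 * allocation fails. A failed huge page allocation is expected
	 * under memory pressure, hence pr_debug() rather than pr_err().
	 */
	static int alloc_pages_with_fallback(struct page **pages,
					     unsigned npages,
					     gfp_t gfp_flags)
	{
		unsigned i = 0;

	#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		while (npages - i >= HPAGE_PMD_NR) {
			struct page *p;
			unsigned j;

			p = alloc_pages(gfp_flags | __GFP_NOWARN,
					HPAGE_PMD_ORDER);
			if (!p)
				break;	/* expected, fall back below */

			for (j = 0; j < HPAGE_PMD_NR; ++j)
				pages[i++] = p++;
		}
	#endif

		for (; i < npages; ++i) {
			pages[i] = alloc_page(gfp_flags);
			if (!pages[i]) {
				pr_debug("Unable to get page %u\n", i);
				return -ENOMEM;
			}
		}

		return 0;
	}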

Anyway, the patch is Reviewed-by: Christian König <christian.koenig at amd.com>.

Thanks,
Christian.

> ---
>   drivers/gpu/drm/ttm/ttm_page_alloc.c     | 13 ++++++-------
>   drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 12 ++++++------
>   2 files changed, 12 insertions(+), 13 deletions(-)
>
> diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
> index 4d688c8d7853..316f831ad5f0 100644
> --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
> +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
> @@ -329,7 +329,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
>   		pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
>   					GFP_KERNEL);
>   	if (!pages_to_free) {
> -		pr_err("Failed to allocate memory for pool free operation\n");
> +		pr_debug("Failed to allocate memory for pool free operation\n");
>   		return 0;
>   	}
>   
> @@ -517,7 +517,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
>   	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
>   
>   	if (!caching_array) {
> -		pr_err("Unable to allocate table for new pages\n");
> +		pr_debug("Unable to allocate table for new pages\n");
>   		return -ENOMEM;
>   	}
>   
> @@ -525,7 +525,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
>   		p = alloc_pages(gfp_flags, order);
>   
>   		if (!p) {
> -			pr_err("Unable to get page %u\n", i);
> +			pr_debug("Unable to get page %u\n", i);
>   
>   			/* store already allocated pages in the pool after
>   			 * setting the caching state */
> @@ -625,7 +625,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags,
>   			++pool->nrefills;
>   			pool->npages += alloc_size;
>   		} else {
> -			pr_err("Failed to fill pool (%p)\n", pool);
> +			pr_debug("Failed to fill pool (%p)\n", pool);
>   			/* If we have any pages left put them to the pool. */
>   			list_for_each_entry(p, &new_pages, lru) {
>   				++cpages;
> @@ -885,8 +885,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
>   		while (npages) {
>   			p = alloc_page(gfp_flags);
>   			if (!p) {
> -
> -				pr_err("Unable to allocate page\n");
> +				pr_debug("Unable to allocate page\n");
>   				return -ENOMEM;
>   			}
>   
> @@ -925,7 +924,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
>   		/* If there is any pages in the list put them back to
>   		 * the pool.
>   		 */
> -		pr_err("Failed to allocate extra pages for large request\n");
> +		pr_debug("Failed to allocate extra pages for large request\n");
>   		ttm_put_pages(pages, count, flags, cstate);
>   		return r;
>   	}
> diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
> index 96ad12906621..6b2627fe9bc1 100644
> --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
> +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
> @@ -463,7 +463,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
>   					GFP_KERNEL);
>   
>   	if (!pages_to_free) {
> -		pr_err("%s: Failed to allocate memory for pool free operation\n",
> +		pr_debug("%s: Failed to allocate memory for pool free operation\n",
>   		       pool->dev_name);
>   		return 0;
>   	}
> @@ -755,7 +755,7 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
>   	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
>   
>   	if (!caching_array) {
> -		pr_err("%s: Unable to allocate table for new pages\n",
> +		pr_debug("%s: Unable to allocate table for new pages\n",
>   		       pool->dev_name);
>   		return -ENOMEM;
>   	}
> @@ -768,8 +768,8 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
>   	for (i = 0, cpages = 0; i < count; ++i) {
>   		dma_p = __ttm_dma_alloc_page(pool);
>   		if (!dma_p) {
> -			pr_err("%s: Unable to get page %u\n",
> -			       pool->dev_name, i);
> +			pr_debug("%s: Unable to get page %u\n",
> +				 pool->dev_name, i);
>   
>   			/* store already allocated pages in the pool after
>   			 * setting the caching state */
> @@ -855,8 +855,8 @@ static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
>   			struct dma_page *d_page;
>   			unsigned cpages = 0;
>   
> -			pr_err("%s: Failed to fill %s pool (r:%d)!\n",
> -			       pool->dev_name, pool->name, r);
> +			pr_debug("%s: Failed to fill %s pool (r:%d)!\n",
> +				 pool->dev_name, pool->name, r);
>   
>   			list_for_each_entry(d_page, &d_pages, page_list) {
>   				cpages++;
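
One note on the effect of the change: pr_debug() is silent by default,
so these messages disappear from dmesg entirely unless explicitly
enabled. Roughly how it is dispatched (simplified from
<linux/printk.h>, see the real header for the exact definitions):

	#if defined(CONFIG_DYNAMIC_DEBUG)
	/* Toggleable per call site at runtime via
	 * /sys/kernel/debug/dynamic_debug/control. */
	#define pr_debug(fmt, ...) \
		dynamic_pr_debug(fmt, ##__VA_ARGS__)
	#elif defined(DEBUG)
	/* Always printed when the file is built with -DDEBUG. */
	#define pr_debug(fmt, ...) \
		printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
	#else
	/* Compiled out: arguments are only type-checked. */
	#define pr_debug(fmt, ...) \
		no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
	#endif

So with CONFIG_DYNAMIC_DEBUG the messages can still be re-enabled at
runtime, per file or per call site, when actually chasing an
allocation failure, while staying out of dmesg otherwise.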



