[PATCH v2 2/7] drm/ttm/pool: Fix ttm_pool_alloc error path
Thomas Hellström
thomas.hellstrom at linux.intel.com
Wed Mar 8 08:58:11 UTC 2023
On 3/8/23 09:48, Christian König wrote:
> Am 07.03.23 um 15:46 schrieb Thomas Hellström:
>> When hitting an error, the error path forgot to unmap DMA mappings and
>> could call set_pages_wb() on already uncached pages.
>>
>> Fix this by introducing a common __ttm_pool_free() function that
>> does the right thing.
>>
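(For context, the pre-patch error loop, removed in a hunk below, freed
every page with tt->caching and never called ttm_pool_unmap():

	for (i = 0; i < num_pages; ) {
		order = ttm_pool_page_order(pool, tt->pages[i]);
		/* Wrong caching for pages whose transition never
		 * happened, and the DMA mapping, if any, is leaked. */
		ttm_pool_free_page(pool, tt->caching, order, tt->pages[i]);
		i += 1 << order;
	}

so on x86 set_pages_wb() could run on pages that are still write-back.)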
>> v2:
>> - Simplify __ttm_pool_free() (Christian König)
>>
>> Fixes: d099fc8f540a ("drm/ttm: new TT backend allocation pool v3")
>> Cc: Christian König <christian.koenig at amd.com>
>> Cc: Dave Airlie <airlied at redhat.com>
>> Cc: Huang Rui <ray.huang at amd.com>
>> Cc: dri-devel at lists.freedesktop.org
>> Signed-off-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
>> ---
>> drivers/gpu/drm/ttm/ttm_pool.c | 68 +++++++++++++++++++---------------
>> 1 file changed, 38 insertions(+), 30 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
>> index aa116a7bbae3..0b6e20613d19 100644
>> --- a/drivers/gpu/drm/ttm/ttm_pool.c
>> +++ b/drivers/gpu/drm/ttm/ttm_pool.c
>> @@ -367,6 +367,30 @@ static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
>> return 0;
>> }
>> +static void __ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt,
>
> Maybe name that ttm_pool_free_range() and add a comment on why we need
> it. Something like "/* Clean up all pages in the tt between start_page
> and end_page */".
Sure, will do.
/Thomas
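
For the record, a quick sketch of what that rename plus comment could
look like (wording illustrative only; the final version will be in the
next respin):

	/* Clean up all pages in the tt in the range [start_page, end_page) */
	static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
					enum ttm_caching caching,
					pgoff_t start_page, pgoff_t end_page)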
>
> Apart from that looks good to me.
>
> Regards,
> Christian.
>
>> + enum ttm_caching caching,
>> + pgoff_t start_page, pgoff_t end_page)
>> +{
>> + struct page **pages = tt->pages;
>> + unsigned int order;
>> + pgoff_t i, nr;
>> +
>> + for (i = start_page; i < end_page; i += nr, pages += nr) {
>> + struct ttm_pool_type *pt = NULL;
>> +
>> + order = ttm_pool_page_order(pool, *pages);
>> + nr = (1UL << order);
>> + if (tt->dma_address)
>> + ttm_pool_unmap(pool, tt->dma_address[i], nr);
>> +
>> + pt = ttm_pool_select_type(pool, caching, order);
>> + if (pt)
>> + ttm_pool_type_give(pt, *pages);
>> + else
>> + ttm_pool_free_page(pool, caching, order, *pages);
>> + }
>> +}
>> +
>> /**
>> * ttm_pool_alloc - Fill a ttm_tt object
>> *
>> @@ -382,12 +406,14 @@ static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
>> int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
>> struct ttm_operation_ctx *ctx)
>> {
>> - unsigned long num_pages = tt->num_pages;
>> + pgoff_t num_pages = tt->num_pages;
>> dma_addr_t *dma_addr = tt->dma_address;
>> struct page **caching = tt->pages;
>> struct page **pages = tt->pages;
>> + enum ttm_caching page_caching;
>> gfp_t gfp_flags = GFP_USER;
>> - unsigned int i, order;
>> + pgoff_t caching_divide;
>> + unsigned int order;
>> struct page *p;
>> int r;
>> @@ -410,6 +436,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
>> order = min_t(unsigned int, order, __fls(num_pages))) {
>> struct ttm_pool_type *pt;
>> + page_caching = tt->caching;
>> pt = ttm_pool_select_type(pool, tt->caching, order);
>> p = pt ? ttm_pool_type_take(pt) : NULL;
>> if (p) {
>> @@ -418,6 +445,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
>> if (r)
>> goto error_free_page;
>> + caching = pages;
>> do {
>> r = ttm_pool_page_allocated(pool, order, p,
>> &dma_addr,
>> @@ -426,14 +454,15 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
>> if (r)
>> goto error_free_page;
>> + caching = pages;
>> if (num_pages < (1 << order))
>> break;
>> p = ttm_pool_type_take(pt);
>> } while (p);
>> - caching = pages;
>> }
>> + page_caching = ttm_cached;
>> while (num_pages >= (1 << order) &&
>> (p = ttm_pool_alloc_page(pool, gfp_flags, order))) {
>> @@ -442,6 +471,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
>> tt->caching);
>> if (r)
>> goto error_free_page;
>> + caching = pages;
>> }
>> r = ttm_pool_page_allocated(pool, order, p, &dma_addr,
>> &num_pages, &pages);
>> @@ -468,15 +498,13 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
>> return 0;
>> error_free_page:
>> - ttm_pool_free_page(pool, tt->caching, order, p);
>> + ttm_pool_free_page(pool, page_caching, order, p);
>> error_free_all:
>> num_pages = tt->num_pages - num_pages;
>> - for (i = 0; i < num_pages; ) {
>> - order = ttm_pool_page_order(pool, tt->pages[i]);
>> - ttm_pool_free_page(pool, tt->caching, order, tt->pages[i]);
>> - i += 1 << order;
>> - }
>> + caching_divide = caching - tt->pages;
>> + __ttm_pool_free(pool, tt, tt->caching, 0, caching_divide);
>> + __ttm_pool_free(pool, tt, ttm_cached, caching_divide, num_pages);
>> return r;
>> }
>> @@ -492,27 +520,7 @@ EXPORT_SYMBOL(ttm_pool_alloc);
>> */
>> void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
>> {
>> - unsigned int i;
>> -
>> - for (i = 0; i < tt->num_pages; ) {
>> - struct page *p = tt->pages[i];
>> - unsigned int order, num_pages;
>> - struct ttm_pool_type *pt;
>> -
>> - order = ttm_pool_page_order(pool, p);
>> - num_pages = 1ULL << order;
>> - if (tt->dma_address)
>> - ttm_pool_unmap(pool, tt->dma_address[i], num_pages);
>> -
>> - pt = ttm_pool_select_type(pool, tt->caching, order);
>> - if (pt)
>> - ttm_pool_type_give(pt, tt->pages[i]);
>> - else
>> - ttm_pool_free_page(pool, tt->caching, order,
>> - tt->pages[i]);
>> -
>> - i += num_pages;
>> - }
>> + __ttm_pool_free(pool, tt, tt->caching, 0, tt->num_pages);
>> while (atomic_long_read(&allocated_pages) > page_pool_size)
>> ttm_pool_shrink();
>
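
To make the new error path concrete, a worked example with hypothetical
numbers (not from the patch): say the loop fails after 8 pages have been
allocated but only the first 5 had their caching attribute transitioned,
so "caching" points at tt->pages + 5:

	caching_divide = caching - tt->pages;	/* == 5 in this example */
	/* pages [0, 5) already carry tt->caching */
	__ttm_pool_free(pool, tt, tt->caching, 0, caching_divide);
	/* pages [5, 8) were never transitioned and are still ttm_cached */
	__ttm_pool_free(pool, tt, ttm_cached, caching_divide, num_pages);

This is also why page_caching is updated per iteration: if the failure
hits the page currently in flight, error_free_page must free it with the
caching state it actually has, not the one it was headed for.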