[PATCH 06/17] ttm/pool: make pool shrinker NUMA aware
Christian König
christian.koenig at amd.com
Mon Jun 30 10:15:27 UTC 2025
On 30.06.25 06:49, Dave Airlie wrote:
> From: Dave Airlie <airlied at redhat.com>
>
> This enables NUMA awareness for the shrinker on the
> TTM pools.
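To spell out what NUMA awareness means for a shrinker: with
SHRINKER_NUMA_AWARE set, the core MM invokes the count/scan callbacks
once per node and passes the target node in sc->nid, and the callbacks
are expected to count and free only objects living on that node.
Roughly like this (a minimal sketch with made-up names — pool_lru,
pool_free_from_node() — not the TTM code):

#include <linux/list_lru.h>
#include <linux/shrinker.h>

static struct list_lru pool_lru;	/* hypothetical per-node LRU of pool pages */
static struct shrinker *pool_shrinker;

/* Placeholder for the actual per-node freeing logic. */
static unsigned long pool_free_from_node(int nid, unsigned long nr);

static unsigned long pool_shrink_count(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	/* Only report objects on the node being reclaimed;
	 * list_lru already keeps per-node counts for us. */
	return list_lru_count_node(&pool_lru, sc->nid);
}

static unsigned long pool_shrink_scan(struct shrinker *shrink,
				      struct shrink_control *sc)
{
	/* Free at most sc->nr_to_scan objects, from sc->nid only. */
	return pool_free_from_node(sc->nid, sc->nr_to_scan);
}

static int pool_shrinker_init(void)
{
	pool_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "my-pool");
	if (!pool_shrinker)
		return -ENOMEM;
	pool_shrinker->count_objects = pool_shrink_count;
	pool_shrinker->scan_objects = pool_shrink_scan;
	shrinker_register(pool_shrinker);
	return 0;
}
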
Looks good from my side, but the last time I suggested this, Sima explicitly told me that it isn't very fruitful. Adding her to comment as well.
Christian.
>
> Cc: Christian Koenig <christian.koenig at amd.com>
> Cc: Dave Chinner <david at fromorbit.com>
> Signed-off-by: Dave Airlie <airlied at redhat.com>
> ---
>  drivers/gpu/drm/ttm/ttm_pool.c | 38 +++++++++++++++++++---------------
>  1 file changed, 21 insertions(+), 17 deletions(-)
>
> diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
> index 95bbbc843b97..66cd963b24dc 100644
> --- a/drivers/gpu/drm/ttm/ttm_pool.c
> +++ b/drivers/gpu/drm/ttm/ttm_pool.c
> @@ -416,12 +416,12 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
>  	return NULL;
>  }
>  
> -/* Free pages using the global shrinker list */
> -static unsigned int ttm_pool_shrink(void)
> +/* Free pages using the per-node shrinker list */
> +static unsigned int ttm_pool_shrink(int nid, unsigned long num_to_free)
>  {
> +	LIST_HEAD(dispose);
>  	struct ttm_pool_type *pt;
>  	unsigned int num_pages;
> -	struct page *p;
>  
>  	down_read(&pool_shrink_rwsem);
>  	spin_lock(&shrinker_lock);
> @@ -429,13 +429,10 @@ static unsigned int ttm_pool_shrink(void)
>  	list_move_tail(&pt->shrinker_list, &shrinker_list);
>  	spin_unlock(&shrinker_lock);
>  
> -	p = ttm_pool_type_take(pt, ttm_pool_nid(pt->pool));
> -	if (p) {
> -		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p, true);
> -		num_pages = 1 << pt->order;
> -	} else {
> -		num_pages = 0;
> -	}
> +	num_pages = list_lru_walk_node(&pt->pages, nid, pool_move_to_dispose_list, &dispose, &num_to_free);
> +	num_pages *= 1 << pt->order;
> +
> +	ttm_pool_dispose_list(pt, &dispose);
>  	up_read(&pool_shrink_rwsem);
>  
>  	return num_pages;
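pool_move_to_dispose_list() and ttm_pool_dispose_list() come from the
earlier list_lru patches in this series; for readers jumping in here, a
walk callback of this kind typically looks roughly like the following
(a sketch against the three-argument list_lru_walk_cb signature, not
necessarily the exact code from the series):

static enum lru_status pool_move_to_dispose_list(struct list_head *item,
						 struct list_lru_one *list,
						 void *cb_arg)
{
	struct list_head *dispose = cb_arg;

	/* Unlink the page from the per-node LRU (its lock is held by
	 * the walk) and batch it on a private list so the caller can
	 * free everything in one go afterwards. */
	list_lru_isolate_move(list, item, dispose);
	return LRU_REMOVED;
}

list_lru_walk_node() returns the number of isolated items and
decrements num_to_free as it walks, which is why the result gets scaled
by 1 << pt->order above: each item is a compound page of that order.
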
> @@ -784,6 +781,7 @@ static int __ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
>  		pt = ttm_pool_select_type(pool, page_caching, order);
>  		if (pt && allow_pools)
>  			p = ttm_pool_type_take(pt, ttm_pool_nid(pool));
> +
>  		/*
>  		 * If that fails or previously failed, allocate from system.
>  		 * Note that this also disallows additional pool allocations using
> @@ -932,8 +930,10 @@ void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
>  {
>  	ttm_pool_free_range(pool, tt, tt->caching, 0, tt->num_pages);
>  
> -	while (atomic_long_read(&allocated_pages) > page_pool_size)
> -		ttm_pool_shrink();
> +	while (atomic_long_read(&allocated_pages) > page_pool_size) {
> +		unsigned long diff = atomic_long_read(&allocated_pages) - page_pool_size;
> +		ttm_pool_shrink(ttm_pool_nid(pool), diff);
> +	}
>  }
>  EXPORT_SYMBOL(ttm_pool_free);
> 
> @@ -1190,7 +1190,7 @@ static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
>  	unsigned long num_freed = 0;
>  
>  	do
> -		num_freed += ttm_pool_shrink();
> +		num_freed += ttm_pool_shrink(sc->nid, sc->nr_to_scan);
>  	while (num_freed < sc->nr_to_scan &&
>  	       atomic_long_read(&allocated_pages));
> 
> @@ -1323,11 +1323,15 @@ static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
>  		.nr_to_scan = TTM_SHRINKER_BATCH,
>  	};
>  	unsigned long count;
> +	int nid;
>  
>  	fs_reclaim_acquire(GFP_KERNEL);
> -	count = ttm_pool_shrinker_count(mm_shrinker, &sc);
> -	seq_printf(m, "%lu/%lu\n", count,
> -		   ttm_pool_shrinker_scan(mm_shrinker, &sc));
> +	for_each_node(nid) {
> +		sc.nid = nid;
> +		count = ttm_pool_shrinker_count(mm_shrinker, &sc);
> +		seq_printf(m, "%d: %lu/%lu\n", nid, count,
> +			   ttm_pool_shrinker_scan(mm_shrinker, &sc));
> +	}
>  	fs_reclaim_release(GFP_KERNEL);
>  
>  	return 0;
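With the loop above, the debugfs shrink file reports one count/freed
pair per node instead of a single global pair; on a two-node machine
the output would look something like this (made-up numbers):

	0: 4096/4096
	1: 512/512
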
> @@ -1375,7 +1379,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
>  #endif
>  #endif
>  
> -	mm_shrinker = shrinker_alloc(0, "drm-ttm_pool");
> +	mm_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "drm-ttm_pool");
>  	if (!mm_shrinker)
>  		return -ENOMEM;
> 
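The SHRINKER_NUMA_AWARE bit is what makes the per-node callbacks
actually fire: without it, reclaim pins the shrink_control to node 0.
Paraphrasing the gist of shrink_slab() in mm/vmscan.c (not a verbatim
quote):

	struct shrink_control sc = {
		.gfp_mask = gfp_mask,
		/* non-NUMA-aware shrinkers only ever see node 0 */
		.nid = (shrinker->flags & SHRINKER_NUMA_AWARE) ? nid : 0,
		.memcg = memcg,
	};

so a shrinker allocated with flags == 0 is always asked about node 0,
no matter which node is actually under memory pressure.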