[PATCH 02/18] drm/ttm: use gpu mm stats to track gpu memory allocations. (v3)
Dave Airlie
airlied at gmail.com
Mon Jul 14 05:18:17 UTC 2025
From: Dave Airlie <airlied at redhat.com>
This uses the newly introduced per-node gpu tracking stats to track
GPU memory allocated via TTM and reclaimable memory held in the TTM
page pools.

These stats will be useful for reporting system information, and later
when memory cgroups are integrated.
Cc: Christian Koenig <christian.koenig at amd.com>
Cc: Matthew Brost <matthew.brost at intel.com>
Cc: Johannes Weiner <hannes at cmpxchg.org>
Cc: linux-mm at kvack.org
Cc: Andrew Morton <akpm at linux-foundation.org>
Signed-off-by: Dave Airlie <airlied at redhat.com>
---
v2: add a reclaim parameter and adjust the right counters.
v3: drop the nid helper and get the node id from the page.
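
For reviewers, the invariant these hunks maintain is that every page
owned by TTM is counted on its NUMA node under exactly one of the two
new items: NR_GPU_ACTIVE while handed out to a driver, NR_GPU_RECLAIM
while parked in a pool. A minimal sketch of the pool transition,
assuming the node_stat_item entries introduced earlier in this series
(the helper itself is illustrative, not part of the patch):

/* Illustrative only: move a page's accounting between the two
 * per-node counters when it enters or leaves a TTM page pool.
 */
static void ttm_stat_move(int nid, unsigned int order, bool to_pool)
{
	long nr = 1 << order;
	enum node_stat_item from = to_pool ? NR_GPU_ACTIVE : NR_GPU_RECLAIM;
	enum node_stat_item to = to_pool ? NR_GPU_RECLAIM : NR_GPU_ACTIVE;

	mod_node_page_state(NODE_DATA(nid), from, -nr);
	mod_node_page_state(NODE_DATA(nid), to, nr);
}

ttm_pool_type_give() corresponds to to_pool == true and
ttm_pool_type_take() to to_pool == false; alloc and free only touch
the single counter matching the page's current state.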
---
drivers/gpu/drm/ttm/ttm_pool.c | 25 +++++++++++++++++++------
1 file changed, 19 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index baf27c70a419..ee2344089d47 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -150,8 +150,10 @@ static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
 
 	if (!pool->use_dma_alloc) {
 		p = alloc_pages_node(pool->nid, gfp_flags, order);
-		if (p)
+		if (p) {
 			p->private = order;
+			mod_node_page_state(NODE_DATA(page_to_nid(p)), NR_GPU_ACTIVE, (1 << order));
+		}
 		return p;
 	}
 
@@ -186,7 +188,7 @@ static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
 
 /* Reset the caching and pages of size 1 << order */
 static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
-			       unsigned int order, struct page *p)
+			       unsigned int order, struct page *p, bool reclaim)
 {
 	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
 	struct ttm_pool_dma *dma;
@@ -201,6 +203,9 @@ static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
 #endif
 
 	if (!pool || !pool->use_dma_alloc) {
+		mod_node_page_state(NODE_DATA(page_to_nid(p)),
+				    reclaim ? NR_GPU_RECLAIM : NR_GPU_ACTIVE,
+				    -(1 << order));
 		__free_pages(p, order);
 		return;
 	}
@@ -276,6 +281,7 @@ static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
 static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
 {
 	unsigned int i, num_pages = 1 << pt->order;
+	int nid = page_to_nid(p);
 
 	for (i = 0; i < num_pages; ++i) {
 		if (PageHighMem(p))
@@ -288,17 +294,24 @@ static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
 	list_add(&p->lru, &pt->pages);
 	spin_unlock(&pt->lock);
 	atomic_long_add(1 << pt->order, &allocated_pages);
+
+	mod_node_page_state(NODE_DATA(nid), NR_GPU_ACTIVE, -num_pages);
+	mod_node_page_state(NODE_DATA(nid), NR_GPU_RECLAIM, num_pages);
 }
 
 /* Take pages from a specific pool_type, return NULL when nothing available */
 static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
 {
 	struct page *p;
+	int nid;
 
 	spin_lock(&pt->lock);
 	p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
 	if (p) {
+		nid = page_to_nid(p);
 		atomic_long_sub(1 << pt->order, &allocated_pages);
+		mod_node_page_state(NODE_DATA(nid), NR_GPU_ACTIVE, (1 << pt->order));
+		mod_node_page_state(NODE_DATA(nid), NR_GPU_RECLAIM, -(1 << pt->order));
 		list_del(&p->lru);
 	}
 	spin_unlock(&pt->lock);
@@ -331,7 +344,7 @@ static void ttm_pool_type_fini(struct ttm_pool_type *pt)
 	spin_unlock(&shrinker_lock);
 
 	while ((p = ttm_pool_type_take(pt)))
-		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
+		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p, true);
 }
 
 /* Return the pool_type to use for the given caching and order */
@@ -383,7 +396,7 @@ static unsigned int ttm_pool_shrink(void)
 
 	p = ttm_pool_type_take(pt);
 	if (p) {
-		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
+		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p, true);
 		num_pages = 1 << pt->order;
 	} else {
 		num_pages = 0;
@@ -475,7 +488,7 @@ static pgoff_t ttm_pool_unmap_and_free(struct ttm_pool *pool, struct page *page,
 	if (pt)
 		ttm_pool_type_give(pt, page);
 	else
-		ttm_pool_free_page(pool, caching, order, page);
+		ttm_pool_free_page(pool, caching, order, page, false);
 
 	return nr;
 }
@@ -780,7 +793,7 @@ static int __ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
 	return 0;
 
 error_free_page:
-	ttm_pool_free_page(pool, page_caching, order, p);
+	ttm_pool_free_page(pool, page_caching, order, p, false);
 
 error_free_all:
 	if (tt->restore)
--
2.49.0
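
A note for anyone testing this: since the two items are ordinary
per-node node_stat_item counters, once the series is applied they
should be visible through the usual vmstat plumbing (e.g. /proc/vmstat
and /sys/devices/system/node/node<N>/vmstat); the exact counter strings
depend on how the earlier patch registers them in vmstat_text.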