[PATCH 4/4] drm/ttm: check if the mem free space is under lower limit

Roger He Hongbo.He at amd.com
Thu Feb 8 09:06:01 UTC 2018


Both the free memory space and the lower limit consist of two parts:
system memory and swap space.

For an OOM triggered by TTM, the scenario is as follows:
swap space is filled with swapped-out pages, then system
memory gets filled up with ttm pages, and after that any
further memory allocation request will run into OOM.

To cover two cases:
a. if there is no swap disk at all, or free swap space is under
   the swap mem limit, but available system memory is bigger than
   the sys mem limit, allow TTM allocation;

b. if available system memory is less than the sys mem limit,
   but free swap space is bigger than the swap mem limit, allow
   TTM allocation.

v2: merge two memory limit(swap and system) into one
v3: keep original behavior except with ttm->page_flags
    TTM_PAGE_FLAG_NO_RETRY

Signed-off-by: Roger He <Hongbo.He at amd.com>
---
 drivers/gpu/drm/ttm/ttm_memory.c         | 34 ++++++++++++++++++++++++++++++++
 drivers/gpu/drm/ttm/ttm_page_alloc.c     | 22 ++++++++++++++-------
 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 31 ++++++++++++++++++-----------
 include/drm/ttm/ttm_memory.h             |  5 +++++
 4 files changed, 73 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index aa0c381..16ab324 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -36,6 +36,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/swap.h>
 
 #define TTM_MEMORY_ALLOC_RETRIES 4
 
@@ -375,6 +376,11 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
 
 	si_meminfo(&si);
 
+	/* lower limit of swap space and 256MB is enough */
+	glob->lower_mem_limit = 256 << 8;
+	/* lower limit of ram and keep consistent with each zone->emer_mem */
+	glob->lower_mem_limit += si.totalram >> 2;
+
 	ret = ttm_mem_init_kernel_zone(glob, &si);
 	if (unlikely(ret != 0))
 		goto out_no_zone;
@@ -469,6 +475,34 @@ void ttm_mem_global_free(struct ttm_mem_global *glob,
 }
 EXPORT_SYMBOL(ttm_mem_global_free);
 
+/*
+ * check if the available mem is under total memory limit
+ *
+ * a. if no swap disk at all or free swap space is under swap_mem_limit
+ * but available system mem is bigger than sys_mem_limit, allow TTM
+ * allocation;
+ *
+ * b. if the available system mem is less than sys_mem_limit but free
+ * swap disk is bigger than swap_mem_limit, allow TTM allocation.
+ */
+bool
+ttm_check_under_lowerlimit(struct ttm_mem_global *glob, bool force_alloc)
+{
+	bool ret = false;
+	uint64_t available;
+
+	/* always allow allocation, e.g. when serving page fault or suspend */
+	if (force_alloc)
+		return false;
+
+	available = get_nr_swap_pages() + si_mem_available();
+	if (available < glob->lower_mem_limit)
+		ret = true;
+
+	return ret;
+}
+EXPORT_SYMBOL(ttm_check_under_lowerlimit);
+
 static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
 				  struct ttm_mem_zone *single_zone,
 				  uint64_t amount, bool reserve)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 5edcd89..1457a1c 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -1094,7 +1094,8 @@ ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update)
 int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
 	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
-	unsigned i;
+	unsigned i, unpopulate_count = 0;
+	bool force_alloc = true;
 	int ret;
 
 	if (ttm->state != tt_unpopulated)
@@ -1102,17 +1103,20 @@ int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 
 	ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
 			    ttm->caching_state);
-	if (unlikely(ret != 0)) {
-		ttm_pool_unpopulate_helper(ttm, 0);
-		return ret;
-	}
+	if (unlikely(ret != 0))
+		goto error_populate;
+
+	if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
+		force_alloc = ctx->flags & TTM_OPT_FLAG_FORCE_ALLOC;
+	if (ttm_check_under_lowerlimit(mem_glob, force_alloc))
+		goto error_populate;
 
 	for (i = 0; i < ttm->num_pages; ++i) {
 		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
 						PAGE_SIZE, ctx);
 		if (unlikely(ret != 0)) {
-			ttm_pool_unpopulate_helper(ttm, i);
-			return -ENOMEM;
+			unpopulate_count = i;
+			goto error_populate;
 		}
 	}
 
@@ -1126,6 +1130,10 @@ int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 
 	ttm->state = tt_unbound;
 	return 0;
+
+error_populate:
+	ttm_pool_unpopulate_helper(ttm, unpopulate_count);
+	return -ENOMEM;
 }
 EXPORT_SYMBOL(ttm_pool_populate);
 
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 354e0e1..39b2a42 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -934,6 +934,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
 	struct dma_pool *pool;
 	struct dma_page *d_page;
 	enum pool_type type;
+	bool force_alloc = true;
 	unsigned i;
 	int ret;
 
@@ -943,6 +944,8 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
 	INIT_LIST_HEAD(&ttm_dma->pages_list);
 	i = 0;
 
+	if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
+		force_alloc = ctx->flags & TTM_OPT_FLAG_FORCE_ALLOC;
 	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
@@ -964,12 +967,13 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
 		if (!d_page)
 			break;
 
+		if (ttm_check_under_lowerlimit(mem_glob, force_alloc))
+			goto error_populate;
+
 		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
 						pool->size, ctx);
-		if (unlikely(ret != 0)) {
-			ttm_dma_unpopulate(ttm_dma, dev);
-			return -ENOMEM;
-		}
+		if (unlikely(ret != 0))
+			goto error_populate;
 
 		d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
 		for (j = i + 1; j < (i + HPAGE_PMD_NR); ++j) {
@@ -996,17 +1000,16 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
 
 	while (num_pages) {
 		d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
-		if (!d_page) {
-			ttm_dma_unpopulate(ttm_dma, dev);
-			return -ENOMEM;
-		}
+		if (!d_page)
+			goto error_populate;
+
+		if (ttm_check_under_lowerlimit(mem_glob, force_alloc))
+			goto error_populate;
 
 		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
 						pool->size, ctx);
-		if (unlikely(ret != 0)) {
-			ttm_dma_unpopulate(ttm_dma, dev);
-			return -ENOMEM;
-		}
+		if (unlikely(ret != 0))
+			goto error_populate;
 
 		d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
 		++i;
@@ -1023,6 +1026,10 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
 
 	ttm->state = tt_unbound;
 	return 0;
+
+error_populate:
+	ttm_dma_unpopulate(ttm_dma, dev);
+	return -ENOMEM;
 }
 EXPORT_SYMBOL_GPL(ttm_dma_populate);
 
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
index 8936285..3aa30d6 100644
--- a/include/drm/ttm/ttm_memory.h
+++ b/include/drm/ttm/ttm_memory.h
@@ -49,6 +49,8 @@
  * @work: The workqueue callback for the shrink queue.
  * @lock: Lock to protect the @shrink - and the memory accounting members,
  * that is, essentially the whole structure with some exceptions.
+ * @lower_mem_limit: include lower limit of swap space and lower limit of
+ * system memory.
  * @zones: Array of pointers to accounting zones.
  * @num_zones: Number of populated entries in the @zones array.
  * @zone_kernel: Pointer to the kernel zone.
@@ -67,6 +69,7 @@ struct ttm_mem_global {
 	struct workqueue_struct *swap_queue;
 	struct work_struct work;
 	spinlock_t lock;
+	uint64_t lower_mem_limit;
 	struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES];
 	unsigned int num_zones;
 	struct ttm_mem_zone *zone_kernel;
@@ -90,4 +93,6 @@ extern void ttm_mem_global_free_page(struct ttm_mem_global *glob,
 				     struct page *page, uint64_t size);
 extern size_t ttm_round_pot(size_t size);
 extern uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob);
+extern bool ttm_check_under_lowerlimit(struct ttm_mem_global *glob,
+					bool force_alloc);
 #endif
-- 
2.7.4



More information about the amd-gfx mailing list