[PATCH 33/49] mm/vmalloc: remove purge_lock and per-caller retry flags, gate lazy purging on vmap_lazy_nr
Chris Wilson
chris at chris-wilson.co.uk
Fri Nov 11 20:40:02 UTC 2016
---
mm/vmalloc.c | 29 +++++++----------------------
1 file changed, 7 insertions(+), 22 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 7e945c63c7ef..d4d6b70d34eb 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -44,6 +44,8 @@ struct vfree_deferred {
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
+static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
+
static void __vunmap(const void *, int);
static void free_work(struct work_struct *w)
@@ -319,7 +321,6 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
struct vmap_area *va;
struct rb_node *n;
unsigned long addr;
- int purged = 0;
struct vmap_area *first;
BUG_ON(!size);
@@ -428,19 +429,16 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
overflow:
spin_unlock(&vmap_area_lock);
- if (!purged) {
+ if (atomic_read(&vmap_lazy_nr) > (size >> PAGE_SHIFT)) {
purge_vmap_area_lazy();
- purged = 1;
goto retry;
}
if (gfpflags_allow_blocking(gfp_mask)) {
unsigned long freed = 0;
blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
- if (freed > 0) {
- purged = 0;
+ if (freed > 0)
goto retry;
- }
}
if (printk_ratelimit())
@@ -560,8 +558,6 @@ static unsigned long lazy_max_pages(void)
return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
-static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
-
/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);
@@ -587,7 +583,6 @@ void set_iounmap_nonlazy(void)
static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
int sync, int force_flush)
{
- static DEFINE_SPINLOCK(purge_lock);
struct llist_node *valist;
struct vmap_area *va;
struct vmap_area *n_va;
@@ -598,12 +593,6 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
* should not expect such behaviour. This just simplifies locking for
* the case that isn't actually used at the moment anyway.
*/
- if (!sync && !force_flush) {
- if (!spin_trylock(&purge_lock))
- return;
- } else
- spin_lock(&purge_lock);
-
if (sync)
purge_fragmented_blocks_allcpus();
@@ -616,9 +605,6 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
}
- if (nr)
- atomic_sub(nr, &vmap_lazy_nr);
-
if (nr || force_flush)
flush_tlb_kernel_range(*start, *end);
@@ -627,8 +613,9 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
llist_for_each_entry_safe(va, n_va, valist, purge_list)
__free_vmap_area(va);
spin_unlock(&vmap_area_lock);
+
+ atomic_sub(nr, &vmap_lazy_nr);
}
- spin_unlock(&purge_lock);
}
/*
@@ -2370,7 +2357,6 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
struct vm_struct **vms;
int area, area2, last_area, term_area;
unsigned long base, start, end, last_end;
- bool purged = false;
/* verify parameters and allocate data structures */
BUG_ON(offset_in_page(align) || !is_power_of_2(align));
@@ -2439,9 +2425,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
*/
if (base + last_end < vmalloc_start + last_end) {
spin_unlock(&vmap_area_lock);
- if (!purged) {
+ if (atomic_read(&vmap_lazy_nr)) {
purge_vmap_area_lazy();
- purged = true;
goto retry;
}
goto err_free;
--
2.10.2
More information about the Intel-gfx-trybot mailing list is available in the list archives.