Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--  mm/vmalloc.c  899
1 file changed, 551 insertions(+), 348 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index a3d66b3dc5c..f64632b6719 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -26,12 +26,32 @@
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <linux/llist.h>
+
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>
-bool vmap_lazy_unmap __read_mostly = true;
+struct vfree_deferred {
+ struct llist_head list;
+ struct work_struct wq;
+};
+static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
+
+static void __vunmap(const void *, int);
+
+static void free_work(struct work_struct *w)
+{
+ struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
+ struct llist_node *llnode = llist_del_all(&p->list);
+ while (llnode) {
+ void *p = llnode;
+ llnode = llist_next(llnode);
+ __vunmap(p, 1);
+ }
+}
/*** Page table manipulation functions ***/
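The vfree_deferred machinery above is an instance of the usual per-CPU llist + workqueue deferral pattern: a producer that may run in atomic context pushes the object itself onto a lock-free list (reusing the object's first bytes as the llist_node, so the object must be at least sizeof(struct llist_node)) and schedules a work item; the work handler drains the list and does the real free from process context. A minimal standalone sketch of that pattern, using hypothetical names (deferred_kfree, kfree_deferred) rather than code from this patch:

#include <linux/llist.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct kfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct kfree_deferred, kfree_deferred);

/* Runs in process context and performs the real kfree(). */
static void kfree_deferred_work(struct work_struct *w)
{
	struct kfree_deferred *p = container_of(w, struct kfree_deferred, wq);
	struct llist_node *llnode = llist_del_all(&p->list);

	while (llnode) {
		void *obj = llnode;

		llnode = llist_next(llnode);
		kfree(obj);
	}
}

/* Safe from atomic context; @obj must be >= sizeof(struct llist_node). */
static void deferred_kfree(void *obj)
{
	struct kfree_deferred *p = this_cpu_ptr(&kfree_deferred);

	/* llist_add() returns true only if the list was empty: schedule once. */
	if (llist_add((struct llist_node *)obj, &p->list))
		schedule_work(&p->wq);
}

static int __init kfree_deferred_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct kfree_deferred *p = &per_cpu(kfree_deferred, cpu);

		init_llist_head(&p->list);
		INIT_WORK(&p->wq, kfree_deferred_work);
	}
	return 0;
}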
@@ -251,20 +271,17 @@ EXPORT_SYMBOL(vmalloc_to_pfn);
#define VM_LAZY_FREEING 0x02
#define VM_VM_AREA 0x04
-struct vmap_area {
- unsigned long va_start;
- unsigned long va_end;
- unsigned long flags;
- struct rb_node rb_node; /* address sorted rbtree */
- struct list_head list; /* address sorted list */
- struct list_head purge_list; /* "lazy purge" list */
- void *private;
- struct rcu_head rcu_head;
-};
-
static DEFINE_SPINLOCK(vmap_area_lock);
+/* Export for kexec only */
+LIST_HEAD(vmap_area_list);
static struct rb_root vmap_area_root = RB_ROOT;
-static LIST_HEAD(vmap_area_list);
+
+/* The vmap cache globals are protected by vmap_area_lock */
+static struct rb_node *free_vmap_cache;
+static unsigned long cached_hole_size;
+static unsigned long cached_vstart;
+static unsigned long cached_align;
+
static unsigned long vmap_area_pcpu_hole;
static struct vmap_area *__find_vmap_area(unsigned long addr)
@@ -277,7 +294,7 @@ static struct vmap_area *__find_vmap_area(unsigned long addr)
va = rb_entry(n, struct vmap_area, rb_node);
if (addr < va->va_start)
n = n->rb_left;
- else if (addr > va->va_start)
+ else if (addr >= va->va_end)
n = n->rb_right;
else
return va;
@@ -308,7 +325,7 @@ static void __insert_vmap_area(struct vmap_area *va)
rb_link_node(&va->rb_node, parent, p);
rb_insert_color(&va->rb_node, &vmap_area_root);
- /* address-sort this list so it is usable like the vmlist */
+ /* address-sort this list */
tmp = rb_prev(&va->rb_node);
if (tmp) {
struct vmap_area *prev;
@@ -333,101 +350,145 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
struct rb_node *n;
unsigned long addr;
int purged = 0;
+ struct vmap_area *first;
BUG_ON(!size);
BUG_ON(size & ~PAGE_MASK);
+ BUG_ON(!is_power_of_2(align));
va = kmalloc_node(sizeof(struct vmap_area),
gfp_mask & GFP_RECLAIM_MASK, node);
if (unlikely(!va))
return ERR_PTR(-ENOMEM);
-retry:
- addr = ALIGN(vstart, align);
+ /*
+ * Only scan the relevant parts containing pointers to other objects
+ * to avoid false negatives.
+ */
+ kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);
+retry:
spin_lock(&vmap_area_lock);
- if (addr + size - 1 < addr)
- goto overflow;
+ /*
+ * Invalidate cache if we have more permissive parameters.
+ * cached_hole_size notes the largest hole noticed _below_
+ * the vmap_area cached in free_vmap_cache: if size fits
+ * into that hole, we want to scan from vstart to reuse
+ * the hole instead of allocating above free_vmap_cache.
+ * Note that __free_vmap_area may update free_vmap_cache
+ * without updating cached_hole_size or cached_align.
+ */
+ if (!free_vmap_cache ||
+ size < cached_hole_size ||
+ vstart < cached_vstart ||
+ align < cached_align) {
+nocache:
+ cached_hole_size = 0;
+ free_vmap_cache = NULL;
+ }
+ /* record if we encounter less permissive parameters */
+ cached_vstart = vstart;
+ cached_align = align;
+
+ /* find starting point for our search */
+ if (free_vmap_cache) {
+ first = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
+ addr = ALIGN(first->va_end, align);
+ if (addr < vstart)
+ goto nocache;
+ if (addr + size < addr)
+ goto overflow;
+
+ } else {
+ addr = ALIGN(vstart, align);
+ if (addr + size < addr)
+ goto overflow;
- /* XXX: could have a last_hole cache */
- n = vmap_area_root.rb_node;
- if (n) {
- struct vmap_area *first = NULL;
+ n = vmap_area_root.rb_node;
+ first = NULL;
- do {
+ while (n) {
struct vmap_area *tmp;
tmp = rb_entry(n, struct vmap_area, rb_node);
if (tmp->va_end >= addr) {
- if (!first && tmp->va_start < addr + size)
- first = tmp;
- n = n->rb_left;
- } else {
first = tmp;
+ if (tmp->va_start <= addr)
+ break;
+ n = n->rb_left;
+ } else
n = n->rb_right;
- }
- } while (n);
+ }
if (!first)
goto found;
+ }
- if (first->va_end < addr) {
- n = rb_next(&first->rb_node);
- if (n)
- first = rb_entry(n, struct vmap_area, rb_node);
- else
- goto found;
- }
+ /* from the starting point, walk areas until a suitable hole is found */
+ while (addr + size > first->va_start && addr + size <= vend) {
+ if (addr + cached_hole_size < first->va_start)
+ cached_hole_size = first->va_start - addr;
+ addr = ALIGN(first->va_end, align);
+ if (addr + size < addr)
+ goto overflow;
- while (addr + size > first->va_start && addr + size <= vend) {
- addr = ALIGN(first->va_end + PAGE_SIZE, align);
- if (addr + size - 1 < addr)
- goto overflow;
+ if (list_is_last(&first->list, &vmap_area_list))
+ goto found;
- n = rb_next(&first->rb_node);
- if (n)
- first = rb_entry(n, struct vmap_area, rb_node);
- else
- goto found;
- }
- }
-found:
- if (addr + size > vend) {
-overflow:
- spin_unlock(&vmap_area_lock);
- if (!purged) {
- purge_vmap_area_lazy();
- purged = 1;
- goto retry;
- }
- if (printk_ratelimit())
- printk(KERN_WARNING
- "vmap allocation for size %lu failed: "
- "use vmalloc=<size> to increase size.\n", size);
- kfree(va);
- return ERR_PTR(-EBUSY);
+ first = list_entry(first->list.next,
+ struct vmap_area, list);
}
- BUG_ON(addr & (align-1));
+found:
+ if (addr + size > vend)
+ goto overflow;
va->va_start = addr;
va->va_end = addr + size;
va->flags = 0;
__insert_vmap_area(va);
+ free_vmap_cache = &va->rb_node;
spin_unlock(&vmap_area_lock);
- return va;
-}
+ BUG_ON(va->va_start & (align-1));
+ BUG_ON(va->va_start < vstart);
+ BUG_ON(va->va_end > vend);
-static void rcu_free_va(struct rcu_head *head)
-{
- struct vmap_area *va = container_of(head, struct vmap_area, rcu_head);
+ return va;
+overflow:
+ spin_unlock(&vmap_area_lock);
+ if (!purged) {
+ purge_vmap_area_lazy();
+ purged = 1;
+ goto retry;
+ }
+ if (printk_ratelimit())
+ printk(KERN_WARNING
+ "vmap allocation for size %lu failed: "
+ "use vmalloc=<size> to increase size.\n", size);
kfree(va);
+ return ERR_PTR(-EBUSY);
}
static void __free_vmap_area(struct vmap_area *va)
{
BUG_ON(RB_EMPTY_NODE(&va->rb_node));
+
+ if (free_vmap_cache) {
+ if (va->va_end < cached_vstart) {
+ free_vmap_cache = NULL;
+ } else {
+ struct vmap_area *cache;
+ cache = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
+ if (va->va_start <= cache->va_start) {
+ free_vmap_cache = rb_prev(&va->rb_node);
+ /*
+ * We don't try to update cached_hole_size or
+ * cached_align, but it won't go very wrong.
+ */
+ }
+ }
+ }
rb_erase(&va->rb_node, &vmap_area_root);
RB_CLEAR_NODE(&va->rb_node);
list_del_rcu(&va->list);
@@ -441,7 +502,7 @@ static void __free_vmap_area(struct vmap_area *va)
if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);
- call_rcu(&va->rcu_head, rcu_free_va);
+ kfree_rcu(va, rcu_head);
}
/*
@@ -503,9 +564,6 @@ static unsigned long lazy_max_pages(void)
{
unsigned int log;
- if (!vmap_lazy_unmap)
- return 0;
-
log = fls(num_online_cpus());
return log * (32UL * 1024 * 1024 / PAGE_SIZE);
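For scale: with 8 online CPUs and 4 KiB pages, fls(8) = 4 and 32 MiB / 4 KiB = 8192, so lazy_max_pages() returns 4 * 8192 = 32768 pages, i.e. roughly 128 MiB of vmap space may sit lazily unmapped before a purge is forced.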
@@ -566,7 +624,6 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
if (va->va_end > *end)
*end = va->va_end;
nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
- unmap_vmap_area(va);
list_add_tail(&va->purge_list, &valist);
va->flags |= VM_LAZY_FREEING;
va->flags &= ~VM_LAZY_FREE;
@@ -611,10 +668,11 @@ static void purge_vmap_area_lazy(void)
}
/*
- * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
- * called for the correct range previously.
+ * Free a vmap area, caller ensuring that the area has been unmapped
+ * and flush_cache_vunmap had been called for the correct range
+ * previously.
*/
-static void free_unmap_vmap_area_noflush(struct vmap_area *va)
+static void free_vmap_area_noflush(struct vmap_area *va)
{
va->flags |= VM_LAZY_FREE;
atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
@@ -623,6 +681,16 @@ static void free_unmap_vmap_area_noflush(struct vmap_area *va)
}
/*
+ * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
+ * called for the correct range previously.
+ */
+static void free_unmap_vmap_area_noflush(struct vmap_area *va)
+{
+ unmap_vmap_area(va);
+ free_vmap_area_noflush(va);
+}
+
+/*
* Free and unmap a vmap area
*/
static void free_unmap_vmap_area(struct vmap_area *va)
@@ -675,9 +743,10 @@ static void free_unmap_vmap_area_addr(unsigned long addr)
#define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */
-#define VMAP_BBMAP_BITS VMAP_MIN(VMAP_BBMAP_BITS_MAX, \
- VMAP_MAX(VMAP_BBMAP_BITS_MIN, \
- VMALLOC_PAGES / NR_CPUS / 16))
+#define VMAP_BBMAP_BITS \
+ VMAP_MIN(VMAP_BBMAP_BITS_MAX, \
+ VMAP_MAX(VMAP_BBMAP_BITS_MIN, \
+ VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
#define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE)
@@ -691,9 +760,7 @@ struct vmap_block_queue {
struct vmap_block {
spinlock_t lock;
struct vmap_area *va;
- struct vmap_block_queue *vbq;
unsigned long free, dirty;
- DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
struct list_head free_list;
struct rcu_head rcu_head;
@@ -743,7 +810,7 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
VMALLOC_START, VMALLOC_END,
node, gfp_mask);
- if (unlikely(IS_ERR(va))) {
+ if (IS_ERR(va)) {
kfree(vb);
return ERR_CAST(va);
}
@@ -759,7 +826,6 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
vb->va = va;
vb->free = VMAP_BBMAP_BITS;
vb->dirty = 0;
- bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
INIT_LIST_HEAD(&vb->free_list);
@@ -771,7 +837,6 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
radix_tree_preload_end();
vbq = &get_cpu_var(vmap_block_queue);
- vb->vbq = vbq;
spin_lock(&vbq->lock);
list_add_rcu(&vb->free_list, &vbq->free);
spin_unlock(&vbq->lock);
@@ -780,13 +845,6 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
return vb;
}
-static void rcu_free_vb(struct rcu_head *head)
-{
- struct vmap_block *vb = container_of(head, struct vmap_block, rcu_head);
-
- kfree(vb);
-}
-
static void free_vmap_block(struct vmap_block *vb)
{
struct vmap_block *tmp;
@@ -798,8 +856,8 @@ static void free_vmap_block(struct vmap_block *vb)
spin_unlock(&vmap_block_tree_lock);
BUG_ON(tmp != vb);
- free_unmap_vmap_area_noflush(vb->va);
- call_rcu(&vb->rcu_head, rcu_free_vb);
+ free_vmap_area_noflush(vb->va);
+ kfree_rcu(vb, rcu_head);
}
static void purge_fragmented_blocks(int cpu)
@@ -819,7 +877,6 @@ static void purge_fragmented_blocks(int cpu)
if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
vb->free = 0; /* prevent further allocs after releasing lock */
vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
- bitmap_fill(vb->alloc_map, VMAP_BBMAP_BITS);
bitmap_fill(vb->dirty_map, VMAP_BBMAP_BITS);
spin_lock(&vbq->lock);
list_del_rcu(&vb->free_list);
@@ -837,11 +894,6 @@ static void purge_fragmented_blocks(int cpu)
}
}
-static void purge_fragmented_blocks_thiscpu(void)
-{
- purge_fragmented_blocks(smp_processor_id());
-}
-
static void purge_fragmented_blocks_allcpus(void)
{
int cpu;
@@ -856,10 +908,17 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
struct vmap_block *vb;
unsigned long addr = 0;
unsigned int order;
- int purge = 0;
BUG_ON(size & ~PAGE_MASK);
BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
+ if (WARN_ON(size == 0)) {
+ /*
+ * Allocating 0 bytes isn't what the caller wants, since
+ * get_order(0) returns a funny result. Just warn and terminate
+ * early.
+ */
+ return NULL;
+ }
order = get_order(size);
again:
@@ -872,17 +931,7 @@ again:
if (vb->free < 1UL << order)
goto next;
- i = bitmap_find_free_region(vb->alloc_map,
- VMAP_BBMAP_BITS, order);
-
- if (i < 0) {
- if (vb->free + vb->dirty == VMAP_BBMAP_BITS) {
- /* fragmented and no outstanding allocations */
- BUG_ON(vb->dirty != VMAP_BBMAP_BITS);
- purge = 1;
- }
- goto next;
- }
+ i = VMAP_BBMAP_BITS - vb->free;
addr = vb->va->va_start + (i << PAGE_SHIFT);
BUG_ON(addr_to_vb_idx(addr) !=
addr_to_vb_idx(vb->va->va_start));
@@ -898,9 +947,6 @@ next:
spin_unlock(&vb->lock);
}
- if (purge)
- purge_fragmented_blocks_thiscpu();
-
put_cpu_var(vmap_block_queue);
rcu_read_unlock();
@@ -936,6 +982,8 @@ static void vb_free(const void *addr, unsigned long size)
rcu_read_unlock();
BUG_ON(!vb);
+ vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);
+
spin_lock(&vb->lock);
BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));
@@ -976,29 +1024,25 @@ void vm_unmap_aliases(void)
rcu_read_lock();
list_for_each_entry_rcu(vb, &vbq->free, free_list) {
- int i;
+ int i, j;
spin_lock(&vb->lock);
i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
- while (i < VMAP_BBMAP_BITS) {
+ if (i < VMAP_BBMAP_BITS) {
unsigned long s, e;
- int j;
- j = find_next_zero_bit(vb->dirty_map,
- VMAP_BBMAP_BITS, i);
+
+ j = find_last_bit(vb->dirty_map,
+ VMAP_BBMAP_BITS);
+ j = j + 1; /* need exclusive index */
s = vb->va->va_start + (i << PAGE_SHIFT);
e = vb->va->va_start + (j << PAGE_SHIFT);
- vunmap_page_range(s, e);
flush = 1;
if (s < start)
start = s;
if (e > end)
end = e;
-
- i = j;
- i = find_next_bit(vb->dirty_map,
- VMAP_BBMAP_BITS, i);
}
spin_unlock(&vb->lock);
}
@@ -1041,6 +1085,12 @@ EXPORT_SYMBOL(vm_unmap_ram);
* @node: prefer to allocate data structures on this node
* @prot: memory protection to use. PAGE_KERNEL for regular RAM
*
+ * If you use this function for less than VMAP_MAX_ALLOC pages, it can be
+ * faster than vmap(). However, mixing long-lived and short-lived objects
+ * with vm_map_ram() can consume a lot of address space through
+ * fragmentation (especially on a 32-bit machine), and you may eventually
+ * see allocation failures. Please use this function only for short-lived
+ * objects.
+ *
* Returns: a pointer to the address that has been mapped, or %NULL on failure
*/
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
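Given the caveat added above, vm_map_ram() is best paired tightly with vm_unmap_ram() around a short-lived access. A hedged usage sketch (copy_highpages_out() and its parameters are hypothetical, not part of this patch):

#include <linux/mm.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/* Temporarily map @count pages, copy @len bytes out, then unmap again. */
static int copy_highpages_out(struct page **pages, unsigned int count,
			      void *dst, size_t len)
{
	void *vaddr;

	vaddr = vm_map_ram(pages, count, NUMA_NO_NODE, PAGE_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	memcpy(dst, vaddr, len);

	/* Short-lived mapping: tear it down with the same page count. */
	vm_unmap_ram(vaddr, count);
	return 0;
}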
@@ -1072,6 +1122,33 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
}
EXPORT_SYMBOL(vm_map_ram);
+static struct vm_struct *vmlist __initdata;
+/**
+ * vm_area_add_early - add vmap area early during boot
+ * @vm: vm_struct to add
+ *
+ * This function is used to add fixed kernel vm area to vmlist before
+ * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags
+ * should contain proper values and the other fields should be zero.
+ *
+ * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
+ */
+void __init vm_area_add_early(struct vm_struct *vm)
+{
+ struct vm_struct *tmp, **p;
+
+ BUG_ON(vmap_initialized);
+ for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
+ if (tmp->addr >= vm->addr) {
+ BUG_ON(tmp->addr < vm->addr + vm->size);
+ break;
+ } else
+ BUG_ON(tmp->addr + tmp->size > vm->addr);
+ }
+ vm->next = *p;
+ *p = vm;
+}
+
/**
* vm_area_register_early - register vmap area early during boot
* @vm: vm_struct to register
@@ -1094,8 +1171,7 @@ void __init vm_area_register_early(struct vm_struct *vm, size_t align)
vm->addr = (void *)addr;
- vm->next = vmlist;
- vmlist = vm;
+ vm_area_add_early(vm);
}
void __init vmalloc_init(void)
@@ -1106,18 +1182,23 @@ void __init vmalloc_init(void)
for_each_possible_cpu(i) {
struct vmap_block_queue *vbq;
+ struct vfree_deferred *p;
vbq = &per_cpu(vmap_block_queue, i);
spin_lock_init(&vbq->lock);
INIT_LIST_HEAD(&vbq->free);
+ p = &per_cpu(vfree_deferred, i);
+ init_llist_head(&p->list);
+ INIT_WORK(&p->wq, free_work);
}
/* Import existing vmlist entries. */
for (tmp = vmlist; tmp; tmp = tmp->next) {
va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
- va->flags = tmp->flags | VM_VM_AREA;
+ va->flags = VM_VM_AREA;
va->va_start = (unsigned long)tmp->addr;
va->va_end = va->va_start + tmp->size;
+ va->vm = tmp;
__insert_vmap_area(va);
}
@@ -1169,6 +1250,7 @@ void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
vunmap_page_range(addr, addr + size);
}
+EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);
/**
* unmap_kernel_range - unmap kernel VM area and flush cache and TLB
@@ -1186,11 +1268,12 @@ void unmap_kernel_range(unsigned long addr, unsigned long size)
vunmap_page_range(addr, end);
flush_tlb_kernel_range(addr, end);
}
+EXPORT_SYMBOL_GPL(unmap_kernel_range);
int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
unsigned long addr = (unsigned long)area->addr;
- unsigned long end = addr + area->size - PAGE_SIZE;
+ unsigned long end = addr + get_vm_area_size(area);
int err;
err = vmap_page_range(addr, end, prot, *pages);
@@ -1203,50 +1286,40 @@ int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
}
EXPORT_SYMBOL_GPL(map_vm_area);
-/*** Old vmalloc interfaces ***/
-DEFINE_RWLOCK(vmlist_lock);
-struct vm_struct *vmlist;
-
-static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
- unsigned long flags, void *caller)
+static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
+ unsigned long flags, const void *caller)
{
- struct vm_struct *tmp, **p;
-
+ spin_lock(&vmap_area_lock);
vm->flags = flags;
vm->addr = (void *)va->va_start;
vm->size = va->va_end - va->va_start;
vm->caller = caller;
- va->private = vm;
+ va->vm = vm;
va->flags |= VM_VM_AREA;
+ spin_unlock(&vmap_area_lock);
+}
- write_lock(&vmlist_lock);
- for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
- if (tmp->addr >= vm->addr)
- break;
- }
- vm->next = *p;
- *p = vm;
- write_unlock(&vmlist_lock);
+static void clear_vm_uninitialized_flag(struct vm_struct *vm)
+{
+ /*
+ * Before removing VM_UNINITIALIZED,
+ * we should make sure that vm has proper values.
+ * Pair with smp_rmb() in show_numa_info().
+ */
+ smp_wmb();
+ vm->flags &= ~VM_UNINITIALIZED;
}
static struct vm_struct *__get_vm_area_node(unsigned long size,
unsigned long align, unsigned long flags, unsigned long start,
- unsigned long end, int node, gfp_t gfp_mask, void *caller)
+ unsigned long end, int node, gfp_t gfp_mask, const void *caller)
{
- static struct vmap_area *va;
+ struct vmap_area *va;
struct vm_struct *area;
BUG_ON(in_interrupt());
- if (flags & VM_IOREMAP) {
- int bit = fls(size);
-
- if (bit > IOREMAP_MAX_ORDER)
- bit = IOREMAP_MAX_ORDER;
- else if (bit < PAGE_SHIFT)
- bit = PAGE_SHIFT;
-
- align = 1ul << bit;
- }
+ if (flags & VM_IOREMAP)
+ align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
size = PAGE_ALIGN(size);
if (unlikely(!size))
@@ -1267,24 +1340,25 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
return NULL;
}
- insert_vmalloc_vm(area, va, flags, caller);
+ setup_vmalloc_vm(area, va, flags, caller);
+
return area;
}
struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
unsigned long start, unsigned long end)
{
- return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
- __builtin_return_address(0));
+ return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
+ GFP_KERNEL, __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);
struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
unsigned long start, unsigned long end,
- void *caller)
+ const void *caller)
{
- return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
- caller);
+ return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
+ GFP_KERNEL, caller);
}
/**
@@ -1299,30 +1373,32 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
- -1, GFP_KERNEL, __builtin_return_address(0));
+ NUMA_NO_NODE, GFP_KERNEL,
+ __builtin_return_address(0));
}
struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
- void *caller)
-{
- return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
- -1, GFP_KERNEL, caller);
-}
-
-struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
- int node, gfp_t gfp_mask)
+ const void *caller)
{
return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
- node, gfp_mask, __builtin_return_address(0));
+ NUMA_NO_NODE, GFP_KERNEL, caller);
}
-static struct vm_struct *find_vm_area(const void *addr)
+/**
+ * find_vm_area - find a continuous kernel virtual area
+ * @addr: base address
+ *
+ * Search for the kernel VM area starting at @addr, and return it.
+ * It is up to the caller to do all required locking to keep the returned
+ * pointer valid.
+ */
+struct vm_struct *find_vm_area(const void *addr)
{
struct vmap_area *va;
va = find_vmap_area((unsigned long)addr);
if (va && va->flags & VM_VM_AREA)
- return va->private;
+ return va->vm;
return NULL;
}
@@ -1341,18 +1417,12 @@ struct vm_struct *remove_vm_area(const void *addr)
va = find_vmap_area((unsigned long)addr);
if (va && va->flags & VM_VM_AREA) {
- struct vm_struct *vm = va->private;
- struct vm_struct *tmp, **p;
- /*
- * remove from list and disallow access to this vm_struct
- * before unmap. (address range confliction is maintained by
- * vmap.)
- */
- write_lock(&vmlist_lock);
- for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next)
- ;
- *p = tmp->next;
- write_unlock(&vmlist_lock);
+ struct vm_struct *vm = va->vm;
+
+ spin_lock(&vmap_area_lock);
+ va->vm = NULL;
+ va->flags &= ~VM_VM_AREA;
+ spin_unlock(&vmap_area_lock);
vmap_debug_free_range(va->va_start, va->va_end);
free_unmap_vmap_area(va);
@@ -1370,10 +1440,9 @@ static void __vunmap(const void *addr, int deallocate_pages)
if (!addr)
return;
- if ((PAGE_SIZE-1) & (unsigned long)addr) {
- WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
+ if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
+ addr))
return;
- }
area = remove_vm_area(addr);
if (unlikely(!area)) {
@@ -1404,7 +1473,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
kfree(area);
return;
}
-
+
/**
* vfree - release memory allocated by vmalloc()
* @addr: memory base address
@@ -1413,15 +1482,26 @@ static void __vunmap(const void *addr, int deallocate_pages)
* obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
* NULL, no operation is performed.
*
- * Must not be called in interrupt context.
+ * Must not be called in NMI context (strictly speaking, only if we don't
+ * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
+ * conventions for vfree() arch-dependent would be a really bad idea)
+ *
+ * NOTE: assumes that the object at *addr has a size >= sizeof(llist_node)
*/
void vfree(const void *addr)
{
- BUG_ON(in_interrupt());
+ BUG_ON(in_nmi());
kmemleak_free(addr);
- __vunmap(addr, 1);
+ if (!addr)
+ return;
+ if (unlikely(in_interrupt())) {
+ struct vfree_deferred *p = this_cpu_ptr(&vfree_deferred);
+ if (llist_add((struct llist_node *)addr, &p->list))
+ schedule_work(&p->wq);
+ } else
+ __vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);
@@ -1438,7 +1518,8 @@ void vunmap(const void *addr)
{
BUG_ON(in_interrupt());
might_sleep();
- __vunmap(addr, 0);
+ if (addr)
+ __vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);
@@ -1478,28 +1559,28 @@ EXPORT_SYMBOL(vmap);
static void *__vmalloc_node(unsigned long size, unsigned long align,
gfp_t gfp_mask, pgprot_t prot,
- int node, void *caller);
+ int node, const void *caller);
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
- pgprot_t prot, int node, void *caller)
+ pgprot_t prot, int node)
{
+ const int order = 0;
struct page **pages;
unsigned int nr_pages, array_size, i;
gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
- nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
+ nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
array_size = (nr_pages * sizeof(struct page *));
area->nr_pages = nr_pages;
/* Please note that the recursion is strictly bounded. */
if (array_size > PAGE_SIZE) {
pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
- PAGE_KERNEL, node, caller);
+ PAGE_KERNEL, node, area->caller);
area->flags |= VM_VPAGES;
} else {
pages = kmalloc_node(array_size, nested_gfp, node);
}
area->pages = pages;
- area->caller = caller;
if (!area->pages) {
remove_vm_area(area->addr);
kfree(area);
@@ -1508,11 +1589,12 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
for (i = 0; i < area->nr_pages; i++) {
struct page *page;
+ gfp_t tmp_mask = gfp_mask | __GFP_NOWARN;
- if (node < 0)
- page = alloc_page(gfp_mask);
+ if (node == NUMA_NO_NODE)
+ page = alloc_page(tmp_mask);
else
- page = alloc_pages_node(node, gfp_mask, 0);
+ page = alloc_pages_node(node, tmp_mask, order);
if (unlikely(!page)) {
/* Successfully allocated i pages, free them in __vunmap() */
@@ -1527,41 +1609,31 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
return area->addr;
fail:
+ warn_alloc_failed(gfp_mask, order,
+ "vmalloc: allocation failure, allocated %ld of %ld bytes\n",
+ (area->nr_pages*PAGE_SIZE), area->size);
vfree(area->addr);
return NULL;
}
-void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
-{
- void *addr = __vmalloc_area_node(area, gfp_mask, prot, -1,
- __builtin_return_address(0));
-
- /*
- * A ref_count = 3 is needed because the vm_struct and vmap_area
- * structures allocated in the __get_vm_area_node() function contain
- * references to the virtual address of the vmalloc'ed block.
- */
- kmemleak_alloc(addr, area->size - PAGE_SIZE, 3, gfp_mask);
-
- return addr;
-}
-
/**
- * __vmalloc_node - allocate virtually contiguous memory
+ * __vmalloc_node_range - allocate virtually contiguous memory
* @size: allocation size
* @align: desired alignment
+ * @start: vm area range start
+ * @end: vm area range end
* @gfp_mask: flags for the page level allocator
* @prot: protection mask for the allocated pages
- * @node: node to use for allocation or -1
+ * @node: node to use for allocation or NUMA_NO_NODE
* @caller: caller's return address
*
* Allocate enough pages to cover @size from the page level
* allocator with @gfp_mask flags. Map them into contiguous
* kernel virtual space, using a pagetable protection of @prot.
*/
-static void *__vmalloc_node(unsigned long size, unsigned long align,
- gfp_t gfp_mask, pgprot_t prot,
- int node, void *caller)
+void *__vmalloc_node_range(unsigned long size, unsigned long align,
+ unsigned long start, unsigned long end, gfp_t gfp_mask,
+ pgprot_t prot, int node, const void *caller)
{
struct vm_struct *area;
void *addr;
@@ -1569,29 +1641,64 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
size = PAGE_ALIGN(size);
if (!size || (size >> PAGE_SHIFT) > totalram_pages)
- return NULL;
-
- area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
- VMALLOC_END, node, gfp_mask, caller);
+ goto fail;
+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
+ start, end, node, gfp_mask, caller);
if (!area)
+ goto fail;
+
+ addr = __vmalloc_area_node(area, gfp_mask, prot, node);
+ if (!addr)
return NULL;
- addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
+ /*
+ * In this function, a newly allocated vm_struct has the VM_UNINITIALIZED
+ * flag, which means it is not yet fully initialized. At this point it is
+ * fully initialized, so remove the flag here.
+ */
+ clear_vm_uninitialized_flag(area);
/*
- * A ref_count = 3 is needed because the vm_struct and vmap_area
- * structures allocated in the __get_vm_area_node() function contain
- * references to the virtual address of the vmalloc'ed block.
+ * A ref_count = 2 is needed because vm_struct allocated in
+ * __get_vm_area_node() contains a reference to the virtual address of
+ * the vmalloc'ed block.
*/
- kmemleak_alloc(addr, real_size, 3, gfp_mask);
+ kmemleak_alloc(addr, real_size, 2, gfp_mask);
return addr;
+
+fail:
+ warn_alloc_failed(gfp_mask, 0,
+ "vmalloc: allocation failure: %lu bytes\n",
+ real_size);
+ return NULL;
+}
+
+/**
+ * __vmalloc_node - allocate virtually contiguous memory
+ * @size: allocation size
+ * @align: desired alignment
+ * @gfp_mask: flags for the page level allocator
+ * @prot: protection mask for the allocated pages
+ * @node: node to use for allocation or NUMA_NO_NODE
+ * @caller: caller's return address
+ *
+ * Allocate enough pages to cover @size from the page level
+ * allocator with @gfp_mask flags. Map them into contiguous
+ * kernel virtual space, using a pagetable protection of @prot.
+ */
+static void *__vmalloc_node(unsigned long size, unsigned long align,
+ gfp_t gfp_mask, pgprot_t prot,
+ int node, const void *caller)
+{
+ return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
+ gfp_mask, prot, node, caller);
}
void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
- return __vmalloc_node(size, 1, gfp_mask, prot, -1,
+ return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE,
__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);
@@ -1614,7 +1721,8 @@ static inline void *__vmalloc_node_flags(unsigned long size,
*/
void *vmalloc(unsigned long size)
{
- return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
+ return __vmalloc_node_flags(size, NUMA_NO_NODE,
+ GFP_KERNEL | __GFP_HIGHMEM);
}
EXPORT_SYMBOL(vmalloc);
@@ -1630,7 +1738,7 @@ EXPORT_SYMBOL(vmalloc);
*/
void *vzalloc(unsigned long size)
{
- return __vmalloc_node_flags(size, -1,
+ return __vmalloc_node_flags(size, NUMA_NO_NODE,
GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
}
EXPORT_SYMBOL(vzalloc);
@@ -1649,7 +1757,8 @@ void *vmalloc_user(unsigned long size)
ret = __vmalloc_node(size, SHMLBA,
GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
- PAGE_KERNEL, -1, __builtin_return_address(0));
+ PAGE_KERNEL, NUMA_NO_NODE,
+ __builtin_return_address(0));
if (ret) {
area = find_vm_area(ret);
area->flags |= VM_USERMAP;
@@ -1714,7 +1823,7 @@ EXPORT_SYMBOL(vzalloc_node);
void *vmalloc_exec(unsigned long size)
{
return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
- -1, __builtin_return_address(0));
+ NUMA_NO_NODE, __builtin_return_address(0));
}
#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
@@ -1735,7 +1844,7 @@ void *vmalloc_exec(unsigned long size)
void *vmalloc_32(unsigned long size)
{
return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
- -1, __builtin_return_address(0));
+ NUMA_NO_NODE, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32);
@@ -1752,7 +1861,7 @@ void *vmalloc_32_user(unsigned long size)
void *ret;
ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
- -1, __builtin_return_address(0));
+ NUMA_NO_NODE, __builtin_return_address(0));
if (ret) {
area = find_vm_area(ret);
area->flags |= VM_USERMAP;
@@ -1791,9 +1900,9 @@ static int aligned_vread(char *buf, char *addr, unsigned long count)
* we can expect USER0 is not used (see vread/vwrite's
* function description)
*/
- void *map = kmap_atomic(p, KM_USER0);
+ void *map = kmap_atomic(p);
memcpy(buf, map + offset, length);
- kunmap_atomic(map, KM_USER0);
+ kunmap_atomic(map);
} else
memset(buf, 0, length);
@@ -1830,9 +1939,9 @@ static int aligned_vwrite(char *buf, char *addr, unsigned long count)
* we can expect USER0 is not used (see vread/vwrite's
* function description)
*/
- void *map = kmap_atomic(p, KM_USER0);
+ void *map = kmap_atomic(p);
memcpy(map + offset, buf, length);
- kunmap_atomic(map, KM_USER0);
+ kunmap_atomic(map);
}
addr += length;
buf += length;
@@ -1859,9 +1968,7 @@ static int aligned_vwrite(char *buf, char *addr, unsigned long count)
* IOREMAP area is treated as memory hole and no copy is done.
*
* If [addr...addr+count) doesn't include any intersection with a live
- * vm_struct area, returns 0.
- * @buf should be kernel's buffer. Because this function uses KM_USER0,
- * the caller should guarantee KM_USER0 is not used.
+ * vm_struct area, returns 0. @buf should be kernel's buffer.
*
* Note: In usual ops, vread() is never necessary because the caller
* should know vmalloc() area is valid and can use memcpy().
@@ -1872,7 +1979,8 @@ static int aligned_vwrite(char *buf, char *addr, unsigned long count)
long vread(char *buf, char *addr, unsigned long count)
{
- struct vm_struct *tmp;
+ struct vmap_area *va;
+ struct vm_struct *vm;
char *vaddr, *buf_start = buf;
unsigned long buflen = count;
unsigned long n;
@@ -1881,10 +1989,17 @@ long vread(char *buf, char *addr, unsigned long count)
if ((unsigned long) addr + count < count)
count = -(unsigned long) addr;
- read_lock(&vmlist_lock);
- for (tmp = vmlist; count && tmp; tmp = tmp->next) {
- vaddr = (char *) tmp->addr;
- if (addr >= vaddr + tmp->size - PAGE_SIZE)
+ spin_lock(&vmap_area_lock);
+ list_for_each_entry(va, &vmap_area_list, list) {
+ if (!count)
+ break;
+
+ if (!(va->flags & VM_VM_AREA))
+ continue;
+
+ vm = va->vm;
+ vaddr = (char *) vm->addr;
+ if (addr >= vaddr + get_vm_area_size(vm))
continue;
while (addr < vaddr) {
if (count == 0)
@@ -1894,10 +2009,10 @@ long vread(char *buf, char *addr, unsigned long count)
addr++;
count--;
}
- n = vaddr + tmp->size - PAGE_SIZE - addr;
+ n = vaddr + get_vm_area_size(vm) - addr;
if (n > count)
n = count;
- if (!(tmp->flags & VM_IOREMAP))
+ if (!(vm->flags & VM_IOREMAP))
aligned_vread(buf, addr, n);
else /* IOREMAP area is treated as memory hole */
memset(buf, 0, n);
@@ -1906,7 +2021,7 @@ long vread(char *buf, char *addr, unsigned long count)
count -= n;
}
finished:
- read_unlock(&vmlist_lock);
+ spin_unlock(&vmap_area_lock);
if (buf == buf_start)
return 0;
@@ -1935,21 +2050,18 @@ finished:
* IOREMAP area is treated as memory hole and no copy is done.
*
* If [addr...addr+count) doesn't include any intersection with a live
- * vm_struct area, returns 0.
- * @buf should be kernel's buffer. Because this function uses KM_USER0,
- * the caller should guarantee KM_USER0 is not used.
+ * vm_struct area, returns 0. @buf should be kernel's buffer.
*
* Note: In usual ops, vwrite() is never necessary because the caller
* should know vmalloc() area is valid and can use memcpy().
* This is for routines which have to access vmalloc area without
* any information, as /dev/kmem does.
- *
- * The caller should guarantee KM_USER1 is not used.
*/
long vwrite(char *buf, char *addr, unsigned long count)
{
- struct vm_struct *tmp;
+ struct vmap_area *va;
+ struct vm_struct *vm;
char *vaddr;
unsigned long n, buflen;
int copied = 0;
@@ -1959,10 +2071,17 @@ long vwrite(char *buf, char *addr, unsigned long count)
count = -(unsigned long) addr;
buflen = count;
- read_lock(&vmlist_lock);
- for (tmp = vmlist; count && tmp; tmp = tmp->next) {
- vaddr = (char *) tmp->addr;
- if (addr >= vaddr + tmp->size - PAGE_SIZE)
+ spin_lock(&vmap_area_lock);
+ list_for_each_entry(va, &vmap_area_list, list) {
+ if (!count)
+ break;
+
+ if (!(va->flags & VM_VM_AREA))
+ continue;
+
+ vm = va->vm;
+ vaddr = (char *) vm->addr;
+ if (addr >= vaddr + get_vm_area_size(vm))
continue;
while (addr < vaddr) {
if (count == 0)
@@ -1971,10 +2090,10 @@ long vwrite(char *buf, char *addr, unsigned long count)
addr++;
count--;
}
- n = vaddr + tmp->size - PAGE_SIZE - addr;
+ n = vaddr + get_vm_area_size(vm) - addr;
if (n > count)
n = count;
- if (!(tmp->flags & VM_IOREMAP)) {
+ if (!(vm->flags & VM_IOREMAP)) {
aligned_vwrite(buf, addr, n);
copied++;
}
@@ -1983,49 +2102,50 @@ long vwrite(char *buf, char *addr, unsigned long count)
count -= n;
}
finished:
- read_unlock(&vmlist_lock);
+ spin_unlock(&vmap_area_lock);
if (!copied)
return 0;
return buflen;
}
/**
- * remap_vmalloc_range - map vmalloc pages to userspace
- * @vma: vma to cover (map full range of vma)
- * @addr: vmalloc memory
- * @pgoff: number of pages into addr before first page to map
+ * remap_vmalloc_range_partial - map vmalloc pages to userspace
+ * @vma: vma to cover
+ * @uaddr: target user address to start at
+ * @kaddr: virtual address of vmalloc kernel memory
+ * @size: size of map area
*
* Returns: 0 for success, -Exxx on failure
*
- * This function checks that addr is a valid vmalloc'ed area, and
- * that it is big enough to cover the vma. Will return failure if
- * that criteria isn't met.
+ * This function checks that @kaddr is a valid vmalloc'ed area,
+ * and that it is big enough to cover the range starting at
+ * @uaddr in @vma. Will return failure if that criterion isn't
+ * met.
*
* Similar to remap_pfn_range() (see mm/memory.c)
*/
-int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
- unsigned long pgoff)
+int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
+ void *kaddr, unsigned long size)
{
struct vm_struct *area;
- unsigned long uaddr = vma->vm_start;
- unsigned long usize = vma->vm_end - vma->vm_start;
- if ((PAGE_SIZE-1) & (unsigned long)addr)
+ size = PAGE_ALIGN(size);
+
+ if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
return -EINVAL;
- area = find_vm_area(addr);
+ area = find_vm_area(kaddr);
if (!area)
return -EINVAL;
if (!(area->flags & VM_USERMAP))
return -EINVAL;
- if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
+ if (kaddr + size > area->addr + area->size)
return -EINVAL;
- addr += pgoff << PAGE_SHIFT;
do {
- struct page *page = vmalloc_to_page(addr);
+ struct page *page = vmalloc_to_page(kaddr);
int ret;
ret = vm_insert_page(vma, uaddr, page);
@@ -2033,45 +2153,74 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
return ret;
uaddr += PAGE_SIZE;
- addr += PAGE_SIZE;
- usize -= PAGE_SIZE;
- } while (usize > 0);
+ kaddr += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ } while (size > 0);
- /* Prevent "things" like memory migration? VM_flags need a cleanup... */
- vma->vm_flags |= VM_RESERVED;
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
return 0;
}
+EXPORT_SYMBOL(remap_vmalloc_range_partial);
+
+/**
+ * remap_vmalloc_range - map vmalloc pages to userspace
+ * @vma: vma to cover (map full range of vma)
+ * @addr: vmalloc memory
+ * @pgoff: number of pages into addr before first page to map
+ *
+ * Returns: 0 for success, -Exxx on failure
+ *
+ * This function checks that addr is a valid vmalloc'ed area, and
+ * that it is big enough to cover the vma. Will return failure if
+ * that criteria isn't met.
+ *
+ * Similar to remap_pfn_range() (see mm/memory.c)
+ */
+int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+ unsigned long pgoff)
+{
+ return remap_vmalloc_range_partial(vma, vma->vm_start,
+ addr + (pgoff << PAGE_SHIFT),
+ vma->vm_end - vma->vm_start);
+}
EXPORT_SYMBOL(remap_vmalloc_range);
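For reference, the original remap_vmalloc_range() wrapper is what a driver's mmap handler typically calls to expose a vmalloc_user() buffer (which carries VM_USERMAP) to userspace. A hedged sketch with hypothetical names (my_buf, my_mmap):

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

static void *my_buf;	/* assumed: allocated with vmalloc_user() at init time */

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Map the whole VMA onto the buffer, honouring the file offset. */
	return remap_vmalloc_range(vma, my_buf, vma->vm_pgoff);
}

static const struct file_operations my_fops = {
	.owner = THIS_MODULE,
	.mmap  = my_mmap,
};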
/*
* Implement a stub for vmalloc_sync_all() if the architecture chose not to
* have one.
*/
-void __attribute__((weak)) vmalloc_sync_all(void)
+void __weak vmalloc_sync_all(void)
{
}
static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
- /* apply_to_page_range() does all the hard work. */
+ pte_t ***p = data;
+
+ if (p) {
+ *(*p) = pte;
+ (*p)++;
+ }
return 0;
}
/**
* alloc_vm_area - allocate a range of kernel address space
* @size: size of the area
+ * @ptes: returns the PTEs for the address space
*
* Returns: NULL on failure, vm_struct on success
*
* This function reserves a range of kernel address space, and
* allocates pagetables to map that range. No actual mappings
- * are created. If the kernel address space is not shared
- * between processes, it syncs the pagetable across all
- * processes.
+ * are created.
+ *
+ * If @ptes is non-NULL, pointers to the PTEs (in init_mm)
+ * allocated for the VM area are returned.
*/
-struct vm_struct *alloc_vm_area(size_t size)
+struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
{
struct vm_struct *area;
@@ -2085,15 +2234,11 @@ struct vm_struct *alloc_vm_area(size_t size)
* of kernel virtual address space and mapped into init_mm.
*/
if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
- area->size, f, NULL)) {
+ size, f, ptes ? &ptes : NULL)) {
free_vm_area(area);
return NULL;
}
- /* Make sure the pagetables are constructed in process kernel
- mappings */
- vmalloc_sync_all();
-
return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);
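With the new @ptes argument, a caller can reserve a page-table-backed range of kernel address space and then fill in the returned init_mm PTEs itself (the Xen grant-table code is the kind of user this serves). A hedged sketch, with reserve_va_range() being a hypothetical wrapper:

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Reserve @nr_pages of kernel VA; @ptes must have room for nr_pages entries. */
static struct vm_struct *reserve_va_range(unsigned int nr_pages, pte_t **ptes)
{
	struct vm_struct *area;

	area = alloc_vm_area(nr_pages * PAGE_SIZE, ptes);
	if (!area)
		return NULL;

	/*
	 * area->addr is the start of the reserved range; ptes[0..nr_pages-1]
	 * now point at the matching init_mm PTEs, which the caller can set
	 * (e.g. via a hypercall) before touching the mapping.  Release with
	 * free_vm_area(area) when done.
	 */
	return area;
}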
@@ -2197,17 +2342,16 @@ static unsigned long pvm_determine_end(struct vmap_area **pnext,
* @sizes: array containing size of each area
* @nr_vms: the number of areas to allocate
* @align: alignment, all entries in @offsets and @sizes must be aligned to this
- * @gfp_mask: allocation mask
*
* Returns: kmalloc'd vm_struct pointer array pointing to allocated
* vm_structs on success, %NULL on failure
*
* Percpu allocator wants to use congruent vm areas so that it can
* maintain the offsets among percpu areas. This function allocates
- * congruent vmalloc areas for it. These areas tend to be scattered
- * pretty far, distance between two areas easily going up to
- * gigabytes. To avoid interacting with regular vmallocs, these areas
- * are allocated from top.
+ * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to
+ * be scattered pretty far, distance between two areas easily going up
+ * to gigabytes. To avoid interacting with regular vmallocs, these
+ * areas are allocated from top.
*
* Despite its complicated look, this allocator is rather simple. It
* does everything top-down and scans areas from the end looking for
@@ -2218,7 +2362,7 @@ static unsigned long pvm_determine_end(struct vmap_area **pnext,
*/
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
const size_t *sizes, int nr_vms,
- size_t align, gfp_t gfp_mask)
+ size_t align)
{
const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
@@ -2228,8 +2372,6 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
unsigned long base, start, end, last_end;
bool purged = false;
- gfp_mask &= GFP_RECLAIM_MASK;
-
/* verify parameters and allocate data structures */
BUG_ON(align & ~PAGE_MASK || !is_power_of_2(align));
for (last_area = 0, area = 0; area < nr_vms; area++) {
@@ -2262,14 +2404,14 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
return NULL;
}
- vms = kzalloc(sizeof(vms[0]) * nr_vms, gfp_mask);
- vas = kzalloc(sizeof(vas[0]) * nr_vms, gfp_mask);
+ vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
+ vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
if (!vas || !vms)
- goto err_free;
+ goto err_free2;
for (area = 0; area < nr_vms; area++) {
- vas[area] = kzalloc(sizeof(struct vmap_area), gfp_mask);
- vms[area] = kzalloc(sizeof(struct vm_struct), gfp_mask);
+ vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL);
+ vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
if (!vas[area] || !vms[area])
goto err_free;
}
@@ -2355,19 +2497,18 @@ found:
/* insert all vm's */
for (area = 0; area < nr_vms; area++)
- insert_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
- pcpu_get_vm_areas);
+ setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
+ pcpu_get_vm_areas);
kfree(vas);
return vms;
err_free:
for (area = 0; area < nr_vms; area++) {
- if (vas)
- kfree(vas[area]);
- if (vms)
- kfree(vms[area]);
+ kfree(vas[area]);
+ kfree(vms[area]);
}
+err_free2:
kfree(vas);
kfree(vms);
return NULL;
@@ -2392,19 +2533,19 @@ void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
- __acquires(&vmlist_lock)
+ __acquires(&vmap_area_lock)
{
loff_t n = *pos;
- struct vm_struct *v;
+ struct vmap_area *va;
- read_lock(&vmlist_lock);
- v = vmlist;
- while (n > 0 && v) {
+ spin_lock(&vmap_area_lock);
+ va = list_entry((&vmap_area_list)->next, typeof(*va), list);
+ while (n > 0 && &va->list != &vmap_area_list) {
n--;
- v = v->next;
+ va = list_entry(va->list.next, typeof(*va), list);
}
- if (!n)
- return v;
+ if (!n && &va->list != &vmap_area_list)
+ return va;
return NULL;
@@ -2412,26 +2553,35 @@ static void *s_start(struct seq_file *m, loff_t *pos)
static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
- struct vm_struct *v = p;
+ struct vmap_area *va = p, *next;
++*pos;
- return v->next;
+ next = list_entry(va->list.next, typeof(*va), list);
+ if (&next->list != &vmap_area_list)
+ return next;
+
+ return NULL;
}
static void s_stop(struct seq_file *m, void *p)
- __releases(&vmlist_lock)
+ __releases(&vmap_area_lock)
{
- read_unlock(&vmlist_lock);
+ spin_unlock(&vmap_area_lock);
}
static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
- if (NUMA_BUILD) {
+ if (IS_ENABLED(CONFIG_NUMA)) {
unsigned int nr, *counters = m->private;
if (!counters)
return;
+ /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
+ smp_rmb();
+ if (v->flags & VM_UNINITIALIZED)
+ return;
+
memset(counters, 0, nr_node_ids * sizeof(unsigned int));
for (nr = 0; nr < v->nr_pages; nr++)
@@ -2445,18 +2595,23 @@ static void show_numa_info(struct seq_file *m, struct vm_struct *v)
static int s_show(struct seq_file *m, void *p)
{
- struct vm_struct *v = p;
+ struct vmap_area *va = p;
+ struct vm_struct *v;
- seq_printf(m, "0x%p-0x%p %7ld",
- v->addr, v->addr + v->size, v->size);
+ /*
+ * s_show can race with remove_vm_area(): !VM_VM_AREA means the vmap
+ * area is being torn down or belongs to a vm_map_ram allocation.
+ */
+ if (!(va->flags & VM_VM_AREA))
+ return 0;
- if (v->caller) {
- char buff[KSYM_SYMBOL_LEN];
+ v = va->vm;
- seq_putc(m, ' ');
- sprint_symbol(buff, (unsigned long)v->caller);
- seq_puts(m, buff);
- }
+ seq_printf(m, "0x%pK-0x%pK %7ld",
+ v->addr, v->addr + v->size, v->size);
+
+ if (v->caller)
+ seq_printf(m, " %pS", v->caller);
if (v->nr_pages)
seq_printf(m, " pages=%d", v->nr_pages);
@@ -2465,19 +2620,19 @@ static int s_show(struct seq_file *m, void *p)
seq_printf(m, " phys=%llx", (unsigned long long)v->phys_addr);
if (v->flags & VM_IOREMAP)
- seq_printf(m, " ioremap");
+ seq_puts(m, " ioremap");
if (v->flags & VM_ALLOC)
- seq_printf(m, " vmalloc");
+ seq_puts(m, " vmalloc");
if (v->flags & VM_MAP)
- seq_printf(m, " vmap");
+ seq_puts(m, " vmap");
if (v->flags & VM_USERMAP)
- seq_printf(m, " user");
+ seq_puts(m, " user");
if (v->flags & VM_VPAGES)
- seq_printf(m, " vpages");
+ seq_puts(m, " vpages");
show_numa_info(m, v);
seq_putc(m, '\n');
@@ -2496,7 +2651,7 @@ static int vmalloc_open(struct inode *inode, struct file *file)
unsigned int *ptr = NULL;
int ret;
- if (NUMA_BUILD) {
+ if (IS_ENABLED(CONFIG_NUMA)) {
ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
if (ptr == NULL)
return -ENOMEM;
@@ -2523,5 +2678,53 @@ static int __init proc_vmalloc_init(void)
return 0;
}
module_init(proc_vmalloc_init);
+
+void get_vmalloc_info(struct vmalloc_info *vmi)
+{
+ struct vmap_area *va;
+ unsigned long free_area_size;
+ unsigned long prev_end;
+
+ vmi->used = 0;
+ vmi->largest_chunk = 0;
+
+ prev_end = VMALLOC_START;
+
+ spin_lock(&vmap_area_lock);
+
+ if (list_empty(&vmap_area_list)) {
+ vmi->largest_chunk = VMALLOC_TOTAL;
+ goto out;
+ }
+
+ list_for_each_entry(va, &vmap_area_list, list) {
+ unsigned long addr = va->va_start;
+
+ /*
+ * Some archs keep another range for modules in vmalloc space
+ */
+ if (addr < VMALLOC_START)
+ continue;
+ if (addr >= VMALLOC_END)
+ break;
+
+ if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING))
+ continue;
+
+ vmi->used += (va->va_end - va->va_start);
+
+ free_area_size = addr - prev_end;
+ if (vmi->largest_chunk < free_area_size)
+ vmi->largest_chunk = free_area_size;
+
+ prev_end = va->va_end;
+ }
+
+ if (VMALLOC_END - prev_end > vmi->largest_chunk)
+ vmi->largest_chunk = VMALLOC_END - prev_end;
+
+out:
+ spin_unlock(&vmap_area_lock);
+}
#endif