Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig           |   6
-rw-r--r--  mm/backing-dev.c     |   7
-rw-r--r--  mm/bootmem.c         |  20
-rw-r--r--  mm/dmapool.c         |   2
-rw-r--r--  mm/filemap.c         |   1
-rw-r--r--  mm/hugetlb.c         |  19
-rw-r--r--  mm/kmemleak.c        | 470
-rw-r--r--  mm/memcontrol.c      | 151
-rw-r--r--  mm/memory.c          |  87
-rw-r--r--  mm/mempolicy.c       |  84
-rw-r--r--  mm/mempool.c         |   4
-rw-r--r--  mm/mmap.c            |   3
-rw-r--r--  mm/nommu.c           |  43
-rw-r--r--  mm/oom_kill.c        |  64
-rw-r--r--  mm/page-writeback.c  |  13
-rw-r--r--  mm/page_alloc.c      |  86
-rw-r--r--  mm/page_cgroup.c     |  24
-rw-r--r--  mm/percpu.c          |  72
-rw-r--r--  mm/rmap.c            |   6
-rw-r--r--  mm/shmem.c           |   6
-rw-r--r--  mm/shmem_acl.c       |  29
-rw-r--r--  mm/slab.c            |  32
-rw-r--r--  mm/slob.c            |   8
-rw-r--r--  mm/slub.c            | 139
-rw-r--r--  mm/swapfile.c        |  20
-rw-r--r--  mm/thrash.c          |  32
-rw-r--r--  mm/util.c            |   4
-rw-r--r--  mm/vmscan.c          |  21
28 files changed, 882 insertions, 571 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index c948d4ca8bd..fe5f674d7a7 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -225,9 +225,9 @@ config DEFAULT_MMAP_MIN_ADDR
For most ia64, ppc64 and x86 users with lots of address space
a value of 65536 is reasonable and should cause no problems.
On arm and other archs it should not be higher than 32768.
- Programs which use vm86 functionality would either need additional
- permissions from either the LSM or the capabilities module or have
- this protection disabled.
+ Programs which use vm86 functionality or have some need to map
+ this low address space will need CAP_SYS_RAWIO or disable this
+ protection by setting the value to 0.
This value can be changed after boot using the
/proc/sys/vm/mmap_min_addr tunable.
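The updated help text is easy to verify from userspace. Below is a minimal editorial sketch (not part of the patch) that tries to map a page well below the default 65536-byte floor; without CAP_SYS_RAWIO, or without lowering /proc/sys/vm/mmap_min_addr to 0, the mapping is expected to be refused:

    /* Editorial example: probe the mmap_min_addr protection. */
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
            void *p = mmap((void *)0x1000, 4096, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

            if (p == MAP_FAILED)
                    printf("low mapping refused: %s\n", strerror(errno));
            else
                    printf("low mapping succeeded at %p\n", p);
            return 0;
    }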
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 493b468a503..c86edd24429 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -283,7 +283,6 @@ static wait_queue_head_t congestion_wqh[2] = {
__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
};
-
void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
enum bdi_state bit;
@@ -308,18 +307,18 @@ EXPORT_SYMBOL(set_bdi_congested);
/**
* congestion_wait - wait for a backing_dev to become uncongested
- * @rw: READ or WRITE
+ * @sync: SYNC or ASYNC IO
* @timeout: timeout in jiffies
*
* Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
* write congestion. If no backing_devs are congested then just wait for the
* next write to be completed.
*/
-long congestion_wait(int rw, long timeout)
+long congestion_wait(int sync, long timeout)
{
long ret;
DEFINE_WAIT(wait);
- wait_queue_head_t *wqh = &congestion_wqh[rw];
+ wait_queue_head_t *wqh = &congestion_wqh[sync];
prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
ret = io_schedule_timeout(timeout);
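For context on the renamed parameter, a hedged caller sketch follows. It is not part of this hunk, and it assumes the BLK_RW_SYNC/BLK_RW_ASYNC indices declared in <linux/backing-dev.h> alongside congestion_wqh[]; the point is only that the argument now selects sync vs. async congestion rather than READ vs. WRITE:

    /* Editorial sketch of a caller after the rename (the BLK_RW_* constants
     * are assumed from <linux/backing-dev.h> and are not shown here). */
    static void wait_for_writeback_room(void)
    {
            /* Back off while any backing device reports async (write)
             * congestion, sleeping at most ~50ms before retrying. */
            congestion_wait(BLK_RW_ASYNC, HZ / 20);
    }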
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 282df0a09e6..701740c9e81 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -12,6 +12,7 @@
#include <linux/pfn.h>
#include <linux/bootmem.h>
#include <linux/module.h>
+#include <linux/kmemleak.h>
#include <asm/bug.h>
#include <asm/io.h>
@@ -335,6 +336,8 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
{
unsigned long start, end;
+ kmemleak_free_part(__va(physaddr), size);
+
start = PFN_UP(physaddr);
end = PFN_DOWN(physaddr + size);
@@ -354,6 +357,8 @@ void __init free_bootmem(unsigned long addr, unsigned long size)
{
unsigned long start, end;
+ kmemleak_free_part(__va(addr), size);
+
start = PFN_UP(addr);
end = PFN_DOWN(addr + size);
@@ -516,6 +521,7 @@ find_block:
region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
start_off);
memset(region, 0, size);
+ kmemleak_alloc(region, size, 1, 0);
return region;
}
@@ -536,11 +542,15 @@ static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
return kzalloc(size, GFP_NOWAIT);
#ifdef CONFIG_HAVE_ARCH_BOOTMEM
- bootmem_data_t *p_bdata;
-
- p_bdata = bootmem_arch_preferred_node(bdata, size, align, goal, limit);
- if (p_bdata)
- return alloc_bootmem_core(p_bdata, size, align, goal, limit);
+ {
+ bootmem_data_t *p_bdata;
+
+ p_bdata = bootmem_arch_preferred_node(bdata, size, align,
+ goal, limit);
+ if (p_bdata)
+ return alloc_bootmem_core(p_bdata, size, align,
+ goal, limit);
+ }
#endif
return NULL;
}
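Taken together, the hooks added to this file register every bootmem allocation with kmemleak and report partial releases. A hedged sketch of the resulting pairing (editorial example, not patch code; example_bootmem_user is a hypothetical caller):

    /* Editorial sketch: a boot-time user of bootmem under the new hooks. */
    void __init example_bootmem_user(void)
    {
            /* alloc_bootmem() reaches alloc_bootmem_core(), which now calls
             * kmemleak_alloc() on the returned region. */
            void *buf = alloc_bootmem(2 * PAGE_SIZE);

            /* Returning only the second page triggers
             * kmemleak_free_part(__va(...), PAGE_SIZE), so the first page
             * stays tracked as a smaller object. */
            free_bootmem(__pa(buf) + PAGE_SIZE, PAGE_SIZE);
    }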
diff --git a/mm/dmapool.c b/mm/dmapool.c
index b1f0885dda2..3df063706f5 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -86,10 +86,12 @@ show_pools(struct device *dev, struct device_attribute *attr, char *buf)
unsigned pages = 0;
unsigned blocks = 0;
+ spin_lock_irq(&pool->lock);
list_for_each_entry(page, &pool->page_list, page_list) {
pages++;
blocks += page->in_use;
}
+ spin_unlock_irq(&pool->lock);
/* per-pool info, no real statistics yet */
temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
diff --git a/mm/filemap.c b/mm/filemap.c
index 22396713feb..ccea3b665c1 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2272,6 +2272,7 @@ again:
pagefault_enable();
flush_dcache_page(page);
+ mark_page_accessed(page);
status = a_ops->write_end(file, mapping, pos, bytes, copied,
page, fsdata);
if (unlikely(status < 0))
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a56e6f3ce97..cafdcee154e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1985,7 +1985,7 @@ static struct page *hugetlbfs_pagecache_page(struct hstate *h,
}
static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep, int write_access)
+ unsigned long address, pte_t *ptep, unsigned int flags)
{
struct hstate *h = hstate_vma(vma);
int ret = VM_FAULT_SIGBUS;
@@ -2053,7 +2053,7 @@ retry:
* any allocations necessary to record that reservation occur outside
* the spinlock.
*/
- if (write_access && !(vma->vm_flags & VM_SHARED))
+ if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
if (vma_needs_reservation(h, vma, address) < 0) {
ret = VM_FAULT_OOM;
goto backout_unlocked;
@@ -2072,7 +2072,7 @@ retry:
&& (vma->vm_flags & VM_SHARED)));
set_huge_pte_at(mm, address, ptep, new_pte);
- if (write_access && !(vma->vm_flags & VM_SHARED)) {
+ if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
/* Optimization, do the COW without a second fault */
ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
}
@@ -2091,7 +2091,7 @@ backout_unlocked:
}
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long address, int write_access)
+ unsigned long address, unsigned int flags)
{
pte_t *ptep;
pte_t entry;
@@ -2112,7 +2112,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
mutex_lock(&hugetlb_instantiation_mutex);
entry = huge_ptep_get(ptep);
if (huge_pte_none(entry)) {
- ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
+ ret = hugetlb_no_page(mm, vma, address, ptep, flags);
goto out_mutex;
}
@@ -2126,7 +2126,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
* page now as it is used to determine if a reservation has been
* consumed.
*/
- if (write_access && !pte_write(entry)) {
+ if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
if (vma_needs_reservation(h, vma, address) < 0) {
ret = VM_FAULT_OOM;
goto out_mutex;
@@ -2143,7 +2143,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
goto out_page_table_lock;
- if (write_access) {
+ if (flags & FAULT_FLAG_WRITE) {
if (!pte_write(entry)) {
ret = hugetlb_cow(mm, vma, address, ptep, entry,
pagecache_page);
@@ -2152,7 +2152,8 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
entry = pte_mkdirty(entry);
}
entry = pte_mkyoung(entry);
- if (huge_ptep_set_access_flags(vma, address, ptep, entry, write_access))
+ if (huge_ptep_set_access_flags(vma, address, ptep, entry,
+ flags & FAULT_FLAG_WRITE))
update_mmu_cache(vma, address, entry);
out_page_table_lock:
@@ -2369,7 +2370,7 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
long chg = region_truncate(&inode->i_mapping->private_list, offset);
spin_lock(&inode->i_lock);
- inode->i_blocks -= blocks_per_huge_page(h);
+ inode->i_blocks -= (blocks_per_huge_page(h) * freed);
spin_unlock(&inode->i_lock);
hugetlb_put_quota(inode->i_mapping, (chg - freed));
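The signature change above replaces the write_access argument with the generic fault-flags bitmask. A hedged sketch of the caller-side convention this implies (example_fault is a hypothetical wrapper mirroring what the corresponding mm/memory.c change is expected to pass; it is not part of this file's diff):

    /* Editorial sketch: deriving the flags now expected by hugetlb_fault(). */
    static int example_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                             unsigned long address, int write_access)
    {
            unsigned int flags = write_access ? FAULT_FLAG_WRITE : 0;

            if (unlikely(is_vm_hugetlb_page(vma)))
                    return hugetlb_fault(mm, vma, address, flags);

            return VM_FAULT_SIGBUS; /* placeholder for the normal fault path */
    }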
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 58ec86c9e58..487267310a8 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -48,10 +48,10 @@
* scanned. This list is only modified during a scanning episode when the
* scan_mutex is held. At the end of a scan, the gray_list is always empty.
* Note that the kmemleak_object.use_count is incremented when an object is
- * added to the gray_list and therefore cannot be freed
- * - kmemleak_mutex (mutex): prevents multiple users of the "kmemleak" debugfs
- * file together with modifications to the memory scanning parameters
- * including the scan_thread pointer
+ * added to the gray_list and therefore cannot be freed. This mutex also
+ * prevents multiple users of the "kmemleak" debugfs file together with
+ * modifications to the memory scanning parameters including the scan_thread
+ * pointer
*
* The kmemleak_object structures have a use_count incremented or decremented
* using the get_object()/put_object() functions. When the use_count becomes
@@ -61,6 +61,8 @@
* structure.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
@@ -101,14 +103,16 @@
* Kmemleak configuration and common defines.
*/
#define MAX_TRACE 16 /* stack trace length */
-#define REPORTS_NR 50 /* maximum number of reported leaks */
#define MSECS_MIN_AGE 5000 /* minimum object age for reporting */
-#define MSECS_SCAN_YIELD 10 /* CPU yielding period */
#define SECS_FIRST_SCAN 60 /* delay before the first scan */
#define SECS_SCAN_WAIT 600 /* subsequent auto scanning delay */
+#define GRAY_LIST_PASSES 25 /* maximum number of gray list scans */
#define BYTES_PER_POINTER sizeof(void *)
+/* GFP bitmask for kmemleak internal allocations */
+#define GFP_KMEMLEAK_MASK (GFP_KERNEL | GFP_ATOMIC)
+
/* scanning area inside a memory block */
struct kmemleak_scan_area {
struct hlist_node node;
@@ -154,6 +158,8 @@ struct kmemleak_object {
#define OBJECT_REPORTED (1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN (1 << 2)
+/* flag set on newly allocated objects */
+#define OBJECT_NEW (1 << 3)
/* the list of all allocated objects */
static LIST_HEAD(object_list);
@@ -181,27 +187,21 @@ static atomic_t kmemleak_error = ATOMIC_INIT(0);
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;
-/* used for yielding the CPU to other tasks during scanning */
-static unsigned long next_scan_yield;
static struct task_struct *scan_thread;
-static unsigned long jiffies_scan_yield;
+/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
+static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
-static int kmemleak_stack_scan;
-/* mutex protecting the memory scanning */
+static int kmemleak_stack_scan = 1;
+/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
-/* mutex protecting the access to the /sys/kernel/debug/kmemleak file */
-static DEFINE_MUTEX(kmemleak_mutex);
-
-/* number of leaks reported (for limitation purposes) */
-static int reported_leaks;
/*
- * Early object allocation/freeing logging. Kkmemleak is initialized after the
+ * Early object allocation/freeing logging. Kmemleak is initialized after the
* kernel allocator. However, both the kernel allocator and kmemleak may
- * allocate memory blocks which need to be tracked. Kkmemleak defines an
+ * allocate memory blocks which need to be tracked. Kmemleak defines an
* arbitrary buffer to hold the allocation/freeing information before it is
* fully initialized.
*/
@@ -210,6 +210,7 @@ static int reported_leaks;
enum {
KMEMLEAK_ALLOC,
KMEMLEAK_FREE,
+ KMEMLEAK_FREE_PART,
KMEMLEAK_NOT_LEAK,
KMEMLEAK_IGNORE,
KMEMLEAK_SCAN_AREA,
@@ -230,7 +231,7 @@ struct early_log {
};
/* early logging buffer and current position */
-static struct early_log early_log[200];
+static struct early_log early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE];
static int crt_early_log;
static void kmemleak_disable(void);
@@ -245,10 +246,10 @@ static void kmemleak_disable(void);
/*
* Macro invoked when a serious kmemleak condition occured and cannot be
- * recovered from. Kkmemleak will be disabled and further allocation/freeing
+ * recovered from. Kmemleak will be disabled and further allocation/freeing
* tracing no longer available.
*/
-#define kmemleak_panic(x...) do { \
+#define kmemleak_stop(x...) do { \
kmemleak_warn(x); \
kmemleak_disable(); \
} while (0)
@@ -273,13 +274,9 @@ static int color_gray(const struct kmemleak_object *object)
return object->min_count != -1 && object->count >= object->min_count;
}
-/*
- * Objects are considered referenced if their color is gray and they have not
- * been deleted.
- */
-static int referenced_object(struct kmemleak_object *object)
+static int color_black(const struct kmemleak_object *object)
{
- return (object->flags & OBJECT_ALLOCATED) && color_gray(object);
+ return object->min_count == -1;
}
/*
@@ -290,42 +287,28 @@ static int referenced_object(struct kmemleak_object *object)
static int unreferenced_object(struct kmemleak_object *object)
{
return (object->flags & OBJECT_ALLOCATED) && color_white(object) &&
- time_is_before_eq_jiffies(object->jiffies + jiffies_min_age);
+ time_before_eq(object->jiffies + jiffies_min_age,
+ jiffies_last_scan);
}
/*
- * Printing of the (un)referenced objects information, either to the seq file
- * or to the kernel log. The print_referenced/print_unreferenced functions
- * must be called with the object->lock held.
+ * Printing of the unreferenced objects information to the seq file. The
+ * print_unreferenced function must be called with the object->lock held.
*/
-#define print_helper(seq, x...) do { \
- struct seq_file *s = (seq); \
- if (s) \
- seq_printf(s, x); \
- else \
- pr_info(x); \
-} while (0)
-
-static void print_referenced(struct kmemleak_object *object)
-{
- pr_info("kmemleak: referenced object 0x%08lx (size %zu)\n",
- object->pointer, object->size);
-}
-
static void print_unreferenced(struct seq_file *seq,
struct kmemleak_object *object)
{
int i;
- print_helper(seq, "kmemleak: unreferenced object 0x%08lx (size %zu):\n",
- object->pointer, object->size);
- print_helper(seq, " comm \"%s\", pid %d, jiffies %lu\n",
- object->comm, object->pid, object->jiffies);
- print_helper(seq, " backtrace:\n");
+ seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
+ object->pointer, object->size);
+ seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu\n",
+ object->comm, object->pid, object->jiffies);
+ seq_printf(seq, " backtrace:\n");
for (i = 0; i < object->trace_len; i++) {
void *ptr = (void *)object->trace[i];
- print_helper(seq, " [<%p>] %pS\n", ptr, ptr);
+ seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
}
}
@@ -341,7 +324,7 @@ static void dump_object_info(struct kmemleak_object *object)
trace.nr_entries = object->trace_len;
trace.entries = object->trace;
- pr_notice("kmemleak: Object 0x%08lx (size %zu):\n",
+ pr_notice("Object 0x%08lx (size %zu):\n",
object->tree_node.start, object->size);
pr_notice(" comm \"%s\", pid %d, jiffies %lu\n",
object->comm, object->pid, object->jiffies);
@@ -369,7 +352,7 @@ static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
object = prio_tree_entry(node, struct kmemleak_object,
tree_node);
if (!alias && object->pointer != ptr) {
- kmemleak_warn("kmemleak: Found object by alias");
+ kmemleak_warn("Found object by alias");
object = NULL;
}
} else
@@ -462,10 +445,9 @@ static void create_object(unsigned long ptr, size_t size, int min_count,
struct prio_tree_node *node;
struct stack_trace trace;
- object = kmem_cache_alloc(object_cache, gfp & ~GFP_SLAB_BUG_MASK);
+ object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);
if (!object) {
- kmemleak_panic("kmemleak: Cannot allocate a kmemleak_object "
- "structure\n");
+ kmemleak_stop("Cannot allocate a kmemleak_object structure\n");
return;
}
@@ -474,7 +456,7 @@ static void create_object(unsigned long ptr, size_t size, int min_count,
INIT_HLIST_HEAD(&object->area_list);
spin_lock_init(&object->lock);
atomic_set(&object->use_count, 1);
- object->flags = OBJECT_ALLOCATED;
+ object->flags = OBJECT_ALLOCATED | OBJECT_NEW;
object->pointer = ptr;
object->size = size;
object->min_count = min_count;
@@ -524,8 +506,8 @@ static void create_object(unsigned long ptr, size_t size, int min_count,
if (node != &object->tree_node) {
unsigned long flags;
- kmemleak_panic("kmemleak: Cannot insert 0x%lx into the object "
- "search tree (already existing)\n", ptr);
+ kmemleak_stop("Cannot insert 0x%lx into the object search tree "
+ "(already existing)\n", ptr);
object = lookup_object(ptr, 1);
spin_lock_irqsave(&object->lock, flags);
dump_object_info(object);
@@ -542,39 +524,87 @@ out:
* Remove the metadata (struct kmemleak_object) for a memory block from the
* object_list and object_tree_root and decrement its use_count.
*/
-static void delete_object(unsigned long ptr)
+static void __delete_object(struct kmemleak_object *object)
{
unsigned long flags;
- struct kmemleak_object *object;
write_lock_irqsave(&kmemleak_lock, flags);
- object = lookup_object(ptr, 0);
- if (!object) {
- kmemleak_warn("kmemleak: Freeing unknown object at 0x%08lx\n",
- ptr);
- write_unlock_irqrestore(&kmemleak_lock, flags);
- return;
- }
prio_tree_remove(&object_tree_root, &object->tree_node);
list_del_rcu(&object->object_list);
write_unlock_irqrestore(&kmemleak_lock, flags);
WARN_ON(!(object->flags & OBJECT_ALLOCATED));
- WARN_ON(atomic_read(&object->use_count) < 1);
+ WARN_ON(atomic_read(&object->use_count) < 2);
/*
* Locking here also ensures that the corresponding memory block
* cannot be freed when it is being scanned.
*/
spin_lock_irqsave(&object->lock, flags);
- if (object->flags & OBJECT_REPORTED)
- print_referenced(object);
object->flags &= ~OBJECT_ALLOCATED;
spin_unlock_irqrestore(&object->lock, flags);
put_object(object);
}
/*
+ * Look up the metadata (struct kmemleak_object) corresponding to ptr and
+ * delete it.
+ */
+static void delete_object_full(unsigned long ptr)
+{
+ struct kmemleak_object *object;
+
+ object = find_and_get_object(ptr, 0);
+ if (!object) {
+#ifdef DEBUG
+ kmemleak_warn("Freeing unknown object at 0x%08lx\n",
+ ptr);
+#endif
+ return;
+ }
+ __delete_object(object);
+ put_object(object);
+}
+
+/*
+ * Look up the metadata (struct kmemleak_object) corresponding to ptr and
+ * delete it. If the memory block is partially freed, the function may create
+ * additional metadata for the remaining parts of the block.
+ */
+static void delete_object_part(unsigned long ptr, size_t size)
+{
+ struct kmemleak_object *object;
+ unsigned long start, end;
+
+ object = find_and_get_object(ptr, 1);
+ if (!object) {
+#ifdef DEBUG
+ kmemleak_warn("Partially freeing unknown object at 0x%08lx "
+ "(size %zu)\n", ptr, size);
+#endif
+ return;
+ }
+ __delete_object(object);
+
+ /*
+ * Create one or two objects that may result from the memory block
+ * split. Note that partial freeing is only done by free_bootmem() and
+ * this happens before kmemleak_init() is called. The path below is
+ * only executed during early log recording in kmemleak_init(), so
+ * GFP_KERNEL is enough.
+ */
+ start = object->pointer;
+ end = object->pointer + object->size;
+ if (ptr > start)
+ create_object(start, ptr - start, object->min_count,
+ GFP_KERNEL);
+ if (ptr + size < end)
+ create_object(ptr + size, end - ptr - size, object->min_count,
+ GFP_KERNEL);
+
+ put_object(object);
+}
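A concrete illustration of the split performed by delete_object_part() above (editorial example, not patch code): suppose a tracked bootmem block covers [0x1000, 0x5000) and free_bootmem() releases [0x2000, 0x3000).

    start = 0x1000, end = 0x5000, ptr = 0x2000, size = 0x1000
    ptr > start       ->  create_object(0x1000, 0x1000, ...)   /* head kept */
    ptr + size < end  ->  create_object(0x3000, 0x2000, ...)   /* tail kept */

Only the freed middle range stops being tracked; both remaining pieces inherit the original min_count.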
+/*
* Make a object permanently as gray-colored so that it can no longer be
* reported as a leak. This is used in general to mark a false positive.
*/
@@ -585,8 +615,7 @@ static void make_gray_object(unsigned long ptr)
object = find_and_get_object(ptr, 0);
if (!object) {
- kmemleak_warn("kmemleak: Graying unknown object at 0x%08lx\n",
- ptr);
+ kmemleak_warn("Graying unknown object at 0x%08lx\n", ptr);
return;
}
@@ -607,8 +636,7 @@ static void make_black_object(unsigned long ptr)
object = find_and_get_object(ptr, 0);
if (!object) {
- kmemleak_warn("kmemleak: Blacking unknown object at 0x%08lx\n",
- ptr);
+ kmemleak_warn("Blacking unknown object at 0x%08lx\n", ptr);
return;
}
@@ -631,21 +659,20 @@ static void add_scan_area(unsigned long ptr, unsigned long offset,
object = find_and_get_object(ptr, 0);
if (!object) {
- kmemleak_warn("kmemleak: Adding scan area to unknown "
- "object at 0x%08lx\n", ptr);
+ kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
+ ptr);
return;
}
- area = kmem_cache_alloc(scan_area_cache, gfp & ~GFP_SLAB_BUG_MASK);
+ area = kmem_cache_alloc(scan_area_cache, gfp & GFP_KMEMLEAK_MASK);
if (!area) {
- kmemleak_warn("kmemleak: Cannot allocate a scan area\n");
+ kmemleak_warn("Cannot allocate a scan area\n");
goto out;
}
spin_lock_irqsave(&object->lock, flags);
if (offset + length > object->size) {
- kmemleak_warn("kmemleak: Scan area larger than object "
- "0x%08lx\n", ptr);
+ kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
dump_object_info(object);
kmem_cache_free(scan_area_cache, area);
goto out_unlock;
@@ -674,8 +701,7 @@ static void object_no_scan(unsigned long ptr)
object = find_and_get_object(ptr, 0);
if (!object) {
- kmemleak_warn("kmemleak: Not scanning unknown object at "
- "0x%08lx\n", ptr);
+ kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
return;
}
@@ -696,7 +722,8 @@ static void log_early(int op_type, const void *ptr, size_t size,
struct early_log *log;
if (crt_early_log >= ARRAY_SIZE(early_log)) {
- kmemleak_panic("kmemleak: Early log buffer exceeded\n");
+ pr_warning("Early log buffer exceeded\n");
+ kmemleak_disable();
return;
}
@@ -741,13 +768,28 @@ void kmemleak_free(const void *ptr)
pr_debug("%s(0x%p)\n", __func__, ptr);
if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
- delete_object((unsigned long)ptr);
+ delete_object_full((unsigned long)ptr);
else if (atomic_read(&kmemleak_early_log))
log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);
/*
+ * Partial memory freeing function callback. This function is usually called
+ * from bootmem allocator when (part of) a memory block is freed.
+ */
+void kmemleak_free_part(const void *ptr, size_t size)
+{
+ pr_debug("%s(0x%p)\n", __func__, ptr);
+
+ if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+ delete_object_part((unsigned long)ptr, size);
+ else if (atomic_read(&kmemleak_early_log))
+ log_early(KMEMLEAK_FREE_PART, ptr, size, 0, 0, 0);
+}
+EXPORT_SYMBOL_GPL(kmemleak_free_part);
+
+/*
* Mark an already allocated memory block as a false positive. This will cause
* the block to no longer be reported as leak and always be scanned.
*/
@@ -808,21 +850,6 @@ void kmemleak_no_scan(const void *ptr)
EXPORT_SYMBOL(kmemleak_no_scan);
/*
- * Yield the CPU so that other tasks get a chance to run. The yielding is
- * rate-limited to avoid excessive number of calls to the schedule() function
- * during memory scanning.
- */
-static void scan_yield(void)
-{
- might_sleep();
-
- if (time_is_before_eq_jiffies(next_scan_yield)) {
- schedule();
- next_scan_yield = jiffies + jiffies_scan_yield;
- }
-}
-
-/*
* Memory scanning is a long process and it needs to be interruptable. This
* function checks whether such interrupt condition occured.
*/
@@ -848,7 +875,7 @@ static int scan_should_stop(void)
* found to the gray list.
*/
static void scan_block(void *_start, void *_end,
- struct kmemleak_object *scanned)
+ struct kmemleak_object *scanned, int allow_resched)
{
unsigned long *ptr;
unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
@@ -859,18 +886,11 @@ static void scan_block(void *_start, void *_end,
unsigned long pointer = *ptr;
struct kmemleak_object *object;
+ if (allow_resched)
+ cond_resched();
if (scan_should_stop())
break;
- /*
- * When scanning a memory block with a corresponding
- * kmemleak_object, the CPU yielding is handled in the calling
- * code since it holds the object->lock to avoid the block
- * freeing.
- */
- if (!scanned)
- scan_yield();
-
object = find_and_get_object(pointer, 1);
if (!object)
continue;
@@ -931,12 +951,12 @@ static void scan_object(struct kmemleak_object *object)
goto out;
if (hlist_empty(&object->area_list))
scan_block((void *)object->pointer,
- (void *)(object->pointer + object->size), object);
+ (void *)(object->pointer + object->size), object, 0);
else
hlist_for_each_entry(area, elem, &object->area_list, node)
scan_block((void *)(object->pointer + area->offset),
(void *)(object->pointer + area->offset
- + area->length), object);
+ + area->length), object, 0);
out:
spin_unlock_irqrestore(&object->lock, flags);
}
@@ -952,6 +972,10 @@ static void kmemleak_scan(void)
struct kmemleak_object *object, *tmp;
struct task_struct *task;
int i;
+ int new_leaks = 0;
+ int gray_list_pass = 0;
+
+ jiffies_last_scan = jiffies;
/* prepare the kmemleak_object's */
rcu_read_lock();
@@ -963,13 +987,14 @@ static void kmemleak_scan(void)
* 1 reference to any object at this point.
*/
if (atomic_read(&object->use_count) > 1) {
- pr_debug("kmemleak: object->use_count = %d\n",
+ pr_debug("object->use_count = %d\n",
atomic_read(&object->use_count));
dump_object_info(object);
}
#endif
/* reset the reference count (whiten the object) */
object->count = 0;
+ object->flags &= ~OBJECT_NEW;
if (color_gray(object) && get_object(object))
list_add_tail(&object->gray_list, &gray_list);
@@ -978,14 +1003,14 @@ static void kmemleak_scan(void)
rcu_read_unlock();
/* data/bss scanning */
- scan_block(_sdata, _edata, NULL);
- scan_block(__bss_start, __bss_stop, NULL);
+ scan_block(_sdata, _edata, NULL, 1);
+ scan_block(__bss_start, __bss_stop, NULL, 1);
#ifdef CONFIG_SMP
/* per-cpu sections scanning */
for_each_possible_cpu(i)
scan_block(__per_cpu_start + per_cpu_offset(i),
- __per_cpu_end + per_cpu_offset(i), NULL);
+ __per_cpu_end + per_cpu_offset(i), NULL, 1);
#endif
/*
@@ -1007,7 +1032,7 @@ static void kmemleak_scan(void)
/* only scan if page is in use */
if (page_count(page) == 0)
continue;
- scan_block(page, page + 1, NULL);
+ scan_block(page, page + 1, NULL, 1);
}
}
@@ -1019,7 +1044,8 @@ static void kmemleak_scan(void)
read_lock(&tasklist_lock);
for_each_process(task)
scan_block(task_stack_page(task),
- task_stack_page(task) + THREAD_SIZE, NULL);
+ task_stack_page(task) + THREAD_SIZE,
+ NULL, 0);
read_unlock(&tasklist_lock);
}
@@ -1031,9 +1057,10 @@ static void kmemleak_scan(void)
* kmemleak objects cannot be freed from outside the loop because their
* use_count was increased.
*/
+repeat:
object = list_entry(gray_list.next, typeof(*object), gray_list);
while (&object->gray_list != &gray_list) {
- scan_yield();
+ cond_resched();
/* may add new objects to the list */
if (!scan_should_stop())
@@ -1048,7 +1075,59 @@ static void kmemleak_scan(void)
object = tmp;
}
+
+ if (scan_should_stop() || ++gray_list_pass >= GRAY_LIST_PASSES)
+ goto scan_end;
+
+ /*
+ * Check for new objects allocated during this scanning and add them
+ * to the gray list.
+ */
+ rcu_read_lock();
+ list_for_each_entry_rcu(object, &object_list, object_list) {
+ spin_lock_irqsave(&object->lock, flags);
+ if ((object->flags & OBJECT_NEW) && !color_black(object) &&
+ get_object(object)) {
+ object->flags &= ~OBJECT_NEW;
+ list_add_tail(&object->gray_list, &gray_list);
+ }
+ spin_unlock_irqrestore(&object->lock, flags);
+ }
+ rcu_read_unlock();
+
+ if (!list_empty(&gray_list))
+ goto repeat;
+
+scan_end:
WARN_ON(!list_empty(&gray_list));
+
+ /*
+ * If scanning was stopped or new objects were being allocated at a
+ * higher rate than gray list scanning, do not report any new
+ * unreferenced objects.
+ */
+ if (scan_should_stop() || gray_list_pass >= GRAY_LIST_PASSES)
+ return;
+
+ /*
+ * Scanning result reporting.
+ */
+ rcu_read_lock();
+ list_for_each_entry_rcu(object, &object_list, object_list) {
+ spin_lock_irqsave(&object->lock, flags);
+ if (unreferenced_object(object) &&
+ !(object->flags & OBJECT_REPORTED)) {
+ object->flags |= OBJECT_REPORTED;
+ new_leaks++;
+ }
+ spin_unlock_irqrestore(&object->lock, flags);
+ }
+ rcu_read_unlock();
+
+ if (new_leaks)
+ pr_info("%d new suspected memory leaks (see "
+ "/sys/kernel/debug/kmemleak)\n", new_leaks);
+
}
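To summarize the reworked scanning loop above, here is an editorial outline of kmemleak_scan() after this patch; every helper name below is a hypothetical placeholder for the corresponding steps in the hunks above, not a real function:

    /* Editorial outline of kmemleak_scan() after this patch. */
    static void kmemleak_scan_outline(void)
    {
            int pass = 0;

            whiten_all_objects();            /* count = 0, clear OBJECT_NEW */
            scan_data_bss_percpu_and_pages();
            scan_task_stacks_if_enabled();

            do {
                    scan_gray_list();        /* may uncover more gray objects */
                    if (scan_should_stop() || ++pass >= GRAY_LIST_PASSES)
                            break;
                    collect_new_objects();   /* OBJECT_NEW && !black -> gray */
            } while (!gray_list_is_empty());

            /* Report only if the scan completed within the pass budget;
             * otherwise new allocations were outpacing the gray list scan. */
            if (!scan_should_stop() && pass < GRAY_LIST_PASSES)
                    report_new_unreferenced_objects();
    }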
/*
@@ -1059,7 +1138,8 @@ static int kmemleak_scan_thread(void *arg)
{
static int first_run = 1;
- pr_info("kmemleak: Automatic memory scanning thread started\n");