Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug               59
-rw-r--r--  lib/Makefile                     7
-rw-r--r--  lib/bcd.c                        8
-rw-r--r--  lib/crc32.c                      9
-rw-r--r--  lib/decompress.c                 9
-rw-r--r--  lib/dma-debug.c                  5
-rw-r--r--  lib/dynamic_debug.c             56
-rw-r--r--  lib/flex_proportions.c           2
-rw-r--r--  lib/gcd.c                        3
-rw-r--r--  lib/gen_crc32table.c             6
-rw-r--r--  lib/genalloc.c                  88
-rw-r--r--  lib/idr.c                       32
-rw-r--r--  lib/interval_tree.c             10
-rw-r--r--  lib/interval_tree_test_main.c  105
-rw-r--r--  lib/kasprintf.c                  2
-rw-r--r--  lib/kobject_uevent.c             5
-rw-r--r--  lib/nlattr.c                     4
-rw-r--r--  lib/parser.c                    10
-rw-r--r--  lib/plist.c                      4
-rw-r--r--  lib/prio_tree.c                466
-rw-r--r--  lib/rbtree.c                   656
-rw-r--r--  lib/rbtree_test.c              234
-rw-r--r--  lib/scatterlist.c               35
-rw-r--r--  lib/spinlock_debug.c            32
-rw-r--r--  lib/swiotlb.c                   33
-rw-r--r--  lib/vsprintf.c                 139
26 files changed, 1073 insertions(+), 946 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2403a63b5da..28e9d6c9894 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -196,12 +196,13 @@ config LOCKUP_DETECTOR
thresholds can be controlled through the sysctl watchdog_thresh.
config HARDLOCKUP_DETECTOR
- def_bool LOCKUP_DETECTOR && PERF_EVENTS && HAVE_PERF_EVENTS_NMI && \
- !HAVE_NMI_WATCHDOG
+ def_bool y
+ depends on LOCKUP_DETECTOR && !HAVE_NMI_WATCHDOG
+ depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI
config BOOTPARAM_HARDLOCKUP_PANIC
bool "Panic (Reboot) On Hard Lockups"
- depends on LOCKUP_DETECTOR
+ depends on HARDLOCKUP_DETECTOR
help
Say Y here to enable the kernel to panic on "hard lockups",
which are bugs that cause the kernel to loop in kernel
@@ -212,7 +213,7 @@ config BOOTPARAM_HARDLOCKUP_PANIC
config BOOTPARAM_HARDLOCKUP_PANIC_VALUE
int
- depends on LOCKUP_DETECTOR
+ depends on HARDLOCKUP_DETECTOR
range 0 1
default 0 if !BOOTPARAM_HARDLOCKUP_PANIC
default 1 if BOOTPARAM_HARDLOCKUP_PANIC
@@ -449,11 +450,12 @@ config SLUB_STATS
out which slabs are relevant to a particular load.
Try running: slabinfo -DA
+config HAVE_DEBUG_KMEMLEAK
+ bool
+
config DEBUG_KMEMLEAK
bool "Kernel memory leak detector"
- depends on DEBUG_KERNEL && EXPERIMENTAL && \
- (X86 || ARM || PPC || MIPS || S390 || SPARC64 || SUPERH || MICROBLAZE || TILE)
-
+ depends on DEBUG_KERNEL && EXPERIMENTAL && HAVE_DEBUG_KMEMLEAK
select DEBUG_FS
select STACKTRACE if STACKTRACE_SUPPORT
select KALLSYMS
@@ -629,6 +631,20 @@ config PROVE_RCU_REPEATEDLY
Say N if you are unsure.
+config PROVE_RCU_DELAY
+ bool "RCU debugging: preemptible RCU race provocation"
+ depends on DEBUG_KERNEL && PREEMPT_RCU
+ default n
+ help
+ There is a class of races that involve an unlikely preemption
+ of __rcu_read_unlock() just after ->rcu_read_lock_nesting has
+ been set to INT_MIN. This feature inserts a delay at that
+ point to increase the probability of these races.
+
+ Say Y to increase probability of preemption of __rcu_read_unlock().
+
+ Say N if you are unsure.
+
config SPARSE_RCU_POINTER
bool "RCU debugging: sparse-based checks for pointer usage"
default n
@@ -735,11 +751,12 @@ config DEBUG_HIGHMEM
This option enables additional error checking for high memory systems.
Disable for production systems.
+config HAVE_DEBUG_BUGVERBOSE
+ bool
+
config DEBUG_BUGVERBOSE
bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT
- depends on BUG
- depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \
- FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300 || TILE
+ depends on BUG && (GENERIC_BUG || HAVE_DEBUG_BUGVERBOSE)
default y
help
Say Y here to make BUG() panics output the file name and line number
@@ -781,6 +798,15 @@ config DEBUG_VM
If unsure, say N.
+config DEBUG_VM_RB
+ bool "Debug VM red-black trees"
+ depends on DEBUG_VM
+ help
+ Enable this to turn on more extended checks in the virtual-memory
+ system that may impact performance.
+
+ If unsure, say N.
+
config DEBUG_VIRTUAL
bool "Debug VM translations"
depends on DEBUG_KERNEL && X86
@@ -1265,6 +1291,19 @@ config LATENCYTOP
source mm/Kconfig.debug
source kernel/trace/Kconfig
+config RBTREE_TEST
+ tristate "Red-Black tree test"
+ depends on m && DEBUG_KERNEL
+ help
+ A benchmark measuring the performance of the rbtree library.
+ Also includes rbtree invariant checks.
+
+config INTERVAL_TREE_TEST
+ tristate "Interval tree test"
+ depends on m && DEBUG_KERNEL
+ help
+ A benchmark measuring the performance of the interval tree library.
+
config PROVIDE_OHCI1394_DMA_INIT
bool "Remote debugging over FireWire early on boot"
depends on PCI && X86
diff --git a/lib/Makefile b/lib/Makefile
index ca856b69a21..821a1622911 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -9,7 +9,7 @@ endif
lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o dump_stack.o timerqueue.o\
- idr.o int_sqrt.o extable.o prio_tree.o \
+ idr.o int_sqrt.o extable.o \
sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \
proportions.o flex_proportions.o prio_heap.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o
@@ -140,6 +140,11 @@ $(foreach file, $(libfdt_files), \
$(eval CFLAGS_$(file) = -I$(src)/../scripts/dtc/libfdt))
lib-$(CONFIG_LIBFDT) += $(libfdt_files)
+obj-$(CONFIG_RBTREE_TEST) += rbtree_test.o
+obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o
+
+interval_tree_test-objs := interval_tree_test_main.o interval_tree.o
+
obj-$(CONFIG_ASN1) += asn1_decoder.o
hostprogs-y := gen_crc32table
diff --git a/lib/bcd.c b/lib/bcd.c
index 55efaf74234..40d304efe27 100644
--- a/lib/bcd.c
+++ b/lib/bcd.c
@@ -1,14 +1,14 @@
#include <linux/bcd.h>
#include <linux/export.h>
-unsigned bcd2bin(unsigned char val)
+unsigned _bcd2bin(unsigned char val)
{
return (val & 0x0f) + (val >> 4) * 10;
}
-EXPORT_SYMBOL(bcd2bin);
+EXPORT_SYMBOL(_bcd2bin);
-unsigned char bin2bcd(unsigned val)
+unsigned char _bin2bcd(unsigned val)
{
return ((val / 10) << 4) + val % 10;
}
-EXPORT_SYMBOL(bin2bcd);
+EXPORT_SYMBOL(_bin2bcd);
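
Note: this hunk only renames the out-of-line helpers by prefixing an underscore; a plausible reading (an assumption, since the header change is not part of this diff) is that <linux/bcd.h> now wraps them in constant-folding bcd2bin()/bin2bcd() macros. The conversion arithmetic is untouched; a standalone sketch of what it computes:

    /* Standalone sketch: BCD packs one decimal digit per nibble, so 0x42
     * encodes decimal 42 (as in RTC time registers). */
    #include <assert.h>

    static unsigned bcd2bin(unsigned char val)
    {
            return (val & 0x0f) + (val >> 4) * 10;  /* low digit + 10 * high digit */
    }

    static unsigned char bin2bcd(unsigned val)
    {
            return ((val / 10) << 4) + val % 10;    /* tens go into the high nibble */
    }

    int main(void)
    {
            assert(bcd2bin(0x42) == 42);
            assert(bin2bcd(59) == 0x59);
            return 0;
    }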
diff --git a/lib/crc32.c b/lib/crc32.c
index 61774b8db4d..072fbd8234d 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -188,11 +188,13 @@ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
#else
u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
{
- return crc32_le_generic(crc, p, len, crc32table_le, CRCPOLY_LE);
+ return crc32_le_generic(crc, p, len,
+ (const u32 (*)[256])crc32table_le, CRCPOLY_LE);
}
u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
{
- return crc32_le_generic(crc, p, len, crc32ctable_le, CRC32C_POLY_LE);
+ return crc32_le_generic(crc, p, len,
+ (const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE);
}
#endif
EXPORT_SYMBOL(crc32_le);
@@ -253,7 +255,8 @@ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
#else
u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
{
- return crc32_be_generic(crc, p, len, crc32table_be, CRCPOLY_BE);
+ return crc32_be_generic(crc, p, len,
+ (const u32 (*)[256])crc32table_be, CRCPOLY_BE);
}
#endif
EXPORT_SYMBOL(crc32_be);
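
Note: these casts pair with the gen_crc32table.c hunk further down, which drops const from the generated tables; a likely motive (an assumption, the changelog is not shown here) is that __cacheline_aligned places the tables in a writable data section, which clashes with a const qualifier on some toolchains. The casts keep the generic helpers const-correct, and they must be explicit: unlike T * to const T *, C does not implicitly convert T (*)[N] to const T (*)[N]. A minimal sketch of the pattern:

    /* Sketch: passing a writable 2D table to a const-qualified consumer. */
    static unsigned int table[4][256];              /* definition stays non-const */

    static unsigned int first_entry(const unsigned int (*t)[256])
    {
            return t[0][0];                         /* callee promises not to write */
    }

    unsigned int demo(void)
    {
            /* Explicit cast required: u32 (*)[256] does not implicitly
             * become const u32 (*)[256]. */
            return first_entry((const unsigned int (*)[256])table);
    }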
diff --git a/lib/decompress.c b/lib/decompress.c
index 3d766b7f60a..31a80427728 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/string.h>
+#include <linux/init.h>
#ifndef CONFIG_DECOMPRESS_GZIP
# define gunzip NULL
@@ -31,11 +32,13 @@
# define unlzo NULL
#endif
-static const struct compress_format {
+struct compress_format {
unsigned char magic[2];
const char *name;
decompress_fn decompressor;
-} compressed_formats[] = {
+};
+
+static const struct compress_format compressed_formats[] __initdata = {
{ {037, 0213}, "gzip", gunzip },
{ {037, 0236}, "gzip", gunzip },
{ {0x42, 0x5a}, "bzip2", bunzip2 },
@@ -45,7 +48,7 @@ static const struct compress_format {
{ {0, 0}, NULL, NULL }
};
-decompress_fn decompress_method(const unsigned char *inbuf, int len,
+decompress_fn __init decompress_method(const unsigned char *inbuf, int len,
const char **name)
{
const struct compress_format *cf;
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 66ce4148913..b9087bff008 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -120,11 +120,6 @@ static const char *type2name[4] = { "single", "page",
static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
"DMA_FROM_DEVICE", "DMA_NONE" };
-/* little merge helper - remove it after the merge window */
-#ifndef BUS_NOTIFY_UNBOUND_DRIVER
-#define BUS_NOTIFY_UNBOUND_DRIVER 0x0005
-#endif
-
/*
* The access to some variables in this macro is racy. We can't use atomic_t
* here because all these variables are exported to debugfs. Some of them even
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 7ca29a0a301..e7f7d993357 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -521,25 +521,25 @@ static char *dynamic_emit_prefix(const struct _ddebug *desc, char *buf)
int pos_after_tid;
int pos = 0;
- pos += snprintf(buf + pos, remaining(pos), "%s", KERN_DEBUG);
+ *buf = '\0';
+
if (desc->flags & _DPRINTK_FLAGS_INCL_TID) {
if (in_interrupt())
- pos += snprintf(buf + pos, remaining(pos), "%s ",
- "<intr>");
+ pos += snprintf(buf + pos, remaining(pos), "<intr> ");
else
pos += snprintf(buf + pos, remaining(pos), "[%d] ",
- task_pid_vnr(current));
+ task_pid_vnr(current));
}
pos_after_tid = pos;
if (desc->flags & _DPRINTK_FLAGS_INCL_MODNAME)
pos += snprintf(buf + pos, remaining(pos), "%s:",
- desc->modname);
+ desc->modname);
if (desc->flags & _DPRINTK_FLAGS_INCL_FUNCNAME)
pos += snprintf(buf + pos, remaining(pos), "%s:",
- desc->function);
+ desc->function);
if (desc->flags & _DPRINTK_FLAGS_INCL_LINENO)
pos += snprintf(buf + pos, remaining(pos), "%d:",
- desc->lineno);
+ desc->lineno);
if (pos - pos_after_tid)
pos += snprintf(buf + pos, remaining(pos), " ");
if (pos >= PREFIX_SIZE)
@@ -559,9 +559,13 @@ int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
BUG_ON(!fmt);
va_start(args, fmt);
+
vaf.fmt = fmt;
vaf.va = &args;
- res = printk("%s%pV", dynamic_emit_prefix(descriptor, buf), &vaf);
+
+ res = printk(KERN_DEBUG "%s%pV",
+ dynamic_emit_prefix(descriptor, buf), &vaf);
+
va_end(args);
return res;
@@ -574,15 +578,26 @@ int __dynamic_dev_dbg(struct _ddebug *descriptor,
struct va_format vaf;
va_list args;
int res;
- char buf[PREFIX_SIZE];
BUG_ON(!descriptor);
BUG_ON(!fmt);
va_start(args, fmt);
+
vaf.fmt = fmt;
vaf.va = &args;
- res = __dev_printk(dynamic_emit_prefix(descriptor, buf), dev, &vaf);
+
+ if (!dev) {
+ res = printk(KERN_DEBUG "(NULL device *): %pV", &vaf);
+ } else {
+ char buf[PREFIX_SIZE];
+
+ res = dev_printk_emit(7, dev, "%s%s %s: %pV",
+ dynamic_emit_prefix(descriptor, buf),
+ dev_driver_string(dev), dev_name(dev),
+ &vaf);
+ }
+
va_end(args);
return res;
@@ -592,20 +607,35 @@ EXPORT_SYMBOL(__dynamic_dev_dbg);
#ifdef CONFIG_NET
int __dynamic_netdev_dbg(struct _ddebug *descriptor,
- const struct net_device *dev, const char *fmt, ...)
+ const struct net_device *dev, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
int res;
- char buf[PREFIX_SIZE];
BUG_ON(!descriptor);
BUG_ON(!fmt);
va_start(args, fmt);
+
vaf.fmt = fmt;
vaf.va = &args;
- res = __netdev_printk(dynamic_emit_prefix(descriptor, buf), dev, &vaf);
+
+ if (dev && dev->dev.parent) {
+ char buf[PREFIX_SIZE];
+
+ res = dev_printk_emit(7, dev->dev.parent,
+ "%s%s %s %s: %pV",
+ dynamic_emit_prefix(descriptor, buf),
+ dev_driver_string(dev->dev.parent),
+ dev_name(dev->dev.parent),
+ netdev_name(dev), &vaf);
+ } else if (dev) {
+ res = printk(KERN_DEBUG "%s: %pV", netdev_name(dev), &vaf);
+ } else {
+ res = printk(KERN_DEBUG "(NULL net_device): %pV", &vaf);
+ }
+
va_end(args);
return res;
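
Note: the KERN_DEBUG marker moves out of the prefix buffer and into the printk()/dev_printk_emit() calls themselves, so the log level travels as printk metadata rather than as text inside the prefix (dev_printk_emit() takes it as the leading 7, i.e. debug), and the dev/netdev variants gain explicit fallbacks for NULL pointers. All three paths lean on the kernel's %pV extension; a hedged sketch of that mechanism, with an illustrative wrapper name:

    /* Sketch: %pV expands a struct va_format, letting one printk() forward
     * another caller's format string and arguments without an intermediate
     * formatting buffer. */
    #include <linux/printk.h>
    #include <stdarg.h>

    static int mydrv_vdbg(const char *fmt, va_list args)
    {
            struct va_format vaf = {
                    .fmt = fmt,
                    .va  = &args,
            };

            return printk(KERN_DEBUG "mydrv: %pV", &vaf);
    }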
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
index c785554f952..ebf3bac460b 100644
--- a/lib/flex_proportions.c
+++ b/lib/flex_proportions.c
@@ -62,7 +62,7 @@ void fprop_global_destroy(struct fprop_global *p)
*/
bool fprop_new_period(struct fprop_global *p, int periods)
{
- u64 events;
+ s64 events;
unsigned long flags;
local_irq_save(flags);
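
Note: percpu_counter sums are s64 and can be transiently negative while per-CPU deltas are still in flight; storing such a sum in a u64 turns it into a huge positive value, defeating any later small-value comparison (a hedged reading of this one-liner, since the affected check sits outside the visible hunk). A toy illustration of the hazard:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int64_t sum = -1;                   /* transiently negative counter sum */
            uint64_t as_u64 = (uint64_t)sum;    /* what the old u64 variable held */

            /* prints: signed -1, unsigned 18446744073709551615 */
            printf("signed %lld, unsigned %llu\n",
                   (long long)sum, (unsigned long long)as_u64);
            return 0;
    }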
diff --git a/lib/gcd.c b/lib/gcd.c
index cce4f3cd14b..3657f129d7b 100644
--- a/lib/gcd.c
+++ b/lib/gcd.c
@@ -9,6 +9,9 @@ unsigned long gcd(unsigned long a, unsigned long b)
if (a < b)
swap(a, b);
+
+ if (!b)
+ return a;
while ((r = a % b) != 0) {
a = b;
b = r;
diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c
index 8f8d5439e2d..71fcfcd9641 100644
--- a/lib/gen_crc32table.c
+++ b/lib/gen_crc32table.c
@@ -109,7 +109,7 @@ int main(int argc, char** argv)
if (CRC_LE_BITS > 1) {
crc32init_le();
- printf("static const u32 __cacheline_aligned "
+ printf("static u32 __cacheline_aligned "
"crc32table_le[%d][%d] = {",
LE_TABLE_ROWS, LE_TABLE_SIZE);
output_table(crc32table_le, LE_TABLE_ROWS,
@@ -119,7 +119,7 @@ int main(int argc, char** argv)
if (CRC_BE_BITS > 1) {
crc32init_be();
- printf("static const u32 __cacheline_aligned "
+ printf("static u32 __cacheline_aligned "
"crc32table_be[%d][%d] = {",
BE_TABLE_ROWS, BE_TABLE_SIZE);
output_table(crc32table_be, LE_TABLE_ROWS,
@@ -128,7 +128,7 @@ int main(int argc, char** argv)
}
if (CRC_LE_BITS > 1) {
crc32cinit_le();
- printf("static const u32 __cacheline_aligned "
+ printf("static u32 __cacheline_aligned "
"crc32ctable_le[%d][%d] = {",
LE_TABLE_ROWS, LE_TABLE_SIZE);
output_table(crc32ctable_le, LE_TABLE_ROWS,
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 6bc04aab6ec..ca208a92628 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -152,6 +152,8 @@ struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
spin_lock_init(&pool->lock);
INIT_LIST_HEAD(&pool->chunks);
pool->min_alloc_order = min_alloc_order;
+ pool->algo = gen_pool_first_fit;
+ pool->data = NULL;
}
return pool;
}
@@ -255,8 +257,9 @@ EXPORT_SYMBOL(gen_pool_destroy);
* @size: number of bytes to allocate from the pool
*
* Allocate the requested number of bytes from the specified pool.
- * Uses a first-fit algorithm. Can not be used in NMI handler on
- * architectures without NMI-safe cmpxchg implementation.
+ * Uses the pool allocation function (with first-fit algorithm by default).
+ * Cannot be used in an NMI handler on architectures without an
+ * NMI-safe cmpxchg implementation.
*/
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
@@ -280,8 +283,8 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
end_bit = (chunk->end_addr - chunk->start_addr) >> order;
retry:
- start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit,
- start_bit, nbits, 0);
+ start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
+ pool->data);
if (start_bit >= end_bit)
continue;
remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
@@ -400,3 +403,80 @@ size_t gen_pool_size(struct gen_pool *pool)
return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);
+
+/**
+ * gen_pool_set_algo - set the allocation algorithm
+ * @pool: pool to change allocation algorithm
+ * @algo: custom algorithm function
+ * @data: additional data used by @algo
+ *
+ * Call @algo for each memory allocation in the pool.
+ * If @algo is NULL, gen_pool_first_fit is used as the default
+ * memory allocation function.
+ */
+void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
+{
+ rcu_read_lock();
+
+ pool->algo = algo;
+ if (!pool->algo)
+ pool->algo = gen_pool_first_fit;
+
+ pool->data = data;
+
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(gen_pool_set_algo);
+
+/**
+ * gen_pool_first_fit - find the first available region
+ * of memory matching the size requirement (no alignment constraint)
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @data: additional data - unused
+ */
+unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
+ unsigned long start, unsigned int nr, void *data)
+{
+ return bitmap_find_next_zero_area(map, size, start, nr, 0);
+}
+EXPORT_SYMBOL(gen_pool_first_fit);
+
+/**
+ * gen_pool_best_fit - find the best fitting region of memory
+ * matching the size requirement (no alignment constraint)
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @data: additional data - unused
+ *
+ * Iterate over the bitmap to find the smallest free region
+ * that can satisfy the allocation.
+ */
+unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
+ unsigned long start, unsigned int nr, void *data)
+{
+ unsigned long start_bit = size;
+ unsigned long len = size + 1;
+ unsigned long index;
+
+ index = bitmap_find_next_zero_area(map, size, start, nr, 0);
+
+ while (index < size) {
+ int next_bit = find_next_bit(map, size, index + nr);
+ if ((next_bit - index) < len) {
+ len = next_bit - index;
+ start_bit = index;
+ if (len == nr)
+ return start_bit;
+ }
+ index = bitmap_find_next_zero_area(map, size,
+ next_bit + 1, nr, 0);
+ }
+
+ return start_bit;
+}
+EXPORT_SYMBOL(gen_pool_best_fit);
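
Note: with the allocator now a per-pool function pointer, callers can opt into best-fit (or any custom policy) without changing gen_pool_alloc(). A hedged usage sketch; the granularity, sizes, and error handling are illustrative, not taken from this patch:

    #include <linux/errno.h>
    #include <linux/genalloc.h>

    static int mydrv_setup_pool(void *vaddr, size_t region_size)
    {
            struct gen_pool *pool;
            unsigned long addr;

            pool = gen_pool_create(5, -1);  /* 2^5 = 32-byte granularity, any node */
            if (!pool)
                    return -ENOMEM;

            /* Best-fit instead of the default first-fit; no private data. */
            gen_pool_set_algo(pool, gen_pool_best_fit, NULL);

            if (gen_pool_add(pool, (unsigned long)vaddr, region_size, -1)) {
                    gen_pool_destroy(pool);
                    return -ENOMEM;
            }

            addr = gen_pool_alloc(pool, 128);   /* now served by best-fit */
            if (addr)
                    gen_pool_free(pool, addr, 128);
            return 0;
    }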
diff --git a/lib/idr.c b/lib/idr.c
index 4046e29c0a9..648239079dd 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -20,7 +20,7 @@
* that id to this code and it returns your pointer.
* You can release ids at any time. When all ids are released, most of
- * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we
+ * the memory is returned (we keep MAX_IDR_FREE) in a local pool so we
* don't need to go to the memory "store" during an id allocate, just
* so you don't need to be too concerned about locking and conflicts
* with the slab allocator.
@@ -122,7 +122,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
*/
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
- while (idp->id_free_cnt < IDR_FREE_MAX) {
+ while (idp->id_free_cnt < MAX_IDR_FREE) {
struct idr_layer *new;
new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
if (new == NULL)
@@ -179,7 +179,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
sh = IDR_BITS*l;
id = ((id >> sh) ^ n ^ m) << sh;
}
- if ((id >= MAX_ID_BIT) || (id < 0))
+ if ((id >= MAX_IDR_BIT) || (id < 0))
return IDR_NOMORE_SPACE;
if (l == 0)
break;
@@ -223,7 +223,7 @@ build_up:
* Add a new layer to the top of the tree if the requested
* id is larger than the currently allocated space.
*/
- while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
+ while ((layers < (MAX_IDR_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
layers++;
if (!p->count) {
/* special case: if the tree is currently empty,
@@ -265,7 +265,7 @@ build_up:
static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
- struct idr_layer *pa[MAX_LEVEL];
+ struct idr_layer *pa[MAX_IDR_LEVEL];
int id;
id = idr_get_empty_slot(idp, starting_id, pa);
@@ -357,7 +357,7 @@ static void idr_remove_warning(int id)
static void sub_remove(struct idr *idp, int shift, int id)
{
struct idr_layer *p = idp->top;
- struct idr_layer **pa[MAX_LEVEL];
+ struct idr_layer **pa[MAX_IDR_LEVEL];
struct idr_layer ***paa = &pa[0];
struct idr_layer *to_free;
int n;
@@ -402,7 +402,7 @@ void idr_remove(struct idr *idp, int id)
struct idr_layer *to_free;
/* Mask off upper bits we don't use for the search. */
- id &= MAX_ID_MASK;
+ id &= MAX_IDR_MASK;
sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
@@ -420,7 +420,7 @@ void idr_remove(struct idr *idp, int id)
to_free->bitmap = to_free->count = 0;
free_layer(to_free);
}
- while (idp->id_free_cnt >= IDR_FREE_MAX) {
+ while (idp->id_free_cnt >= MAX_IDR_FREE) {
p = get_from_free_list(idp);
/*
* Note: we don't call the rcu callback here, since the only
@@ -451,7 +451,7 @@ void idr_remove_all(struct idr *idp)
int n, id, max;
int bt_mask;
struct idr_layer *p;
- struct idr_layer *pa[MAX_LEVEL];
+ struct idr_layer *pa[MAX_IDR_LEVEL];
struct idr_layer **paa = &pa[0];
n = idp->layers * IDR_BITS;
@@ -517,7 +517,7 @@ void *idr_find(struct idr *idp, int id)
n = (p->layer+1) * IDR_BITS;
/* Mask off upper bits we don't use for the search. */
- id &= MAX_ID_MASK;
+ id &= MAX_IDR_MASK;
if (id >= (1 << n))
return NULL;
@@ -555,7 +555,7 @@ int idr_for_each(struct idr *idp,
{
int n, id, max, error = 0;
struct idr_layer *p;
- struct idr_layer *pa[MAX_LEVEL];
+ struct idr_layer *pa[MAX_IDR_LEVEL];
struct idr_layer **paa = &pa[0];
n = idp->layers * IDR_BITS;
@@ -601,7 +601,7 @@ EXPORT_SYMBOL(idr_for_each);
*/
void *idr_get_next(struct idr *idp, int *nextidp)
{
- struct idr_layer *p, *pa[MAX_LEVEL];
+ struct idr_layer *p, *pa[MAX_IDR_LEVEL];
struct idr_layer **paa = &pa[0];
int id = *nextidp;
int n, max;
@@ -659,7 +659,7 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
n = (p->layer+1) * IDR_BITS;
- id &= MAX_ID_MASK;
+ id &= MAX_IDR_MASK;
if (id >= (1 << n))
return ERR_PTR(-EINVAL);
@@ -780,7 +780,7 @@ EXPORT_SYMBOL(ida_pre_get);
*/
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
- struct idr_layer *pa[MAX_LEVEL];
+ struct idr_layer *pa[MAX_IDR_LEVEL];
struct ida_bitmap *bitmap;
unsigned long flags;
int idr_id = starting_id / IDA_BITMAP_BITS;
@@ -793,7 +793,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
if (t < 0)
return _idr_rc_to_errno(t);
- if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
+ if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
return -ENOSPC;
if (t != idr_id)
@@ -827,7 +827,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
}
id = idr_id * IDA_BITMAP_BITS + t;
- if (id >= MAX_ID_BIT)
+ if (id >= MAX_IDR_BIT)
return -ENOSPC;
__set_bit(t, bitmap->bitmap);
diff --git a/lib/interval_tree.c b/lib/interval_tree.c
new file mode 100644
index 00000000000..e6eb406f2d6
--- /dev/null
+++ b/lib/interval_tree.c
@@ -0,0 +1,10 @@
+#include <linux/init.h>
+#include <linux/interval_tree.h>
+#include <linux/interval_tree_generic.h>
+
+#define START(node) ((node)->start)
+#define LAST(node) ((node)->last)
+
+INTERVAL_TREE_DEFINE(struct interval_tree_node, rb,
+ unsigned long, __subtree_last,
+ START, LAST,, interval_tree)
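
Note: INTERVAL_TREE_DEFINE is a code-generating template: given the node type, its rb_node member, the endpoint type, the augmented subtree field, and START/LAST accessors, it emits prefix_insert/_remove/_iter_first/_iter_next functions; the empty argument between LAST and the prefix is the storage-class specifier (left empty above so the functions can be exported). A hedged sketch of instantiating the template for a private node type:

    #include <linux/interval_tree_generic.h>
    #include <linux/rbtree.h>

    /* Hypothetical client node covering the closed interval [start, last]. */
    struct my_range {
            struct rb_node rb;
            unsigned long start;
            unsigned long last;
            unsigned long __subtree_last;   /* maintained by the generated code */
    };

    #define MY_START(n) ((n)->start)
    #define MY_LAST(n)  ((n)->last)

    /* Emits static my_range_insert(), my_range_remove(),
     * my_range_iter_first() and my_range_iter_next(). */
    INTERVAL_TREE_DEFINE(struct my_range, rb, unsigned long, __subtree_last,
                         MY_START, MY_LAST, static, my_range)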
diff --git a/lib/interval_tree_test_main.c b/lib/interval_tree_test_main.c
new file mode 100644
index 00000000000..b25903987f7
--- /dev/null
+++ b/lib/interval_tree_test_main.c
@@ -0,0 +1,105 @@
+#include <linux/module.h>
+#include <linux/interval_tree.h>
+#include <linux/random.h>
+#include <asm/timex.h>
+
+#define NODES 100
+#define PERF_LOOPS 100000
+#define SEARCHES 100
+#define SEARCH_LOOPS 10000
+
+static struct rb_root root = RB_ROOT;
+static struct interval_tree_node nodes[NODES];
+static u32 queries[SEARCHES];
+
+static struct rnd_state rnd;
+
+static inline unsigned long
+search(unsigned long query, struct rb_root *root)
+{
+ struct interval_tree_node *node;
+ unsigned long results = 0;
+
+ for (node = interval_tree_iter_first(root, query, query); node;
+ node = interval_tree_iter_next(node, query, query))
+ results++;
+ return results;
+}
+
+static void init(void)
+{
+ int i;
+ for (i = 0; i < NODES; i++) {
+ u32 a = prandom32(&rnd), b = prandom32(&rnd);
+ if (a <= b) {
+ nodes[i].start = a;
+ nodes[i].last = b;
+ } else {
+ nodes[i].start = b;
+ nodes[i].last = a;
+ }
+ }
+ for (i = 0; i < SEARCHES; i++)
+ queries[i] = prandom32(&rnd);
+}
+
+static int interval_tree_test_init(void)
+{
+ int i, j;
+ unsigned long results;
+ cycles_t time1, time2, time;
+
+ printk(KERN_ALERT "interval tree insert/remove");
+
+ prandom32_seed(&rnd, 3141592653589793238ULL);
+ init();
+
+ time1 = get_cycles();
+
+ for (i = 0; i < PERF_LOOPS; i++) {
+ for (j = 0; j < NODES; j++)
+ interval_tree_insert(nodes + j, &root);
+ for (j = 0; j < NODES; j++)
+ interval_tree_remove(nodes + j, &root);
+ }
+
+ time2 = get_cycles();
+ time = time2 - time1;
+
+ time = div_u64(time, PERF_LOOPS);
+ printk(" -> %llu cycles\n", (unsigned long long)time);
+
+ printk(KERN_ALERT "interval tree search");
+
+ for (j = 0; j < NODES; j++)
+ interval_tree_insert(nodes + j, &root);
+
+ time1 = get_cycles();
+
+ results = 0;
+ for (i = 0; i < SEARCH_LOOPS; i++)
+ for (j = 0; j < SEARCHES; j++)
+ results += search(queries[j], &root);
+
+ time2 = get_cycles();
+ time = time2 - time1;
+
+ time = div_u64(time, SEARCH_LOOPS);
+ results = div_u64(results, SEARCH_LOOPS);
+ printk(" -> %llu cycles (%lu results)\n",
+ (unsigned long long)time, results);
+
+ return -EAGAIN; /* Failing the init call unloads the module right away */
+}
+
+static void interval_tree_test_exit(void)
+{
+ printk(KERN_ALERT "test exit\n");
+}
+
+module_init(interval_tree_test_init)
+module_exit(interval_tree_test_exit)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michel Lespinasse");
+MODULE_DESCRIPTION("Interval Tree test");
diff --git a/lib/kasprintf.c b/lib/kasprintf.c
index ae0de80c1c8..32f12150fc4 100644
--- a/lib/kasprintf.c
+++ b/lib/kasprintf.c
@@ -21,7 +21,7 @@ char *kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
len = vsnprintf(NULL, 0, fmt, aq);
va_end(aq);
- p = kmalloc(len+1, gfp);
+ p = kmalloc_track_caller(len+1, gfp);
if (!p)
return NULL;
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 0401d2916d9..52e5abbc41d 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -375,14 +375,14 @@ static int uevent_net_init(struct net *net)
struct uevent_sock *ue_sk;
struct netlink_kernel_cfg cfg = {
.groups = 1,
+ .flags = NL_CFG_F_NONROOT_RECV,
};
ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL);
if (!ue_sk)
return -ENOMEM;
- ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT,
- THIS_MODULE, &cfg);
+ ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT, &cfg);
if (!ue_sk->sk) {
printk(KERN_ERR
"kobject_uevent: unable to create netlink socket!\n");
@@ -422,7 +422,6 @@ static struct pernet_operations uevent_net_ops = {
static int __init kobject_uevent_init(void)
{
- netlink_set_nonroot(NETLINK_KOBJECT_UEVENT, NL_NONROOT_RECV);
return register_pernet_subsys(&uevent_net_ops);
}
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 4226dfeb517..18eca7809b0 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -22,6 +22,10 @@ static const u16 nla_attr_minlen[NLA_TYPE_MAX+1] = {
[NLA_U64] = sizeof(u64),
[NLA_MSECS] = sizeof(u64),
[NLA_NESTED] = NLA_HDRLEN,
+ [NLA_S8] = sizeof(s8),
+ [NLA_S16] = sizeof(s16),
+ [NLA_S32] = sizeof(s32),
+ [NLA_S64] = sizeof(s64),
};
static int validate_nla(const struct nlattr *nla, int maxtype,
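
Note: the new table entries give the netlink validator minimum lengths for the signed attribute types added in this series. Assuming the companion nla_put_s32()/nla_get_s32() helpers and NLA_S32 policy type from the same series, a hedged usage sketch (the attribute ids are hypothetical):

    #include <net/netlink.h>

    /* Hypothetical attribute ids and a policy for a signed 32-bit value. */
    enum { MYDRV_ATTR_UNSPEC, MYDRV_ATTR_TEMPERATURE, __MYDRV_ATTR_MAX };
    #define MYDRV_ATTR_MAX (__MYDRV_ATTR_MAX - 1)

    static const struct nla_policy mydrv_policy[MYDRV_ATTR_MAX + 1] = {
            [MYDRV_ATTR_TEMPERATURE] = { .type = NLA_S32 },
    };

    static int mydrv_put_temp(struct sk_buff *skb, s32 celsius)
    {
            /* Negative values round-trip intact with the signed helpers;
             * validate_nla() checks the payload against nla_attr_minlen. */
            return nla_put_s32(skb, MYDRV_ATTR_TEMPERATURE, celsius);
    }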
diff --git a/lib/parser.c b/lib/parser.c
index c4341008483..52cfa69f73d 100644
--- a/