Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                   16
-rw-r--r--  lib/Kconfig.debug             47
-rw-r--r--  lib/Makefile                   6
-rw-r--r--  lib/cpu_rmap.c               269
-rw-r--r--  lib/debugobjects.c             9
-rw-r--r--  lib/dynamic_debug.c           61
-rw-r--r--  lib/find_next_bit.c           18
-rw-r--r--  lib/kernel_lock.c            143
-rw-r--r--  lib/kstrtox.c                227
-rw-r--r--  lib/list_debug.c              39
-rw-r--r--  lib/nlattr.c                   2
-rw-r--r--  lib/plist.c                  135
-rw-r--r--  lib/radix-tree.c               7
-rw-r--r--  lib/rbtree.c                   3
-rw-r--r--  lib/rwsem.c                   10
-rw-r--r--  lib/show_mem.c                 4
-rw-r--r--  lib/swiotlb.c                  6
-rw-r--r--  lib/test-kstrtox.c           739
-rw-r--r--  lib/textsearch.c              10
-rw-r--r--  lib/vsprintf.c               157
-rw-r--r--  lib/zlib_deflate/deflate.c    31
-rw-r--r--  lib/zlib_deflate/defutil.h    17
22 files changed, 1568 insertions, 388 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index b9fef78ed04..9c10e38fc60 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -22,6 +22,9 @@ config GENERIC_FIND_FIRST_BIT
config GENERIC_FIND_NEXT_BIT
bool
+config GENERIC_FIND_BIT_LE
+ bool
+
config GENERIC_FIND_LAST_BIT
bool
default y
@@ -240,6 +243,10 @@ config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
bool "Disable obsolete cpumask functions" if DEBUG_PER_CPU_MAPS
depends on EXPERIMENTAL && BROKEN
+config CPU_RMAP
+ bool
+ depends on SMP
+
#
# Netlink attribute parsing support is select'ed if needed
#
@@ -256,6 +263,13 @@ config LRU_CACHE
tristate
config AVERAGE
- bool
+ bool "Averaging functions"
+ help
+ This option is provided for the case where no in-kernel-tree
+ modules require averaging functions, but a module built outside
+ the kernel tree does. Such modules that use library averaging
+ functions require Y here.
+
+ If unsure, say N.
endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 3967c2356e3..df9234c5f9d 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -9,6 +9,17 @@ config PRINTK_TIME
operations. This is useful for identifying long delays
in kernel startup.
+config DEFAULT_MESSAGE_LOGLEVEL
+ int "Default message log level (1-7)"
+ range 1 7
+ default "4"
+ help
+ Default log level for printk statements with no specified priority.
+
+ This was hard-coded to KERN_WARNING since at least 2.6.10 but folks
+ that are auditing their logs closely may want to set it to a lower
+ priority.
+
config ENABLE_WARN_DEPRECATED
bool "Enable __deprecated logic"
default y
@@ -102,11 +113,6 @@ config HEADERS_CHECK
config DEBUG_SECTION_MISMATCH
bool "Enable full Section mismatch analysis"
- depends on UNDEFINED || (BLACKFIN)
- default y
- # This option is on purpose disabled for now.
- # It will be enabled when we are down to a reasonable number
- # of section mismatch warnings (< 10 for an allyesconfig build)
help
The section mismatch analysis checks if there are illegal
references from one section to another section.
@@ -176,6 +182,23 @@ config HARDLOCKUP_DETECTOR
def_bool LOCKUP_DETECTOR && PERF_EVENTS && HAVE_PERF_EVENTS_NMI && \
!ARCH_HAS_NMI_WATCHDOG
+config BOOTPARAM_HARDLOCKUP_PANIC
+ bool "Panic (Reboot) On Hard Lockups"
+ depends on LOCKUP_DETECTOR
+ help
+ Say Y here to enable the kernel to panic on "hard lockups",
+ which are bugs that cause the kernel to loop in kernel
+ mode with interrupts disabled for more than 60 seconds.
+
+ Say N if unsure.
+
+config BOOTPARAM_HARDLOCKUP_PANIC_VALUE
+ int
+ depends on LOCKUP_DETECTOR
+ range 0 1
+ default 0 if !BOOTPARAM_HARDLOCKUP_PANIC
+ default 1 if BOOTPARAM_HARDLOCKUP_PANIC
+
config BOOTPARAM_SOFTLOCKUP_PANIC
bool "Panic (Reboot) On Soft Lockups"
depends on LOCKUP_DETECTOR
@@ -470,15 +493,6 @@ config DEBUG_MUTEXES
This feature allows mutex semantics violations to be detected and
reported.
-config BKL
- bool "Big Kernel Lock" if (SMP || PREEMPT)
- default y
- help
- This is the traditional lock that is used in old code instead
- of proper locking. All drivers that use the BKL should depend
- on this symbol.
- Say Y here unless you are working on removing the BKL.
-
config DEBUG_LOCK_ALLOC
bool "Lock debugging: detect incorrect freeing of live locks"
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -805,7 +819,7 @@ config ARCH_WANT_FRAME_POINTERS
config FRAME_POINTER
bool "Compile the kernel with frame pointers"
depends on DEBUG_KERNEL && \
- (CRIS || M68K || M68KNOMMU || FRV || UML || \
+ (CRIS || M68K || FRV || UML || \
AVR32 || SUPERH || BLACKFIN || MN10300) || \
ARCH_WANT_FRAME_POINTERS
default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS
@@ -1236,3 +1250,6 @@ source "samples/Kconfig"
source "lib/Kconfig.kgdb"
source "lib/Kconfig.kmemcheck"
+
+config TEST_KSTRTOX
+ tristate "Test kstrto*() family of functions at runtime"
diff --git a/lib/Makefile b/lib/Makefile
index 0544c814d82..ef0f2857115 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -22,6 +22,8 @@ lib-y += kobject.o kref.o klist.o
obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o
+obj-y += kstrtox.o
+obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
@@ -38,12 +40,12 @@ lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o
lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
+lib-$(CONFIG_GENERIC_FIND_BIT_LE) += find_next_bit.o
obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o
CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
-obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
obj-$(CONFIG_BTREE) += btree.o
obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
obj-$(CONFIG_DEBUG_LIST) += list_debug.o
@@ -111,6 +113,8 @@ obj-$(CONFIG_ATOMIC64_SELFTEST) += atomic64_test.o
obj-$(CONFIG_AVERAGE) += average.o
+obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
+
hostprogs-y := gen_crc32table
clean-files := crc32table.h
diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c
new file mode 100644
index 00000000000..987acfafeb8
--- /dev/null
+++ b/lib/cpu_rmap.c
@@ -0,0 +1,269 @@
+/*
+ * cpu_rmap.c: CPU affinity reverse-map support
+ * Copyright 2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/cpu_rmap.h>
+#ifdef CONFIG_GENERIC_HARDIRQS
+#include <linux/interrupt.h>
+#endif
+#include <linux/module.h>
+
+/*
+ * These functions maintain a mapping from CPUs to some ordered set of
+ * objects with CPU affinities. This can be seen as a reverse-map of
+ * CPU affinity. However, we do not assume that the object affinities
+ * cover all CPUs in the system. For those CPUs not directly covered
+ * by object affinities, we attempt to find a nearest object based on
+ * CPU topology.
+ */
+
+/**
+ * alloc_cpu_rmap - allocate CPU affinity reverse-map
+ * @size: Number of objects to be mapped
+ * @flags: Allocation flags e.g. %GFP_KERNEL
+ */
+struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags)
+{
+ struct cpu_rmap *rmap;
+ unsigned int cpu;
+ size_t obj_offset;
+
+ /* This is a silly number of objects, and we use u16 indices. */
+ if (size > 0xffff)
+ return NULL;
+
+ /* Offset of object pointer array from base structure */
+ obj_offset = ALIGN(offsetof(struct cpu_rmap, near[nr_cpu_ids]),
+ sizeof(void *));
+
+ rmap = kzalloc(obj_offset + size * sizeof(rmap->obj[0]), flags);
+ if (!rmap)
+ return NULL;
+
+ rmap->obj = (void **)((char *)rmap + obj_offset);
+
+ /* Initially assign CPUs to objects on a rota, since we have
+ * no idea where the objects are. Use infinite distance, so
+ * any object with known distance is preferable. Include the
+ * CPUs that are not present/online, since we definitely want
+ * any newly-hotplugged CPUs to have some object assigned.
+ */
+ for_each_possible_cpu(cpu) {
+ rmap->near[cpu].index = cpu % size;
+ rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
+ }
+
+ rmap->size = size;
+ return rmap;
+}
+EXPORT_SYMBOL(alloc_cpu_rmap);
+
+/* Reevaluate nearest object for given CPU, comparing with the given
+ * neighbours at the given distance.
+ */
+static bool cpu_rmap_copy_neigh(struct cpu_rmap *rmap, unsigned int cpu,
+ const struct cpumask *mask, u16 dist)
+{
+ int neigh;
+
+ for_each_cpu(neigh, mask) {
+ if (rmap->near[cpu].dist > dist &&
+ rmap->near[neigh].dist <= dist) {
+ rmap->near[cpu].index = rmap->near[neigh].index;
+ rmap->near[cpu].dist = dist;
+ return true;
+ }
+ }
+ return false;
+}
+
+#ifdef DEBUG
+static void debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix)
+{
+ unsigned index;
+ unsigned int cpu;
+
+ pr_info("cpu_rmap %p, %s:\n", rmap, prefix);
+
+ for_each_possible_cpu(cpu) {
+ index = rmap->near[cpu].index;
+ pr_info("cpu %d -> obj %u (distance %u)\n",
+ cpu, index, rmap->near[cpu].dist);
+ }
+}
+#else
+static inline void
+debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix)
+{
+}
+#endif
+
+/**
+ * cpu_rmap_add - add object to a rmap
+ * @rmap: CPU rmap allocated with alloc_cpu_rmap()
+ * @obj: Object to add to rmap
+ *
+ * Return index of object.
+ */
+int cpu_rmap_add(struct cpu_rmap *rmap, void *obj)
+{
+ u16 index;
+
+ BUG_ON(rmap->used >= rmap->size);
+ index = rmap->used++;
+ rmap->obj[index] = obj;
+ return index;
+}
+EXPORT_SYMBOL(cpu_rmap_add);
+
+/**
+ * cpu_rmap_update - update CPU rmap following a change of object affinity
+ * @rmap: CPU rmap to update
+ * @index: Index of object whose affinity changed
+ * @affinity: New CPU affinity of object
+ */
+int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
+ const struct cpumask *affinity)
+{
+ cpumask_var_t update_mask;
+ unsigned int cpu;
+
+ if (unlikely(!zalloc_cpumask_var(&update_mask, GFP_KERNEL)))
+ return -ENOMEM;
+
+ /* Invalidate distance for all CPUs for which this used to be
+ * the nearest object. Mark those CPUs for update.
+ */
+ for_each_online_cpu(cpu) {
+ if (rmap->near[cpu].index == index) {
+ rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
+ cpumask_set_cpu(cpu, update_mask);
+ }
+ }
+
+ debug_print_rmap(rmap, "after invalidating old distances");
+
+ /* Set distance to 0 for all CPUs in the new affinity mask.
+ * Mark all CPUs within their NUMA nodes for update.
+ */
+ for_each_cpu(cpu, affinity) {
+ rmap->near[cpu].index = index;
+ rmap->near[cpu].dist = 0;
+ cpumask_or(update_mask, update_mask,
+ cpumask_of_node(cpu_to_node(cpu)));
+ }
+
+ debug_print_rmap(rmap, "after updating neighbours");
+
+ /* Update distances based on topology */
+ for_each_cpu(cpu, update_mask) {
+ if (cpu_rmap_copy_neigh(rmap, cpu,
+ topology_thread_cpumask(cpu), 1))
+ continue;
+ if (cpu_rmap_copy_neigh(rmap, cpu,
+ topology_core_cpumask(cpu), 2))
+ continue;
+ if (cpu_rmap_copy_neigh(rmap, cpu,
+ cpumask_of_node(cpu_to_node(cpu)), 3))
+ continue;
+ /* We could continue into NUMA node distances, but for now
+ * we give up.
+ */
+ }
+
+ debug_print_rmap(rmap, "after copying neighbours");
+
+ free_cpumask_var(update_mask);
+ return 0;
+}
+EXPORT_SYMBOL(cpu_rmap_update);
+
+#ifdef CONFIG_GENERIC_HARDIRQS
+
+/* Glue between IRQ affinity notifiers and CPU rmaps */
+
+struct irq_glue {
+ struct irq_affinity_notify notify;
+ struct cpu_rmap *rmap;
+ u16 index;
+};
+
+/**
+ * free_irq_cpu_rmap - free a CPU affinity reverse-map used for IRQs
+ * @rmap: Reverse-map allocated with alloc_irq_cpu_rmap(), or %NULL
+ *
+ * Must be called in process context, before freeing the IRQs, and
+ * without holding any locks required by global workqueue items.
+ */
+void free_irq_cpu_rmap(struct cpu_rmap *rmap)
+{
+ struct irq_glue *glue;
+ u16 index;
+
+ if (!rmap)
+ return;
+
+ for (index = 0; index < rmap->used; index++) {
+ glue = rmap->obj[index];
+ irq_set_affinity_notifier(glue->notify.irq, NULL);
+ }
+ irq_run_affinity_notifiers();
+
+ kfree(rmap);
+}
+EXPORT_SYMBOL(free_irq_cpu_rmap);
+
+static void
+irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
+{
+ struct irq_glue *glue =
+ container_of(notify, struct irq_glue, notify);
+ int rc;
+
+ rc = cpu_rmap_update(glue->rmap, glue->index, mask);
+ if (rc)
+ pr_warning("irq_cpu_rmap_notify: update failed: %d\n", rc);
+}
+
+static void irq_cpu_rmap_release(struct kref *ref)
+{
+ struct irq_glue *glue =
+ container_of(ref, struct irq_glue, notify.kref);
+ kfree(glue);
+}
+
+/**
+ * irq_cpu_rmap_add - add an IRQ to a CPU affinity reverse-map
+ * @rmap: The reverse-map
+ * @irq: The IRQ number
+ *
+ * This adds an IRQ affinity notifier that will update the reverse-map
+ * automatically.
+ *
+ * Must be called in process context, after the IRQ is allocated but
+ * before it is bound with request_irq().
+ */
+int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
+{
+ struct irq_glue *glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+ int rc;
+
+ if (!glue)
+ return -ENOMEM;
+ glue->notify.notify = irq_cpu_rmap_notify;
+ glue->notify.release = irq_cpu_rmap_release;
+ glue->rmap = rmap;
+ glue->index = cpu_rmap_add(rmap, glue);
+ rc = irq_set_affinity_notifier(irq, &glue->notify);
+ if (rc)
+ kfree(glue);
+ return rc;
+}
+EXPORT_SYMBOL(irq_cpu_rmap_add);
+
+#endif /* CONFIG_GENERIC_HARDIRQS */
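
For orientation, a minimal usage sketch of the interfaces above, assuming the alloc_irq_cpu_rmap() and cpu_rmap_lookup_index() helpers that accompany this file in <linux/cpu_rmap.h>; the queue count, IRQ numbers and error handling below are invented for illustration and are not part of the patch:

	/* Hypothetical driver setup: one rmap entry per RX queue, kept up to
	 * date by the affinity notifiers registered via irq_cpu_rmap_add().
	 */
	static struct cpu_rmap *example_rx_rmap;

	static int example_setup_rx_rmap(const int *queue_irq, unsigned int nr_queues)
	{
		unsigned int i;
		int rc;

		example_rx_rmap = alloc_irq_cpu_rmap(nr_queues);	/* assumed GFP_KERNEL wrapper */
		if (!example_rx_rmap)
			return -ENOMEM;

		for (i = 0; i < nr_queues; i++) {
			/* after the IRQ is allocated, before request_irq() */
			rc = irq_cpu_rmap_add(example_rx_rmap, queue_irq[i]);
			if (rc) {
				free_irq_cpu_rmap(example_rx_rmap);
				example_rx_rmap = NULL;
				return rc;
			}
		}
		return 0;
	}

	/* Hot path: cpu_rmap_lookup_index(example_rx_rmap, raw_smp_processor_id())
	 * then yields the index of the queue whose IRQ affinity is nearest this CPU.
	 */
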
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index deebcc57d4e..9d86e45086f 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -249,14 +249,17 @@ static struct debug_bucket *get_bucket(unsigned long addr)
static void debug_print_object(struct debug_obj *obj, char *msg)
{
+ struct debug_obj_descr *descr = obj->descr;
static int limit;
- if (limit < 5 && obj->descr != descr_test) {
+ if (limit < 5 && descr != descr_test) {
+ void *hint = descr->debug_hint ?
+ descr->debug_hint(obj->object) : NULL;
limit++;
WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
- "object type: %s\n",
+ "object type: %s hint: %pS\n",
msg, obj_states[obj->state], obj->astate,
- obj->descr->name);
+ descr->name, hint);
}
debug_objects_warnings++;
}
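
The hint printed above comes from the object descriptor's new debug_hint() callback; a minimal sketch of a descriptor supplying it, with an invented object type (nothing below is from a real debugobjects user):

	#include <linux/debugobjects.h>

	struct example_work {
		void (*func)(struct example_work *);
	};

	/* Return the callback pointer so debug_print_object() can show it via %pS */
	static void *example_debug_hint(void *addr)
	{
		return ((struct example_work *)addr)->func;
	}

	static struct debug_obj_descr example_work_debug_descr = {
		.name		= "example_work",
		.debug_hint	= example_debug_hint,
	};
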
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index b335acb43be..75ca78f3a8c 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -7,6 +7,7 @@
* Copyright (C) 2008 Jason Baron <jbaron@redhat.com>
* By Greg Banks <gnb@melbourne.sgi.com>
* Copyright (c) 2008 Silicon Graphics Inc. All Rights Reserved.
+ * Copyright (C) 2011 Bart Van Assche. All Rights Reserved.
*/
#include <linux/kernel.h>
@@ -27,6 +28,8 @@
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/jump_label.h>
+#include <linux/hardirq.h>
+#include <linux/sched.h>
extern struct _ddebug __start___verbose[];
extern struct _ddebug __stop___verbose[];
@@ -63,15 +66,25 @@ static inline const char *basename(const char *path)
return tail ? tail+1 : path;
}
+static struct { unsigned flag:8; char opt_char; } opt_array[] = {
+ { _DPRINTK_FLAGS_PRINT, 'p' },
+ { _DPRINTK_FLAGS_INCL_MODNAME, 'm' },
+ { _DPRINTK_FLAGS_INCL_FUNCNAME, 'f' },
+ { _DPRINTK_FLAGS_INCL_LINENO, 'l' },
+ { _DPRINTK_FLAGS_INCL_TID, 't' },
+};
+
/* format a string into buf[] which describes the _ddebug's flags */
static char *ddebug_describe_flags(struct _ddebug *dp, char *buf,
size_t maxlen)
{
char *p = buf;
+ int i;
BUG_ON(maxlen < 4);
- if (dp->flags & _DPRINTK_FLAGS_PRINT)
- *p++ = 'p';
+ for (i = 0; i < ARRAY_SIZE(opt_array); ++i)
+ if (dp->flags & opt_array[i].flag)
+ *p++ = opt_array[i].opt_char;
if (p == buf)
*p++ = '-';
*p = '\0';
@@ -343,7 +356,7 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
unsigned int *maskp)
{
unsigned flags = 0;
- int op = '=';
+ int op = '=', i;
switch (*str) {
case '+':
@@ -358,13 +371,14 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
printk(KERN_INFO "%s: op='%c'\n", __func__, op);
for ( ; *str ; ++str) {
- switch (*str) {
- case 'p':
- flags |= _DPRINTK_FLAGS_PRINT;
- break;
- default:
- return -EINVAL;
+ for (i = ARRAY_SIZE(opt_array) - 1; i >= 0; i--) {
+ if (*str == opt_array[i].opt_char) {
+ flags |= opt_array[i].flag;
+ break;
+ }
}
+ if (i < 0)
+ return -EINVAL;
}
if (flags == 0)
return -EINVAL;
@@ -413,6 +427,35 @@ static int ddebug_exec_query(char *query_string)
return 0;
}
+int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
+{
+ va_list args;
+ int res;
+
+ BUG_ON(!descriptor);
+ BUG_ON(!fmt);
+
+ va_start(args, fmt);
+ res = printk(KERN_DEBUG);
+ if (descriptor->flags & _DPRINTK_FLAGS_INCL_TID) {
+ if (in_interrupt())
+ res += printk(KERN_CONT "<intr> ");
+ else
+ res += printk(KERN_CONT "[%d] ", task_pid_vnr(current));
+ }
+ if (descriptor->flags & _DPRINTK_FLAGS_INCL_MODNAME)
+ res += printk(KERN_CONT "%s:", descriptor->modname);
+ if (descriptor->flags & _DPRINTK_FLAGS_INCL_FUNCNAME)
+ res += printk(KERN_CONT "%s:", descriptor->function);
+ if (descriptor->flags & _DPRINTK_FLAGS_INCL_LINENO)
+ res += printk(KERN_CONT "%d ", descriptor->lineno);
+ res += vprintk(fmt, args);
+ va_end(args);
+
+ return res;
+}
+EXPORT_SYMBOL(__dynamic_pr_debug);
+
static __initdata char ddebug_setup_string[1024];
static __init int ddebug_setup_query(char *str)
{
diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c
index 24c59ded47a..b0a8767282b 100644
--- a/lib/find_next_bit.c
+++ b/lib/find_next_bit.c
@@ -160,6 +160,7 @@ EXPORT_SYMBOL(find_first_zero_bit);
#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
#ifdef __BIG_ENDIAN
+#ifdef CONFIG_GENERIC_FIND_BIT_LE
/* include/linux/byteorder does not support "unsigned long" type */
static inline unsigned long ext2_swabp(const unsigned long * x)
@@ -185,15 +186,16 @@ static inline unsigned long ext2_swab(const unsigned long y)
#endif
}
-unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, unsigned
+unsigned long find_next_zero_bit_le(const void *addr, unsigned
long size, unsigned long offset)
{
- const unsigned long *p = addr + BITOP_WORD(offset);
+ const unsigned long *p = addr;
unsigned long result = offset & ~(BITS_PER_LONG - 1);
unsigned long tmp;
if (offset >= size)
return size;
+ p += BITOP_WORD(offset);
size -= result;
offset &= (BITS_PER_LONG - 1UL);
if (offset) {
@@ -226,18 +228,18 @@ found_middle:
found_middle_swap:
return result + ffz(ext2_swab(tmp));
}
+EXPORT_SYMBOL(find_next_zero_bit_le);
-EXPORT_SYMBOL(generic_find_next_zero_le_bit);
-
-unsigned long generic_find_next_le_bit(const unsigned long *addr, unsigned
+unsigned long find_next_bit_le(const void *addr, unsigned
long size, unsigned long offset)
{
- const unsigned long *p = addr + BITOP_WORD(offset);
+ const unsigned long *p = addr;
unsigned long result = offset & ~(BITS_PER_LONG - 1);
unsigned long tmp;
if (offset >= size)
return size;
+ p += BITOP_WORD(offset);
size -= result;
offset &= (BITS_PER_LONG - 1UL);
if (offset) {
@@ -271,5 +273,7 @@ found_middle:
found_middle_swap:
return result + __ffs(ext2_swab(tmp));
}
-EXPORT_SYMBOL(generic_find_next_le_bit);
+EXPORT_SYMBOL(find_next_bit_le);
+
+#endif /* CONFIG_GENERIC_FIND_BIT_LE */
#endif /* __BIG_ENDIAN */
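
For context, the _le variants scan bitmaps stored little-endian regardless of host byte order (ext2-style on-disk bitmaps being the classic case). A minimal iteration sketch with an invented bitmap, assuming the __set_bit_le() helper from the same bitops rename series:

	#include <linux/bitmap.h>
	#include <linux/bitops.h>
	#include <linux/kernel.h>

	static void example_scan(void)
	{
		DECLARE_BITMAP(map, 64);
		unsigned long bit;

		bitmap_zero(map, 64);
		__set_bit_le(3, map);		/* assumed little-endian bitop helper */

		for (bit = find_next_bit_le(map, 64, 0);
		     bit < 64;
		     bit = find_next_bit_le(map, 64, bit + 1))
			pr_info("bit %lu is set\n", bit);
	}
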
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
deleted file mode 100644
index b135d04aa48..00000000000
--- a/lib/kernel_lock.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * lib/kernel_lock.c
- *
- * This is the traditional BKL - big kernel lock. Largely
- * relegated to obsolescence, but used by various less
- * important (or lazy) subsystems.
- */
-#include <linux/module.h>
-#include <linux/kallsyms.h>
-#include <linux/semaphore.h>
-#include <linux/smp_lock.h>
-
-#define CREATE_TRACE_POINTS
-#include <trace/events/bkl.h>
-
-/*
- * The 'big kernel lock'
- *
- * This spinlock is taken and released recursively by lock_kernel()
- * and unlock_kernel(). It is transparently dropped and reacquired
- * over schedule(). It is used to protect legacy code that hasn't
- * been migrated to a proper locking design yet.
- *
- * Don't use in new code.
- */
-static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(kernel_flag);
-
-
-/*
- * Acquire/release the underlying lock from the scheduler.
- *
- * This is called with preemption disabled, and should
- * return an error value if it cannot get the lock and
- * TIF_NEED_RESCHED gets set.
- *
- * If it successfully gets the lock, it should increment
- * the preemption count like any spinlock does.
- *
- * (This works on UP too - do_raw_spin_trylock will never
- * return false in that case)
- */
-int __lockfunc __reacquire_kernel_lock(void)
-{
- while (!do_raw_spin_trylock(&kernel_flag)) {
- if (need_resched())
- return -EAGAIN;
- cpu_relax();
- }
- preempt_disable();
- return 0;
-}
-
-void __lockfunc __release_kernel_lock(void)
-{
- do_raw_spin_unlock(&kernel_flag);
- preempt_enable_no_resched();
-}
-
-/*
- * These are the BKL spinlocks - we try to be polite about preemption.
- * If SMP is not on (ie UP preemption), this all goes away because the
- * do_raw_spin_trylock() will always succeed.
- */
-#ifdef CONFIG_PREEMPT
-static inline void __lock_kernel(void)
-{
- preempt_disable();
- if (unlikely(!do_raw_spin_trylock(&kernel_flag))) {
- /*
- * If preemption was disabled even before this
- * was called, there's nothing we can be polite
- * about - just spin.
- */
- if (preempt_count() > 1) {
- do_raw_spin_lock(&kernel_flag);
- return;
- }
-
- /*
- * Otherwise, let's wait for the kernel lock
- * with preemption enabled..
- */
- do {
- preempt_enable();
- while (raw_spin_is_locked(&kernel_flag))
- cpu_relax();
- preempt_disable();
- } while (!do_raw_spin_trylock(&kernel_flag));
- }
-}
-
-#else
-
-/*
- * Non-preemption case - just get the spinlock
- */
-static inline void __lock_kernel(void)
-{
- do_raw_spin_lock(&kernel_flag);
-}
-#endif
-
-static inline void __unlock_kernel(void)
-{
- /*
- * the BKL is not covered by lockdep, so we open-code the
- * unlocking sequence (and thus avoid the dep-chain ops):
- */
- do_raw_spin_unlock(&kernel_flag);
- preempt_enable();
-}
-
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously, so we only need to
- * worry about other CPU's.
- */
-void __lockfunc _lock_kernel(const char *func, const char *file, int line)
-{
- int depth = current->lock_depth + 1;
-
- trace_lock_kernel(func, file, line);
-
- if (likely(!depth)) {
- might_sleep();
- __lock_kernel();
- }
- current->lock_depth = depth;
-}
-
-void __lockfunc _unlock_kernel(const char *func, const char *file, int line)
-{
- BUG_ON(current->lock_depth < 0);
- if (likely(--current->lock_depth < 0))
- __unlock_kernel();
-
- trace_unlock_kernel(func, file, line);
-}
-
-EXPORT_SYMBOL(_lock_kernel);
-EXPORT_SYMBOL(_unlock_kernel);
-
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
new file mode 100644
index 00000000000..05672e819f8
--- /dev/null
+++ b/lib/kstrtox.c
@@ -0,0 +1,227 @@
+/*
+ * Convert integer string representation to an integer.
+ * If an integer doesn't fit into specified type, -E is returned.
+ *
+ * Integer starts with optional sign.
+ * kstrtou*() functions do not accept sign "-".
+ *
+ * Radix 0 means autodetection: leading "0x" implies radix 16,
+ * leading "0" implies radix 8, otherwise radix is 10.
+ * Autodetection hints work after optional sign, but not before.
+ *
+ * If -E is returned, result is not touched.
+ */
+#include <linux/ctype.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+static inline char _tolower(const char c)
+{
+ return c | 0x20;
+}
+
+static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res)
+{
+ unsigned long long acc;
+ int ok;
+
+ if (base == 0) {
+ if (s[0] == '0') {
+ if (_tolower(s[1]) == 'x' && isxdigit(s[2]))
+ base = 16;
+ else
+ base = 8;
+ } else
+ base = 10;
+ }
+ if (base == 16 && s[0] == '0' && _tolower(s[1]) == 'x')
+ s += 2;
+
+ acc = 0;
+ ok = 0;
+ while (*s) {
+ unsigned int val;
+
+ if ('0' <= *s && *s <= '9')
+ val = *s - '0';
+ else if ('a' <= _tolower(*s) && _tolower(*s) <= 'f')
+ val = _tolower(*s) - 'a' + 10;
+ else if (*s == '\n') {
+ if (*(s + 1) == '\0')
+ break;
+ else
+ return -EINVAL;
+ } else
+ return -EINVAL;
+
+ if (val >= base)
+ return -EINVAL;
+ if (acc > div_u64(ULLONG_MAX - val, base))
+ return -ERANGE;
+ acc = acc * base + val;
+ ok = 1;
+
+ s++;
+ }
+ if (!ok)
+ return -EINVAL;
+ *res = acc;
+ return 0;
+}
+
+int kstrtoull(const char *s, unsigned int base, unsigned long long *res)
+{
+ if (s[0] == '+')
+ s++;
+ return _kstrtoull(s, base, res);
+}
+EXPORT_SYMBOL(kstrtoull);
+
+int kstrtoll(const char *s, unsigned int base, long long *res)
+{
+ unsigned long long tmp;
+ int rv;
+
+ if (s[0] == '-') {
+ rv = _kstrtoull(s + 1, base, &tmp);
+ if (rv < 0)
+ return rv;
+ if ((long long)(-tmp) >= 0)
+ return -ERANGE;
+ *res = -tmp;
+ } else {
+ rv = kstrtoull(s, base, &tmp);
+ if (rv < 0)
+ return rv;
+ if ((long long)tmp < 0)
+ return -ERANGE;
+ *res = tmp;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(kstrtoll);
+
+/* Internal, do not use. */
+int _kstrtoul(const char *s, unsigned int base, unsigned long *res)
+{
+ unsigned long long tmp;
+ int rv;
+
+ rv = kstrtoull(s, base, &tmp);
+ if (rv < 0)
+ return rv;
+ if (tmp != (unsigned long long)(unsigned long)tmp)
+ return -ERANGE;
+ *res = tmp;
+ return 0;
+}
+EXPORT_SYMBOL(_kstrtoul);
+
+/* Internal, do not use. */
+int _kstrtol(const char *s, unsigned int base, long *res)
+{
+ long long tmp;
+ int rv;
+
+ rv = kstrtoll(s, base, &tmp);
+ if (rv < 0)
+ return rv;
+ if (tmp != (long long)(long)tmp)
+ return -ERANGE;
+ *res = tmp;
+ return 0;
+}
+EXPORT_SYMBOL(_kstrtol);
+
+int kstrtouint(const char *s, unsigned int base, unsigned int *res)
+{
+ unsigned long long tmp;
+ int rv;
+
+ rv = kstrtoull(s, base, &tmp);
+ if (rv < 0)
+ return rv;
+ if (tmp != (unsigned long long)(unsigned int)tmp)
+ return -ERANGE;
+ *res = tmp;
+ return 0;
+}
+EXPORT_SYMBOL(kstrtouint);
+
+int kstrtoint(const char *s, unsigned int base, int *res)
+{
+ long long tmp;
+ int rv;
+
+ rv = kstrtoll(s, base, &tmp);
+ if (rv < 0)
+ return rv;
+ if (tmp != (long long)(int)tmp)
+ return -ERANGE;
+ *res = tmp;
+ return 0;
+}
+EXPORT_SYMBOL(kstrtoint);
+
+int kstrtou16(const char *s, unsigned int base, u16 *res)
+{
+ unsigned long long tmp;
+ int rv;
+
+ rv = kstrtoull(s, base, &tmp);
+ if (rv < 0)
+ return rv;
+ if (tmp != (unsigned long long)(u16)tmp)
+ return -ERANGE;
+ *res = tmp;
+ return 0;
+}
+EXPORT_SYMBOL(kstrtou16);
+
+int kstrtos16(const char *s, unsigned int base, s16 *res)
+{
+ long long tmp;
+ int rv;
+
+ rv = kstrtoll(s, base, &tmp);
+ if (rv < 0)
+ return rv;
+ if (tmp != (long long)(s16)tmp)
+ return -ERANGE;
+ *res = tmp;
+ return 0;
+}
+EXPORT_SYMBOL(kstrtos16);
+
+int kstrtou8(const char *s, unsigned int base, u8 *res)
+{
+ unsigned long long tmp;
+ int rv;
+
+ rv = kstrtoull(s, base, &tmp);
+ if (rv < 0)
+ return rv;
+ if (tmp != (unsigned long long)(u8)tmp)
+ return -ERANGE;
+ *res = tmp;
+ return 0;
+}
+EXPORT_SYMBOL(kstrtou8);
+
+int kstrtos8(const char *s, unsigned int base, s8 *res)
+{
+ long long tmp;
+ int rv;
+
+ rv = kstrtoll(s, base, &tmp);
+ if (rv < 0)
+ return rv;
+ if (tmp != (long long)(s8)tmp)
+ return -ERANGE;
+ *res = tmp;
+ return 0;
+}
+EXPORT_SYMBOL(kstrtos8);
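
As a quick usage sketch (not part of the patch), the intended calling pattern for these helpers in, say, a sysfs store handler; the attribute, bound and variable names below are invented:

	#include <linux/device.h>
	#include <linux/kernel.h>

	static unsigned int example_threshold;	/* hypothetical tunable */

	static ssize_t threshold_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
	{
		unsigned int val;
		int rc;

		/* base 10 here; base 0 would also autodetect "0x"/"0" prefixes */
		rc = kstrtouint(buf, 10, &val);
		if (rc)
			return rc;	/* -EINVAL or -ERANGE; val is left untouched */
		if (val > 1000)		/* invented bound */
			return -EINVAL;

		example_threshold = val;
		return count;
	}
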
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 344c710d16c..b8029a5583f 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -35,6 +35,31 @@ void __list_add(struct list_head *new,
}
EXPORT_SYMBOL(__list_add);
+void __list_del_entry(struct list_head *entry)
+{
+ struct list_head *prev, *next;
+
+ prev = entry->prev;
+ next = entry->next;
+
+ if (WARN(next == LIST_POISON1,
+ "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
+ entry, LIST_POISON1) ||
+ WARN(prev == LIST_POISON2,
+ "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
+ entry, LIST_POISON2) ||
+ WARN(prev->next != entry,
+ "list_del corruption. prev->next should be %p, "
+ "but was %p\n", entry, prev->next) ||
+ WARN(next->prev != entry,
+ "list_del corruption. next->prev should be %p, "
+ "but was %p\n", entry, next->prev))
+ return;
+
+ __list_del(prev, next);
+}
+EXPORT_SYMBOL(__list_del_entry);
+
/**
* list_del - deletes entry from list.
* @entry: the element to delete from the list.
@@ -43,19 +68,7 @@ EXPORT_SYMBOL(__list_add);
*/
void list_del(struct list_head *entry)
{
- WARN(entry->next == LIST_POISON1,
- "list_del corruption, next is LIST_POISON1 (%p)\n",
- LIST_POISON1);
- WARN(entry->next != LIST_POISON1 && entry->prev == LIST_POISON2,
- "list_del corruption, prev is LIST_POISON2 (%p)\n",
- LIST_POISON2);
- WARN(entry->prev->next != entry,
- "list_del corruption. prev->next should be %p, "
- "but was %p\n", entry, entry->prev->next);
- WARN(entry->next->prev != entry,
- "list_del corruption. next->prev should be %p, "
- "but was %p\n", entry, entry->next->prev);
- __list_del(entry->prev, entry->next);
+ __list_del_entry(entry);
entry->next = LIST_POISON1;
entry->prev = LIST_POISON2;
}
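
With CONFIG_DEBUG_LIST, the consolidated checks mean a double deletion is now caught before any poisoned pointer is dereferenced; a contrived sketch (list and node names invented):

	#include <linux/list.h>

	static LIST_HEAD(example_list);
	static struct list_head example_node;

	static void example_double_del(void)
	{
		list_add(&example_node, &example_list);
		list_del(&example_node);	/* ok: unlinks, then poisons next/prev */
		list_del(&example_node);	/* bug: __list_del_entry() WARNs on
						 * LIST_POISON1 and returns early */
	}
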
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 5021cbc3441..ac09f2226dc 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -148,7 +148,7 @@ nla_policy_len(const struct nla_policy *p, int n)
{
int i, len = 0;
- for (i = 0; i < n; i++) {
+ for (i = 0; i < n; i++, p++) {
if (p->len)
len += nla_total_size(p->len);
else if (nla_attr_minlen[p->type])
diff --git a/lib/plist.c b/lib/plist.c
index 1471988d919..0ae7e643172 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -28,6 +28,8 @@
#ifdef CONFIG_DEBUG_PI_LIST
+static struct plist_head test_head;
+
static void plist_check_prev_next(struct list_head *t, struct list_head *p,
struct list_head *n)
{
@@ -54,12 +56,13 @@ static void plist_check_list(struct list_head *top)
static void plist_check_head(struct plist_head *head)
{
- WARN_ON(!head->rawlock && !head->spinlock);
+ WARN_ON(head != &test_head && !head->rawlock && !head->spinlock);
if (head->rawlock)
WARN_ON_SMP(!raw_spin_is_locked(head->rawlock));
if (head->spinlock)
WARN_ON_SMP(!spin_is_locked(head->spinlock));
- plist_check_list(&head->prio_list);
+ if (!plist_head_empty(head))
+ plist_check_list(&plist_first(head)->prio_list);
plist_check_list(&head->node_list);
}
@@ -75,25 +78,33 @@ static void plist_check_head(struct plist_head *head)
*/
void plist_add(struct plist_node *node, struct plist_head *head)
{
- struct plist_node *iter;
+ struct plist_node *first, *iter, *prev = NULL;
+ struct list_head *node_next = &head->node_list;
plist_check_head(head);
WARN_ON(!plist_node_empty(node));
+ WARN_ON(!list_empty(&node->prio_list));
+
+ if (plist_head_empty(head))
+ goto ins_node;
- list_for_each_entry(iter, &head->prio_list, plist.prio_list) {
- if (node->prio < iter->prio)
- goto lt_prio;
- else if (node->prio == iter->prio) {
- iter = list_entry(iter->plist.prio_list.next,
- struct plist_node, plist.prio_list);
- goto eq_prio;
+ first = iter = plist_first(head);
+
+ do {
+ if (node->prio < iter->prio) {
+ node_next = &iter->node_list;
+ break;
}
- }
-lt_prio:
- list_add_tail(&node->plist.prio_list, &iter->plist.prio_list);
-eq_prio:
- list_add_tail(&node->plist.node_list, &iter->plist.node_list);
+ prev = iter;
+ iter = list_entry(iter->prio_list.next,
+ struct plist_node, prio_list);
+ } while (iter != first);
+
+ if (!prev || prev->prio != node->prio)
+ list_add_tail(&node->prio_list, &iter->prio_list);
+ins_node:
+ list_add_tail(&node->node_list, node_next);
plist_check_head(head);
}
@@ -108,14 +119,98 @@ void plist_del(struct plist_node *node, struct plist_head *head)
{
plist_check_head(head);
- if (!list_empty(&node->plist.prio_list)) {
- struct plist_node *next = plist_first(&node->plist);
+ if (!list_empty(&node->prio_list)) {
+ if (node->node_list.next != &head->node_list) {
+ struct plist_node *next;
+
+ next = list_entry(node->node_list.next,
+ struct plist_node, node_list);
- list_move_tail(&next->plist.prio_list, &node->plist.prio_list);
- list_del_init(&node->plist.prio_list);
+ /* add the next plist_node into prio_list */
+ if (list_empty(&next->prio_list))
+ list_add(&next->prio_list, &node->prio_list);
+ }
+ list_del_init(&node->prio_list);
}
- list_del_init(&node->plist.node_list);
+ list_del_init(&node->node_list);
plist_check_head(head);
}
+
+#ifdef CONFIG_DEBUG_PI_LIST
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+static struct plist_node __initdata test_node[241];
+
+static void __init plist_test_check(int nr_expect)
+{
+ struct plist_node *first, *prio_pos, *node_pos;
+
+ if (plist_head_empty(&test_head)) {
+ BUG_ON(nr_expect != 0);
+ return;
+ }
+
+ prio_pos = first = plist_first(&test_head);
+ plist_for_each(node_pos, &test_head) {
+ if (nr_expect-- < 0)
+ break;
+ if (node_pos == first)
+ continue;
+ if (node_pos->prio == prio_pos->prio) {
+ BUG_ON(!list_empty(&node_pos->prio_list));
+ continue;
+ }
+
+ BUG_ON(prio_pos->prio > node_pos->prio);
+ BUG_ON(prio_pos->prio_list.next != &node_pos->prio_list);
+ prio_pos = node_pos;
+ }
+
+ BUG_ON(nr_expect != 0);
+ BUG_ON(prio_pos->prio_list.next != &first->prio_list);
+}
+
+static int __init plist_test(void)
+{
+ int nr_expect = 0, i, loop;
+ unsigned int r = local_clock();
+
+ printk(KERN_INFO "start plist test\n");
+ plist_head_init(&test_head, NULL);
+ for (i = 0; i < ARRAY_SIZE(test_node); i++)
+ plist_node_init(test_node + i, 0);
+
+ for (loop = 0; loop < 1000; loop++) {
+ r = r * 193939 % 47629;
+ i = r % ARRAY_SIZE(test_node);
+ if (plist_node_empty(test_node + i)) {
+ r = r * 193939 % 47629;
+ test_node[i].prio = r % 99;
+ plist_add(test_node + i, &test_head);
+ nr_expect++;
+ } else {
+ plist_del(test_node + i, &test_head);
+ nr_expect--;
+ }
+ plist_test_check(nr_expect);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(test_node); i++) {
+ if (plist_node_empty(test_node + i))
+ continue;
+ plist_del(test_node + i, &test_head);
+ nr_expect--;
+ plist_test_check(nr_expect);
+ }
+
+ printk(KERN_INFO "end plist test\n");
+ return 0;
+}
+
+module_init(plist_test);
+
+#endif
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 5086bb962b4..7ea2e033d71 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -736,10 +736,11 @@ next:
}
}
/*
- * The iftag must have been set somewhere because otherwise
- * we would return immediated at the beginning of the function
+ * We need not to tag the root tag if there is no tag which is set with
+ * settag within the range from *first_indexp to last_index.
*/
- root_tag_set(root, settag);
+ if (tagged > 0)
+ root_tag_set(root, settag);
*first_indexp = index;
return tagged;
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 4693f79195d..a16be19a130 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -315,6 +315,7 @@ void rb_augment_insert(struct rb_node *node, rb_augment_f func, void *data)
rb_augment_path(node, func, data);
}
+EXPORT_SYMBOL(rb_augment_insert);
/*
* before removing the node, find the deepest node on the rebalance path
@@ -340,6 +341,7 @@ struct rb_node *rb_augment_erase_begin(struct rb_node *node)
return deepest;
}
+EXPORT_SYMBOL(rb_augment_erase_begin);
/*
* after removal, update the tree to account for the removed entry
@@ -350,6 +352,7 @@ void rb_augment_erase_end(struct rb_node *node, rb_augment_f func, void *data)
if (node)
rb_augment_path(node, func, data);
}
+EXPORT_SYMBOL(rb_augment_erase_end);
/*
* This function returns the first node (in sort order) of the tree.
diff --git a/lib/rwsem.c b/lib/rwsem.c
index f236d7cd5cf..aa7c3052261 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -222,8 +222,7 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
/*
* wait for the read lock to be granted
*/
-asmregparm struct rw_semaphore __sched *
-rwsem_down_read_failed(struct rw_semaphore *sem)
+struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
{
return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_READ,
-RWSEM_ACTIVE_READ_BIAS);
@@ -232,8 +231,7 @@ rwsem_down_read_failed(struct rw_semaphore *sem)
/*
* wait for the write lock to be granted
*/
-asmregparm struct rw_semaphore __sched *
-rwsem_down_write_failed(struct rw_semaphore *sem)
+struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
{
return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_WRITE,
-RWSEM_ACTIVE_WRITE_BIAS);
@@ -243,7 +241,7 @@ rwsem_down_write_failed(struct rw_semaphore *sem)
* handle waking up a waiter on the semaphore
* - up_read/up_write has decremented the active part of count if we come here
*/
-asmregparm struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
+struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
unsigned long flags;
@@ -263,7 +261,7 @@ asmregparm struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
* - caller incremented waiting part of count and discovered it still negative
* - just wake up any readers at the front of the queue
*/
-asmregparm struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
+struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
unsigned long flags;
diff --git a/lib/show_mem.c b/lib/show_mem.c
index fdc77c82f92..90cbe4bb596 100644
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -9,14 +9,14 @@
#include <linux/nmi.h>
#include <linux/quicklist.h>
-void show_mem(void)
+void show_mem(unsigned int filter)
{
pg_data_t *pgdat;
unsigned long total = 0, reserved = 0, shared = 0,
nonshared = 0, highmem = 0;
printk("Mem-Info:\n");
- show_free_areas();
+ __show_free_areas(filter);
for_each_online_pgdat(pgdat) {
unsigned long i, flags;
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index c47bbe11b80..93ca08b8a45 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -686,8 +686,10 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
/*
* Ensure that the address returned is DMA'ble
*/
- if (!dma_capable(dev, dev_addr, size))
- panic("map_single: bounce buffer is not DMA'ble");
+ if (!dma_capable(dev, dev_addr, size)) {
+ swiotlb_tbl_unmap_single(dev, map, size, dir);
+ dev_addr = swiotlb_virt_to_bus(dev, io_tlb_overflow_buffer);
+ }
return dev_addr;
}
diff --git a/lib/test-kstrtox.c b/lib/test-kstrtox.c
new file mode 100644
index 00000000000..325c2f9eceb
--- /dev/null
+++ b/lib/test-kstrtox.c
@@ -0,0 +1,739 @@
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#define for_each_test(i, test) \
+ for (i = 0; i < sizeof(test) / sizeof(test[0]); i++)
+
+struct test_fail {
+ const char *str;
+ unsigned int base;
+};
+
+#define DEFINE_TEST_FAIL(test) \
+ const struct test_fail test[] __initdata
+
+#define DECLARE_TEST_OK(type, test_type) \
+ test_type { \
+ const char *str; \
+ unsigned int base; \
+ type expected_res; \
+ }
+
+#define DEFINE_TEST_OK(type, test) \
+ const type test[] __initdata
+
+#define TEST_FAIL(fn, type, fmt, test) \
+{ \
+ unsigned int i; \
+ \
+ for_each_test(i, test) { \
+ const struct test_fail *t = &test[i]; \
+ type tmp; \
+ int rv; \
+ \
+ tmp = 0; \
+ rv = fn(t->str, t->base, &tmp); \
+ if (rv >= 0) { \
+ WARN(1, "str '%s', base %u, expected -E, got %d/" fmt "\n", \
+ t->str, t->base, rv, tmp); \
+ continue; \
+ } \
+ } \
+}
+
+#define TEST_OK(fn, type, fmt, test) \
+{ \
+ unsigned int i; \
+ \
+ for_each_test(i, test) { \
+ const typeof(test[0]) *t = &test[i]; \
+ type res; \
+ int rv; \
+ \
+ rv = fn(t->str, t->base, &res); \
+ if (rv != 0) { \
+ WARN(1, "str '%s', base %u, expected 0/" fmt ", got %d\n", \
+ t->str, t->base, t->expected_res, rv); \
+ continue; \
+ } \
+ if (res != t->expected_res) { \
+ WARN(1, "str '%s', base %u, expected " fmt ", got " fmt "\n", \
+ t->str, t->base, t->expected_res, res); \
+ continue; \
+ } \
+ } \
+}
+
+static void __init test_kstrtoull_ok(void)
+{
+ DECLARE_TEST_OK(unsigned long long, struct test_ull);
+ static DEFINE_TEST_OK(struct test_ull, test_ull_ok) = {
+ {"0", 10, 0ULL},
+ {"1", 10, 1ULL},
+ {"127", 10, 127ULL},
+ {"128", 10, 128ULL},
+ {"129", 10, 129ULL},
+ {"255", 10, 255ULL},
+ {"256", 10, 256ULL},
+ {"257", 10, 257ULL},
+ {"32767", 10, 32767ULL},
+ {"32768", 10, 32768ULL},
+ {"32769", 10, 32769ULL},
+ {"65535", 10, 65535ULL},
+ {"65536", 10, 65536ULL},
+ {"65537", 10, 65537ULL},
+ {"2147483647", 10, 2147483647ULL},
+ {"2147483648", 10, 2147483648ULL},
+ {"2147483649", 10, 2147483649ULL},
+ {"4294967295", 10, 4294967295ULL},
+ {"4294967296", 10, 4294967296ULL},
+ {"4294967297", 10, 4294967297ULL},
+ {"9223372036854775807", 10, 9223372036854775807ULL},
+ {"9223372036854775808", 10, 9223372036854775808ULL},
+ {"9223372036854775809", 10, 9223372036854775809ULL},
+ {"18446744073709551614", 10, 18446744073709551614ULL},
+ {"18446744073709551615", 10, 18446744073709551615ULL},
+
+ {"00", 8, 00ULL},
+ {"01", 8, 01ULL},
+ {"0177", 8, 0177ULL},
+ {"0200", 8, 0200ULL},
+ {"0201", 8, 0201ULL},
+ {"0377", 8, 0377ULL},
+ {"0400", 8, 0400ULL},
+ {"0401", 8, 0401ULL},
+ {"077777", 8, 077777ULL},
+ {"0100000", 8, 0100000ULL},
+ {"0100001", 8, 0100001ULL},
+ {"0177777", 8, 0177777ULL},
+ {"0200000", 8, 0200000ULL},
+ {"0200001", 8, 0200001ULL},
+ {"017777777777", 8, 017777777777ULL},
+ {"020000000000", 8, 020000000000ULL},
+ {"020000000001", 8, 020000000001ULL},
+ {"037777777777", 8, 037777777777ULL},
+ {"040000000000", 8, 040000000000ULL},
+ {"040000000001", 8, 040000000001ULL},
+ {"0777777777777777777777", 8, 0777777777777777777777ULL},
+ {"01000000000000000000000", 8, 01000000000000000000000ULL},
+ {"01000000000000000000001", 8, 01000000000000000000001ULL},
+ {"01777777777777777777776", 8, 01777777777777777777776ULL},
+ {"01777777777777777777777", 8, 01777777777777777777777ULL},
+
+ {"0x0", 16, 0x0ULL},
+ {"0x1", 16, 0x1ULL},
+ {"0x7f", 16, 0x7fULL},
+ {"0x80", 16, 0x80ULL},
+ {"0x81", 16, 0x81ULL},
+ {"0xff", 16, 0xffULL},
+ {"0x100", 16, 0x100ULL},
+ {"0x101", 16, 0x101ULL},
+ {"0x7fff", 16, 0x7fffULL},
+ {"0x8000", 16, 0x8000ULL},
+ {"0x8001", 16, 0x8001ULL},
+ {"0xffff", 16, 0xffffULL},
+ {"0x10000", 16, 0x10000ULL},
+ {"0x10001", 16, 0x10001ULL},
+ {"0x7fffffff", 16, 0x7fffffffULL},
+ {"0x80000000", 16, 0x80000000ULL},
+ {"0x80000001", 16, 0x80000001ULL},
+ {"0xffffffff", 16, 0xffffffffULL},
+ {"0x100000000", 16, 0x100000000ULL},
+ {"0x100000001", 16, 0x100000001ULL},
+ {"0x7fffffffffffffff", 16, 0x7fffffffffffffffULL},
+ {"0x8000000000000000", 16, 0x8000000000000000ULL},
+ {"0x8000000000000001", 16, 0x8000000000000001ULL},
+ {"0xfffffffffffffffe", 16, 0xfffffffffffffffeULL},
+ {"0xffffffffffffffff", 16, 0xffffffffffffffffULL},
+
+ {"0\n", 0, 0ULL},
+ };
+ TEST_OK(kstrtoull, unsigned long long, "%llu", test_ull_ok);
+}
+
+static void __init test_kstrtoull_fail(void)
+{
+ static DEFINE_TEST_FAIL(test_ull_fail) = {
+ {"", 0},
+ {"", 8},
+ {"", 10},
+ {"", 16},
+ {"\n", 0},
+ {"\n", 8},
+ {"\n", 10},
+ {"\n", 16},
+ {"\n0", 0},
+ {"\n0", 8},
+ {"\n0", 10},
+ {"\n0", 16},
+ {"+", 0},
+ {"+", 8},
+ {"+", 10},
+ {"+", 16},
+ {"-", 0},
+ {"-", 8},
+ {"-", 10},
+ {"-", 16},
+ {"0x", 0},
+ {"0x", 16},
+ {"0X", 0},
+ {"0X", 16},
+ {"0 ", 0},
+ {"1+", 0},
+ {"1-", 0},
+ {" 2", 0},
+ /* base autodetection */
+ {"0x0z", 0},
+ {"0z", 0},
+ {"a", 0},
+ /* digit >= base */
+ {"2", 2},
+ {"8", 8},
+ {"a", 10},
+ {"A", 10},
+ {"g", 16},
+ {"G", 16},
+ /* overflow */
+ {"10000000000000000000000000000000000000000000000000000000000000000", 2},
+ {"2000000000000000000000", 8},
+ {"18446744073709551616", 10},
+ {"10000000000000000", 16},
+ /* negative */
+ {"-0", 0},
+ {"-0", 8},
+ {"-0", 10},
+ {"-0", 16},
+ {"-1", 0},
+ {"-1", 8},
+ {"-1", 10},
+ {"-1", 16},
+ /* sign is first character if any */
+ {"-+1", 0},
+ {"-+1", 8},
+ {"-+1", 10},
+ {"-+1", 16},
+ /* nothing after \n */
+ {"0\n0", 0},
+ {"0\n0", 8},
+ {"0\n0", 10},
+ {"0\n0", 16},
+ {"0\n+", 0},
+ {"0\n+", 8},
+ {"0\n+", 10},
+ {"0\n+", 16},
+ {"0\n-", 0},
+ {"0\n-", 8},
+ {"0\n-", 10},
+ {"0\n-", 16},
+ {"0\n ", 0},
+ {"0\n ", 8},
+ {"0\n ", 10},
+ {"0\n ", 16},
+ };
+ TEST_FAIL(kstrtoull, unsigned long long, "%llu", test_ull_fail);
+}
+
+static void __init test_kstrtoll_ok(void)
+{
+ DECLARE_TEST_OK(long long, struct test_ll);
+ static DEFINE_TEST_OK(struct test_ll, test_ll_ok) = {
+ {"0", 10, 0LL},
+ {"1", 10, 1LL},
+ {"127", 10, 127LL},
+ {"128", 10, 128LL},
+ {"129", 10, 129LL},
+ {"255", 10, 255LL},
+ {"256", 10, 256LL},
+ {"257", 10, 257LL},
+ {"32767", 10, 32767LL},
+ {"32768", 10, 32768LL},
+ {"32769", 10, 32769LL},
+ {"65535", 10, 65535LL},
+ {"65536", 10, 65536LL},
+ {"65537", 10, 65537LL},
+ {"2147483647", 10, 2147483647LL},
+ {"2147483648", 10, 2147483648LL},
+ {"2147483649", 10, 2147483649LL},
+ {"4294967295", 10, 4294967295LL},
+ {"4294967296", 10, 4294967296LL},
+ {"4294967297", 10, 4294967297LL},
+ {"9223372036854775807", 10, 9223372036854775807LL},
+
+ {"-1", 10, -1LL},
+ {"-2", 10, -2LL},
+ {"-9223372036854775808", 10, LLONG_MIN},
+ };
+ TEST_OK(kstrtoll, long long, "%lld", test_ll_ok);
+}
+
+static void __init test_kstrtoll_fail(void)
+{
+ static DEFINE_TEST_FAIL(test_ll_fail) = {
+ {"9223372036854775808", 10},
+ {"9223372036854775809", 10},
+ {"18446744073709551614", 10},
+ {"18446744073709551615", 10},
+ {"-9223372036854775809", 10},
+ {"-18446744073709551614", 10},
+ {"-18446744073709551615", 10},
+ /* negative zero isn't an integer in Linux */
+ {"-0", 0},
+ {"-0", 8},
+ {"-0", 10},
+ {"-0", 16},
+ /* sign is first character if any */
+ {"-+1", 0},
+ {"-+1", 8},
+ {"-+1", 10},
+ {"-+1", 16},
+ };
+ TEST_FAIL(kstrtoll, long long, "%lld", test_ll_fail);
+}
+
+static void __init test_kstrtou64_ok(void)
+{
+ DECLARE_TEST_OK(u64, struct test_u64);
+ static DEFINE_TEST_OK(struct test_u64, test_u64_ok) = {
+ {"0", 10, 0},
+ {"1", 10, 1},
+ {"126", 10, 126},
+ {"127", 10, 127},
+ {"128", 10, 128},
+ {"129", 10, 129},
+ {"254", 10, 254},
+ {"255", 10, 255},
+ {"256", 10, 256},
+ {"257", 10, 257},
+ {"32766", 10, 32766},
+ {"32767", 10, 32767},
+ {"32768", 10, 32768},
+ {"32769", 10, 32769},
+ {"65534", 10, 65534},
+ {"65535", 10, 65535},
+ {"65536", 10, 65536},
+ {"65537", 10, 65537},
+ {"2147483646", 10, 2147483646},
+ {"2147483647", 10, 2147483647},
+ {"2147483648", 10, 2147483648},
+ {"2147483649", 10, 2147483649},
+ {"4294967294", 10, 4294967294},
+ {"4294967295", 10, 4294967295},
+ {"4294967296", 10, 4294967296},
+ {"4294967297", 10, 4294967297},
+ {"9223372036854775806", 10, 9223372036854775806ULL},
+ {"9223372036854775807", 10, 9223372036854775807ULL},
+ {"9223372036854775808", 10, 9223372036854775808ULL},
+ {"9223372036854775809", 10, 9223372036854775809ULL},
+ {"18446744073709551614", 10, 18446744073709551614ULL},
+ {"18446744073709551615", 10, 18446744073709551615ULL},
+ };
+ TEST_OK(kstrtou64, u64, "%llu", test_u64_ok);
+}
+
+static void __init test_kstrtou64_fail(void)
+{
+ static DEFINE_TEST_FAIL(test_u64_fail) = {
+ {"-2", 10},
+ {"-1", 10},
+ {"18446744073709551616", 10},
+ {"18446744073709551617", 10},
+ };
+ TEST_FAIL(kstrtou64, u64, "%llu", test_u64_fail);
+}
+
+static void __init test_kstrtos64_ok(void)
+{
+ DECLARE_TEST_OK(s64, struct test_s64);
+ static DEFINE_TEST_OK(struct test_s64, test_s64_ok) = {
+ {"-128", 10, -128},
+ {"-127", 10, -127},
+ {"-1", 10, -1},
+ {"0", 10, 0},
+ {"1", 10, 1},
+ {"126", 10, 126},
+ {"127", 10, 127},
+ {"128", 10, 128},
+ {"129", 10, 129},
+ {"254", 10, 254},
+ {"255", 10, 255},
+ {"256", 10, 256},
+ {"257", 10, 257},
+ {"32766", 10, 32766},
+ {"32767", 10, 32767},
+ {"32768", 10, 32768},
+ {"32769", 10, 32769},
+ {"65534", 10, 65534},
+ {"65535", 10, 65535},
+ {"65536", 10, 65536},
+ {"65537", 10, 65537},
+ {"2147483646", 10, 2147483646},
+ {"2147483647", 10, 2147483647},
+ {"2147483648", 10, 2147483648},
+ {"2147483649", 10, 2147483649},
+ {"4294967294", 10, 4294967294},
+ {"4294967295", 10, 4294967295},
+ {"4294967296", 10, 4294967296},
+ {"4294967297", 10, 4294967297},
+ {"9223372036854775806", 10, 9223372036854775806LL},
+ {"9223372036854775807", 10, 9223372036854775807LL},
+ };
+ TEST_OK(kstrtos64, s64, "%lld", test_s64_ok);
+}
+
+static void __init test_kstrtos64_fail(void)
+{
+ static DEFINE_TEST_FAIL(test_s64_fail) = {
+ {"9223372036854775808", 10},
+ {"9223372036854775809", 10},
+ {"18446744073709551614", 10},
+ {"18446744073709551615", 10},
+ {"18446744073709551616", 10},
+ {"18446744073709551617", 10},
+ };
+ TEST_FAIL(kstrtos64, s64, "%lld", test_s64_fail);
+}
+
+static void __init test_kstrtou32_ok(void)
+{
+ DECLARE_TEST_OK(u32, struct test_u32);
+ static DEFINE_TEST_OK(struct test_u32, test_u32_ok) = {
+ {"0", 10, 0},
+ {"1", 10, 1},
+ {"126", 10, 126},
+ {"127", 10, 127},
+ {"128", 10, 128},
+ {"129", 10, 129},
+ {"254", 10, 254},
+ {"255", 10, 255},
+ {"256", 10, 256},
+ {"257", 10, 257},
+ {"32766", 10, 32766},
+ {"32767", 10, 32767},
+ {"32768", 10, 32768},
+ {"32769", 10, 32769},
+ {"65534", 10, 65534},
+ {"65535", 10, 65535},
+ {"65536", 10, 65536},
+ {"65537", 10, 65537},
+ {"2147483646", 10, 2147483646},
+ {"2147483647", 10, 2147483647},
+ {"2147483648", 10, 2147483648},
+ {"2147483649", 10, 2147483649},
+ {"4294967294", 10, 4294967294},
+ {"4294967295", 10, 4294967295},
+ };
+ TEST_OK(kstrtou32, u32, "%u", test_u32_ok);
+}
+
+static void __init test_kstrtou32_fail(void)
+{
+ static DEFINE_TEST_FAIL(test_u32_fail) = {
+ {"-2", 10},
+ {"-1", 10},
+ {"4294967296", 10},
+ {"4294967297", 10},
+ {"9223372036854775806", 10},
+ {"9223372036854775807", 10},
+ {"9223372036854775808", 10},
+ {"9223372036854775809", 10},
+ {"18446744073709551614", 10},
+ {"18446744073709551615", 10},
+ {"18446744073709551616", 10},
+ {"18446744073709551617", 10},
+ };
+ TEST_FAIL(kstrtou32, u32, "%u", test_u32_fail);
+}
+
+static void __init test_kstrtos32_ok(void)
+{
+ DECLARE_TEST_OK(s32, struct test_s32);
+ static DEFINE_TEST_OK(struct test_s32, test_s32_ok) = {
+ {"-128", 10, -128},
+ {"-127", 10, -127},
+ {"-1", 10, -1},
+ {"0", 10, 0},
+ {"1", 10, 1},
+ {"126", 10, 126},
+ {"127", 10, 127},
+ {"128", 10, 128},
+ {"129", 10, 129},
+ {"254", 10, 254},
+ {"255", 10, 255},
+ {"256", 10, 256},
+ {"257", 10, 257},
+ {"32766", 10, 32766},
+ {"32767", 10, 32767},
+ {"32768", 10, 32768},
+ {"32769", 10, 32769},
+ {"65534", 10, 65534},
+ {"65535", 10, 65535},
+ {"65536", 10, 65536},
+ {"65537", 10, 65537},
+ {"2147483646", 10, 2147483646},
+ {"2147483647", 10, 2147483647},
+ };
+ TEST_OK(kstrtos32, s32, "%d", test_s32_ok);
+}
+
+static void __init test_kstrtos32_fail(void)
+{
+ static DEFINE_TEST_FAIL(test_s32_fail) = {
+ {"2147483648", 10},
+ {"2147483649", 10},
+ {"4294967294", 10},
+ {"4294967295", 10},
+ {"4294967296", 10},
+ {"4294967297", 10},
+ {"9223372036854775806", 10},
+ {"9223372036854775807", 10},
+ {"9223372036854775808", 10},
+ {"9223372036854775809", 10},
+ {"18446744073709551614", 10},
+ {"18446744073709551615", 10},
+ {"18446744073709551616", 10},
+ {"18446744073709551617", 10},
+ };
+ TEST_FAIL(kstrtos32, s32, "%d", test_s32_fail);
+}
+
+static void __init test_kstrtou16_ok(void)
+{
+ DECLARE_TEST_OK(u16, struct test_u16);
+ static DEFINE_TEST_OK(struct test_u16, test_u16_ok) = {
+ {"0", 10, 0},
+ {"1", 10, 1},
+ {"126", 10, 126},
+ {"127", 10, 127},
+ {"128", 10, 128},
+ {"129", 10, 129},
+ {"254", 10, 254},
+ {"255", 10, 255},
+ {"256", 10, 256},
+ {"257", 10, 257},
+ {"32766", 10, 32766},
+ {"32767", 10, 32767},
+ {"32768", 10, 32768},
+ {"32769", 10, 32769},
+ {"65534", 10, 65534},
+ {"65535", 10, 65535},
+ };
+ TEST_OK(kstrtou16, u16, "%hu", test_u16_ok);
+}
+
+static void __init test_kstrtou16_fail(void)
+{
+ static DEFINE_TEST_FAIL(test_u16_fail) = {
+ {"-2", 10},
+ {"-1", 10},
+ {"65536", 10},
+ {"65537", 10},
+ {"2147483646", 10},
+ {"2147483647", 10},
+ {"2147483648", 10},
+ {"2147483649", 10},
+ {"4294967294", 10},
+ {"4294967295", 10},
+ {"4294967296", 10},
+ {"4294967297", 10},
+ {"9223372036854775806", 10},
+ {"9223372036854775807", 10},
+ {"9223372036854775808", 10},
+ {"9223372036854775809", 10},
+ {"18446744073709551614", 10},
+ {"18446744073709551615", 10},
+ {"18446744073709551616", 10},
+ {"18446744073709551617", 10},
+ };
+ TEST_FAIL(kstrtou16, u16, "%hu", test_u16_fail);
+}
+
+static void __init test_kstrtos16_ok(void)
+{
+ DECLARE_TEST_OK(s16, struct test_s16);
+ static DEFINE_TEST_OK(struct test_s16, test_s16_ok) = {
+ {"-130", 10, -130},
+ {"-129", 10, -129},
+ {"-128", 10, -128},
+ {"-127", 10, -127},
+ {"-1", 10, -1},
+ {"0", 10, 0},
+ {"1", 10, 1},
+ {"126", 10, 126},
+ {"127", 10, 127},
+ {"128", 10, 128},
+ {"129", 10, 129},
+ {"254", 10, 254},
+ {"255", 10, 255},
+ {"256", 10, 256},
+ {"257", 10, 257},
+ {"32766", 10, 32766},
+ {"32767", 10, 32767},
+ };
+ TEST_OK(kstrtos16, s16, "%hd", test_s16_ok);
+}
+
+static void __init test_kstrtos16_fail(void)
+{
+ static DEFINE_TEST_FAIL(test_s16_fail) = {
+ {"32768", 10},
+ {"32769", 10},
+ {"65534", 10},
+ {"65535", 10},
+ {"65536", 10},
+ {"65537", 10},
+ {"2147483646", 10},
+ {"2147483647", 10},
+ {"2147483648", 10},
+ {"2147483649", 10},
+ {"4294967294", 10},
+ {"4294967295", 10},
+ {"4294967296", 10},
+ {"4294967297", 10},
+ {"9223372036854775806", 10},
+ {"9223372036854775807", 10},
+ {"9223372036854775808", 10},
+ {"9223372036854775809", 10},
+ {"18446744073709551614", 10},
+ {"18446744073709551615", 10},
+ {"18446744073709551616", 10},
+ {"18446744073709551617", 10},
+ };
+ TEST_FAIL(kstrtos16, s16, "%hd", test_s16_fail);
+}
+
+static void __init test_kstrtou8_ok(void)
+{
+ DECLARE_TEST_OK(u8, struct test_u8);
+ static DEFINE_TEST_OK(struct test_u8, test_u8_ok) = {
+ {"0", 10, 0},
+ {"1", 10, 1},
+ {"126", 10, 126},
+ {"127", 10, 127},
+ {"128", 10, 128},
+ {"129", 10, 129},
+ {"254", 10, 254},
+ {"255", 10, 255},
+ };
+ TEST_OK(kstrtou8, u8, "%hhu", test_u8_ok);
+}
+
+static void __init test_kstrtou8_fail(void)
+{
+ static DEFINE_TEST_FAIL(test_u8_fail) = {
+ {"-2", 10},
+ {"-1", 10},
+ {"256", 10},
+ {"257", 10},
+ {"32766", 10},
+ {"32767", 10},
+ {"32768", 10},
+ {"32769", 10},
+ {"65534", 10},
+ {"65535", 10},
+ {"65536", 10},
+ {"65537", 10},
+ {"2147483646", 10},
+ {"2147483647", 10},
+ {"2147483648", 10},
+ {"2147483649", 10},
+ {"4294967294", 10},
+ {"4294967295", 10},
+ {"4294967296", 10},
+ {"4294967297", 10},
+ {"9223372036854775806", 10},
+ {"9223372036854775807", 10},
+ {"9223372036854775808", 10},
+ {"9223372036854775809", 10},
+ {"18446744073709551614", 10},
+ {"18446744073709551615", 10},
+ {"18446744073709551616", 10},
+ {"18446744073709551617", 10},
+ };
+ TEST_FAIL(kstrtou8, u8, "%hhu", test_u8_fail);
+}
+
+static void __init test_kstrtos8_ok(void)
+{
+ DECLARE_TEST_OK(s8, struct test_s8);
+ static DEFINE_TEST_OK(struct test_s8, test_s8_ok) = {
+ {"-128", 10, -128},
+ {"-127", 10, -127},
+ {"-1", 10, -1},
+ {"0", 10, 0},
+ {"1", 10, 1},
+ {"126", 10, 126},
+ {"127", 10, 127},
+ };
+ TEST_OK(kstrtos8, s8, "%hhd", test_s8_ok);
+}
+
+static void __init test_kstrtos8_fail(void)
+{
+ static DEFINE_TEST_FAIL(test_s8_fail) = {
+ {"-130", 10},
+ {"-129", 10},
+ {"128", 10},
+ {"129", 10},
+ {"254", 10},
+ {"255", 10},
+ {"256", 10},
+ {"257", 10},
+ {"32766", 10},
+ {"32767", 10},
+ {"32768", 10},
+ {"32769", 10},
+ {"65534", 10},
+ {"65535", 10},
+ {"65536", 10},
+ {"65537", 10},
+ {"2147483646", 10},
+ {"2147483647", 10},
+ {"2147483648", 10},
+ {"2147483649", 10},
+ {"4294967294", 10},
+ {"4294967295", 10},
+ {"4294967296", 10},
+ {"4294967297", 10},
+ {"9223372036854775806", 10},
+ {"9223372036854775807", 10},
+ {"9223372036854775808", 10},
+ {"9223372036854775809", 10},
+ {"18446744073709551614", 10},
+ {"18446744073709551615", 10},
+ {"18446744073709551616", 10},
+ {"18446744073709551617", 10},
+ };
+ TEST_FAIL(kstrtos8, s8, "%hhd", test_s8_fail);
+}
+
+static int __init test_kstrtox_init(void)
+{
+ test_kstrtoull_ok();
+ test_kstrtoull_fail();
+ test_kstrtoll_ok();
+ test_kstrtoll_fail();
+
+ test_kstrtou64_ok();
+ test_kstrtou64_fail();
+ test_kstrtos64_ok();
+ test_kstrtos64_fail();
+
+ test_kstrtou32_ok();
+ test_kstrtou32_fail();
+ test_kstrtos32_ok();
+ test_kstrtos32_fail();
+
+ test_kstrtou16_ok();
+ test_kstrtou16_fail();
+ test_kstrtos16_ok();
+ test_kstrtos16_fail();
+
+ test_kstrtou8_ok();
+ test_kstrtou8_fail();
+ test_kstrtos8_ok();
+ test_kstrtos8_fail();
+ return -EINVAL;
+}
+module_init(test_kstrtox_init);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/textsearch.c b/lib/textsearch.c
index d608331b3e4..e0cc0146ae6 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -13,7 +13,7 @@
*
* INTRODUCTION
*
- * The textsearch infrastructure provides text searching facitilies for
+ * The textsearch infrastructure provides text searching facilities for
* both linear and non-linear data. Individual search algorithms are
* implemented in modules and chosen by the user.
*
@@ -43,7 +43,7 @@
* to the algorithm to store persistent variables.
* (4) Core eventually resets the search offset and forwards the find()
* request to the algorithm.
- * (5) Algorithm calls get_next_block() provided by the user continously
+ * (5) Algorithm calls get_next_block() provided by the user continuously
* to fetch the data to be searched in block by block.
* (6) Algorithm invokes finish() after the last call to get_next_block
* to clean up any leftovers from get_next_block. (Optional)
@@ -58,15 +58,15 @@
* the pattern to look for and flags. As a flag, you can set TS_IGNORECASE
* to perform case insensitive matching. But it might slow down
* performance of algorithm, so you should use it at own your risk.
- * The returned configuration may then be used for an arbitary
+ * The returned configuration may then be used for an arbitrary
* amount of times and even in parallel as long as a separate struct
* ts_state variable is provided to every instance.
*
* The actual search is performed by either calling textsearch_find_-
* continuous() for linear data or by providing an own get_next_block()
* implementation and calling textsearch_find(). Both functions return
- * the position of the first occurrence of the patern or UINT_MAX if
- * no match was found. Subsequent occurences can be found by calling
+ * the position of the first occurrence of the pattern or UINT_MAX if
+ * no match was found. Subsequent occurrences can be found by calling
* textsearch_next() regardless of the linearity of the data.
*
* Once you're done using a configuration it must be given back via
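
[Illustrative note, not part of the patch] For readers of the corrected documentation above, a minimal sketch of the linear-data calling sequence it describes; the algorithm name, pattern and helper are hypothetical:

	#include <linux/textsearch.h>
	#include <linux/err.h>
	#include <linux/gfp.h>

	/* Hypothetical helper: count occurrences of "needle" in a flat buffer. */
	static unsigned int count_needles(const void *data, unsigned int len)
	{
		struct ts_config *conf;
		struct ts_state state;
		unsigned int pos, hits = 0;

		conf = textsearch_prepare("kmp", "needle", 6, GFP_KERNEL, TS_AUTOLOAD);
		if (IS_ERR(conf))
			return 0;

		/* First hit, then iterate; UINT_MAX means no (further) match. */
		pos = textsearch_find_continuous(conf, &state, data, len);
		while (pos != UINT_MAX) {
			hits++;
			pos = textsearch_next(conf, &state);
		}

		textsearch_destroy(conf);
		return hits;
	}
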
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index d3023df8477..02bcdd5feac 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -120,147 +120,6 @@ long long simple_strtoll(const char *cp, char **endp, unsigned int base)
}
EXPORT_SYMBOL(simple_strtoll);
-/**
- * strict_strtoul - convert a string to an unsigned long strictly
- * @cp: The string to be converted
- * @base: The number base to use
- * @res: The converted result value
- *
- * strict_strtoul converts a string to an unsigned long only if the
- * string is really an unsigned long string, any string containing
- * any invalid char at the tail will be rejected and -EINVAL is returned,
- * only a newline char at the tail is acceptible because people generally
- * change a module parameter in the following way:
- *
- * echo 1024 > /sys/module/e1000/parameters/copybreak
- *
- * echo will append a newline to the tail.
- *
- * It returns 0 if conversion is successful and *res is set to the converted
- * value, otherwise it returns -EINVAL and *res is set to 0.
- *
- * simple_strtoul just ignores the successive invalid characters and
- * return the converted value of prefix part of the string.
- */
-int strict_strtoul(const char *cp, unsigned int base, unsigned long *res)
-{
- char *tail;
- unsigned long val;
-
- *res = 0;
- if (!*cp)
- return -EINVAL;
-
- val = simple_strtoul(cp, &tail, base);
- if (tail == cp)
- return -EINVAL;
-
- if ((tail[0] == '\0') || (tail[0] == '\n' && tail[1] == '\0')) {
- *res = val;
- return 0;
- }
-
- return -EINVAL;
-}
-EXPORT_SYMBOL(strict_strtoul);
-
-/**
- * strict_strtol - convert a string to a long strictly
- * @cp: The string to be converted
- * @base: The number base to use
- * @res: The converted result value
- *
- * strict_strtol is similiar to strict_strtoul, but it allows the first
- * character of a string is '-'.
- *
- * It returns 0 if conversion is successful and *res is set to the converted
- * value, otherwise it returns -EINVAL and *res is set to 0.
- */
-int strict_strtol(const char *cp, unsigned int base, long *res)
-{
- int ret;
- if (*cp == '-') {
- ret = strict_strtoul(cp + 1, base, (unsigned long *)res);
- if (!ret)
- *res = -(*res);
- } else {
- ret = strict_strtoul(cp, base, (unsigned long *)res);
- }
-
- return ret;
-}
-EXPORT_SYMBOL(strict_strtol);
-
-/**
- * strict_strtoull - convert a string to an unsigned long long strictly
- * @cp: The string to be converted
- * @base: The number base to use
- * @res: The converted result value
- *
- * strict_strtoull converts a string to an unsigned long long only if the
- * string is really an unsigned long long string, any string containing
- * any invalid char at the tail will be rejected and -EINVAL is returned,
- * only a newline char at the tail is acceptible because people generally
- * change a module parameter in the following way:
- *
- * echo 1024 > /sys/module/e1000/parameters/copybreak
- *
- * echo will append a newline to the tail of the string.
- *
- * It returns 0 if conversion is successful and *res is set to the converted
- * value, otherwise it returns -EINVAL and *res is set to 0.
- *
- * simple_strtoull just ignores the successive invalid characters and
- * return the converted value of prefix part of the string.
- */
-int strict_strtoull(const char *cp, unsigned int base, unsigned long long *res)
-{
- char *tail;
- unsigned long long val;
-
- *res = 0;
- if (!*cp)
- return -EINVAL;
-
- val = simple_strtoull(cp, &tail, base);
- if (tail == cp)
- return -EINVAL;
- if ((tail[0] == '\0') || (tail[0] == '\n' && tail[1] == '\0')) {
- *res = val;
- return 0;
- }
-
- return -EINVAL;
-}
-EXPORT_SYMBOL(strict_strtoull);
-
-/**
- * strict_strtoll - convert a string to a long long strictly
- * @cp: The string to be converted
- * @base: The number base to use
- * @res: The converted result value
- *
- * strict_strtoll is similiar to strict_strtoull, but it allows the first
- * character of a string is '-'.
- *
- * It returns 0 if conversion is successful and *res is set to the converted
- * value, otherwise it returns -EINVAL and *res is set to 0.
- */
-int strict_strtoll(const char *cp, unsigned int base, long long *res)
-{
- int ret;
- if (*cp == '-') {
- ret = strict_strtoull(cp + 1, base, (unsigned long long *)res);
- if (!ret)
- *res = -(*res);
- } else {
- ret = strict_strtoull(cp, base, (unsigned long long *)res);
- }
-
- return ret;
-}
-EXPORT_SYMBOL(strict_strtoll);
-
static noinline_for_stack
int skip_atoi(const char **s)
{
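
[Illustrative note, not part of the patch] The strict_strto*() helpers removed above are superseded by the kstrto*() family added in lib/kstrtox.c. On the caller side the conversion is essentially a drop-in swap; a hypothetical example:

	#include <linux/kernel.h>	/* kstrtoul() */

	/* Hypothetical parser for a module parameter such as "copybreak". */
	static int parse_copybreak(const char *buf, unsigned long *out)
	{
		/*
		 * Was: strict_strtoul(buf, 10, out);
		 * kstrtoul() keeps the "digits plus optional trailing newline"
		 * contract and additionally reports overflow as -ERANGE.
		 */
		return kstrtoul(buf, 10, out);
	}
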
@@ -991,7 +850,7 @@ static noinline_for_stack
char *pointer(const char *fmt, char *buf, char *end, void *ptr,
struct printf_spec spec)
{
- if (!ptr) {
+ if (!ptr && *fmt != 'K') {
/*
* Print (null) with the same width as a pointer so it makes
* tabular output look nice.
@@ -1047,16 +906,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
if (spec.field_width == -1)
spec.field_width = 2 * sizeof(void *);
return string(buf, end, "pK-error", spec);
- } else if ((kptr_restrict == 0) ||
- (kptr_restrict == 1 &&
- has_capability_noaudit(current, CAP_SYSLOG)))
- break;
-
- if (spec.field_width == -1) {
- spec.field_width = 2 * sizeof(void *);
- spec.flags |= ZEROPAD;
}
- return number(buf, end, 0, spec);
+ if (!((kptr_restrict == 0) ||
+ (kptr_restrict == 1 &&
+ has_capability_noaudit(current, CAP_SYSLOG))))
+ ptr = NULL;
+ break;
}
spec.flags |= SMALL;
if (spec.field_width == -1) {
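
[Illustrative note, not part of the patch] With the rework above, a %pK argument printed for a reader lacking CAP_SYSLOG (under kptr_restrict) is treated as a NULL pointer rather than taking a separate zero-padding path, so a hidden address and a genuine NULL become indistinguishable. A hypothetical caller:

	#include <linux/printk.h>

	/* Hypothetical debug helper; 'obj' stands in for any kernel object. */
	static void dump_obj(const void *obj)
	{
		/*
		 * Unrestricted readers see the real address; restricted
		 * readers see the same output a NULL argument would produce.
		 */
		pr_info("obj at %pK\n", obj);
	}
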
diff --git a/lib/zlib_deflate/deflate.c b/lib/zlib_deflate/deflate.c
index 46a31e5f49c..d63381e8e33 100644
--- a/lib/zlib_deflate/deflate.c
+++ b/lib/zlib_deflate/deflate.c
@@ -176,6 +176,7 @@ int zlib_deflateInit2(
deflate_state *s;
int noheader = 0;
deflate_workspace *mem;
+ char *next;
ush *overlay;
/* We overlay pending_buf and d_buf+l_buf. This works since the average
@@ -199,6 +200,21 @@ int zlib_deflateInit2(
strategy < 0 || strategy > Z_HUFFMAN_ONLY) {
return Z_STREAM_ERROR;
}
+
+ /*
+ * Direct the workspace's pointers to the chunks that were allocated
+ * along with the deflate_workspace struct.
+ */
+ next = (char *) mem;
+ next += sizeof(*mem);
+ mem->window_memory = (Byte *) next;
+ next += zlib_deflate_window_memsize(windowBits);
+ mem->prev_memory = (Pos *) next;
+ next += zlib_deflate_prev_memsize(windowBits);
+ mem->head_memory = (Pos *) next;
+ next += zlib_deflate_head_memsize(memLevel);
+ mem->overlay_memory = next;
+
s = (deflate_state *) &(mem->deflate_memory);
strm->state = (struct internal_state *)s;
s->strm = strm;
@@ -1247,7 +1263,18 @@ static block_state deflate_slow(
return flush == Z_FINISH ? finish_done : block_done;
}
-int zlib_deflate_workspacesize(void)
+int zlib_deflate_workspacesize(int windowBits, int memLevel)
{
- return sizeof(deflate_workspace);
+ if (windowBits < 0) /* undocumented feature: suppress zlib header */
+ windowBits = -windowBits;
+
+ /* Since the return value is typically passed to vmalloc() unchecked... */
+ BUG_ON(memLevel < 1 || memLevel > MAX_MEM_LEVEL || windowBits < 9 ||
+ windowBits > 15);
+
+ return sizeof(deflate_workspace)
+ + zlib_deflate_window_memsize(windowBits)
+ + zlib_deflate_prev_memsize(windowBits)
+ + zlib_deflate_head_memsize(memLevel)
+ + zlib_deflate_overlay_memsize(memLevel);
}
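
[Illustrative note, not part of the patch] zlib_deflate_workspacesize() now takes the intended windowBits and memLevel, so a caller that only needs a small window no longer has to allocate the worst-case workspace. A hypothetical setup path:

	#include <linux/zlib.h>
	#include <linux/vmalloc.h>

	/* Hypothetical init for a stream limited to a 2 KiB window, memLevel 4. */
	static int my_deflate_init(struct z_stream_s *stream)
	{
		stream->workspace = vmalloc(zlib_deflate_workspacesize(11, 4));
		if (!stream->workspace)
			return -ENOMEM;

		/* Same windowBits/memLevel must be passed to the init call. */
		return zlib_deflateInit2(stream, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
					 11, 4, Z_DEFAULT_STRATEGY);
	}
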
diff --git a/lib/zlib_deflate/defutil.h b/lib/zlib_deflate/defutil.h
index 6b15a909ca3..b640b6402e9 100644
--- a/lib/zlib_deflate/defutil.h
+++ b/lib/zlib_deflate/defutil.h
@@ -241,12 +241,21 @@ typedef struct deflate_state {
typedef struct deflate_workspace {
/* State memory for the deflator */
deflate_state deflate_memory;
- Byte window_memory[2 * (1 << MAX_WBITS)];
- Pos prev_memory[1 << MAX_WBITS];
- Pos head_memory[1 << (MAX_MEM_LEVEL + 7)];
- char overlay_memory[(1 << (MAX_MEM_LEVEL + 6)) * (sizeof(ush)+2)];
+ Byte *window_memory;
+ Pos *prev_memory;
+ Pos *head_memory;
+ char *overlay_memory;
} deflate_workspace;
+#define zlib_deflate_window_memsize(windowBits) \
+ (2 * (1 << (windowBits)) * sizeof(Byte))
+#define zlib_deflate_prev_memsize(windowBits) \
+ ((1 << (windowBits)) * sizeof(Pos))
+#define zlib_deflate_head_memsize(memLevel) \
+ ((1 << ((memLevel)+7)) * sizeof(Pos))
+#define zlib_deflate_overlay_memsize(memLevel) \
+ ((1 << ((memLevel)+6)) * (sizeof(ush)+2))
+
/* Output a byte on the stream.
* IN assertion: there is enough room in pending_buf.
*/
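
[Illustrative note, not part of the patch] The four memsize macros reproduce the sizes of the fixed arrays they replace. Assuming Pos and ush are both 2-byte types, as in the kernel's zlib headers, the maximum configuration (windowBits = 15, memLevel = 8) gives 65536 + 65536 + 65536 + 65536 bytes = 256 KiB of chunk memory, matching the old static layout, while a minimal stream (windowBits = 9, memLevel = 1) needs only 1024 + 1024 + 512 + 512 bytes = 3 KiB plus the deflate_state itself.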