author     Takashi Iwai <tiwai@suse.de>  2011-08-08 14:30:29 +0200
committer  Takashi Iwai <tiwai@suse.de>  2011-08-08 14:30:29 +0200
commit     0a2d31b62dba9b5b92a38c67c9cc42630513662a
tree       f755d74ec85248de645e10c45ed1a2ed467530f6
parent     8039290a91c5dc4414093c086987a5d7738fe2fd
parent     df944f66784e6d4f2f50739263a4947885d8b6ae

Merge branch 'fix/kconfig' into for-linus
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig          |  17
-rw-r--r--  lib/Kconfig.debug    |  13
-rw-r--r--  lib/Makefile         |   7
-rw-r--r--  lib/atomic64.c       |   2
-rw-r--r--  lib/atomic64_test.c  |   2
-rw-r--r--  lib/bitmap.c         |   6
-rw-r--r--  lib/checksum.c       |  13
-rw-r--r--  lib/cordic.c         | 101
-rw-r--r--  lib/cpumask.c        |   4
-rw-r--r--  lib/crc32.c          |   2
-rw-r--r--  lib/crc8.c           |  86
-rw-r--r--  lib/debugobjects.c   |   2
-rw-r--r--  lib/dec_and_lock.c   |   2
-rw-r--r--  lib/devres.c         |   2
-rw-r--r--  lib/fault-inject.c   | 156
-rw-r--r--  lib/genalloc.c       | 300
-rw-r--r--  lib/idr.c            |  67
-rw-r--r--  lib/iomap.c          |   4
-rw-r--r--  lib/kobject.c        |  26
-rw-r--r--  lib/kstrtox.c        |   5
-rw-r--r--  lib/lcm.c            |   1
-rw-r--r--  lib/llist.c          | 129
-rw-r--r--  lib/md5.c            |  95
-rw-r--r--  lib/plist.c          |   7
-rw-r--r--  lib/radix-tree.c     | 121
-rw-r--r--  lib/sha1.c           | 212
-rw-r--r--  lib/swiotlb.c        |   5
-rw-r--r--  lib/vsprintf.c       |  30
-rw-r--r--  lib/xz/xz_private.h  |   2
29 files changed, 1115 insertions(+), 304 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 830181cc7a8..6c695ff9cab 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -79,6 +79,13 @@ config LIBCRC32C
require M here. See Castagnoli93.
Module will be libcrc32c.
+config CRC8
+ tristate "CRC8 function"
+ help
+ This option provides CRC8 function. Drivers may select this
+ when they need to do cyclic redundancy check according CRC8
+ algorithm. Module will be called crc8.
+
config AUDIT_GENERIC
bool
depends on AUDIT && !AUDIT_ARCH
@@ -262,4 +269,14 @@ config AVERAGE
If unsure, say N.
+config CORDIC
+ tristate "Cordic function"
+ help
+ The option provides arithmetic function using cordic algorithm
+ so its calculations are in fixed point. Modules can select this
+ when they require this function. Module will be called cordic.
+
+config LLIST
+ bool
+
endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index dd373c8ee94..c0cb9c4bc46 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -227,7 +227,7 @@ config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
config DETECT_HUNG_TASK
bool "Detect Hung Tasks"
depends on DEBUG_KERNEL
- default DETECT_SOFTLOCKUP
+ default LOCKUP_DETECTOR
help
Say Y here to enable the kernel to detect "hung tasks",
which are bugs that cause the task to be stuck in
@@ -648,12 +648,15 @@ config TRACE_IRQFLAGS
Enables hooks to interrupt enabling and disabling for
either tracing or lock debugging.
-config DEBUG_SPINLOCK_SLEEP
- bool "Spinlock debugging: sleep-inside-spinlock checking"
+config DEBUG_ATOMIC_SLEEP
+ bool "Sleep inside atomic section checking"
+ select PREEMPT_COUNT
depends on DEBUG_KERNEL
help
If you say Y here, various routines which may sleep will become very
- noisy if they are called with a spinlock held.
+ noisy if they are called inside atomic sections: when a spinlock is
+ held, inside an rcu read side critical section, inside preempt disabled
+ sections, inside an interrupt, etc...
config DEBUG_LOCKING_API_SELFTESTS
bool "Locking API boot-time self-tests"
@@ -866,7 +869,7 @@ config BOOT_PRINTK_DELAY
system, and then set "lpj=M" before setting "boot_delay=N".
NOTE: Using this option may adversely affect SMP systems.
I.e., processors other than the first one may not boot up.
- BOOT_PRINTK_DELAY also may cause DETECT_SOFTLOCKUP to detect
+ BOOT_PRINTK_DELAY also may cause LOCKUP_DETECTOR to detect
what it believes to be lockup conditions.
config RCU_TORTURE_TEST
diff --git a/lib/Makefile b/lib/Makefile
index 6b597fdb189..d5d175c8a6c 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -10,7 +10,7 @@ endif
lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o dump_stack.o timerqueue.o\
idr.o int_sqrt.o extable.o prio_tree.o \
- sha1.o irq_regs.o reciprocal_div.o argv_split.o \
+ sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \
proportions.o prio_heap.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o find_next_bit.o
@@ -61,6 +61,7 @@ obj-$(CONFIG_CRC_ITU_T) += crc-itu-t.o
obj-$(CONFIG_CRC32) += crc32.o
obj-$(CONFIG_CRC7) += crc7.o
obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
+obj-$(CONFIG_CRC8) += crc8.o
obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o
obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/
@@ -112,6 +113,10 @@ obj-$(CONFIG_AVERAGE) += average.o
obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
+obj-$(CONFIG_CORDIC) += cordic.o
+
+obj-$(CONFIG_LLIST) += llist.o
+
hostprogs-y := gen_crc32table
clean-files := crc32table.h
diff --git a/lib/atomic64.c b/lib/atomic64.c
index a21c12bc727..e12ae0dd08a 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -14,7 +14,7 @@
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
/*
* We use a hashed array of spinlocks to provide exclusive access
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index 44524cc8c32..0c33cde2a1e 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -10,7 +10,7 @@
*/
#include <linux/init.h>
#include <linux/kernel.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#define INIT(c) do { atomic64_set(&v, c); r = c; } while (0)
static __init int test_atomic64(void)
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 41baf02924e..2f4412e4d07 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -271,8 +271,6 @@ int __bitmap_weight(const unsigned long *bitmap, int bits)
}
EXPORT_SYMBOL(__bitmap_weight);
-#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
-
void bitmap_set(unsigned long *map, int start, int nr)
{
unsigned long *p = map + BIT_WORD(start);
@@ -572,7 +570,7 @@ EXPORT_SYMBOL(bitmap_scnlistprintf);
/**
* __bitmap_parselist - convert list format ASCII string to bitmap
- * @bp: read nul-terminated user string from this buffer
+ * @buf: read nul-terminated user string from this buffer
* @buflen: buffer size in bytes. If string is smaller than this
* then it must be terminated with a \0.
* @is_user: location of buffer, 0 indicates kernel space
@@ -756,7 +754,7 @@ static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits)
*
* The bit positions 0 through @bits are valid positions in @buf.
*/
-static int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits)
+int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits)
{
int pos = 0;
diff --git a/lib/checksum.c b/lib/checksum.c
index 097508732f3..8df2f91e6d9 100644
--- a/lib/checksum.c
+++ b/lib/checksum.c
@@ -49,7 +49,7 @@ static inline unsigned short from32to16(unsigned int x)
static unsigned int do_csum(const unsigned char *buff, int len)
{
- int odd, count;
+ int odd;
unsigned int result = 0;
if (len <= 0)
@@ -64,25 +64,22 @@ static unsigned int do_csum(const unsigned char *buff, int len)
len--;
buff++;
}
- count = len >> 1; /* nr of 16-bit words.. */
- if (count) {
+ if (len >= 2) {
if (2 & (unsigned long) buff) {
result += *(unsigned short *) buff;
- count--;
len -= 2;
buff += 2;
}
- count >>= 1; /* nr of 32-bit words.. */
- if (count) {
+ if (len >= 4) {
+ const unsigned char *end = buff + ((unsigned)len & ~3);
unsigned int carry = 0;
do {
unsigned int w = *(unsigned int *) buff;
- count--;
buff += 4;
result += carry;
result += w;
carry = (w > result);
- } while (count);
+ } while (buff < end);
result += carry;
result = (result & 0xffff) + (result >> 16);
}
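
For reference, the restructured loop above drops the separate 16/32-bit word counters and instead walks the buffer up to a 4-byte-aligned end pointer, folding the end-around carry on each step. A standalone sketch of the same folding pattern (plain C for illustration, not the kernel function itself; the helper name is hypothetical and trailing bytes are ignored):

/* Illustrative only: sum 32-bit words with end-around carry, then
 * fold the result to 16 bits, mirroring the rewritten do_csum() loop. */
static unsigned int fold_csum32(const unsigned char *buff, unsigned int len)
{
	const unsigned char *end = buff + (len & ~3u);
	unsigned int result = 0, carry = 0;

	while (buff < end) {
		unsigned int w = *(const unsigned int *)buff;

		buff += 4;
		result += carry;
		result += w;
		carry = (w > result);
	}
	result += carry;
	result = (result & 0xffff) + (result >> 16);

	return result;
}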
diff --git a/lib/cordic.c b/lib/cordic.c
new file mode 100644
index 00000000000..aa27a88d7e0
--- /dev/null
+++ b/lib/cordic.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/module.h>
+#include <linux/cordic.h>
+
+#define CORDIC_ANGLE_GEN 39797
+#define CORDIC_PRECISION_SHIFT 16
+#define CORDIC_NUM_ITER (CORDIC_PRECISION_SHIFT + 2)
+
+#define FIXED(X) ((s32)((X) << CORDIC_PRECISION_SHIFT))
+#define FLOAT(X) (((X) >= 0) \
+ ? ((((X) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1) \
+ : -((((-(X)) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1))
+
+static const s32 arctan_table[] = {
+ 2949120,
+ 1740967,
+ 919879,
+ 466945,
+ 234379,
+ 117304,
+ 58666,
+ 29335,
+ 14668,
+ 7334,
+ 3667,
+ 1833,
+ 917,
+ 458,
+ 229,
+ 115,
+ 57,
+ 29
+};
+
+/*
+ * cordic_calc_iq() - calculates the i/q coordinate for given angle
+ *
+ * theta: angle in degrees for which i/q coordinate is to be calculated
+ * coord: function output parameter holding the i/q coordinate
+ */
+struct cordic_iq cordic_calc_iq(s32 theta)
+{
+ struct cordic_iq coord;
+ s32 angle, valtmp;
+ unsigned iter;
+ int signx = 1;
+ int signtheta;
+
+ coord.i = CORDIC_ANGLE_GEN;
+ coord.q = 0;
+ angle = 0;
+
+ theta = FIXED(theta);
+ signtheta = (theta < 0) ? -1 : 1;
+ theta = ((theta + FIXED(180) * signtheta) % FIXED(360)) -
+ FIXED(180) * signtheta;
+
+ if (FLOAT(theta) > 90) {
+ theta -= FIXED(180);
+ signx = -1;
+ } else if (FLOAT(theta) < -90) {
+ theta += FIXED(180);
+ signx = -1;
+ }
+
+ for (iter = 0; iter < CORDIC_NUM_ITER; iter++) {
+ if (theta > angle) {
+ valtmp = coord.i - (coord.q >> iter);
+ coord.q += (coord.i >> iter);
+ angle += arctan_table[iter];
+ } else {
+ valtmp = coord.i + (coord.q >> iter);
+ coord.q -= (coord.i >> iter);
+ angle -= arctan_table[iter];
+ }
+ coord.i = valtmp;
+ }
+
+ coord.i *= signx;
+ coord.q *= signx;
+ return coord;
+}
+EXPORT_SYMBOL(cordic_calc_iq);
+
+MODULE_DESCRIPTION("Cordic functions");
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_LICENSE("Dual BSD/GPL");
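
The new file exports a single entry point, cordic_calc_iq(). A minimal caller sketch, assuming only the <linux/cordic.h> interface added by this series; the 45-degree angle and the pr_info() reporting are illustrative, not part of the patch:

#include <linux/cordic.h>
#include <linux/printk.h>

/* Illustrative caller: compute the I/Q pair for a 45 degree phase. */
static void cordic_example(void)
{
	struct cordic_iq coord = cordic_calc_iq(45);

	/* coord.i and coord.q are fixed-point values scaled by the
	 * CORDIC gain (CORDIC_ANGLE_GEN above); callers normalise
	 * them as required. */
	pr_info("i = %d, q = %d\n", coord.i, coord.q);
}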
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 05d6aca7fc1..af3e5817de9 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -30,7 +30,7 @@ int __any_online_cpu(const cpumask_t *mask)
{
int cpu;
- for_each_cpu_mask(cpu, *mask) {
+ for_each_cpu(cpu, mask) {
if (cpu_online(cpu))
break;
}
@@ -131,7 +131,7 @@ EXPORT_SYMBOL(zalloc_cpumask_var_node);
*/
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
- return alloc_cpumask_var_node(mask, flags, numa_node_id());
+ return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
}
EXPORT_SYMBOL(alloc_cpumask_var);
diff --git a/lib/crc32.c b/lib/crc32.c
index 4855995fcde..a6e633a48ce 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -26,7 +26,7 @@
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/init.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "crc32defs.h"
#if CRC_LE_BITS == 8
# define tole(x) __constant_cpu_to_le32(x)
diff --git a/lib/crc8.c b/lib/crc8.c
new file mode 100644
index 00000000000..87b59cafdb8
--- /dev/null
+++ b/lib/crc8.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/crc8.h>
+#include <linux/printk.h>
+
+/*
+ * crc8_populate_msb - fill crc table for given polynomial in reverse bit order.
+ *
+ * table: table to be filled.
+ * polynomial: polynomial for which table is to be filled.
+ */
+void crc8_populate_msb(u8 table[CRC8_TABLE_SIZE], u8 polynomial)
+{
+ int i, j;
+ const u8 msbit = 0x80;
+ u8 t = msbit;
+
+ table[0] = 0;
+
+ for (i = 1; i < CRC8_TABLE_SIZE; i *= 2) {
+ t = (t << 1) ^ (t & msbit ? polynomial : 0);
+ for (j = 0; j < i; j++)
+ table[i+j] = table[j] ^ t;
+ }
+}
+EXPORT_SYMBOL(crc8_populate_msb);
+
+/*
+ * crc8_populate_lsb - fill crc table for given polynomial in regular bit order.
+ *
+ * table: table to be filled.
+ * polynomial: polynomial for which table is to be filled.
+ */
+void crc8_populate_lsb(u8 table[CRC8_TABLE_SIZE], u8 polynomial)
+{
+ int i, j;
+ u8 t = 1;
+
+ table[0] = 0;
+
+ for (i = (CRC8_TABLE_SIZE >> 1); i; i >>= 1) {
+ t = (t >> 1) ^ (t & 1 ? polynomial : 0);
+ for (j = 0; j < CRC8_TABLE_SIZE; j += 2*i)
+ table[i+j] = table[j] ^ t;
+ }
+}
+EXPORT_SYMBOL(crc8_populate_lsb);
+
+/*
+ * crc8 - calculate a crc8 over the given input data.
+ *
+ * table: crc table used for calculation.
+ * pdata: pointer to data buffer.
+ * nbytes: number of bytes in data buffer.
+ * crc: previous returned crc8 value.
+ */
+u8 crc8(const u8 table[CRC8_TABLE_SIZE], u8 *pdata, size_t nbytes, u8 crc)
+{
+ /* loop over the buffer data */
+ while (nbytes-- > 0)
+ crc = table[(crc ^ *pdata++) & 0xff];
+
+ return crc;
+}
+EXPORT_SYMBOL(crc8);
+
+MODULE_DESCRIPTION("CRC8 (by Williams, Ross N.) function");
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_LICENSE("Dual BSD/GPL");
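
The crc8 helpers are table driven: a user first fills a CRC8_TABLE_SIZE lookup table for its polynomial, then feeds buffers through crc8(), chaining the returned value across chunks. A usage sketch; the 0x07 polynomial, the 0xff seed and the wrapper names are illustrative placeholders, not taken from this patch:

#include <linux/crc8.h>

static u8 example_crc_table[CRC8_TABLE_SIZE];

static void example_crc_init(void)
{
	/* Build the lookup table once, e.g. at probe time
	 * (0x07 is an illustrative polynomial). */
	crc8_populate_msb(example_crc_table, 0x07);
}

static u8 example_crc(u8 *buf, size_t len)
{
	/* 0xff is an illustrative seed; pass a previous return value
	 * here to checksum data arriving in several chunks. */
	return crc8(example_crc_table, buf, len, 0xff);
}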
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 9d86e45086f..a78b7c6e042 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -198,7 +198,7 @@ static void free_object(struct debug_obj *obj)
* initialized:
*/
if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
- sched = !work_pending(&debug_obj_work);
+ sched = keventd_up() && !work_pending(&debug_obj_work);
hlist_add_head(&obj->node, &obj_pool);
obj_pool_free++;
obj_pool_used--;
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index e73822aa6e9..b5257725daa 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -1,6 +1,6 @@
#include <linux/module.h>
#include <linux/spinlock.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
/*
* This is an implementation of the notion of "decrement a
diff --git a/lib/devres.c b/lib/devres.c
index 6efddf53b90..7c0e953a748 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -79,9 +79,9 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
*/
void devm_iounmap(struct device *dev, void __iomem *addr)
{
- iounmap(addr);
WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
(void *)addr));
+ iounmap(addr);
}
EXPORT_SYMBOL(devm_iounmap);
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index 7e65af70635..f193b779644 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -8,7 +8,6 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/stacktrace.h>
-#include <linux/kallsyms.h>
#include <linux/fault-inject.h>
/*
@@ -140,16 +139,6 @@ static int debugfs_ul_set(void *data, u64 val)
return 0;
}
-#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
-static int debugfs_ul_set_MAX_STACK_TRACE_DEPTH(void *data, u64 val)
-{
- *(unsigned long *)data =
- val < MAX_STACK_TRACE_DEPTH ?
- val : MAX_STACK_TRACE_DEPTH;
- return 0;
-}
-#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
-
static int debugfs_ul_get(void *data, u64 *val)
{
*val = *(unsigned long *)data;
@@ -165,16 +154,26 @@ static struct dentry *debugfs_create_ul(const char *name, mode_t mode,
}
#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
-DEFINE_SIMPLE_ATTRIBUTE(fops_ul_MAX_STACK_TRACE_DEPTH, debugfs_ul_get,
- debugfs_ul_set_MAX_STACK_TRACE_DEPTH, "%llu\n");
-static struct dentry *debugfs_create_ul_MAX_STACK_TRACE_DEPTH(
+static int debugfs_stacktrace_depth_set(void *data, u64 val)
+{
+ *(unsigned long *)data =
+ min_t(unsigned long, val, MAX_STACK_TRACE_DEPTH);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_stacktrace_depth, debugfs_ul_get,
+ debugfs_stacktrace_depth_set, "%llu\n");
+
+static struct dentry *debugfs_create_stacktrace_depth(
const char *name, mode_t mode,
struct dentry *parent, unsigned long *value)
{
return debugfs_create_file(name, mode, parent, value,
- &fops_ul_MAX_STACK_TRACE_DEPTH);
+ &fops_stacktrace_depth);
}
+
#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
static int debugfs_atomic_t_set(void *data, u64 val)
@@ -198,118 +197,51 @@ static struct dentry *debugfs_create_atomic_t(const char *name, mode_t mode,
return debugfs_create_file(name, mode, parent, value, &fops_atomic_t);
}
-void cleanup_fault_attr_dentries(struct fault_attr *attr)
-{
- debugfs_remove(attr->dentries.probability_file);
- attr->dentries.probability_file = NULL;
-
- debugfs_remove(attr->dentries.interval_file);
- attr->dentries.interval_file = NULL;
-
- debugfs_remove(attr->dentries.times_file);
- attr->dentries.times_file = NULL;
-
- debugfs_remove(attr->dentries.space_file);
- attr->dentries.space_file = NULL;
-
- debugfs_remove(attr->dentries.verbose_file);
- attr->dentries.verbose_file = NULL;
-
- debugfs_remove(attr->dentries.task_filter_file);
- attr->dentries.task_filter_file = NULL;
-
-#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
-
- debugfs_remove(attr->dentries.stacktrace_depth_file);
- attr->dentries.stacktrace_depth_file = NULL;
-
- debugfs_remove(attr->dentries.require_start_file);
- attr->dentries.require_start_file = NULL;
-
- debugfs_remove(attr->dentries.require_end_file);
- attr->dentries.require_end_file = NULL;
-
- debugfs_remove(attr->dentries.reject_start_file);
- attr->dentries.reject_start_file = NULL;
-
- debugfs_remove(attr->dentries.reject_end_file);
- attr->dentries.reject_end_file = NULL;
-
-#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
-
- if (attr->dentries.dir)
- WARN_ON(!simple_empty(attr->dentries.dir));
-
- debugfs_remove(attr->dentries.dir);
- attr->dentries.dir = NULL;
-}
-
-int init_fault_attr_dentries(struct fault_attr *attr, const char *name)
+struct dentry *fault_create_debugfs_attr(const char *name,
+ struct dentry *parent, struct fault_attr *attr)
{
mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
struct dentry *dir;
- memset(&attr->dentries, 0, sizeof(attr->dentries));
-
- dir = debugfs_create_dir(name, NULL);
+ dir = debugfs_create_dir(name, parent);
if (!dir)
- goto fail;
- attr->dentries.dir = dir;
-
- attr->dentries.probability_file =
- debugfs_create_ul("probability", mode, dir, &attr->probability);
+ return ERR_PTR(-ENOMEM);
- attr->dentries.interval_file =
- debugfs_create_ul("interval", mode, dir, &attr->interval);
-
- attr->dentries.times_file =
- debugfs_create_atomic_t("times", mode, dir, &attr->times);
-
- attr->dentries.space_file =
- debugfs_create_atomic_t("space", mode, dir, &attr->space);
-
- attr->dentries.verbose_file =
- debugfs_create_ul("verbose", mode, dir, &attr->verbose);
-
- attr->dentries.task_filter_file = debugfs_create_bool("task-filter",
- mode, dir, &attr->task_filter);
-
- if (!attr->dentries.probability_file || !attr->dentries.interval_file ||
- !attr->dentries.times_file || !attr->dentries.space_file ||
- !attr->dentries.verbose_file || !attr->dentries.task_filter_file)
+ if (!debugfs_create_ul("probability", mode, dir, &attr->probability))
+ goto fail;
+ if (!debugfs_create_ul("interval", mode, dir, &attr->interval))
+ goto fail;
+ if (!debugfs_create_atomic_t("times", mode, dir, &attr->times))
+ goto fail;
+ if (!debugfs_create_atomic_t("space", mode, dir, &attr->space))
+ goto fail;
+ if (!debugfs_create_ul("verbose", mode, dir, &attr->verbose))
+ goto fail;
+ if (!debugfs_create_bool("task-filter", mode, dir, &attr->task_filter))
goto fail;
#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
- attr->dentries.stacktrace_depth_file =
- debugfs_create_ul_MAX_STACK_TRACE_DEPTH(
- "stacktrace-depth", mode, dir, &attr->stacktrace_depth);
-
- attr->dentries.require_start_file =
- debugfs_create_ul("require-start", mode, dir, &attr->require_start);
-
- attr->dentries.require_end_file =
- debugfs_create_ul("require-end", mode, dir, &attr->require_end);
-
- attr->dentries.reject_start_file =
- debugfs_create_ul("reject-start", mode, dir, &attr->reject_start);
-
- attr->dentries.reject_end_file =
- debugfs_create_ul("reject-end", mode, dir, &attr->reject_end);
-
- if (!attr->dentries.stacktrace_depth_file ||
- !attr->dentries.require_start_file ||
- !attr->dentries.require_end_file ||
- !attr->dentries.reject_start_file ||
- !attr->dentries.reject_end_file)
+ if (!debugfs_create_stacktrace_depth("stacktrace-depth", mode, dir,
+ &attr->stacktrace_depth))
+ goto fail;
+ if (!debugfs_create_ul("require-start", mode, dir,
+ &attr->require_start))
+ goto fail;
+ if (!debugfs_create_ul("require-end", mode, dir, &attr->require_end))
+ goto fail;
+ if (!debugfs_create_ul("reject-start", mode, dir, &attr->reject_start))
+ goto fail;
+ if (!debugfs_create_ul("reject-end", mode, dir, &attr->reject_end))
goto fail;
#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
- return 0;
+ return dir;
fail:
- cleanup_fault_attr_dentries(attr);
- return -ENOMEM;
+ debugfs_remove_recursive(dir);
+
+ return ERR_PTR(-ENOMEM);
}
#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
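
After this conversion a fault-injection user keeps only the directory dentry returned by fault_create_debugfs_attr() and tears everything down with a single debugfs_remove_recursive() call. A caller sketch, assuming the DECLARE_FAULT_ATTR() initializer from <linux/fault-inject.h>; the "fail_example" name is hypothetical:

#include <linux/fault-inject.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/err.h>

static DECLARE_FAULT_ATTR(fail_example);
static struct dentry *fail_example_dir;

static int __init fail_example_debugfs_init(void)
{
	fail_example_dir = fault_create_debugfs_attr("fail_example", NULL,
						     &fail_example);

	return IS_ERR(fail_example_dir) ? PTR_ERR(fail_example_dir) : 0;
}

static void __exit fail_example_debugfs_exit(void)
{
	/* One recursive remove replaces the old per-file cleanup. */
	debugfs_remove_recursive(fail_example_dir);
}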
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 577ddf80597..f352cc42f4f 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -1,8 +1,26 @@
/*
- * Basic general purpose allocator for managing special purpose memory
- * not managed by the regular kmalloc/kfree interface.
- * Uses for this includes on-device special memory, uncached memory
- * etc.
+ * Basic general purpose allocator for managing special purpose
+ * memory, for example, memory that is not managed by the regular
+ * kmalloc/kfree interface. Uses for this includes on-device special
+ * memory, uncached memory etc.
+ *
+ * It is safe to use the allocator in NMI handlers and other special
+ * unblockable contexts that could otherwise deadlock on locks. This
+ * is implemented by using atomic operations and retries on any
+ * conflicts. The disadvantage is that there may be livelocks in
+ * extreme cases. For better scalability, one allocator can be used
+ * for each CPU.
+ *
+ * The lockless operation only works if there is enough memory
+ * available. If new memory is added to the pool a lock has to be
+ * still taken. So any user relying on locklessness has to ensure
+ * that sufficient memory is preallocated.
+ *
+ * The basic atomic operation of this allocator is cmpxchg on long.
+ * On architectures that don't have NMI-safe cmpxchg implementation,
+ * the allocator can NOT be used in NMI handler. So code uses the
+ * allocator in NMI handler should depend on
+ * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
*
* Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
*
@@ -13,8 +31,109 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bitmap.h>
+#include <linux/rculist.h>
+#include <linux/interrupt.h>
#include <linux/genalloc.h>
+static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
+{
+ unsigned long val, nval;
+
+ nval = *addr;
+ do {
+ val = nval;
+ if (val & mask_to_set)
+ return -EBUSY;
+ cpu_relax();
+ } while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);
+
+ return 0;
+}
+
+static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
+{
+ unsigned long val, nval;
+
+ nval = *addr;
+ do {
+ val = nval;
+ if ((val & mask_to_clear) != mask_to_clear)
+ return -EBUSY;
+ cpu_relax();
+ } while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);
+
+ return 0;
+}
+
+/*
+ * bitmap_set_ll - set the specified number of bits at the specified position
+ * @map: pointer to a bitmap
+ * @start: a bit position in @map
+ * @nr: number of bits to set
+ *
+ * Set @nr bits start from @start in @map lock-lessly. Several users
+ * can set/clear the same bitmap simultaneously without lock. If two
+ * users set the same bit, one user will return remain bits, otherwise
+ * return 0.
+ */
+static int bitmap_set_ll(unsigned long *map, int start, int nr)
+{
+ unsigned long *p = map + BIT_WORD(start);
+ const int size = start + nr;
+ int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
+ unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
+
+ while (nr - bits_to_set >= 0) {
+ if (set_bits_ll(p, mask_to_set))
+ return nr;
+ nr -= bits_to_set;
+ bits_to_set = BITS_PER_LONG;
+ mask_to_set = ~0UL;
+ p++;
+ }
+ if (nr) {
+ mask_to_set &= BITMAP_LAST_WORD_MASK(size);
+ if (set_bits_ll(p, mask_to_set))
+ return nr;
+ }
+
+ return 0;
+}
+
+/*
+ * bitmap_clear_ll - clear the specified number of bits at the specified position
+ * @map: pointer to a bitmap
+ * @start: a bit position in @map
+ * @nr: number of bits to set
+ *
+ * Clear @nr bits start from @start in @map lock-lessly. Several users
+ * can set/clear the same bitmap simultaneously without lock. If two
+ * users clear the same bit, one user will return remain bits,
+ * otherwise return 0.
+ */
+static int bitmap_clear_ll(unsigned long *map, int start, int nr)
+{
+ unsigned long *p = map + BIT_WORD(start);
+ const int size = start + nr;
+ int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
+ unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
+
+ while (nr - bits_to_clear >= 0) {
+ if (clear_bits_ll(p, mask_to_clear))
+ return nr;
+ nr -= bits_to_clear;
+ bits_to_clear = BITS_PER_LONG;
+ mask_to_clear = ~0UL;
+ p++;
+ }
+ if (nr) {
+ mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
+ if (clear_bits_ll(p, mask_to_clear))
+ return nr;
+ }
+
+ return 0;
+}
/**
* gen_pool_create - create a new special memory pool
@@ -30,7 +149,7 @@ struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
if (pool != NULL) {
- rwlock_init(&pool->lock);
+ spin_lock_init(&pool->lock);
INIT_LIST_HEAD(&pool->chunks);
pool->min_alloc_order = min_alloc_order;
}
@@ -63,14 +182,14 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
if (unlikely(chunk == NULL))
return -ENOMEM;
- spin_lock_init(&chunk->lock);
chunk->phys_addr = phys;
chunk->start_addr = virt;
chunk->end_addr = virt + size;
+ atomic_set(&chunk->avail, size);
- write_lock(&pool->lock);
- list_add(&chunk->next_chunk, &pool->chunks);
- write_unlock(&pool->lock);
+ spin_lock(&pool->lock);
+ list_add_rcu(&chunk->next_chunk, &pool->chunks);
+ spin_unlock(&pool->lock);
return 0;
}
@@ -85,19 +204,19 @@ EXPORT_SYMBOL(gen_pool_add_virt);
*/
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
- struct list_head *_chunk;
struct gen_pool_chunk *chunk;
+ phys_addr_t paddr = -1;
- read_lock(&pool->lock);
- list_for_each(_chunk, &pool->chunks) {
- chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
-
- if (addr >= chunk->start_addr && addr < chunk->end_addr)
- return chunk->phys_addr + addr - chunk->start_addr;
+ rcu_read_lock();
+ list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
+ if (addr >= chunk->start_addr && addr < chunk->end_addr) {