Diffstat (limited to 'lib')
 lib/Kconfig              |  11
 lib/Kconfig.debug        |  29
 lib/Makefile             |  10
 lib/crc16.c              |  67
 lib/crc32.c              |   2
 lib/dec_and_lock.c       |   3
 lib/idr.c                |   2
 lib/inflate.c            |  16
 lib/kernel_lock.c        |   3
 lib/klist.c              |  26
 lib/kobject_uevent.c     |   4
 lib/radix-tree.c         | 180
 lib/semaphore-sleepers.c | 177
 lib/sort.c               |   5
 lib/spinlock_debug.c     | 257
 lib/ts_bm.c              | 185
 lib/vsprintf.c           |   5
 17 files changed, 864 insertions(+), 118 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index eeb429a5215..3de93357f5a 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -12,6 +12,14 @@ config CRC_CCITT
 	  the kernel tree does. Such modules that use library CRC-CCITT
 	  functions require M here.
 
+config CRC16
+	tristate "CRC16 functions"
+	help
+	  This option is provided for the case where no in-kernel-tree
+	  modules require CRC16 functions, but a module built outside
+	  the kernel tree does. Such modules that use library CRC16
+	  functions require M here.
+
 config CRC32
 	tristate "CRC32 functions"
 	default y
@@ -72,6 +80,9 @@ config TEXTSEARCH
 config TEXTSEARCH_KMP
 	tristate
 
+config TEXTSEARCH_BM
+	tristate
+
 config TEXTSEARCH_FSM
 	tristate
 
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0c421295e61..016e89a44ac 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -46,6 +46,25 @@ config LOG_BUF_SHIFT
 		     13 => 8 KB
 		     12 => 4 KB
 
+config DETECT_SOFTLOCKUP
+	bool "Detect Soft Lockups"
+	depends on DEBUG_KERNEL
+	default y
+	help
+	  Say Y here to enable the kernel to detect "soft lockups",
+	  which are bugs that cause the kernel to loop in kernel
+	  mode for more than 10 seconds, without giving other tasks a
+	  chance to run.
+
+	  When a soft-lockup is detected, the kernel will print the
+	  current stack trace (which you should report), but the
+	  system will stay locked up. This feature has negligible
+	  overhead.
+
+	  (Note that "hard lockups" are a separate type of bug that
+	  can be detected via the NMI-watchdog, on platforms that
+	  support it.)
+
 config SCHEDSTATS
 	bool "Collect scheduler statistics"
 	depends on DEBUG_KERNEL && PROC_FS
@@ -141,7 +160,7 @@ config DEBUG_IOREMAP
 
 config DEBUG_FS
 	bool "Debug Filesystem"
-	depends on DEBUG_KERNEL
+	depends on DEBUG_KERNEL && SYSFS
 	help
 	  debugfs is a virtual file system that kernel developers use to put
 	  debugging files into. Enable this option to be able to read and
@@ -151,11 +170,11 @@ config DEBUG_FS
 
 config FRAME_POINTER
 	bool "Compile the kernel with frame pointers"
-	depends on DEBUG_KERNEL && ((X86 && !X86_64) || CRIS || M68K || M68KNOMMU || FRV || UML)
+	depends on DEBUG_KERNEL && (X86 || CRIS || M68K || M68KNOMMU || FRV || UML)
 	default y if DEBUG_INFO && UML
 	help
 	  If you say Y here the resulting kernel image will be slightly larger
-	  and slower, but it will give very useful debugging information.
-	  If you don't debug the kernel, you can say N, but we may not be able
-	  to solve problems without frame pointers.
+	  and slower, but it might give very useful debugging information
+	  on some architectures or if you use external debuggers.
+	  If you don't debug the kernel, you can say N.
diff --git a/lib/Makefile b/lib/Makefile
index beed1585294..44a46750690 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -5,28 +5,31 @@
 lib-y := errno.o ctype.o string.o vsprintf.o cmdline.o \
 	 bust_spinlocks.o rbtree.o radix-tree.o dump_stack.o \
 	 idr.o div64.o int_sqrt.o bitmap.o extable.o prio_tree.o \
-	 sha1.o halfmd4.o
+	 sha1.o
 
 lib-y += kobject.o kref.o kobject_uevent.o klist.o
 
-obj-y += sort.o parser.o
+obj-y += sort.o parser.o halfmd4.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
 CFLAGS_kobject_uevent.o += -DDEBUG
 endif
 
+obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
 lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
+lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o
 lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
 obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
 
-ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
+ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
 lib-y += dec_and_lock.o
 endif
 
 obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
+obj-$(CONFIG_CRC16) += crc16.o
 obj-$(CONFIG_CRC32) += crc32.o
 obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
 obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
@@ -38,6 +41,7 @@
 obj-$(CONFIG_REED_SOLOMON) += reed_solomon/
 obj-$(CONFIG_TEXTSEARCH) += textsearch.o
 obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
+obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
 obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
 
 hostprogs-y := gen_crc32table
diff --git a/lib/crc16.c b/lib/crc16.c
new file mode 100644
index 00000000000..011fe573c66
--- /dev/null
+++ b/lib/crc16.c
@@ -0,0 +1,67 @@
+/*
+ *      crc16.c
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/crc16.h>
+
+/** CRC table for the CRC-16. The poly is 0x8005 (x^16 + x^15 + x^2 + 1) */
+u16 const crc16_table[256] = {
+	0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241,
+	0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
+	0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40,
+	0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841,
+	0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40,
+	0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41,
+	0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641,
+	0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040,
+	0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240,
+	0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441,
+	0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41,
+	0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840,
+	0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41,
+	0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40,
+	0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640,
+	0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041,
+	0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,
+	0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441,
+	0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41,
+	0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840,
+	0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41,
+	0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40,
+	0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640,
+	0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041,
+	0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241,
+	0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440,
+	0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40,
+	0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841,
+	0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40,
+	0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41,
+	0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641,
+	0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040
+};
+EXPORT_SYMBOL(crc16_table);
+
+/**
+ * Compute the CRC-16 for the data buffer
+ *
+ * @param crc     previous CRC value
+ * @param buffer  data pointer
+ * @param len     number of bytes in the buffer
+ * @return        the updated CRC value
+ */
+u16 crc16(u16 crc, u8 const *buffer, size_t len)
+{
+	while (len--)
+		crc = crc16_byte(crc, *buffer++);
+	return crc;
+}
+EXPORT_SYMBOL(crc16);
+
+MODULE_DESCRIPTION("CRC16 calculations");
+MODULE_LICENSE("GPL");
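
For reference, a minimal sketch of how a module might drive the new crc16() helper. The buffer contents, the module name and the 0 starting value are illustrative; callers seed with whatever initial CRC their on-wire format specifies (crc16_byte() is the per-byte helper from <linux/crc16.h> that the loop above relies on):

	#include <linux/module.h>
	#include <linux/kernel.h>
	#include <linux/crc16.h>

	static int __init crc16_demo_init(void)
	{
		/* Illustrative payload; real users feed packet or sector data. */
		static const u8 data[] = { 0xde, 0xad, 0xbe, 0xef };
		u16 crc;

		/* Seed with 0 here; some protocols start from 0xffff instead. */
		crc = crc16(0, data, sizeof(data));
		printk(KERN_INFO "crc16 of sample buffer: 0x%04x\n", crc);
		return 0;
	}
	module_init(crc16_demo_init);
	MODULE_LICENSE("GPL");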
diff --git a/lib/crc32.c b/lib/crc32.c
index 58b222783f9..065198f98b3 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -473,7 +473,7 @@ static u32 test_step(u32 init, unsigned char *buf, size_t len)
 	init = bitreverse(init);
 	crc2 = bitreverse(crc1);
 	if (crc1 != bitreverse(crc2))
-		printf("\nBit reversal fail: 0x%08x -> %0x08x -> 0x%08x\n",
+		printf("\nBit reversal fail: 0x%08x -> 0x%08x -> 0x%08x\n",
 		       crc1, crc2, bitreverse(crc2));
 	crc1 = crc32_le(init, buf, len);
 	if (crc1 != crc2)
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index 6658d81e183..2377af057d0 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -25,8 +25,6 @@
  * this is trivially done efficiently using a load-locked
  * store-conditional approach, for example.
  */
-
-#ifndef ATOMIC_DEC_AND_LOCK
 int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
 	spin_lock(lock);
@@ -37,4 +35,3 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 }
 
 EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif
diff --git a/lib/idr.c b/lib/idr.c
index c5be889de44..6415d053e2b 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -207,7 +207,7 @@ build_up:
 }
 
 /**
- * idr_get_new_above - allocate new idr entry above a start id
+ * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @start_id: id to start search at
diff --git a/lib/inflate.c b/lib/inflate.c
index 75e7d303c72..6db6e98d163 100644
--- a/lib/inflate.c
+++ b/lib/inflate.c
@@ -326,7 +326,7 @@ DEBG("huft1 ");
   {
     *t = (struct huft *)NULL;
    *m = 0;
-    return 0;
+    return 2;
   }
 
 DEBG("huft2 ");
@@ -374,6 +374,7 @@ DEBG("huft5 ");
     if ((j = *p++) != 0)
       v[x[j]++] = i;
   } while (++i < n);
+  n = x[g];                   /* set n to length of v */
 
 DEBG("h6 ");
@@ -410,12 +411,13 @@ DEBG1("1 ");
 DEBG1("2 ");
       f -= a + 1;           /* deduct codes from patterns left */
       xp = c + k;
-      while (++j < z)       /* try smaller tables up to z bits */
-      {
-        if ((f <<= 1) <= *++xp)
-          break;            /* enough codes to use up j bits */
-        f -= *xp;           /* else deduct codes from patterns */
-      }
+      if (j < z)
+        while (++j < z)     /* try smaller tables up to z bits */
+        {
+          if ((f <<= 1) <= *++xp)
+            break;          /* enough codes to use up j bits */
+          f -= *xp;         /* else deduct codes from patterns */
+        }
     }
 DEBG1("3 ");
     z = 1 << j;             /* table entries for j-bit table */
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index bd2bc5d887b..cb5490ec00f 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -177,8 +177,7 @@ static inline void __lock_kernel(void)
 
 static inline void __unlock_kernel(void)
 {
-	_raw_spin_unlock(&kernel_flag);
-	preempt_enable();
+	spin_unlock(&kernel_flag);
 }
 
 /*
diff --git a/lib/klist.c b/lib/klist.c
index 738ab810160..bb2f3551d50 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -42,12 +42,23 @@
 /**
  *	klist_init - Initialize a klist structure.
  *	@k:	The klist we're initializing.
+ *	@get:	The get function for the embedding object (NULL if none)
+ *	@put:	The put function for the embedding object (NULL if none)
+ *
+ * Initialises the klist structure. If the klist_node structures are
+ * going to be embedded in refcounted objects (necessary for safe
+ * deletion) then the get/put arguments are used to initialise
+ * functions that take and release references on the embedding
+ * objects.
 */
-void klist_init(struct klist * k)
+void klist_init(struct klist * k, void (*get)(struct klist_node *),
+		void (*put)(struct klist_node *))
 {
 	INIT_LIST_HEAD(&k->k_list);
 	spin_lock_init(&k->k_lock);
+	k->get = get;
+	k->put = put;
 }
 
 EXPORT_SYMBOL_GPL(klist_init);
@@ -74,16 +85,18 @@ static void klist_node_init(struct klist * k, struct klist_node * n)
 	init_completion(&n->n_removed);
 	kref_init(&n->n_ref);
 	n->n_klist = k;
+	if (k->get)
+		k->get(n);
 }
 
 /**
  *	klist_add_head - Initialize a klist_node and add it to front.
- *	@k:	klist it's going on.
 *	@n:	node we're adding.
+ *	@k:	klist it's going on.
 */
-void klist_add_head(struct klist * k, struct klist_node * n)
+void klist_add_head(struct klist_node * n, struct klist * k)
 {
 	klist_node_init(k, n);
 	add_head(k, n);
 }
 
 EXPORT_SYMBOL_GPL(klist_add_head);
@@ -94,11 +107,11 @@ EXPORT_SYMBOL_GPL(klist_add_head);
 /**
  *	klist_add_tail - Initialize a klist_node and add it to back.
- *	@k:	klist it's going on.
 *	@n:	node we're adding.
+ *	@k:	klist it's going on.
 */
-void klist_add_tail(struct klist * k, struct klist_node * n)
+void klist_add_tail(struct klist_node * n, struct klist * k)
 {
 	klist_node_init(k, n);
 	add_tail(k, n);
 }
@@ -110,9 +123,12 @@ EXPORT_SYMBOL_GPL(klist_add_tail);
 static void klist_release(struct kref * kref)
 {
 	struct klist_node * n = container_of(kref, struct klist_node, n_ref);
+	void (*put)(struct klist_node *) = n->n_klist->put;
 
 	list_del(&n->n_node);
 	complete(&n->n_removed);
 	n->n_klist = NULL;
+	if (put)
+		put(n);
 }
 
 static int klist_dec_and_del(struct klist_node * n)
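
A sketch of the new klist calling convention, using a hypothetical refcounted object that embeds a klist_node — struct mydev and its callbacks are invented for illustration; the points being shown are the get/put hooks and the new node-first argument order of klist_add_tail():

	#include <linux/klist.h>
	#include <linux/kref.h>
	#include <linux/slab.h>

	/* Hypothetical refcounted object embedding a klist_node. */
	struct mydev {
		struct kref kref;
		struct klist_node knode;
	};

	static void mydev_release(struct kref *kref)
	{
		kfree(container_of(kref, struct mydev, kref));
	}

	/* get/put keep the embedding object alive while it is on the list. */
	static void mydev_get(struct klist_node *n)
	{
		kref_get(&container_of(n, struct mydev, knode)->kref);
	}

	static void mydev_put(struct klist_node *n)
	{
		kref_put(&container_of(n, struct mydev, knode)->kref,
			 mydev_release);
	}

	static struct klist mydev_list;

	static void mydev_list_setup(struct mydev *dev)
	{
		klist_init(&mydev_list, mydev_get, mydev_put);

		/* Note the new argument order: node first, then the list. */
		klist_add_tail(&dev->knode, &mydev_list);
	}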
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 8e49d21057e..04ca4429ddf 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -93,6 +93,7 @@ static int send_uevent(const char *signal, const char *obj,
 		}
 	}
 
+	NETLINK_CB(skb).dst_group = 1;
 	return netlink_broadcast(uevent_sock, skb, 0, 1, gfp_mask);
 }
 
@@ -153,7 +154,8 @@ EXPORT_SYMBOL_GPL(kobject_uevent_atomic);
 
 static int __init kobject_uevent_init(void)
 {
-	uevent_sock = netlink_kernel_create(NETLINK_KOBJECT_UEVENT, NULL);
+	uevent_sock = netlink_kernel_create(NETLINK_KOBJECT_UEVENT, 1, NULL,
+					    THIS_MODULE);
 
 	if (!uevent_sock) {
 		printk(KERN_ERR
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 04d664377f2..6a8bc6e0643 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1,6 +1,7 @@
 /*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
+ * Copyright (C) 2005 SGI, Christoph Lameter <clameter@sgi.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
@@ -51,14 +52,14 @@ struct radix_tree_node {
 };
 
 struct radix_tree_path {
-	struct radix_tree_node *node, **slot;
+	struct radix_tree_node *node;
 	int offset;
 };
 
 #define RADIX_TREE_INDEX_BITS  (8 /* CHAR_BIT */ * sizeof(unsigned long))
 #define RADIX_TREE_MAX_PATH (RADIX_TREE_INDEX_BITS/RADIX_TREE_MAP_SHIFT + 2)
 
-static unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH];
+static unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH] __read_mostly;
 
 /*
 * Radix tree node cache.
@@ -109,7 +110,7 @@ radix_tree_node_free(struct radix_tree_node *node)
 * success, return zero, with preemption disabled.  On error, return -ENOMEM
 * with preemption not disabled.
 */
-int radix_tree_preload(int gfp_mask)
+int radix_tree_preload(unsigned int __nocast gfp_mask)
 {
 	struct radix_tree_preload *rtp;
 	struct radix_tree_node *node;
@@ -227,7 +228,7 @@ out:
 int radix_tree_insert(struct radix_tree_root *root,
 			unsigned long index, void *item)
 {
-	struct radix_tree_node *node = NULL, *tmp, **slot;
+	struct radix_tree_node *node = NULL, *slot;
 	unsigned int height, shift;
 	int offset;
 	int error;
@@ -240,38 +241,42 @@ int radix_tree_insert(struct radix_tree_root *root,
 			return error;
 	}
 
-	slot = &root->rnode;
+	slot = root->rnode;
 	height = root->height;
 	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
 
 	offset = 0;			/* uninitialised var warning */
 	while (height > 0) {
-		if (*slot == NULL) {
+		if (slot == NULL) {
 			/* Have to add a child node.  */
-			if (!(tmp = radix_tree_node_alloc(root)))
+			if (!(slot = radix_tree_node_alloc(root)))
 				return -ENOMEM;
-			*slot = tmp;
-			if (node)
+			if (node) {
+				node->slots[offset] = slot;
 				node->count++;
+			} else
+				root->rnode = slot;
 		}
 
 		/* Go a level down */
 		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
-		node = *slot;
-		slot = (struct radix_tree_node **)(node->slots + offset);
+		node = slot;
+		slot = node->slots[offset];
 		shift -= RADIX_TREE_MAP_SHIFT;
 		height--;
 	}
 
-	if (*slot != NULL)
+	if (slot != NULL)
 		return -EEXIST;
+
 	if (node) {
 		node->count++;
+		node->slots[offset] = item;
 		BUG_ON(tag_get(node, 0, offset));
 		BUG_ON(tag_get(node, 1, offset));
-	}
+	} else
+		root->rnode = item;
 
-	*slot = item;
 	return 0;
 }
 EXPORT_SYMBOL(radix_tree_insert);
@@ -286,27 +291,25 @@ EXPORT_SYMBOL(radix_tree_insert);
 void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
 {
 	unsigned int height, shift;
-	struct radix_tree_node **slot;
+	struct radix_tree_node *slot;
 
 	height = root->height;
 	if (index > radix_tree_maxindex(height))
 		return NULL;
 
 	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
-	slot = &root->rnode;
+	slot = root->rnode;
 
 	while (height > 0) {
-		if (*slot == NULL)
+		if (slot == NULL)
 			return NULL;
 
-		slot = (struct radix_tree_node **)
-			((*slot)->slots +
-				((index >> shift) & RADIX_TREE_MAP_MASK));
+		slot = slot->slots[(index >> shift) & RADIX_TREE_MAP_MASK];
 		shift -= RADIX_TREE_MAP_SHIFT;
 		height--;
 	}
 
-	return *slot;
+	return slot;
 }
 EXPORT_SYMBOL(radix_tree_lookup);
@@ -326,27 +329,27 @@ void *radix_tree_tag_set(struct radix_tree_root *root,
 			unsigned long index, int tag)
 {
 	unsigned int height, shift;
-	struct radix_tree_node **slot;
+	struct radix_tree_node *slot;
 
 	height = root->height;
 	if (index > radix_tree_maxindex(height))
 		return NULL;
 
 	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
-	slot = &root->rnode;
+	slot = root->rnode;
 
 	while (height > 0) {
 		int offset;
 
 		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
-		tag_set(*slot, tag, offset);
-		slot = (struct radix_tree_node **)((*slot)->slots + offset);
-		BUG_ON(*slot == NULL);
+		tag_set(slot, tag, offset);
+		slot = slot->slots[offset];
+		BUG_ON(slot == NULL);
 		shift -= RADIX_TREE_MAP_SHIFT;
 		height--;
 	}
 
-	return *slot;
+	return slot;
 }
 EXPORT_SYMBOL(radix_tree_tag_set);
@@ -367,6 +370,7 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
 			unsigned long index, int tag)
 {
 	struct radix_tree_path path[RADIX_TREE_MAX_PATH], *pathp = path;
+	struct radix_tree_node *slot;
 	unsigned int height, shift;
 	void *ret = NULL;
 
@@ -376,38 +380,37 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
 
 	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
 	pathp->node = NULL;
-	pathp->slot = &root->rnode;
+	slot = root->rnode;
 
 	while (height > 0) {
 		int offset;
 
-		if (*pathp->slot == NULL)
+		if (slot == NULL)
 			goto out;
 
 		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
 		pathp[1].offset = offset;
-		pathp[1].node = *pathp[0].slot;
-		pathp[1].slot = (struct radix_tree_node **)
-				(pathp[1].node->slots + offset);
+		pathp[1].node = slot;
+		slot = slot->slots[offset];
 		pathp++;
 		shift -= RADIX_TREE_MAP_SHIFT;
 		height--;
 	}
 
-	ret = *pathp[0].slot;
+	ret = slot;
 	if (ret == NULL)
 		goto out;
 
 	do {
 		int idx;
 
-		tag_clear(pathp[0].node, tag, pathp[0].offset);
+		tag_clear(pathp->node, tag, pathp->offset);
 		for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
-			if (pathp[0].node->tags[tag][idx])
+			if (pathp->node->tags[tag][idx])
 				goto out;
 		}
 		pathp--;
-	} while (pathp[0].node);
+	} while (pathp->node);
 out:
 	return ret;
 }
 EXPORT_SYMBOL(radix_tree_tag_clear);
@@ -415,21 +418,22 @@ EXPORT_SYMBOL(radix_tree_tag_clear);
 #ifndef __KERNEL__	/* Only the test harness uses this at present */
 /**
 * radix_tree_tag_get - get a tag on a radix tree node
 * @root:		radix tree root
 * @index:		index key
 * @tag:		tag index
 *
- * Return the search tag corresponding to @index in the radix tree.
+ * Return values:
 *
- * Returns zero if the tag is unset, or if there is no corresponding item
- * in the tree.
+ *  0: tag not present
+ *  1: tag present, set
+ * -1: tag present, unset
 */
 int radix_tree_tag_get(struct radix_tree_root *root,
 			unsigned long index, int tag)
 {
 	unsigned int height, shift;
-	struct radix_tree_node **slot;
+	struct radix_tree_node *slot;
 	int saw_unset_tag = 0;
 
 	height = root->height;
@@ -437,12 +441,12 @@ int radix_tree_tag_get(struct radix_tree_root *root,
 		return 0;
 
 	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
-	slot = &root->rnode;
+	slot = root->rnode;
 
 	for ( ; ; ) {
 		int offset;
 
-		if (*slot == NULL)
+		if (slot == NULL)
 			return 0;
 
 		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
 
 		/*
 		 * This is just a debug check.  Later, we can bale as soon as
 		 * we see an unset tag.
 		 */
-		if (!tag_get(*slot, tag, offset))
+		if (!tag_get(slot, tag, offset))
 			saw_unset_tag = 1;
 		if (height == 1) {
-			int ret = tag_get(*slot, tag, offset);
+			int ret = tag_get(slot, tag, offset);
 
 			BUG_ON(ret && saw_unset_tag);
-			return ret;
+			return ret ? 1 : -1;
 		}
-		slot = (struct radix_tree_node **)((*slot)->slots + offset);
+		slot = slot->slots[offset];
 		shift -= RADIX_TREE_MAP_SHIFT;
 		height--;
 	}
@@ -472,17 +476,21 @@
 __lookup(struct radix_tree_root *root, void **results, unsigned long index,
 	unsigned int max_items, unsigned long *next_index)
 {
 	unsigned int nr_found = 0;
-	unsigned int shift;
-	unsigned int height = root->height;
+	unsigned int shift, height;
 	struct radix_tree_node *slot;
+	unsigned long i;
+
+	height = root->height;
+	if (height == 0)
+		goto out;
 
 	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
 	slot = root->rnode;
 
-	while (height > 0) {
-		unsigned long i = (index >> shift) & RADIX_TREE_MAP_MASK;
+	for ( ; height > 1; height--) {
 
-		for ( ; i < RADIX_TREE_MAP_SIZE; i++) {
+		for (i = (index >> shift) & RADIX_TREE_MAP_MASK ;
+		     i < RADIX_TREE_MAP_SIZE; i++) {
 			if (slot->slots[i] != NULL)
 				break;
 			index &= ~((1UL << shift) - 1);
@@ -492,22 +500,20 @@
 		}
 		if (i == RADIX_TREE_MAP_SIZE)
 			goto out;
-		height--;
-		if (height == 0) {	/* Bottom level: grab some items */
-			unsigned long j = index & RADIX_TREE_MAP_MASK;
 
-			for ( ; j < RADIX_TREE_MAP_SIZE; j++) {
-				index++;
-				if (slot->slots[j]) {
-					results[nr_found++] = slot->slots[j];
-					if (nr_found == max_items)
-						goto out;
-				}
-			}
-		}
 		shift -= RADIX_TREE_MAP_SHIFT;
 		slot = slot->slots[i];
 	}
+
+	/* Bottom level: grab some items */
+	for (i = index & RADIX_TREE_MAP_MASK; i < RADIX_TREE_MAP_SIZE; i++) {
+		index++;
+		if (slot->slots[i]) {
+			results[nr_found++] = slot->slots[i];
+			if (nr_found == max_items)
+				goto out;
+		}
+	}
 out:
 	*next_index = index;
 	return nr_found;
@@ -655,6 +661,7 @@
 void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 {
 	struct radix_tree_path path[RADIX_TREE_MAX_PATH], *pathp = path;
 	struct radix_tree_path *orig_pathp;
+	struct radix_tree_node *slot;
 	unsigned int height, shift;
 	void *ret = NULL;
 	char tags[RADIX_TREE_TAGS];
@@ -666,25 +673,23 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
 	pathp->node = NULL;
-	pathp->slot = &root->rnode;
+	slot = root->rnode;
 
-	while (height > 0) {
+	for ( ; height > 0; height--) {
 		int offset;
 
-		if (*pathp->slot == NULL)
+		if (slot == NULL)
 			goto out;
 
 		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
 		pathp[1].offset = offset;
-		pathp[1].node = *pathp[0].slot;
-		pathp[1].slot = (struct radix_tree_node **)
-			(pathp[1].node->slots + offset);
+		pathp[1].node = slot;
+		slot = slot->slots[offset];
 		pathp++;
 		shift -= RADIX_TREE_MAP_SHIFT;
-		height--;
 	}
 
-	ret = *pathp[0].slot;
+	ret = slot;
 	if (ret == NULL)
 		goto out;
 
@@ -704,10 +709,10 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 		if (tags[tag])
 			continue;
 
-		tag_clear(pathp[0].node, tag, pathp[0].offset);
+		tag_clear(pathp->node, tag, pathp->offset);
 
 		for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
-			if (pathp[0].node->tags[tag][idx]) {
+			if (pathp->node->tags[tag][idx]) {
 				tags[tag] = 1;
 				nr_cleared_tags--;
 				break;
@@ -715,18 +720,19 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 			}
 		}
 		pathp--;
-	} while (pathp[0].node && nr_cleared_tags);
+	} while (pathp->node && nr_cleared_tags);
 
-	pathp = orig_pathp;
-	*pathp[0].slot = NULL;
-	while (pathp[0].node && --pathp[0].node->count == 0) {
-		pathp--;
-		BUG_ON(*pathp[0].slot == NULL);
-		*pathp[0].slot = NULL;
-		radix_tree_node_free(pathp[1].node);
+	/* Now free the nodes we do not need anymore */
+	for (pathp = orig_pathp; pathp->node; pathp--) {
+		pathp->node->slots[pathp->offset] = NULL;
+		if (--pathp->node->count)
+			goto out;
+
+		/* Node with zero slots in use so free it */
+		radix_tree_node_free(pathp->node);
 	}
-	if (root->rnode == NULL)
-		root->height = 0;
+	root->rnode = NULL;
+	root->height = 0;
 out:
 	return ret;
 }
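
The preload/insert pairing that the new radix_tree_preload() signature serves looks roughly like this — a sketch of a typical caller, with my_tree and my_lock as stand-in names; radix_tree_preload() returns with preemption disabled on success, which is why the radix_tree_preload_end() pairing matters:

	#include <linux/radix-tree.h>
	#include <linux/spinlock.h>

	static RADIX_TREE(my_tree, GFP_ATOMIC);	/* stand-in tree */
	static DEFINE_SPINLOCK(my_lock);	/* stand-in lock */

	static int store_item(unsigned long index, void *item)
	{
		int err;

		/* Allocate nodes up front, while we may still sleep... */
		err = radix_tree_preload(GFP_KERNEL);
		if (err)
			return err;

		/* ...then insert atomically under the lock. */
		spin_lock(&my_lock);
		err = radix_tree_insert(&my_tree, index, item);
		spin_unlock(&my_lock);

		radix_tree_preload_end();	/* re-enables preemption */
		return err;
	}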
diff --git a/lib/semaphore-sleepers.c b/lib/semaphore-sleepers.c
new file mode 100644
index 00000000000..4d5f18889fa
--- /dev/null
+++ b/lib/semaphore-sleepers.c
@@ -0,0 +1,177 @@
+/*
+ * i386 and x86-64 semaphore implementation.
+ *
+ * (C) Copyright 1999 Linus Torvalds
+ *
+ * Portions Copyright 1999 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
+ */
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <asm/semaphore.h>
+
+/*
+ * Semaphores are implemented using a two-way counter:
+ * The "count" variable is decremented for each process
+ * that tries to acquire the semaphore, while the "sleeping"
+ * variable is a count of such acquires.
+ *
+ * Notably, the inline "up()" and "down()" functions can
+ * efficiently test if they need to do any extra work (up
+ * needs to do something only if count was negative before
+ * the increment operation).
+ *
+ * "sleeping" and the contention routine ordering is protected
+ * by the spinlock in the semaphore's waitqueue head.
+ *
+ * Note that these functions are only called when there is
+ * contention on the lock, and as such all this is the
+ * "non-critical" part of the whole semaphore business. The
+ * critical part is the inline stuff in <asm/semaphore.h>
+ * where we want to avoid any extra jumps and calls.
+ */
+
+/*
+ * Logic:
+ *  - only on a boundary condition do we need to care. When we go
+ *    from a negative count to a non-negative, we wake people up.
+ *  - when we go from a non-negative count to a negative do we
+ *    (a) synchronize with the "sleeper" count and (b) make sure
+ *    that we're on the wakeup list before we synchronize so that
+ *    we cannot lose wakeup events.
+ */
+
+fastcall void __up(struct semaphore *sem)
+{
+	wake_up(&sem->wait);
+}
+
+fastcall void __sched __down(struct semaphore * sem)
+{
+	struct task_struct *tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
+	unsigned long flags;
+
+	tsk->state = TASK_UNINTERRUPTIBLE;
+	spin_lock_irqsave(&sem->wait.lock, flags);
+	add_wait_queue_exclusive_locked(&sem->wait, &wait);
+
+	sem->sleepers++;
+	for (;;) {
+		int sleepers = sem->sleepers;
+
+		/*
+		 * Add "everybody else" into it. They aren't
+		 * playing, because we own the spinlock in
+		 * the wait_queue_head.
+		 */
+		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
+			sem->sleepers = 0;
+			break;
+		}
+		sem->sleepers = 1;	/* us - see -1 above */
+		spin_unlock_irqrestore(&sem->wait.lock, flags);
+
+		schedule();
+
+		spin_lock_irqsave(&sem->wait.lock, flags);
+		tsk->state = TASK_UNINTERRUPTIBLE;
+	}
+	remove_wait_queue_locked(&sem->wait, &wait);
+	wake_up_locked(&sem->wait);
+	spin_unlock_irqrestore(&sem->wait.lock, flags);
+	tsk->state = TASK_RUNNING;
+}
+
+fastcall int __sched __down_interruptible(struct semaphore * sem)
+{
+	int retval = 0;
+	struct task_struct *tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
+	unsigned long flags;
+
+	tsk->state = TASK_INTERRUPTIBLE;
+	spin_lock_irqsave(&sem->wait.lock, flags);
+	add_wait_queue_exclusive_locked(&sem->wait, &wait);
+
+	sem->sleepers++;
+	for (;;) {
+		int sleepers = sem->sleepers;
+
+		/*
+		 * With signals pending, this turns into
+		 * the trylock failure case - we won't be
+		 * sleeping, and we can't get the lock as
+		 * it has contention. Just correct the count
+		 * and exit.
+		 */
+		if (signal_pending(current)) {
+			retval = -EINTR;
+			sem->sleepers = 0;
+			atomic_add(sleepers, &sem->count);
+			break;
+		}
+
+		/*
+		 * Add "everybody else" into it. They aren't
+		 * playing, because we own the spinlock in
+		 * wait_queue_head. The "-1" is because we're
+		 * still hoping to get the semaphore.
+		 */
+		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
+			sem->sleepers = 0;
+			break;
+		}
+		sem->sleepers = 1;	/* us - see -1 above */
+		spin_unlock_irqrestore(&sem->wait.lock, flags);
+
+		schedule();
+
+		spin_lock_irqsave(&sem->wait.lock, flags);
+		tsk->state = TASK_INTERRUPTIBLE;
+	}
+	remove_wait_queue_locked(&sem->wait, &wait);
+	wake_up_locked(&sem->wait);
+	spin_unlock_irqrestore(&sem->wait.lock, flags);
+
+	tsk->state = TASK_RUNNING;
+	return retval;
+}
+
+/*
+ * Trylock failed - make sure we correct for
+ * having decremented the count.
+ *
+ * We could have done the trylock with a
+ * single "cmpxchg" without failure cases,
+ * but then it wouldn't work on a 386.
+ */
+fastcall int __down_trylock(struct semaphore * sem)
+{
+	int sleepers;
+	unsigned long flags;
+
+	spin_lock_irqsave(&sem->wait.lock, flags);
+	sleepers = sem->sleepers + 1;
+	sem->sleepers = 0;
+
+	/*
+	 * Add "everybody else" and us into it. They aren't
+	 * playing, because we own the spinlock in the
+	 * wait_queue_head.
+	 */
+	if (!atomic_add_negative(sleepers, &sem->count)) {
+		wake_up_locked(&sem->wait);
+	}
+
+	spin_unlock_irqrestore(&sem->wait.lock, flags);
+	return 1;
+}
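
These slow-path routines sit behind the architecture's inline fast paths; callers never invoke __down() directly. A minimal sketch of the user-facing side of this 2.6-era API (do_work() and my_sem are illustrative names):

	#include <linux/sched.h>
	#include <asm/semaphore.h>

	static DECLARE_MUTEX(my_sem);	/* counting semaphore, count = 1 */

	static int do_work(void)
	{
		/* Sleeps uninterruptibly; __down() runs only on contention. */
		down(&my_sem);
		/* ... critical section ... */
		up(&my_sem);

		/* Variant that a signal can abort: */
		if (down_interruptible(&my_sem))
			return -EINTR;	/* __down_interruptible() saw a signal */
		up(&my_sem);
		return 0;
	}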
diff --git a/lib/sort.c b/lib/sort.c
index b73dbb0e7c8..ddc4d35df28 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -6,15 +6,16 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/sort.h>
 
-void u32_swap(void *a, void *b, int size)
+static void u32_swap(void *a, void *b, int size)
 {
 	u32 t = *(u32 *)a;
 	*(u32 *)a = *(u32 *)b;
 	*(u32 *)b = t;
 }
 
-void generic_swap(void *a, void *b, int size)
+static void generic_swap(void *a, void *b, int size)
 {
 	char t;
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
new file mode 100644
index 00000000000..906ad101eab
--- /dev/null
+++ b/lib/spinlock_debug.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ *
+ * This file contains the spinlock/rwlock implementations for
+ * DEBUG_SPINLOCK.
+ */
+
+#include <linux/config.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+
+static void spin_bug(spinlock_t *lock, const char *msg)
+{
+	static long print_once = 1;
+	struct task_struct *owner = NULL;
+
+	if (xchg(&print_once, 0)) {
+		if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
+			owner = lock->owner;
+		printk("BUG: spinlock %s on CPU#%d, %s/%d\n",
+			msg, smp_processor_id(), current->comm, current->pid);
+		printk(" lock: %p, .magic: %08x, .owner: %s/%d, .owner_cpu: %d\n",
+			lock, lock->magic,
+			owner ? owner->comm : "<none>",
+			owner ? owner->pid : -1,
+			lock->owner_cpu);
+		dump_stack();
+#ifdef CONFIG_SMP
+		/*
+		 * We cannot continue on SMP:
+		 */
+//		panic("bad locking");
+#endif
+	}
+}
+
+#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
+
+static inline void debug_spin_lock_before(spinlock_t *lock)
+{
+	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
+	SPIN_BUG_ON(lock->owner == current, lock, "recursion");
+	SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
+							lock, "cpu recursion");
+}
+
+static inline void debug_spin_lock_after(spinlock_t *lock)
+{
+	lock->owner_cpu = raw_smp_processor_id();
+	lock->owner = current;
+}
+
+static inline void debug_spin_unlock(spinlock_t *lock)
+{
+	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
+	SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked");
+	SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
+	SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
+							lock, "wrong CPU");
+	lock->owner = SPINLOCK_OWNER_INIT;
+	lock->owner_cpu = -1;
+}
+
+static void __spin_lock_debug(spinlock_t *lock)
+{
+	int print_once = 1;
+	u64 i;
+
+	for (;;) {
+		for (i = 0; i < loops_per_jiffy * HZ; i++) {
+			cpu_relax();
+			if (__raw_spin_trylock(&lock->raw_lock))
+				return;
+		}
+		/* lockup suspected: */
+		if (print_once) {
+			print_once = 0;
+			printk("BUG: spinlock lockup on CPU#%d, %s/%d, %p\n",
+				smp_processor_id(), current->comm, current->pid,
+					lock);
+			dump_stack();
+		}
+	}
+}
+
+void _raw_spin_lock(spinlock_t *lock)
+{
+	debug_spin_lock_before(lock);
+	if (unlikely(!__raw_spin_trylock(&lock->raw_lock)))
+		__spin_lock_debug(lock);
+	debug_spin_lock_after(lock);
+}
+
+int _raw_spin_trylock(spinlock_t *lock)
+{
+	int ret = __raw_spin_trylock(&lock->raw_lock);
+
+	if (ret)
+		debug_spin_lock_after(lock);
+#ifndef CONFIG_SMP
+	/*
+	 * Must not happen on UP:
+	 */
+	SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
+#endif
+	return ret;
+}
+
+void _raw_spin_unlock(spinlock_t *lock)
+{
+	debug_spin_unlock(lock);
+	__raw_spin_unlock(&lock->raw_lock);
+}
+
+static void rwlock_bug(rwlock_t *lock, const char *msg)
+{
+	static long print_once = 1;
+
+	if (xchg(&print_once, 0)) {
+		printk("BUG: rwlock %s on CPU#%d, %s/%d, %p\n", msg,
+			smp_processor_id(), current->comm, current->pid, lock);
+		dump_stack();
+#ifdef CONFIG_SMP
+		/*
+		 * We cannot continue on SMP:
+		 */
+		panic("bad locking");
+#endif
+	}
+}
+
+#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
+
+static void __read_lock_debug(rwlock_t *lock)
+{
+	int print_once = 1;
+	u64 i;
+
+	for (;;) {
+		for (i = 0; i < loops_per_jiffy * HZ; i++) {
+			cpu_relax();
+			if (__raw_read_trylock(&lock->raw_lock))
+				return;
+		}
+		/* lockup suspected: */
+		if (print_once) {
+			print_once = 0;
+			printk("BUG: read-lock lockup on CPU#%d, %s/%d, %p\n",
+				smp_processor_id(), current->comm, current->pid,
+					lock);
+			dump_stack();
+		}
+	}
+}
+
+void _raw_read_lock(rwlock_t *lock)
+{
+	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
+	if (unlikely(!__raw_read_trylock(&lock->raw_lock)))
+		__read_lock_debug(lock);
+}
+
+int _raw_read_trylock(rwlock_t *lock)
+{
+	int ret = __raw_read_trylock(&lock->raw_lock);
+
+#ifndef CONFIG_SMP
+	/*
+	 * Must not happen on UP:
+	 */
+	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
+#endif
+	return ret;
+}
+
+void _raw_read_unlock(rwlock_t *lock)
+{
+	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
+	__raw_read_unlock(&lock->raw_lock);
+}
+
+static inline void debug_write_lock_before(rwlock_t *lock)
+{
+	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
+	RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
+	RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
+							lock, "cpu recursion");
+}
+
+static inline void debug_write_lock_after(rwlock_t *lock)
+{
+	lock->owner_cpu = raw_smp_processor_id();
+	lock->owner = current;
+}
+
+static inline void debug_write_unlock(rwlock_t *lock)
+{
+	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
+	RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
+	RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
+							lock, "wrong CPU");
+	lock->owner = SPINLOCK_OWNER_INIT;
+	lock->owner_cpu = -1;
+}
+
+static void __write_lock_debug(rwlock_t *lock)
+{
+	int print_once = 1;
+	u64 i;
+
+	for (;;) {
+		for (i = 0; i < loops_per_jiffy * HZ; i++) {
+			cpu_relax();
+			if (__raw_write_trylock(&lock->raw_lock))
+				return;
+		}
+		/* lockup suspected: */
+		if (print_once) {
+			print_once = 0;
+			printk("BUG: write-lock lockup on CPU#%d, %s/%d, %p\n",
+				smp_processor_id(), current->comm, current->pid,
+					lock);
+			dump_stack();
+		}
+	}
+}
+
+void _raw_write_lock(rwlock_t *lock)
+{
+	debug_write_lock_before(lock);
+	if (unlikely(!__raw_write_trylock(&lock->raw_lock)))
+		__write_lock_debug(lock);
+	debug_write_lock_after(lock);
+}
+
+int _raw_write_trylock(rwlock_t *lock)
+{
+	int ret = __raw_write_trylock(&lock->raw_lock);
+
+	if (ret)
+		debug_write_lock_after(lock);
+#ifndef CONFIG_SMP
+	/*
+	 * Must not happen on UP:
+	 */
+	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
+#endif
+	return ret;
+}
+
+void _raw_write_unlock(rwlock_t *lock)
+{
+	debug_write_unlock(lock);
+	__raw_write_unlock(&lock->raw_lock);
+}
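
A sketch of the class of error these checks catch. With CONFIG_DEBUG_SPINLOCK enabled, the second unlock below would trip the "already unlocked"/"wrong owner" SPIN_BUG_ON checks and dump a stack trace, rather than silently corrupting the lock state (the function and lock names are illustrative):

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);

	static void broken_unlock(void)
	{
		spin_lock(&demo_lock);
		spin_unlock(&demo_lock);
		spin_unlock(&demo_lock);  /* BUG: spinlock already unlocked */
	}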
diff --git a/lib/ts_bm.c b/lib/ts_bm.c
new file mode 100644
index 00000000000..2cc79112ecc
--- /dev/null
+++ b/lib/ts_bm.c
@@ -0,0 +1,185 @@
+/*
+ * lib/ts_bm.c		Boyer-Moore text search implementation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Authors:	Pablo Neira Ayuso <pablo@eurodev.net>
+ *
+ * ==========================================================================
+ *
+ *   Implements Boyer-Moore string matching algorithm:
+ *
+ *   [1] A Fast String Searching Algorithm, R.S. Boyer and Moore.
+ *       Communications of the Association for Computing Machinery,
+ *       20(10), 1977, pp. 762-772.
+ *       http://www.cs.utexas.edu/users/moore/publications/fstrpos.pdf
+ *
+ *   [2] Handbook of Exact String Matching Algorithms, Thierry Lecroq, 2004
+ *       http://www-igm.univ-mlv.fr/~lecroq/string/string.pdf
+ *
+ *   Note: Since Boyer-Moore (BM) performs searches for matches from right
+ *   to left, it's still possible that a match could be spread over
+ *   multiple blocks, in which case this algorithm won't find it.
+ *
+ *   If you're willing to ensure that such a thing won't ever happen, use
+ *   the Knuth-Morris-Pratt (KMP) implementation instead. In conclusion,
+ *   choose the proper string search algorithm depending on your setting.
+ *
+ *   Say you're using the textsearch infrastructure for filtering, NIDS or
+ *   any similar security focused purpose, then go KMP. Otherwise, if you
+ *   really care about performance, say you're classifying packets to apply
+ *   Quality of Service (QoS) policies, and you don't mind about possible
+ *   matches spread over multiple fragments, then go BM.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/textsearch.h>
+
+/* Alphabet size, use ASCII */
+#define ASIZE 256
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(args, format...)
+#endif
+
+struct ts_bm
+{
+	u8 *		pattern;
+	unsigned int	patlen;
+	unsigned int 	bad_shift[ASIZE];
+	unsigned int	good_shift[0];
+};
+
+static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
+{
+	struct ts_bm *bm = ts_config_priv(conf);
+	unsigned int i, text_len, consumed = state->offset;
+	const u8 *text;
+	int shift = bm->patlen, bs;
+
+	for (;;) {
+		text_len = conf->get_next_block(consumed, &text, conf, state);
+
+		if (unlikely(text_len == 0))
+			break;
+
+		while (shift < text_len) {
+			DEBUGP("Searching in position %d (%c)\n",
+				shift, text[shift]);
+			for (i = 0; i < bm->patlen; i++)
+			     if (text[shift-i] != bm->pattern[bm->patlen-1-i])
+				     goto next;
+
+			/* London calling... */
+			DEBUGP("found!\n");
+			return consumed += (shift-(bm->patlen-1));
+
+next:			bs = bm->bad_shift[text[shift-i]];
+
+			/* Now jumping to... */
+			shift = max_t(int, shift-i+bs, shift+bm->good_shift[i]);
+		}
+		consumed += text_len;
+	}
+
+	return UINT_MAX;
+}
+
+static void compute_prefix_tbl(struct ts_bm *bm, const u8 *pattern,
+			       unsigned int len)
+{
+	int i, j, ended, l[ASIZE];
+
+	for (i = 0; i < ASIZE; i++)
+		bm->bad_shift[i] = len;
+	for (i = 0; i < len - 1; i++)
+		bm->bad_shift[pattern[i]] = len - 1 - i;
+
+	/* Compute the good shift array, used to match reoccurrences
+	 * of a subpattern */
+	for (i = 1; i < bm->patlen; i++) {
+		for (j = 0; j < bm->patlen && bm->pattern[bm->patlen - 1 - j]
+				== bm->pattern[bm->patlen - 1 - i - j]; j++);
+		l[i] = j;
+	}
+
+	bm->good_shift[0] = 1;
+	for (i = 1; i < bm->patlen; i++)
+		bm->good_shift[i] = bm->patlen;
+	for (i = bm->patlen - 1; i > 0; i--)
+		bm->good_shift[l[i]] = i;
+	ended = 0;
+	for (i = 0; i < bm->patlen; i++) {
+		if (l[i] == bm->patlen - 1 - i)
+			ended = i;
+		if (ended)
+			bm->good_shift[i] = ended;
+	}
+}
+
+static struct ts_config *bm_init(const void *pattern, unsigned int len,
+				 int gfp_mask)
+{
+	struct ts_config *conf;
+	struct ts_bm *bm;
+	unsigned int prefix_tbl_len = len * sizeof(unsigned int);
+	size_t priv_size = sizeof(*bm) + len + prefix_tbl_len;
+
+	conf = alloc_ts_config(priv_size, gfp_mask);
+	if (IS_ERR(conf))
+		return conf;
+
+	bm = ts_config_priv(conf);
+	bm->patlen = len;
+	bm->pattern = (u8 *) bm->good_shift + prefix_tbl_len;
+	compute_prefix_tbl(bm, pattern, len);
+	memcpy(bm->pattern, pattern, len);
+
+	return conf;
+}
+
+static void *bm_get_pattern(struct ts_config *conf)
+{
+	struct ts_bm *bm = ts_config_priv(conf);
+	return bm->pattern;
+}
+
+static unsigned int bm_get_pattern_len(struct ts_config *conf)
+{
+	struct ts_bm *bm = ts_config_priv(conf);
+	return bm->patlen;
+}
+
+static struct ts_ops bm_ops = {
+	.name		  = "bm",
+	.find		  = bm_find,
+	.init		  = bm_init,
+	.get_pattern	  = bm_get_pattern,
+	.get_pattern_len  = bm_get_pattern_len,
+	.owner		  = THIS_MODULE,
+	.list		  = LIST_HEAD_INIT(bm_ops.list)
+};
+
+static int __init init_bm(void)
+{
+	return textsearch_register(&bm_ops);
+}
+
+static void __exit exit_bm(void)
+{
+	textsearch_unregister(&bm_ops);
+}
+
+MODULE_LICENSE("GPL");
+
+module_init(init_bm);
+module_exit(exit_bm);
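
A sketch of how a caller selects the new algorithm through the textsearch API — "bm" is the name registered by bm_ops above; the pattern, flags and function name are illustrative:

	#include <linux/textsearch.h>
	#include <linux/err.h>

	static unsigned int find_foo(const void *data, unsigned int len)
	{
		struct ts_config *conf;
		struct ts_state state;
		unsigned int pos;

		conf = textsearch_prepare("bm", "foo", 3, GFP_KERNEL,
					  TS_AUTOLOAD);
		if (IS_ERR(conf))
			return UINT_MAX;

		pos = textsearch_find_continuous(conf, &state, data, len);
		textsearch_destroy(conf);
		return pos;	/* UINT_MAX if "foo" was not found */
	}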
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index a9bda0a361f..e4e9031dd9c 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -269,6 +269,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
 	int qualifier;		/* 'h', 'l', or 'L' for integer fields */
 				/* 'z' support added 23/7/1999 S.H.    */
 				/* 'z' changed to 'Z' --davidm 1/25/99 */
+				/* 't' added for ptrdiff_t */
 
 	/* Reject out-of-range values early */
 	if (unlikely((int) size < 0)) {
@@ -339,7 +340,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
 		/* get the conversion qualifier */
 		qualifier = -1;
 		if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
-		    *fmt =='Z' || *fmt == 'z') {
+		    *fmt =='Z' || *fmt == 'z' || *fmt == 't') {
 			qualifier = *fmt;
 			++fmt;
 			if (qualifier == 'l' && *fmt == 'l') {
@@ -467,6 +468,8 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
 				num = (signed long) num;
 		} else if (qualifier == 'Z' || qualifier == 'z') {
 			num = va_arg(args, size_t);
+		} else if (qualifier == 't') {
+			num = va_arg(args, ptrdiff_t);
 		} else if (qualifier == 'h') {
 			num = (unsigned short) va_arg(args, int);
 			if (flags & SIGN)
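
With the 't' qualifier wired up, pointer differences can be printed without a cast, matching C99's %td. A small sketch (the function and pointer names are illustrative):

	#include <linux/kernel.h>

	static void show_offset(const char *base, const char *cursor)
	{
		/* cursor - base is a ptrdiff_t; 't' sizes the argument correctly */
		printk(KERN_DEBUG "parsed %td bytes\n", cursor - base);
	}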
