Diffstat (limited to 'lib')
36 files changed, 1989 insertions, 635 deletions
diff --git a/lib/Kconfig b/lib/Kconfig index 277fbfb233b..496d16e1fa2 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -117,6 +117,10 @@ config DECOMPRESS_BZIP2 config DECOMPRESS_LZMA tristate +config DECOMPRESS_LZO + select LZO_DECOMPRESS + tristate + # # Generic allocator support is selected if needed # @@ -156,6 +160,9 @@ config TEXTSEARCH_BM config TEXTSEARCH_FSM tristate +config LIST_SORT + boolean + config BTREE boolean @@ -203,4 +210,7 @@ config NLATTR config GENERIC_ATOMIC64 bool +config LRU_CACHE + tristate + endmenu diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 234ceb10861..b520ec1f33c 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -105,7 +105,7 @@ config DEBUG_SECTION_MISMATCH bool "Enable full Section mismatch analysis" depends on UNDEFINED # This option is on purpose disabled for now. - # It will be enabled when we are down to a resonable number + # It will be enabled when we are down to a reasonable number # of section mismatch warnings (< 10 for an allyesconfig build) help The section mismatch analysis checks if there are illegal @@ -298,6 +298,14 @@ config DEBUG_OBJECTS_TIMERS timer routines to track the life time of timer objects and validate the timer operations. +config DEBUG_OBJECTS_WORK + bool "Debug work objects" + depends on DEBUG_OBJECTS + help + If you say Y here, additional code will be inserted into the + work queue routines to track the life time of work objects and + validate the work operations. + config DEBUG_OBJECTS_ENABLE_DEFAULT int "debug_objects bootup default value (0-1)" range 0 1 @@ -347,11 +355,12 @@ config SLUB_STATS config DEBUG_KMEMLEAK bool "Kernel memory leak detector" depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \ - (X86 || ARM || PPC || S390) + (X86 || ARM || PPC || S390 || SUPERH) select DEBUG_FS if SYSFS select STACKTRACE if STACKTRACE_SUPPORT select KALLSYMS + select CRC32 help Say Y here if you want to enable the memory leak detector. The memory allocation/freeing is traced in a way @@ -490,6 +499,18 @@ config PROVE_LOCKING For more details, see Documentation/lockdep-design.txt. +config PROVE_RCU + bool "RCU debugging: prove RCU correctness" + depends on PROVE_LOCKING + default n + help + This feature enables lockdep extensions that check for correct + use of RCU APIs. This is currently under development. Say Y + if you want to debug RCU usage or help work on the PROVE_RCU + feature. + + Say N if you are unsure. + config LOCKDEP bool depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT @@ -567,7 +588,7 @@ config DEBUG_BUGVERBOSE depends on BUG depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \ FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300 - default !EMBEDDED + default y help Say Y here to make BUG() panics output the file name and line number of the BUG call as well as the EIP and oops trace. This aids @@ -750,16 +771,28 @@ config RCU_TORTURE_TEST_RUNNABLE config RCU_CPU_STALL_DETECTOR bool "Check for stalled CPUs delaying RCU grace periods" depends on TREE_RCU || TREE_PREEMPT_RCU - default n + default y help This option causes RCU to printk information on which CPUs are delaying the current grace period, but only when the grace period extends for excessive time periods. - Say Y if you want RCU to perform such checks. + Say N if you want to disable such checks. + + Say Y if you are unsure. 
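PROVE_RCU, added above, wires the RCU accessors into lockdep, so what it checks is the ordinary read-side pattern. A minimal sketch of that pattern follows; the struct, the global pointer gp and read_val() are hypothetical illustrations, not part of this patch:

#include <linux/rcupdate.h>

struct foo {
	int val;
};

static struct foo *gp;	/* published elsewhere with rcu_assign_pointer() */

static int read_val(void)
{
	struct foo *p;
	int v = -1;

	rcu_read_lock();
	/* with PROVE_RCU, an rcu_dereference() issued outside a
	 * read-side critical section triggers a lockdep complaint */
	p = rcu_dereference(gp);
	if (p)
		v = p->val;
	rcu_read_unlock();
	return v;
}
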
+ +config RCU_CPU_STALL_VERBOSE + bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR" + depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU + default n + help + This option causes RCU to printk detailed per-task information + for any tasks that are stalling the current RCU grace period. Say N if you are unsure. + Say Y if you want to enable such checks. + config KPROBES_SANITY_TEST bool "Kprobes sanity tests" depends on DEBUG_KERNEL @@ -831,8 +864,7 @@ config DEBUG_FORCE_WEAK_PER_CPU config LKDTM tristate "Linux Kernel Dump Test Tool Module" - depends on DEBUG_KERNEL - depends on KPROBES + depends on DEBUG_FS depends on BLOCK default n help @@ -843,7 +875,7 @@ config LKDTM called lkdtm. Documentation on how to use the module can be found in - drivers/misc/lkdtm.c + Documentation/fault-injection/provoke-crashes.txt config FAULT_INJECTION bool "Fault-injection framework" @@ -912,7 +944,7 @@ config LATENCYTOP config SYSCTL_SYSCALL_CHECK bool "Sysctl checks" - depends on SYSCTL_SYSCALL + depends on SYSCTL ---help--- sys_sysctl uses binary paths that have been found challenging to properly maintain and use. This enables checks that help diff --git a/lib/Makefile b/lib/Makefile index cff82612e98..59e46a014bc 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -40,6 +40,7 @@ lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o +obj-$(CONFIG_LIST_SORT) += list_sort.o obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o obj-$(CONFIG_BTREE) += btree.o obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o @@ -70,6 +71,7 @@ obj-$(CONFIG_LZO_DECOMPRESS) += lzo/ lib-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o lib-$(CONFIG_DECOMPRESS_LZMA) += decompress_unlzma.o +lib-$(CONFIG_DECOMPRESS_LZO) += decompress_unlzo.o obj-$(CONFIG_TEXTSEARCH) += textsearch.o obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o @@ -92,6 +94,8 @@ obj-$(CONFIG_DYNAMIC_DEBUG) += dynamic_debug.o obj-$(CONFIG_NLATTR) += nlattr.o +obj-$(CONFIG_LRU_CACHE) += lru_cache.o + obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o obj-$(CONFIG_GENERIC_CSUM) += checksum.o diff --git a/lib/argv_split.c b/lib/argv_split.c index 5205a8dae5b..4b1b083f219 100644 --- a/lib/argv_split.c +++ b/lib/argv_split.c @@ -4,17 +4,10 @@ #include <linux/kernel.h> #include <linux/ctype.h> +#include <linux/string.h> #include <linux/slab.h> #include <linux/module.h> -static const char *skip_sep(const char *cp) -{ - while (*cp && isspace(*cp)) - cp++; - - return cp; -} - static const char *skip_arg(const char *cp) { while (*cp && !isspace(*cp)) @@ -28,7 +21,7 @@ static int count_argc(const char *str) int count = 0; while (*str) { - str = skip_sep(str); + str = skip_spaces(str); if (*str) { count++; str = skip_arg(str); @@ -82,7 +75,7 @@ char **argv_split(gfp_t gfp, const char *str, int *argcp) argvp = argv; while (*str) { - str = skip_sep(str); + str = skip_spaces(str); if (*str) { const char *p = str; diff --git a/lib/bitmap.c b/lib/bitmap.c index 702565821c9..ffb78c916cc 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -271,6 +271,87 @@ int __bitmap_weight(const unsigned long *bitmap, int bits) } EXPORT_SYMBOL(__bitmap_weight); +#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG)) + +void bitmap_set(unsigned long *map, int start, int nr) +{ + unsigned long *p = map + BIT_WORD(start); + const int size = start + nr; + int bits_to_set = 
BITS_PER_LONG - (start % BITS_PER_LONG); + unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start); + + while (nr - bits_to_set >= 0) { + *p |= mask_to_set; + nr -= bits_to_set; + bits_to_set = BITS_PER_LONG; + mask_to_set = ~0UL; + p++; + } + if (nr) { + mask_to_set &= BITMAP_LAST_WORD_MASK(size); + *p |= mask_to_set; + } +} +EXPORT_SYMBOL(bitmap_set); + +void bitmap_clear(unsigned long *map, int start, int nr) +{ + unsigned long *p = map + BIT_WORD(start); + const int size = start + nr; + int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG); + unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start); + + while (nr - bits_to_clear >= 0) { + *p &= ~mask_to_clear; + nr -= bits_to_clear; + bits_to_clear = BITS_PER_LONG; + mask_to_clear = ~0UL; + p++; + } + if (nr) { + mask_to_clear &= BITMAP_LAST_WORD_MASK(size); + *p &= ~mask_to_clear; + } +} +EXPORT_SYMBOL(bitmap_clear); + +/* + * bitmap_find_next_zero_area - find a contiguous aligned zero area + * @map: The address to base the search on + * @size: The bitmap size in bits + * @start: The bitnumber to start searching at + * @nr: The number of zeroed bits we're looking for + * @align_mask: Alignment mask for zero area + * + * The @align_mask should be one less than a power of 2; the effect is that + * the bit offset of all zero areas this function finds is multiples of that + * power of 2. A @align_mask of 0 means no alignment is required. + */ +unsigned long bitmap_find_next_zero_area(unsigned long *map, + unsigned long size, + unsigned long start, + unsigned int nr, + unsigned long align_mask) +{ + unsigned long index, end, i; +again: + index = find_next_zero_bit(map, size, start); + + /* Align allocation */ + index = __ALIGN_MASK(index, align_mask); + + end = index + nr; + if (end > size) + return end; + i = find_next_bit(map, end, index); + if (i < end) { + start = i + 1; + goto again; + } + return index; +} +EXPORT_SYMBOL(bitmap_find_next_zero_area); + /* * Bitmap printing & parsing functions: first version by Bill Irwin, * second version by Paul Jackson, third by Joe Korty. @@ -406,7 +487,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen, EXPORT_SYMBOL(__bitmap_parse); /** - * bitmap_parse_user() + * bitmap_parse_user - convert an ASCII hex string in a user buffer into a bitmap * * @ubuf: pointer to user buffer containing string. * @ulen: buffer size in bytes. 
If string is smaller than this @@ -538,7 +619,7 @@ int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits) EXPORT_SYMBOL(bitmap_parselist); /** - * bitmap_pos_to_ord(buf, pos, bits) + * bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap * @buf: pointer to a bitmap * @pos: a bit position in @buf (0 <= @pos < @bits) * @bits: number of valid bit positions in @buf @@ -574,7 +655,7 @@ static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits) } /** - * bitmap_ord_to_pos(buf, ord, bits) + * bitmap_ord_to_pos - find position of n-th set bit in bitmap * @buf: pointer to bitmap * @ord: ordinal bit position (n-th set bit, n >= 0) * @bits: number of valid bit positions in @buf @@ -652,10 +733,9 @@ void bitmap_remap(unsigned long *dst, const unsigned long *src, bitmap_zero(dst, bits); w = bitmap_weight(new, bits); - for (oldbit = find_first_bit(src, bits); - oldbit < bits; - oldbit = find_next_bit(src, bits, oldbit + 1)) { + for_each_set_bit(oldbit, src, bits) { int n = bitmap_pos_to_ord(old, oldbit, bits); + if (n < 0 || w == 0) set_bit(oldbit, dst); /* identity map */ else @@ -822,9 +902,7 @@ void bitmap_onto(unsigned long *dst, const unsigned long *orig, */ m = 0; - for (n = find_first_bit(relmap, bits); - n < bits; - n = find_next_bit(relmap, bits, n + 1)) { + for_each_set_bit(n, relmap, bits) { /* m == bitmap_pos_to_ord(relmap, n, bits) */ if (test_bit(m, orig)) set_bit(n, dst); @@ -853,9 +931,7 @@ void bitmap_fold(unsigned long *dst, const unsigned long *orig, return; bitmap_zero(dst, bits); - for (oldbit = find_first_bit(orig, bits); - oldbit < bits; - oldbit = find_next_bit(orig, bits, oldbit + 1)) + for_each_set_bit(oldbit, orig, bits) set_bit(oldbit % sz, dst); } EXPORT_SYMBOL(bitmap_fold); diff --git a/lib/checksum.c b/lib/checksum.c index b2e2fd46846..097508732f3 100644 --- a/lib/checksum.c +++ b/lib/checksum.c @@ -37,7 +37,8 @@ #include <asm/byteorder.h> -static inline unsigned short from32to16(unsigned long x) +#ifndef do_csum +static inline unsigned short from32to16(unsigned int x) { /* add up 16-bit and 16-bit for 16+c bit */ x = (x & 0xffff) + (x >> 16); @@ -49,16 +50,16 @@ static inline unsigned short from32to16(unsigned long x) static unsigned int do_csum(const unsigned char *buff, int len) { int odd, count; - unsigned long result = 0; + unsigned int result = 0; if (len <= 0) goto out; odd = 1 & (unsigned long) buff; if (odd) { #ifdef __LITTLE_ENDIAN - result = *buff; -#else result += (*buff << 8); +#else + result = *buff; #endif len--; buff++; @@ -73,9 +74,9 @@ static unsigned int do_csum(const unsigned char *buff, int len) } count >>= 1; /* nr of 32-bit words.. 
*/ if (count) { - unsigned long carry = 0; + unsigned int carry = 0; do { - unsigned long w = *(unsigned int *) buff; + unsigned int w = *(unsigned int *) buff; count--; buff += 4; result += carry; @@ -102,6 +103,7 @@ static unsigned int do_csum(const unsigned char *buff, int len) out: return result; } +#endif /* * This is a version of ip_compute_csum() optimized for IP headers, diff --git a/lib/crc32.c b/lib/crc32.c index 49d1c9e3ce3..0f45fbff34c 100644 --- a/lib/crc32.c +++ b/lib/crc32.c @@ -30,11 +30,15 @@ #include <asm/atomic.h> #include "crc32defs.h" #if CRC_LE_BITS == 8 -#define tole(x) __constant_cpu_to_le32(x) -#define tobe(x) __constant_cpu_to_be32(x) +# define tole(x) __constant_cpu_to_le32(x) #else -#define tole(x) (x) -#define tobe(x) (x) +# define tole(x) (x) +#endif + +#if CRC_BE_BITS == 8 +# define tobe(x) __constant_cpu_to_be32(x) +#else +# define tobe(x) (x) #endif #include "crc32table.h" @@ -42,6 +46,48 @@ MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>"); MODULE_DESCRIPTION("Ethernet CRC32 calculations"); MODULE_LICENSE("GPL"); +#if CRC_LE_BITS == 8 || CRC_BE_BITS == 8 + +static inline u32 +crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 *tab) +{ +# ifdef __LITTLE_ENDIAN +# define DO_CRC(x) crc = tab[(crc ^ (x)) & 255 ] ^ (crc >> 8) +# else +# define DO_CRC(x) crc = tab[((crc >> 24) ^ (x)) & 255] ^ (crc << 8) +# endif + const u32 *b; + size_t rem_len; + + /* Align it */ + if (unlikely((long)buf & 3 && len)) { + do { + DO_CRC(*buf++); + } while ((--len) && ((long)buf)&3); + } + rem_len = len & 3; + /* load data 32 bits wide, xor data 32 bits wide. */ + len = len >> 2; + b = (const u32 *)buf; + for (--b; len; --len) { + crc ^= *++b; /* use pre increment for speed */ + DO_CRC(0); + DO_CRC(0); + DO_CRC(0); + DO_CRC(0); + } + len = rem_len; + /* And the last few bytes */ + if (len) { + u8 *p = (u8 *)(b + 1) - 1; + do { + DO_CRC(*++p); /* use pre increment for speed */ + } while (--len); + } + return crc; +#undef DO_CRC +} +#endif /** * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32 * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for @@ -72,52 +118,11 @@ u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) { # if CRC_LE_BITS == 8 - const u32 *b =(u32 *)p; const u32 *tab = crc32table_le; -# ifdef __LITTLE_ENDIAN -# define DO_CRC(x) crc = tab[ (crc ^ (x)) & 255 ] ^ (crc>>8) -# else -# define DO_CRC(x) crc = tab[ ((crc >> 24) ^ (x)) & 255] ^ (crc<<8) -# endif - crc = __cpu_to_le32(crc); - /* Align it */ - if(unlikely(((long)b)&3 && len)){ - do { - u8 *p = (u8 *)b; - DO_CRC(*p++); - b = (void *)p; - } while ((--len) && ((long)b)&3 ); - } - if(likely(len >= 4)){ - /* load data 32 bits wide, xor data 32 bits wide. 
*/ - size_t save_len = len & 3; - len = len >> 2; - --b; /* use pre increment below(*++b) for speed */ - do { - crc ^= *++b; - DO_CRC(0); - DO_CRC(0); - DO_CRC(0); - DO_CRC(0); - } while (--len); - b++; /* point to next byte(s) */ - len = save_len; - } - /* And the last few bytes */ - if(len){ - do { - u8 *p = (u8 *)b; - DO_CRC(*p++); - b = (void *)p; - } while (--len); - } - + crc = crc32_body(crc, p, len, tab); return __le32_to_cpu(crc); -#undef ENDIAN_SHIFT -#undef DO_CRC - # elif CRC_LE_BITS == 4 while (len--) { crc ^= *p++; @@ -170,51 +175,11 @@ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) { # if CRC_BE_BITS == 8 - const u32 *b =(u32 *)p; const u32 *tab = crc32table_be; -# ifdef __LITTLE_ENDIAN -# define DO_CRC(x) crc = tab[ (crc ^ (x)) & 255 ] ^ (crc>>8) -# else -# define DO_CRC(x) crc = tab[ ((crc >> 24) ^ (x)) & 255] ^ (crc<<8) -# endif - crc = __cpu_to_be32(crc); - /* Align it */ - if(unlikely(((long)b)&3 && len)){ - do { - u8 *p = (u8 *)b; - DO_CRC(*p++); - b = (u32 *)p; - } while ((--len) && ((long)b)&3 ); - } - if(likely(len >= 4)){ - /* load data 32 bits wide, xor data 32 bits wide. */ - size_t save_len = len & 3; - len = len >> 2; - --b; /* use pre increment below(*++b) for speed */ - do { - crc ^= *++b; - DO_CRC(0); - DO_CRC(0); - DO_CRC(0); - DO_CRC(0); - } while (--len); - b++; /* point to next byte(s) */ - len = save_len; - } - /* And the last few bytes */ - if(len){ - do { - u8 *p = (u8 *)b; - DO_CRC(*p++); - b = (void *)p; - } while (--len); - } + crc = crc32_body(crc, p, len, tab); return __be32_to_cpu(crc); -#undef ENDIAN_SHIFT -#undef DO_CRC - # elif CRC_BE_BITS == 4 while (len--) { crc ^= *p++ << 24; diff --git a/lib/ctype.c b/lib/ctype.c index d02ace14a32..26baa620e95 100644 --- a/lib/ctype.c +++ b/lib/ctype.c @@ -7,30 +7,30 @@ #include <linux/ctype.h> #include <linux/module.h> -unsigned char _ctype[] = { -_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */ -_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */ -_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */ -_C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */ -_S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */ -_P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */ -_D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */ -_D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */ -_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */ -_U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */ -_U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */ -_U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */ -_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */ -_L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */ -_L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */ -_L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */ -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */ -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */ -_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */ -_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */ -_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */ -_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */ -_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */ -_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */ +const unsigned char _ctype[] = { +_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */ +_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */ +_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */ +_C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */ +_S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */ +_P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */ +_D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */ +_D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */ +_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */ +_U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */ 
+_U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */ +_U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */ +_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */ +_L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */ +_L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */ +_L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */ +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */ +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */ +_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */ +_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */ +_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */ +_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */ +_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */ +_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */ EXPORT_SYMBOL(_ctype); diff --git a/lib/debug_locks.c b/lib/debug_locks.c index bc3b11731b9..5bf0020b924 100644 --- a/lib/debug_locks.c +++ b/lib/debug_locks.c @@ -23,6 +23,7 @@ * shut up after that. */ int debug_locks = 1; +EXPORT_SYMBOL_GPL(debug_locks); /* * The locking-testsuite uses <debug_locks_silent> to get a diff --git a/lib/debugobjects.c b/lib/debugobjects.c index eae56fddfa3..a9a8996d286 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -26,14 +26,14 @@ struct debug_bucket { struct hlist_head list; - spinlock_t lock; + raw_spinlock_t lock; }; static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE]; static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata; -static DEFINE_SPINLOCK(pool_lock); +static DEFINE_RAW_SPINLOCK(pool_lock); static HLIST_HEAD(obj_pool); @@ -96,10 +96,10 @@ static int fill_pool(void) if (!new) return obj_pool_free; - spin_lock_irqsave(&pool_lock, flags); + raw_spin_lock_irqsave(&pool_lock, flags); hlist_add_head(&new->node, &obj_pool); obj_pool_free++; - spin_unlock_irqrestore(&pool_lock, flags); + raw_spin_unlock_irqrestore(&pool_lock, flags); } return obj_pool_free; } @@ -133,7 +133,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) { struct debug_obj *obj = NULL; - spin_lock(&pool_lock); + raw_spin_lock(&pool_lock); if (obj_pool.first) { obj = hlist_entry(obj_pool.first, typeof(*obj), node); @@ -152,7 +152,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) if (obj_pool_free < obj_pool_min_free) obj_pool_min_free = obj_pool_free; } - spin_unlock(&pool_lock); + raw_spin_unlock(&pool_lock); return obj; } @@ -165,7 +165,7 @@ static void free_obj_work(struct work_struct *work) struct debug_obj *obj; unsigned long flags; - spin_lock_irqsave(&pool_lock, flags); + raw_spin_lock_irqsave(&pool_lock, flags); while (obj_pool_free > ODEBUG_POOL_SIZE) { obj = hlist_entry(obj_pool.first, typeof(*obj), node); hlist_del(&obj->node); @@ -174,11 +174,11 @@ static void free_obj_work(struct work_struct *work) * We release pool_lock across kmem_cache_free() to * avoid contention on pool_lock. 
*/ - spin_unlock_irqrestore(&pool_lock, flags); + raw_spin_unlock_irqrestore(&pool_lock, flags); kmem_cache_free(obj_cache, obj); - spin_lock_irqsave(&pool_lock, flags); + raw_spin_lock_irqsave(&pool_lock, flags); } - spin_unlock_irqrestore(&pool_lock, flags); + raw_spin_unlock_irqrestore(&pool_lock, flags); } /* @@ -190,7 +190,7 @@ static void free_object(struct debug_obj *obj) unsigned long flags; int sched = 0; - spin_lock_irqsave(&pool_lock, flags); + raw_spin_lock_irqsave(&pool_lock, flags); /* * schedule work when the pool is filled and the cache is * initialized: @@ -200,7 +200,7 @@ static void free_object(struct debug_obj *obj) hlist_add_head(&obj->node, &obj_pool); obj_pool_free++; obj_pool_used--; - spin_unlock_irqrestore(&pool_lock, flags); + raw_spin_unlock_irqrestore(&pool_lock, flags); if (sched) schedule_work(&debug_obj_work); } @@ -221,9 +221,9 @@ static void debug_objects_oom(void) printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n"); for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { - spin_lock_irqsave(&db->lock, flags); + raw_spin_lock_irqsave(&db->lock, flags); hlist_move_list(&db->list, &freelist); - spin_unlock_irqrestore(&db->lock, flags); + raw_spin_unlock_irqrestore(&db->lock, flags); /* Now free them */ hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { @@ -303,14 +303,14 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack) db = get_bucket((unsigned long) addr); - spin_lock_irqsave(&db->lock, flags); + raw_spin_lock_irqsave(&db->lock, flags); obj = lookup_object(addr, db); if (!obj) { obj = alloc_object(addr, db, descr); if (!obj) { debug_objects_enabled = 0; - spin_unlock_irqrestore(&db->lock, flags); + raw_spin_unlock_irqrestore(&db->lock, flags); debug_objects_oom(); return; } @@ -327,7 +327,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack) case ODEBUG_STATE_ACTIVE: debug_print_object(obj, "init"); state = obj->state; - spin_unlock_irqrestore(&db->lock, flags); + raw_spin_unlock_irqrestore(&db->lock, flags); debug_object_fixup(descr->fixup_init, addr, state); return; @@ -338,7 +338,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack) break; } - spin_unlock_irqrestore(&db->lock, flags); + raw_spin_unlock_irqrestore(&db->lock, flags); } /** @@ -385,7 +385,7 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr) db = get_bucket((unsigned long) addr); - spin_lock_irqsave(&db->lock, flags); + raw_spin_lock_irqsave(&db->lock, flags); obj = lookup_object(addr, db); if (obj) { @@ -398,7 +398,7 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr) case ODEBUG_STATE_ACTIVE: debug_print_object(obj, "activate"); state = obj->state; - spin_unlock_irqrestore(&db->lock, flags); + raw_spin_unlock_irqrestore(&db->lock, flags); debug_object_fixup(descr->fixup_activate, addr, state); return; @@ -408,11 +408,11 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr) default: break; } - spin_unlock_irqrestore(&db->lock, flags); + raw_spin_unlock_irqrestore(&db->lock, flags); return; } - spin_unlock_irqrestore(&db->lock, flags); + raw_spin_unlock_irqrestore(&db->lock, flags); /* * This happens when a static object is activated. 
We * let the type specific code decide whether this is @@ -438,7 +438,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr) db = get_bucket((unsigned long) addr); - spin_lock_irqsave(&db->lock, flags); + raw_spin_lock_irqsave(&db->lock, flags); obj = lookup_object(addr, db); if (obj) { @@ -463,7 +463,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr) debug_print_object(&o, "deactivate"); } - spin_unlock_irqrestore(&db->lock, flags); + raw_spin_unlock_irqrestore(&db->lock, flags); } /** @@ -483,7 +483,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr) db = get_bucket((unsigned long) addr); - spin_lock_irqsave(&db->lock, flags); + raw_spin_lock_irqsave(&db->lock, flags); obj = lookup_object(addr, db); if (!obj) @@ -498,7 +498,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr) case ODEBUG_STATE_ACTIVE: debug_print_object(obj, "destroy"); state = obj->state; - spin_unlock_irqrestore(&db->lock, flags); + raw_spin_unlock_irqrestore(&db->lock, flags); debug_object_fixup(descr->fixup_destroy, addr, state); return; @@ -509,7 +509,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr) break; } out_unlock: - spin_unlock_irqrestore(&db->lock, flags); + raw_spin_unlock_irqrestore(&db->lock, flags); } /** @@ -529,7 +529,7 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr) db = get_bucket((unsigned long) addr); - spin_lock_irqsave(&db->lock, flags); + raw_spin_lock_irqsave(&db->lock, flags); obj = lookup_object(addr, db); if (!obj) @@ -539,17 +539,17 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr) case ODEBUG_STATE_ACTIVE: debug_print_object(obj, "free"); state = obj->state; - spin_unlock_irqrestore(&db->lock, flags); + raw_spin_unlock_irqrestore(&db->lock, flags); debug_object_fixup(descr->fixup_free, addr, state); return; default: hlist_del(&obj->node); - spin_unlock_irqrestore(&db->lock, flags); + raw_spin_unlock_irqrestore(&db->lock, flags); free_object(obj); return; } out_unlock: - spin_unlock_irqrestore(&db->lock, flags); + raw_spin_unlock_irqrestore(&db->lock, flags); } #ifdef CONFIG_DEBUG_OBJECTS_FREE @@ -575,7 +575,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size) repeat: cnt = 0; - spin_lock_irqsave(&db->lock, flags); + raw_spin_lock_irqsave(&db->lock, flags); hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) { cnt++; oaddr = (unsigned long) obj->object; @@ -587,7 +587,7 @@ repeat: debug_print_object(obj, "free"); descr = obj->descr; state = obj->state; - spin_unlock_irqrestore(&db->lock, flags); + raw_spin_unlock_irqrestore(&db->lock, flags); debug_object_fixup(descr->fixup_free, (void *) oaddr, state); goto repeat; @@ -597,7 +597,7 @@ repeat: break; } } - spin_unlock_irqrestore(&db->lock, flags); + raw_spin_unlock_irqrestore(&db->lock, flags); /* Now free them */ hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { @@ -783,7 +783,7 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings) db = get_bucket((unsigned long) addr); - spin_lock_irqsave(&db->lock, flags); + raw_spin_lock_irqsave(&db->lock, flags); obj = lookup_object(addr, db); if (!obj && state != ODEBUG_STATE_NONE) { @@ -807,7 +807,7 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings) } res = 0; out: - spin_unlock_irqrestore(&db->lock, flags); + raw_spin_unlock_irqrestore(&db->lock, flags); if (res) debug_objects_enabled = 0; return res; @@ -907,7 +907,7 @@ void __init 
debug_objects_early_init(void) int i; for (i = 0; i < ODEBUG_HASH_SIZE; i++) - spin_lock_init(&obj_hash[i].lock); + raw_spin_lock_init(&obj_hash[i].lock); for (i = 0; i < ODEBUG_POOL_SIZE; i++) hlist_add_head(&obj_static_pool[i].node, &obj_pool); diff --git a/lib/decompress.c b/lib/decompress.c index d2842f57167..a7606815541 100644 --- a/lib/decompress.c +++ b/lib/decompress.c @@ -9,6 +9,7 @@ #include <linux/decompress/bunzip2.h> #include <linux/decompress/unlzma.h> #include <linux/decompress/inflate.h> +#include <linux/decompress/unlzo.h> #include <linux/types.h> #include <linux/string.h> @@ -22,6 +23,9 @@ #ifndef CONFIG_DECOMPRESS_LZMA # define unlzma NULL #endif +#ifndef CONFIG_DECOMPRESS_LZO +# define unlzo NULL +#endif static const struct compress_format { unsigned char magic[2]; @@ -32,6 +36,7 @@ static const struct compress_format { { {037, 0236}, "gzip", gunzip }, { {0x42, 0x5a}, "bzip2", bunzip2 }, { {0x5d, 0x00}, "lzma", unlzma }, + { {0x89, 0x4c}, "lzo", unlzo }, { {0, 0}, NULL, NULL } }; diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c index 600f473a561..a4e971dee10 100644 --- a/lib/decompress_bunzip2.c +++ b/lib/decompress_bunzip2.c @@ -299,7 +299,7 @@ static int INIT get_next_block(struct bunzip_data *bd) again when using them (during symbol decoding).*/ base = hufGroup->base-1; limit = hufGroup->limit-1; - /* Calculate permute[]. Concurently, initialize + /* Calculate permute[]. Concurrently, initialize * temp[] and limit[]. */ pp = 0; for (i = minLen; i <= maxLen; i++) { @@ -637,6 +637,8 @@ static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, int len, /* Allocate bunzip_data. Most fields initialize to zero. */ bd = *bdp = malloc(i); + if (!bd) + return RETVAL_OUT_OF_MEMORY; memset(bd, 0, sizeof(struct bunzip_data)); /* Setup input buffer */ bd->inbuf = inbuf; @@ -664,6 +666,8 @@ static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, int len, bd->dbufSize = 100000*(i-BZh0); bd->dbuf = large_malloc(bd->dbufSize * sizeof(int)); + if (!bd->dbuf) + return RETVAL_OUT_OF_MEMORY; return RETVAL_OK; } @@ -686,7 +690,7 @@ STATIC int INIT bunzip2(unsigned char *buf, int len, if (!outbuf) { error("Could not allocate output bufer"); - return -1; + return RETVAL_OUT_OF_MEMORY; } if (buf) inbuf = buf; @@ -694,6 +698,7 @@ STATIC int INIT bunzip2(unsigned char *buf, int len, inbuf = malloc(BZIP2_IOBUF_SIZE); if (!inbuf) { error("Could not allocate input bufer"); + i = RETVAL_OUT_OF_MEMORY; goto exit_0; } i = start_bunzip(&bd, inbuf, len, fill); @@ -720,11 +725,14 @@ STATIC int INIT bunzip2(unsigned char *buf, int len, } else if (i == RETVAL_UNEXPECTED_OUTPUT_EOF) { error("Compressed file ends unexpectedly"); } + if (!bd) + goto exit_1; if (bd->dbuf) large_free(bd->dbuf); if (pos) *pos = bd->inbufPos; free(bd); +exit_1: if (!buf) free(inbuf); exit_0: diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c new file mode 100644 index 00000000000..db521f45626 --- /dev/null +++ b/lib/decompress_unlzo.c @@ -0,0 +1,209 @@ +/* + * LZO decompressor for the Linux kernel. Code borrowed from the lzo + * implementation by Markus Franz Xaver Johannes Oberhumer. + * + * Linux kernel adaptation: + * Copyright (C) 2009 + * Albin Tonnerre, Free Electrons <albin.tonnerre@free-electrons.com> + * + * Original code: + * Copyright (C) 1996-2005 Markus Franz Xaver Johannes Oberhumer + * All Rights Reserved. 
+ * + * lzop and the LZO library are free software; you can redistribute them + * and/or modify them under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. + * If not, write to the Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Markus F.X.J. Oberhumer + * <markus@oberhumer.com> + * http://www.oberhumer.com/opensource/lzop/ + */ + +#ifdef STATIC +#include "lzo/lzo1x_decompress.c" +#else +#include <linux/slab.h> +#include <linux/decompress/unlzo.h> +#endif + +#include <linux/types.h> +#include <linux/lzo.h> +#include <linux/decompress/mm.h> + +#include <linux/compiler.h> +#include <asm/unaligned.h> + +static const unsigned char lzop_magic[] = { + 0x89, 0x4c, 0x5a, 0x4f, 0x00, 0x0d, 0x0a, 0x1a, 0x0a }; + +#define LZO_BLOCK_SIZE (256*1024l) +#define HEADER_HAS_FILTER 0x00000800L + +STATIC inline int INIT parse_header(u8 *input, u8 *skip) +{ + int l; + u8 *parse = input; + u8 level = 0; + u16 version; + + /* read magic: 9 first bits */ + for (l = 0; l < 9; l++) { + if (*parse++ != lzop_magic[l]) + return 0; + } + /* get version (2bytes), skip library version (2), + * 'need to be extracted' version (2) and + * method (1) */ + version = get_unaligned_be16(parse); + parse += 7; + if (version >= 0x0940) + level = *parse++; + if (get_unaligned_be32(parse) & HEADER_HAS_FILTER) + parse += 8; /* flags + filter info */ + else + parse += 4; /* flags */ + + /* skip mode and mtime_low */ + parse += 8; + if (version >= 0x0940) + parse += 4; /* skip mtime_high */ + + l = *parse++; + /* don't care about the file name, and skip checksum */ + parse += l + 4; + + *skip = parse - input; + return 1; +} + +STATIC inline int INIT unlzo(u8 *input, int in_len, + int (*fill) (void *, unsigned int), + int (*flush) (void *, unsigned int), + u8 *output, int *posp, + void (*error_fn) (char *x)) +{ + u8 skip = 0, r = 0; + u32 src_len, dst_len; + size_t tmp; + u8 *in_buf, *in_buf_save, *out_buf; + int obytes_processed = 0; + + set_error_fn(error_fn); + + if (output) { + out_buf = output; + } else if (!flush) { + error("NULL output pointer and no flush function provided"); + goto exit; + } else { + out_buf = malloc(LZO_BLOCK_SIZE); + if (!out_buf) { + error("Could not allocate output buffer"); + goto exit; + } + } + + if (input && fill) { + error("Both input pointer and fill function provided, don't know what to do"); + goto exit_1; + } else if (input) { + in_buf = input; + } else if (!fill || !posp) { + error("NULL input pointer and missing position pointer or fill function"); + goto exit_1; + } else { + in_buf = malloc(lzo1x_worst_compress(LZO_BLOCK_SIZE)); + if (!in_buf) { + error("Could not allocate input buffer"); + goto exit_1; + } + } + in_buf_save = in_buf; + + if (posp) + *posp = 0; + + if (fill) + fill(in_buf, lzo1x_worst_compress(LZO_BLOCK_SIZE)); + + if (!parse_header(input, &skip)) { + error("invalid header"); + goto exit_2; + } + in_buf += skip; + + if (posp) + *posp = skip; + + for (;;) { + /* read uncompressed block size */ + dst_len = get_unaligned_be32(in_buf); + 
in_buf += 4; + + /* exit if last block */ + if (dst_len == 0) { + if (posp) + *posp += 4; + break; + } + + if (dst_len > LZO_BLOCK_SIZE) { + error("dest len longer than block size"); + goto exit_2; + } + + /* read compressed block size, and skip block checksum info */ + src_len = get_unaligned_be32(in_buf); + in_buf += 8; + + if (src_len <= 0 || src_len > dst_len) { + error("file corrupted"); + goto exit_2; + } + + /* decompress */ + tmp = dst_len; + r = lzo1x_decompress_safe((u8 *) in_buf, src_len, + out_buf, &tmp); + + if (r != LZO_E_OK || dst_len != tmp) { + error("Compressed data violation"); + goto exit_2; + } + + obytes_processed += dst_len; + if (flush) + flush(out_buf, dst_len); + if (output) + out_buf += dst_len; + if (posp) + *posp += src_len + 12; + if (fill) { + in_buf = in_buf_save; + fill(in_buf, lzo1x_worst_compress(LZO_BLOCK_SIZE)); + } else + in_buf += src_len; + } + +exit_2: + if (!input) + free(in_buf); +exit_1: + if (!output) + free(out_buf); +exit: + return obytes_processed; +} + +#define decompress unlzo diff --git a/lib/dma-debug.c b/lib/dma-debug.c index ce6b7eabf67..ba8b67039d1 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -259,7 +259,7 @@ static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket, * times. Without a hardware IOMMU this results in the * same device addresses being put into the dma-debug * hash multiple times too. This can result in false - * positives being reported. Therfore we implement a + * positives being reported. Therefore we implement a * best-fit algorithm here which returns the entry from * the hash which fits best to the reference value * instead of the first-fit. @@ -587,7 +587,7 @@ out_unlock: return count; } -const struct file_operations filter_fops = { +static const struct file_operations filter_fops = { .read = filter_read, .write = filter_write, }; @@ -670,12 +670,13 @@ static int device_dma_allocations(struct device *dev) return count; } -static int dma_debug_device_change(struct notifier_block *nb, - unsigned long action, void *data) +static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data) { struct device *dev = data; int count; + if (global_disable) + return 0; switch (action) { case BUS_NOTIFY_UNBOUND_DRIVER: @@ -697,6 +698,9 @@ void dma_debug_add_bus(struct bus_type *bus) { struct notifier_block *nb; + if (global_disable) + return; + nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL); if (nb == NULL) { pr_err("dma_debug_add_bus: out of memory\n"); @@ -909,6 +913,9 @@ static void check_sync(struct device *dev, ref->size); } + if (entry->direction == DMA_BIDIRECTIONAL) + goto out; + if (ref->direction != entry->direction) { err_printk(dev, entry, "DMA-API: device driver syncs " "DMA memory with different direction " @@ -919,9 +926,6 @@ static void check_sync(struct device *dev, dir2name[ref->direction]); } - if (entry->direction == DMA_BIDIRECTIONAL) - goto out; - if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) && !(ref->direction == DMA_TO_DEVICE)) err_printk(dev, entry, "DMA-API: device driver syncs " @@ -944,7 +948,6 @@ static void check_sync(struct device *dev, out: put_hash_bucket(bucket, &flags); - } void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index e22c148e4b7..f9350291598 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -21,6 +21,7 @@ #include <linux/list.h> #include <linux/sysctl.h> #include <linux/ctype.h> +#include <linux/string.h> #include 
<linux/uaccess.h> #include <linux/dynamic_debug.h> #include <linux/debugfs.h> @@ -209,8 +210,7 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords) char *end; /* Skip leading whitespace */ - while (*buf && isspace(*buf)) - buf++; + buf = skip_spaces(buf); if (!*buf) break; /* oh, it was trailing whitespace */ diff --git a/lib/genalloc.c b/lib/genalloc.c index eed2bdb865e..e67f97495dd 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c @@ -11,6 +11,7 @@ */ #include <linux/module.h> +#include <linux/bitmap.h> #include <linux/genalloc.h> @@ -114,7 +115,7 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) struct gen_pool_chunk *chunk; unsigned long addr, flags; int order = pool->min_alloc_order; - int nbits, bit, start_bit, end_bit; + int nbits, start_bit, end_bit; if (size == 0) return 0; @@ -129,29 +130,19 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) end_bit -= nbits + 1; spin_lock_irqsave(&chunk->lock, flags); - bit = -1; - while (bit + 1 < end_bit) { - bit = find_next_zero_bit(chunk->bits, end_bit, bit + 1); - if (bit >= end_bit) - break; - - start_bit = bit; - if (nbits > 1) { - bit = find_next_bit(chunk->bits, bit + nbits, - bit + 1); - if (bit - start_bit < nbits) - continue; - } - - addr = chunk->start_addr + - ((unsigned long)start_bit << order); - while (nbits--) - __set_bit(start_bit++, chunk->bits); + start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit, 0, + nbits, 0); + if (start_bit >= end_bit) { spin_unlock_irqrestore(&chunk->lock, flags); - read_unlock(&pool->lock); - return addr; + continue; } + + addr = chunk->start_addr + ((unsigned long)start_bit << order); + + bitmap_set(chunk->bits, start_bit, nbits); spin_unlock_irqrestore(&chunk->lock, flags); + read_unlock(&pool->lock); + return addr; } read_unlock(&pool->lock); return 0; diff --git a/lib/hweight.c b/lib/hweight.c index 389424ecb12..63ee4eb1228 100644 --- a/lib/hweight.c +++ b/lib/hweight.c @@ -11,11 +11,18 @@ unsigned int hweight32(unsigned int w) { +#ifdef ARCH_HAS_FAST_MULTIPLIER + w -= (w >> 1) & 0x55555555; + w = (w & 0x33333333) + ((w >> 2) & 0x33333333); + w = (w + (w >> 4)) & 0x0f0f0f0f; + return (w * 0x01010101) >> 24; +#else unsigned int res = w - ((w >> 1) & 0x55555555); res = (res & 0x33333333) + ((res >> 2) & 0x33333333); res = (res + (res >> 4)) & 0x0F0F0F0F; res = res + (res >> 8); return (res + (res >> 16)) & 0x000000FF; +#endif } EXPORT_SYMBOL(hweight32); diff --git a/lib/idr.c b/lib/idr.c index 80ca9aca038..2eb1dca0368 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -156,10 +156,12 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1; /* if already at the top layer, we need to grow */ - if (!(p = pa[l])) { + if (id >= 1 << (idp->layers * IDR_BITS)) { *starting_id = id; return IDR_NEED_TO_GROW; } + p = pa[l]; + BUG_ON(!p); /* If we need to go up one layer, continue the * loop; otherwise, restart from the top. 
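The gen_pool_alloc() rewrite above shows the intended use of the new bitmap helpers: one bitmap_find_next_zero_area() to locate a free run, one bitmap_set() to claim it. A minimal standalone sketch of the same pattern; map, NBITS and alloc_area() are hypothetical names:

#include <linux/bitmap.h>
#include <linux/types.h>

#define NBITS 128
static DECLARE_BITMAP(map, NBITS);

/*
 * Reserve eight consecutive zero bits, eight-bit aligned
 * (align_mask is the alignment minus one). On failure the helper
 * returns an index beyond the bitmap, hence the >= NBITS check.
 */
static int alloc_area(void)
{
	unsigned long start;

	start = bitmap_find_next_zero_area(map, NBITS, 0, 8, 7);
	if (start >= NBITS)
		return -1;
	bitmap_set(map, start, 8);
	return start;
}

The matching release is bitmap_clear(map, start, 8).
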
@@ -281,7 +283,7 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id) /** * idr_get_new_above - allocate new idr entry above or equal to a start id * @idp: idr handle - * @ptr: pointer you want associated with the ide + * @ptr: pointer you want associated with the id * @start_id: id to start search at * @id: pointer to the allocated handle * @@ -313,7 +315,7 @@ EXPORT_SYMBOL(idr_get_new_above); /** * idr_get_new - allocate new idr entry * @idp: idr handle - * @ptr: pointer you want associated with the ide + * @ptr: pointer you want associated with the id * @id: pointer to the allocated handle * * This is the allocate id function. It should be called with any @@ -502,7 +504,7 @@ void *idr_find(struct idr *idp, int id) int n; struct idr_layer *p; - p = rcu_dereference(idp->top); + p = rcu_dereference_raw(idp->top); if (!p) return NULL; n = (p->layer+1) * IDR_BITS; @@ -517,7 +519,7 @@ void *idr_find(struct idr *idp, int id) while (n > 0 && p) { n -= IDR_BITS; BUG_ON(n != p->layer*IDR_BITS); - p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]); + p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]); } return((void *)p); } @@ -550,7 +552,7 @@ int idr_for_each(struct idr *idp, struct idr_layer **paa = &pa[0]; n = idp->layers * IDR_BITS; - p = rcu_dereference(idp->top); + p = rcu_dereference_raw(idp->top); max = 1 << n; id = 0; @@ -558,7 +560,7 @@ int idr_for_each(struct idr *idp, while (n > 0 && p) { n -= IDR_BITS; *paa++ = p; - p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]); + p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]); } if (p) { diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c index 75dbda03f4f..c0251f4ad08 100644 --- a/lib/iommu-helper.c +++ b/lib/iommu-helper.c @@ -3,41 +3,7 @@ */ #include <linux/module.h> -#include <linux/bitops.h> - -static unsigned long find_next_zero_area(unsigned long *map, - unsigned long size, - unsigned long start, - unsigned int nr, - unsigned long align_mask) -{ - unsigned long index, end, i; -again: - index = find_next_zero_bit(map, size, start); - - /* Align allocation */ - index = (index + align_mask) & ~align_mask; - - end = index + nr; - if (end >= size) - return -1; - for (i = index; i < end; i++) { - if (test_bit(i, map)) { - start = i+1; - goto again; - } - } - return index; -} - -void iommu_area_reserve(unsigned long *map, unsigned long i, int len) -{ - unsigned long end = i + len; - while (i < end) { - __set_bit(i, map); - i++; - } -} +#include <linux/bitmap.h> int iommu_is_span_boundary(unsigned int index, unsigned int nr, unsigned long shift, @@ -55,31 +21,24 @@ unsigned long iommu_area_alloc(unsigned long *map, unsigned long size, unsigned long align_mask) { unsigned long index; + + /* We don't want the last of the limit */ + size -= 1; again: - index = find_next_zero_area(map, size, start, nr, align_mask); - if (index != -1) { + index = bitmap_find_next_zero_area(map, size, start, nr, align_mask); + if (index < size) { if (iommu_is_span_boundary(index, nr, shift, boundary_size)) { /* we could do more effectively */ start = index + 1; goto again; } - iommu_area_reserve(map, index, nr); + bitmap_set(map, index, nr); + return index; } - return index; + return -1; } EXPORT_SYMBOL(iommu_area_alloc); -void iommu_area_free(unsigned long *map, unsigned long start, unsigned int nr) -{ - unsigned long end = start + nr; - - while (start < end) { - __clear_bit(start, map); - start++; - } -} -EXPORT_SYMBOL(iommu_area_free); - unsigned long iommu_num_pages(unsigned long addr, unsigned long len, unsigned long 
io_page_size) { diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c index 39f1029e352..b135d04aa48 100644 --- a/lib/kernel_lock.c +++ b/lib/kernel_lock.c @@ -5,10 +5,13 @@ * relegated to obsolescence, but used by various less * important (or lazy) subsystems. */ -#include <linux/smp_lock.h> #include <linux/module.h> #include <linux/kallsyms.h> #include <linux/semaphore.h> +#include <linux/smp_lock.h> + +#define CREATE_TRACE_POINTS +#include <trace/events/bkl.h> /* * The 'big kernel lock' @@ -20,7 +23,7 @@ * * Don't use in new code. */ -static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag); +static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(kernel_flag); /* @@ -33,12 +36,12 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag); * If it successfully gets the lock, it should increment * the preemption count like any spinlock does. * - * (This works on UP too - _raw_spin_trylock will never + * (This works on UP too - do_raw_spin_trylock will never * return false in that case) */ int __lockfunc __reacquire_kernel_lock(void) { - while (!_raw_spin_trylock(&kernel_flag)) { + while (!do_raw_spin_trylock(&kernel_flag)) { if (need_resched()) return -EAGAIN; cpu_relax(); @@ -49,27 +52,27 @@ int __lockfunc __reacquire_kernel_lock(void) void __lockfunc __release_kernel_lock(void) { - _raw_spin_unlock(&kernel_flag); + do_raw_spin_unlock(&kernel_flag); preempt_enable_no_resched(); } /* * These are the BKL spinlocks - we try to be polite about preemption. * If SMP is not on (ie UP preemption), this all goes away because the - * _raw_spin_trylock() will always succeed. + * do_raw_spin_trylock() will always succeed. */ #ifdef CONFIG_PREEMPT static inline void __lock_kernel(void) { preempt_disable(); - if (unlikely(!_raw_spin_trylock(&kernel_flag))) { + if (unlikely(!do_raw_spin_trylock(&kernel_flag))) { /* * If preemption was disabled even before this * was called, there's nothing we can be polite * about - just spin. */ if (preempt_count() > 1) { - _raw_spin_lock(&kernel_flag); + do_raw_spin_lock(&kernel_flag); return; } @@ -79,10 +82,10 @@ static inline void __lock_kernel(void) */ do { preempt_enable(); - while (spin_is_locked(&kernel_flag)) + while (raw_spin_is_locked(&kernel_flag)) cpu_relax(); preempt_disable(); - } while (!_raw_spin_trylock(&kernel_flag)); + } while (!do_raw_spin_trylock(&kernel_flag)); } } @@ -93,7 +96,7 @@ static inline void __lock_kernel(void) */ static inline void __lock_kernel(void) { - _raw_spin_lock(&kernel_flag); + do_raw_spin_lock(&kernel_flag); } #endif @@ -103,7 +106,7 @@ static inline void __unlock_kernel(void) * the BKL is not covered by lockdep, so we open-code the * unlocking sequence (and thus avoid the dep-chain ops): */ - _raw_spin_unlock(&kernel_flag); + do_raw_spin_unlock(&kernel_flag); preempt_enable(); } @@ -113,21 +116,28 @@ static inline void __unlock_kernel(void) * This cannot happen asynchronously, so we only need to * worry about other CPU's. 
*/ -void __lockfunc lock_kernel(void) +void __lockfunc _lock_kernel(const char *func, const char *file, int line) { - int depth = current->lock_depth+1; - if (likely(!depth)) + int depth = current->lock_depth + 1; + + trace_lock_kernel(func, file, line); + + if (likely(!depth)) { + might_sleep(); __lock_kernel(); + } current->lock_depth = depth; } -void __lockfunc unlock_kernel(void) +void __lockfunc _unlock_kernel(const char *func, const char *file, int line) { BUG_ON(current->lock_depth < 0); if (likely(--current->lock_depth < 0)) __unlock_kernel(); + + trace_unlock_kernel(func, file, line); } -EXPORT_SYMBOL(lock_kernel); -EXPORT_SYMBOL(unlock_kernel); +EXPORT_SYMBOL(_lock_kernel); +EXPORT_SYMBOL(_unlock_kernel); diff --git a/lib/list_sort.c b/lib/list_sort.c new file mode 100644 index 00000000000..4b5cb794c38 --- /dev/null +++ b/lib/list_sort.c @@ -0,0 +1,217 @@ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/list_sort.h> +#include <linux/slab.h> +#include <linux/list.h> + +#define MAX_LIST_LENGTH_BITS 20 + +/* + * Returns a list organized in an intermediate format suited + * to chaining of merge() calls: null-terminated, no reserved or + * sentinel head node, "prev" links not maintained. + */ +static struct list_head *merge(void *priv, + int (*cmp)(void *priv, struct list_head *a, + struct list_head *b), + struct list_head *a, struct list_head *b) +{ + struct list_head head, *tail = &head; + + while (a && b) { + /* if equal, take 'a' -- important for sort stability */ + if ((*cmp)(priv, a, b) <= 0) { + tail->next = a; + a = a->next; + } else { + tail->next = b; + b = b->next; + } + tail = tail->next; + } + tail->next = a?:b; + return head.next; +} + +/* + * Combine final list merge with restoration of standard doubly-linked + * list structure. This approach duplicates code from merge(), but + * runs faster than the tidier alternatives of either a separate final + * prev-link restoration pass, or maintaining the prev links + * throughout. + */ +static void merge_and_restore_back_links(void *priv, + int (*cmp)(void *priv, struct list_head *a, + struct list_head *b), + struct list_head *head, + struct list_head *a, struct list_head *b) +{ + struct list_head *tail = head; + + while (a && b) { + /* if equal, take 'a' -- important for sort stability */ + if ((*cmp)(priv, a, b) <= 0) { + tail->next = a; + a->prev = tail; + a = a->next; + } else { + tail->next = b; + b->prev = tail; + b = b->next; + } + tail = tail->next; + } + tail->next = a ? : b; + + do { + /* + * In worst cases this loop may run many iterations. + * Continue callbacks to the client even though no + * element comparison is needed, so the client's cmp() + * routine can invoke cond_resched() periodically. + */ + (*cmp)(priv, tail, tail); + + tail->next->prev = tail; + tail = tail->next; + } while (tail->next); + + tail->next = head; + head->prev = tail; +} + +/** + * list_sort - sort a list + * @priv: private data, opaque to list_sort(), passed to @cmp + * @head: the list to sort + * @cmp: the elements comparison function + * + * This function implements "merge sort", which has O(nlog(n)) + * complexity. + * + * The comparison function @cmp must return a negative value if @a + * should sort before @b, and a positive value if @a should sort after + * @b. If @a and @b are equivalent, and their original relative + * ordering is to be preserved, @cmp must return 0. 
+ */ +void list_sort(void *priv, struct list_head *head, + int (*cmp)(void *priv, struct list_head *a, + struct list_head *b)) +{ + struct list_head *part[MAX_LIST_LENGTH_BITS+1]; /* sorted partial lists + -- last slot is a sentinel */ + int lev; /* index into part[] */ + int max_lev = 0; + struct list_head *list; + + if (list_empty(head)) + return; + + memset(part, 0, sizeof(part)); + + head->prev->next = NULL; + list = head->next; + + while (list) { + struct list_head *cur = list; + list = list->next; + cur->next = NULL; + + for (lev = 0; part[lev]; lev++) { + cur = merge(priv, cmp, part[lev], cur); + part[lev] = NULL; + } + if (lev > max_lev) { + if (unlikely(lev >= ARRAY_SIZE(part)-1)) { + printk_once(KERN_DEBUG "list passed to" + " list_sort() too long for" + " efficiency\n"); + lev--; + } + max_lev = lev; + } + part[lev] = cur; + } + + for (lev = 0; lev < max_lev; lev++) + if (part[lev]) + list = merge(priv, cmp, part[lev], list); + + merge_and_restore_back_links(priv, cmp, head, part[max_lev], list); +} +EXPORT_SYMBOL(list_sort); + +#ifdef DEBUG_LIST_SORT +struct debug_el { + struct list_head l_h; + int value; + unsigned serial; +}; + +static int cmp(void *priv, struct list_head *a, struct list_head *b) +{ + return container_of(a, struct debug_el, l_h)->value + - container_of(b, struct debug_el, l_h)->value; +} + +/* + * The pattern of set bits in the list length determines which cases + * are hit in list_sort(). + */ +#define LIST_SORT_TEST_LENGTH (512+128+2) /* not including head */ + +static int __init list_sort_test(void) +{ + int i, r = 1, count; + struct list_head *head = kmalloc(sizeof(*head), GFP_KERNEL); + struct list_head *cur; + + printk(KERN_WARNING "testing list_sort()\n"); + + cur = head; + for (i = 0; i < LIST_SORT_TEST_LENGTH; i++) { + struct debug_el *el = kmalloc(sizeof(*el), GFP_KERNEL); + BUG_ON(!el); + /* force some equivalencies */ + el->value = (r = (r * 725861) % 6599) % (LIST_SORT_TEST_LENGTH/3); + el->serial = i; + + el->l_h.prev = cur; + cur->next = &el->l_h; + cur = cur->next; + } + head->prev = cur; + + list_sort(NULL, head, cmp); + + count = 1; + for (cur = head->next; cur->next != head; cur = cur->next) { + struct debug_el *el = container_of(cur, struct debug_el, l_h); + int cmp_result = cmp(NULL, cur, cur->next); + if (cur->next->prev != cur) { + printk(KERN_EMERG "list_sort() returned " + "a corrupted list!\n"); + return 1; + } else if (cmp_result > 0) { + printk(KERN_EMERG "list_sort() failed to sort!\n"); + return 1; + } else if (cmp_result == 0 && + el->serial >= container_of(cur->next, + struct debug_el, l_h)->serial) { + printk(KERN_EMERG "list_sort() failed to preserve order" + " of equivalent elements!\n"); + return 1; + } + kfree(cur->prev); + count++; + } + kfree(cur); + if (count != LIST_SORT_TEST_LENGTH) { + printk(KERN_EMERG "list_sort() returned list of" + "different length!\n"); + return 1; + } + return 0; +} +module_init(list_sort_test); +#endif diff --git a/lib/lmb.c b/lib/lmb.c index 0343c05609f..b1fc5260652 100644 --- a/lib/lmb.c +++ b/lib/lmb.c @@ -205,9 +205,8 @@ long lmb_add(u64 base, u64 size) } -long lmb_remove(u64 base, u64 size) +static long __lmb_remove(struct lmb_region *rgn, u64 base, u64 size) { - struct lmb_region *rgn = &(lmb.memory); u64 rgnbegin, rgnend; u64 end = base + size; int i; @@ -254,6 +253,16 @@ long lmb_remove(u64 base, u64 size) return lmb_add_region(rgn, end, rgnend - end); } +long lmb_remove(u64 base, u64 size) +{ + return __lmb_remove(&lmb.memory, base, size); +} + +long __init lmb_free(u64 base, u64 
size) +{ + return __lmb_remove(&lmb.reserved, base, size); +} + long __init lmb_reserve(u64 base, u64 size) { struct lmb_region *_rgn = &lmb.reserved; @@ -263,7 +272,7 @@ long __init lmb_reserve(u64 base, u64 size) return lmb_add_region(_rgn, base, size); } -long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size) +long lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size) { unsigned long i; @@ -493,6 +502,11 @@ int __init lmb_is_reserved(u64 addr) return 0; } +int lmb_is_region_reserved(u64 base, u64 size) +{ + return lmb_overlaps_region(&lmb.reserved, base, size); +} + /* * Given a <base, len>, find which memory regions belong to this range. * Adjust the request and return a contiguous chunk. diff --git a/lib/lru_cache.c b/lib/lru_cache.c new file mode 100644 index 00000000000..270de9d31b8 --- /dev/null +++ b/lib/lru_cache.c @@ -0,0 +1,560 @@ +/* + lru_cache.c + + This file is part of DRBD by Philipp Reisner and Lars Ellenberg. + + Copyright (C) 2003-2008, LINBIT Information Technologies GmbH. + Copyright (C) 2003-2008, Philipp Reisner <philipp.reisner@linbit.com>. + Copyright (C) 2003-2008, Lars Ellenberg <lars.ellenberg@linbit.com>. + + drbd is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + drbd is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with drbd; see the file COPYING. If not, write to + the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + + */ + +#include <linux/module.h> +#include <linux/bitops.h> +#include <linux/slab.h> +#include <linux/string.h> /* for memset */ +#include <linux/seq_file.h> /* for seq_printf */ +#include <linux/lru_cache.h> + +MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, " + "Lars Ellenberg <lars@linbit.com>"); +MODULE_DESCRIPTION("lru_cache - Track sets of hot objects"); +MODULE_LICENSE("GPL"); + +/* this is developers aid only. + * it catches concurrent access (lack of locking on the users part) */ +#define PARANOIA_ENTRY() do { \ + BUG_ON(!lc); \ + BUG_ON(!lc->nr_elements); \ + BUG_ON(test_and_set_bit(__LC_PARANOIA, &lc->flags)); \ +} while (0) + +#define RETURN(x...) do { \ + clear_bit(__LC_PARANOIA, &lc->flags); \ + smp_mb__after_clear_bit(); return x ; } while (0) + +/* BUG() if e is not one of the elements tracked by lc */ +#define PARANOIA_LC_ELEMENT(lc, e) do { \ + struct lru_cache *lc_ = (lc); \ + struct lc_element *e_ = (e); \ + unsigned i = e_->lc_index; \ + BUG_ON(i >= lc_->nr_elements); \ + BUG_ON(lc_->lc_element[i] != e_); } while (0) + +/** + * lc_create - prepares to track objects in an active set + * @name: descriptive name only used in lc_seq_printf_stats and lc_seq_dump_details + * @e_count: number of elements allowed to be active simultaneously + * @e_size: size of the tracked objects + * @e_off: offset to the &struct lc_element member in a tracked object + * + * Returns a pointer to a newly initialized struct lru_cache on success, + * or NULL on (allocation) failure. 
+ */ +struct lru_cache *lc_create(const char *name, struct kmem_cache *cache, + unsigned e_count, size_t e_size, size_t e_off) +{ + struct hlist_head *slot = NULL; + struct lc_element **element = NULL; + struct lru_cache *lc; + struct lc_element *e; + unsigned cache_obj_size = kmem_cache_size(cache); + unsigned i; + + WARN_ON(cache_obj_size < e_size); + if (cache_obj_size < e_size) + return NULL; + + /* e_count too big; would probably fail the allocation below anyways. + * for typical use cases, e_count should be few thousand at most. */ + if (e_count > LC_MAX_ACTIVE) + return NULL; + + slot = kzalloc(e_count * sizeof(struct hlist_head*), GFP_KERNEL); + if (!slot) + goto out_fail; + element = kzalloc(e_count * sizeof(struct lc_element *), GFP_KERNEL); + if (!element) + goto out_fail; + + lc = kzalloc(sizeof(*lc), GFP_KERNEL); + if (!lc) + goto out_fail; + + INIT_LIST_HEAD(&lc->in_use); + INIT_LIST_HEAD(&lc->lru); + INIT_LIST_HEAD(&lc->free); + + lc->name = name; + lc->element_size = e_size; + lc->element_off = e_off; + lc->nr_elements = e_count; + lc->new_number = LC_FREE; + lc->lc_cache = cache; + lc->lc_element = element; + lc->lc_slot = slot; + + /* preallocate all objects */ + for (i = 0; i < e_count; i++) { + void *p = kmem_cache_alloc(cache, GFP_KERNEL); + if (!p) + break; + memset(p, 0, lc->element_size); + e = p + e_off; + e->lc_index = i; + e->lc_number = LC_FREE; + list_add(&e->list, &lc->free); + element[i] = e; + } + if (i == e_count) + return lc; + + /* else: could not allocate all elements, give up */ + for (i--; i; i--) { + void *p = element[i]; + kmem_cache_free(cache, p - e_off); + } + kfree(lc); +out_fail: + kfree(element); + kfree(slot); + return NULL; +} + +void lc_free_by_index(struct lru_cache *lc, unsigned i) +{ + void *p = lc->lc_element[i]; + WARN_ON(!p); + if (p) { + p -= lc->element_off; + kmem_cache_free(lc->lc_cache, p); + } +} + +/** + * lc_destroy - frees memory allocated by lc_create() + * @lc: the lru cache to destroy + */ +void lc_destroy(struct lru_cache *lc) +{ + unsigned i; + if (!lc) + return; + for (i = 0; i < lc->nr_elements; i++) + lc_free_by_index(lc, i); + kfree(lc->lc_element); + kfree(lc->lc_slot); + kfree(lc); +} + +/** + * lc_reset - does a full reset for @lc and the hash table slots. 
+ * @lc: the lru cache to operate on
+ *
+ * It is roughly the equivalent of re-allocating a fresh lru_cache object,
+ * basically a shortcut for lc_destroy(lc); lc = lc_create(...);
+ */
+void lc_reset(struct lru_cache *lc)
+{
+	unsigned i;
+
+	INIT_LIST_HEAD(&lc->in_use);
+	INIT_LIST_HEAD(&lc->lru);
+	INIT_LIST_HEAD(&lc->free);
+	lc->used = 0;
+	lc->hits = 0;
+	lc->misses = 0;
+	lc->starving = 0;
+	lc->dirty = 0;
+	lc->changed = 0;
+	lc->flags = 0;
+	lc->changing_element = NULL;
+	lc->new_number = LC_FREE;
+	memset(lc->lc_slot, 0, sizeof(struct hlist_head) * lc->nr_elements);
+
+	for (i = 0; i < lc->nr_elements; i++) {
+		struct lc_element *e = lc->lc_element[i];
+		void *p = e;
+		p -= lc->element_off;
+		memset(p, 0, lc->element_size);
+		/* re-init it */
+		e->lc_index = i;
+		e->lc_number = LC_FREE;
+		list_add(&e->list, &lc->free);
+	}
+}
+
+/**
+ * lc_seq_printf_stats - print stats about @lc into @seq
+ * @seq: the seq_file to print into
+ * @lc: the lru cache to print statistics of
+ */
+size_t lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc)
+{
+	/* NOTE:
+	 * total calls to lc_get are
+	 * (starving + hits + misses)
+	 * misses include the "dirty" count (an update from another thread
+	 * was in progress) and "changed", when this in fact led to a
+	 * successful update of the cache.
+	 */
+	return seq_printf(seq, "\t%s: used:%u/%u "
+		"hits:%lu misses:%lu starving:%lu dirty:%lu changed:%lu\n",
+		lc->name, lc->used, lc->nr_elements,
+		lc->hits, lc->misses, lc->starving, lc->dirty, lc->changed);
+}
+
+static struct hlist_head *lc_hash_slot(struct lru_cache *lc, unsigned int enr)
+{
+	return lc->lc_slot + (enr % lc->nr_elements);
+}
+
+
+/**
+ * lc_find - find element by label, if present in the hash table
+ * @lc: The lru_cache object
+ * @enr: element number
+ *
+ * Returns the pointer to an element, if the element with the requested
+ * "label" or element number is present in the hash table,
+ * or NULL if not found. Does not change the refcnt.
+ */
+struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr)
+{
+	struct hlist_node *n;
+	struct lc_element *e;
+
+	BUG_ON(!lc);
+	BUG_ON(!lc->nr_elements);
+	hlist_for_each_entry(e, n, lc_hash_slot(lc, enr), colision) {
+		if (e->lc_number == enr)
+			return e;
+	}
+	return NULL;
+}
+
+/* returned element will be "recycled" immediately */
+static struct lc_element *lc_evict(struct lru_cache *lc)
+{
+	struct list_head *n;
+	struct lc_element *e;
+
+	if (list_empty(&lc->lru))
+		return NULL;
+
+	n = lc->lru.prev;
+	e = list_entry(n, struct lc_element, list);
+
+	PARANOIA_LC_ELEMENT(lc, e);
+
+	list_del(&e->list);
+	hlist_del(&e->colision);
+	return e;
+}
+
+/**
+ * lc_del - removes an element from the cache
+ * @lc: The lru_cache object
+ * @e: The element to remove
+ *
+ * @e must be unused (refcnt == 0). Moves @e from "lru" to "free" list,
+ * sets @e->enr to %LC_FREE.
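+ *
+ * Sketch of a typical call site (hypothetical caller; assumes whatever
+ * lock serializes access to @lc is held):
+ *
+ *	e = lc_find(lc, enr);
+ *	if (e && e->refcnt == 0)
+ *		lc_del(lc, e);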
+ */ +void lc_del(struct lru_cache *lc, struct lc_element *e) +{ + PARANOIA_ENTRY(); + PARANOIA_LC_ELEMENT(lc, e); + BUG_ON(e->refcnt); + + e->lc_number = LC_FREE; + hlist_del_init(&e->colision); + list_move(&e->list, &lc->free); + RETURN(); +} + +static struct lc_element *lc_get_unused_element(struct lru_cache *lc) +{ + struct list_head *n; + + if (list_empty(&lc->free)) + return lc_evict(lc); + + n = lc->free.next; + list_del(n); + return list_entry(n, struct lc_element, list); +} + +static int lc_unused_element_available(struct lru_cache *lc) +{ + if (!list_empty(&lc->free)) + return 1; /* something on the free list */ + if (!list_empty(&lc->lru)) + return 1; /* something to evict */ + + return 0; +} + + +/** + * lc_get - get element by label, maybe change the active set + * @lc: the lru cache to operate on + * @enr: the label to look up + * + * Finds an element in the cache, increases its usage count, + * "touches" and returns it. + * + * In case the requested number is not present, it needs to be added to the + * cache. Therefore it is possible that an other element becomes evicted from + * the cache. In either case, the user is notified so he is able to e.g. keep + * a persistent log of the cache changes, and therefore the objects in use. + * + * Return values: + * NULL + * The cache was marked %LC_STARVING, + * or the requested label was not in the active set + * and a changing transaction is still pending (@lc was marked %LC_DIRTY). + * Or no unused or free element could be recycled (@lc will be marked as + * %LC_STARVING, blocking further lc_get() operations). + * + * pointer to the element with the REQUESTED element number. + * In this case, it can be used right away + * + * pointer to an UNUSED element with some different element number, + * where that different number may also be %LC_FREE. + * + * In this case, the cache is marked %LC_DIRTY (blocking further changes), + * and the returned element pointer is removed from the lru list and + * hash collision chains. The user now should do whatever housekeeping + * is necessary. + * Then he must call lc_changed(lc,element_pointer), to finish + * the change. + * + * NOTE: The user needs to check the lc_number on EACH use, so he recognizes + * any cache set change. + */ +struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr) +{ + struct lc_element *e; + + PARANOIA_ENTRY(); + if (lc->flags & LC_STARVING) { + ++lc->starving; + RETURN(NULL); + } + + e = lc_find(lc, enr); + if (e) { + ++lc->hits; + if (e->refcnt++ == 0) + lc->used++; + list_move(&e->list, &lc->in_use); /* Not evictable... */ + RETURN(e); + } + + ++lc->misses; + + /* In case there is nothing available and we can not kick out + * the LRU element, we have to wait ... + */ + if (!lc_unused_element_available(lc)) { + __set_bit(__LC_STARVING, &lc->flags); + RETURN(NULL); + } + + /* it was not present in the active set. + * we are going to recycle an unused (or even "free") element. + * user may need to commit a transaction to record that change. + * we serialize on flags & TF_DIRTY */ + if (test_and_set_bit(__LC_DIRTY, &lc->flags)) { + ++lc->dirty; + RETURN(NULL); + } + + e = lc_get_unused_element(lc); + BUG_ON(!e); + + clear_bit(__LC_STARVING, &lc->flags); + BUG_ON(++e->refcnt != 1); + lc->used++; + + lc->changing_element = e; + lc->new_number = enr; + + RETURN(e); +} + +/* similar to lc_get, + * but only gets a new reference on an existing element. + * you either get the requested element, or NULL. + * will be consolidated into one function. 
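+ *
+ * For reference, the usual lc_get() pattern looks like this (a sketch;
+ * locking and the actual transaction are the caller's responsibility):
+ *
+ *	e = lc_get(lc, enr);
+ *	if (e && e->lc_number != enr) {
+ *		... record the label change persistently ...
+ *		lc_changed(lc, e);
+ *	}
+ *	... use the object, eventually lc_put(lc, e) ...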
+ */ +struct lc_element *lc_try_get(struct lru_cache *lc, unsigned int enr) +{ + struct lc_element *e; + + PARANOIA_ENTRY(); + if (lc->flags & LC_STARVING) { + ++lc->starving; + RETURN(NULL); + } + + e = lc_find(lc, enr); + if (e) { + ++lc->hits; + if (e->refcnt++ == 0) + lc->used++; + list_move(&e->list, &lc->in_use); /* Not evictable... */ + } + RETURN(e); +} + +/** + * lc_changed - tell @lc that the change has been recorded + * @lc: the lru cache to operate on + * @e: the element pending label change + */ +void lc_changed(struct lru_cache *lc, struct lc_element *e) +{ + PARANOIA_ENTRY(); + BUG_ON(e != lc->changing_element); + PARANOIA_LC_ELEMENT(lc, e); + ++lc->changed; + e->lc_number = lc->new_number; + list_add(&e->list, &lc->in_use); + hlist_add_head(&e->colision, lc_hash_slot(lc, lc->new_number)); + lc->changing_element = NULL; + lc->new_number = LC_FREE; + clear_bit(__LC_DIRTY, &lc->flags); + smp_mb__after_clear_bit(); + RETURN(); +} + + +/** + * lc_put - give up refcnt of @e + * @lc: the lru cache to operate on + * @e: the element to put + * + * If refcnt reaches zero, the element is moved to the lru list, + * and a %LC_STARVING (if set) is cleared. + * Returns the new (post-decrement) refcnt. + */ +unsigned int lc_put(struct lru_cache *lc, struct lc_element *e) +{ + PARANOIA_ENTRY(); + PARANOIA_LC_ELEMENT(lc, e); + BUG_ON(e->refcnt == 0); + BUG_ON(e == lc->changing_element); + if (--e->refcnt == 0) { + /* move it to the front of LRU. */ + list_move(&e->list, &lc->lru); + lc->used--; + clear_bit(__LC_STARVING, &lc->flags); + smp_mb__after_clear_bit(); + } + RETURN(e->refcnt); +} + +/** + * lc_element_by_index + * @lc: the lru cache to operate on + * @i: the index of the element to return + */ +struct lc_element *lc_element_by_index(struct lru_cache *lc, unsigned i) +{ + BUG_ON(i >= lc->nr_elements); + BUG_ON(lc->lc_element[i] == NULL); + BUG_ON(lc->lc_element[i]->lc_index != i); + return lc->lc_element[i]; +} + +/** + * lc_index_of + * @lc: the lru cache to operate on + * @e: the element to query for its index position in lc->element + */ +unsigned int lc_index_of(struct lru_cache *lc, struct lc_element *e) +{ + PARANOIA_LC_ELEMENT(lc, e); + return e->lc_index; +} + +/** + * lc_set - associate index with label + * @lc: the lru cache to operate on + * @enr: the label to set + * @index: the element index to associate label with. + * + * Used to initialize the active set to some previously recorded state. + */ +void lc_set(struct lru_cache *lc, unsigned int enr, int index) +{ + struct lc_element *e; + + if (index < 0 || index >= lc->nr_elements) + return; + + e = lc_element_by_index(lc, index); + e->lc_number = enr; + + hlist_del_init(&e->colision); + hlist_add_head(&e->colision, lc_hash_slot(lc, enr)); + list_move(&e->list, e->refcnt ? &lc->in_use : &lc->lru); +} + +/** + * lc_dump - Dump a complete LRU cache to seq in textual form. + * @lc: the lru cache to operate on + * @seq: the &struct seq_file pointer to seq_printf into + * @utext: user supplied "heading" or other info + * @detail: function pointer the user may provide to dump further details + * of the object the lc_element is embedded in. 
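+ *
+ * Output sketch (one line per slot; the trailing part of each used line
+ * comes from the @detail callback):
+ *
+ *	nn: lc_number refcnt <utext>
+ *	 0: FREE
+ *	 1:   17    2 <detail output>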
+ */ +void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char *utext, + void (*detail) (struct seq_file *, struct lc_element *)) +{ + unsigned int nr_elements = lc->nr_elements; + struct lc_element *e; + int i; + + seq_printf(seq, "\tnn: lc_number refcnt %s\n ", utext); + for (i = 0; i < nr_elements; i++) { + e = lc_element_by_index(lc, i); + if (e->lc_number == LC_FREE) { + seq_printf(seq, "\t%2d: FREE\n", i); + } else { + seq_printf(seq, "\t%2d: %4u %4u ", i, + e->lc_number, e->refcnt); + detail(seq, e); + } + } +} + +EXPORT_SYMBOL(lc_create); +EXPORT_SYMBOL(lc_reset); +EXPORT_SYMBOL(lc_destroy); +EXPORT_SYMBOL(lc_set); +EXPORT_SYMBOL(lc_del); +EXPORT_SYMBOL(lc_try_get); +EXPORT_SYMBOL(lc_find); +EXPORT_SYMBOL(lc_get); +EXPORT_SYMBOL(lc_put); +EXPORT_SYMBOL(lc_changed); +EXPORT_SYMBOL(lc_element_by_index); +EXPORT_SYMBOL(lc_index_of); +EXPORT_SYMBOL(lc_seq_printf_stats); +EXPORT_SYMBOL(lc_seq_dump_details); diff --git a/lib/lzo/lzo1x_decompress.c b/lib/lzo/lzo1x_decompress.c index 5dc6b29c157..f2fd0985022 100644 --- a/lib/lzo/lzo1x_decompress.c +++ b/lib/lzo/lzo1x_decompress.c @@ -11,11 +11,13 @@ * Richard Purdie <rpurdie@openedhand.com> */ +#ifndef STATIC #include <linux/module.h> #include <linux/kernel.h> -#include <linux/lzo.h> -#include <asm/byteorder.h> +#endif + #include <asm/unaligned.h> +#include <linux/lzo.h> #include "lzodefs.h" #define HAVE_IP(x, ip_end, ip) ((size_t)(ip_end - ip) < (x)) @@ -244,9 +246,10 @@ lookbehind_overrun: *out_len = op - out; return LZO_E_LOOKBEHIND_OVERRUN; } - +#ifndef STATIC EXPORT_SYMBOL_GPL(lzo1x_decompress_safe); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("LZO1X Decompressor"); +#endif diff --git a/lib/parser.c b/lib/parser.c index b00d02059a5..fb34977246b 100644 --- a/lib/parser.c +++ b/lib/parser.c @@ -56,13 +56,16 @@ static int match_one(char *s, const char *p, substring_t args[]) args[argc].from = s; switch (*p++) { - case 's': - if (strlen(s) == 0) + case 's': { + size_t str_len = strlen(s); + + if (str_len == 0) return 0; - else if (len == -1 || len > strlen(s)) - len = strlen(s); + if (len == -1 || len > str_len) + len = str_len; args[argc].to = s + len; break; + } case 'd': simple_strtol(s, &args[argc].to, 0); goto num; diff --git a/lib/plist.c b/lib/plist.c index d6c64a824e1..1471988d919 100644 --- a/lib/plist.c +++ b/lib/plist.c @@ -54,9 +54,11 @@ static void plist_check_list(struct list_head *top) static void plist_check_head(struct plist_head *head) { - WARN_ON(!head->lock); - if (head->lock) - WARN_ON_SMP(!spin_is_locked(head->lock)); + WARN_ON(!head->rawlock && !head->spinlock); + if (head->rawlock) + WARN_ON_SMP(!raw_spin_is_locked(head->rawlock)); + if (head->spinlock) + WARN_ON_SMP(!spin_is_locked(head->spinlock)); plist_check_list(&head->prio_list); plist_check_list(&head->node_list); } diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 23abbd93cae..6b9670d6bbf 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -200,6 +200,9 @@ radix_tree_node_free(struct radix_tree_node *node) * ensure that the addition of a single element in the tree cannot fail. On * success, return zero, with preemption disabled. On error, return -ENOMEM * with preemption not disabled. + * + * To make use of this facility, the radix tree must be initialised without + * __GFP_WAIT being passed to INIT_RADIX_TREE(). 
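+ *
+ * The intended calling sequence is, in sketch form (error handling
+ * elided; "tree_lock", "tree", "index" and "item" are this example's
+ * assumptions, not part of the API):
+ *
+ *	if (radix_tree_preload(GFP_KERNEL))
+ *		return -ENOMEM;
+ *	spin_lock(&tree_lock);
+ *	radix_tree_insert(&tree, index, item);
+ *	spin_unlock(&tree_lock);
+ *	radix_tree_preload_end();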
*/ int radix_tree_preload(gfp_t gfp_mask) { @@ -361,7 +364,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root, unsigned int height, shift; struct radix_tree_node *node, **slot; - node = rcu_dereference(root->rnode); + node = rcu_dereference_raw(root->rnode); if (node == NULL) return NULL; @@ -381,7 +384,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root, do { slot = (struct radix_tree_node **) (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK)); - node = rcu_dereference(*slot); + node = rcu_dereference_raw(*slot); if (node == NULL) return NULL; @@ -543,7 +546,6 @@ out: } EXPORT_SYMBOL(radix_tree_tag_clear); -#ifndef __KERNEL__ /* Only the test harness uses this at present */ /** * radix_tree_tag_get - get a tag on a radix tree node * @root: radix tree root @@ -566,7 +568,7 @@ int radix_tree_tag_get(struct radix_tree_root *root, if (!root_tag_get(root, tag)) return 0; - node = rcu_dereference(root->rnode); + node = rcu_dereference_raw(root->rnode); if (node == NULL) return 0; @@ -600,13 +602,12 @@ int radix_tree_tag_get(struct radix_tree_root *root, BUG_ON(ret && saw_unset_tag); return !!ret; } - node = rcu_dereference(node->slots[offset]); + node = rcu_dereference_raw(node->slots[offset]); shift -= RADIX_TREE_MAP_SHIFT; height--; } } EXPORT_SYMBOL(radix_tree_tag_get); -#endif /** * radix_tree_next_hole - find the next hole (not-present entry) @@ -710,7 +711,7 @@ __lookup(struct radix_tree_node *slot, void ***results, unsigned long index, } shift -= RADIX_TREE_MAP_SHIFT; - slot = rcu_dereference(slot->slots[i]); + slot = rcu_dereference_raw(slot->slots[i]); if (slot == NULL) goto out; } @@ -757,7 +758,7 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results, unsigned long cur_index = first_index; unsigned int ret; - node = rcu_dereference(root->rnode); + node = rcu_dereference_raw(root->rnode); if (!node) return 0; @@ -786,7 +787,7 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results, slot = *(((void ***)results)[ret + i]); if (!slot) continue; - results[ret + nr_found] = rcu_dereference(slot); + results[ret + nr_found] = rcu_dereference_raw(slot); nr_found++; } ret += nr_found; @@ -825,7 +826,7 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results, unsigned long cur_index = first_index; unsigned int ret; - node = rcu_dereference(root->rnode); + node = rcu_dereference_raw(root->rnode); if (!node) return 0; @@ -914,7 +915,7 @@ __lookup_tag(struct radix_tree_node *slot, void ***results, unsigned long index, } } shift -= RADIX_TREE_MAP_SHIFT; - slot = rcu_dereference(slot->slots[i]); + slot = rcu_dereference_raw(slot->slots[i]); if (slot == NULL) break; } @@ -950,7 +951,7 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, if (!root_tag_get(root, tag)) return 0; - node = rcu_dereference(root->rnode); + node = rcu_dereference_raw(root->rnode); if (!node) return 0; @@ -979,7 +980,7 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, slot = *(((void ***)results)[ret + i]); if (!slot) continue; - results[ret + nr_found] = rcu_dereference(slot); + results[ret + nr_found] = rcu_dereference_raw(slot); nr_found++; } ret += nr_found; @@ -1019,7 +1020,7 @@ radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, if (!root_tag_get(root, tag)) return 0; - node = rcu_dereference(root->rnode); + node = rcu_dereference_raw(root->rnode); if (!node) return 0; diff --git a/lib/ratelimit.c b/lib/ratelimit.c index 26187edcc7e..09f5ce1810d 
100644 --- a/lib/ratelimit.c +++ b/lib/ratelimit.c @@ -7,15 +7,12 @@ * parameter. Now every user can use their own standalone ratelimit_state. * * This file is released under the GPLv2. - * */ -#include <linux/kernel.h> +#include <linux/ratelimit.h> #include <linux/jiffies.h> #include <linux/module.h> -static DEFINE_SPINLOCK(ratelimit_lock); - /* * __ratelimit - rate limiting * @rs: ratelimit_state data @@ -23,35 +20,43 @@ static DEFINE_SPINLOCK(ratelimit_lock); * This enforces a rate limit: not more than @rs->ratelimit_burst callbacks * in every @rs->ratelimit_jiffies */ -int __ratelimit(struct ratelimit_state *rs) +int ___ratelimit(struct ratelimit_state *rs, const char *func) { unsigned long flags; + int ret; if (!rs->interval) return 1; - spin_lock_irqsave(&ratelimit_lock, flags); + /* + * If we contend on this state's lock then almost + * by definition we are too busy to print a message, + * in addition to the one that will be printed by + * the entity that is holding the lock already: + */ + if (!spin_trylock_irqsave(&rs->lock, flags)) + return 1; + if (!rs->begin) rs->begin = jiffies; if (time_is_before_jiffies(rs->begin + rs->interval)) { if (rs->missed) printk(KERN_WARNING "%s: %d callbacks suppressed\n", - __func__, rs->missed); - rs->begin = 0; + func, rs->missed); + rs->begin = 0; rs->printed = 0; - rs->missed = 0; + rs->missed = 0; } - if (rs->burst && rs->burst > rs->printed) - goto print; - - rs->missed++; - spin_unlock_irqrestore(&ratelimit_lock, flags); - return 0; + if (rs->burst && rs->burst > rs->printed) { + rs->printed++; + ret = 1; + } else { + rs->missed++; + ret = 0; + } + spin_unlock_irqrestore(&rs->lock, flags); -print: - rs->printed++; - spin_unlock_irqrestore(&ratelimit_lock, flags); - return 1; + return ret; } -EXPORT_SYMBOL(__ratelimit); +EXPORT_SYMBOL(___ratelimit); diff --git a/lib/rational.c b/lib/rational.c index b3c099b5478..3ed247b8066 100644 --- a/lib/rational.c +++ b/lib/rational.c @@ -7,6 +7,7 @@ */ #include <linux/rational.h> +#include <linux/module.h> /* * calculate best rational approximation for a given fraction diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c index 9df3ca56db1..ccf95bff798 100644 --- a/lib/rwsem-spinlock.c +++ b/lib/rwsem-spinlock.c @@ -17,6 +17,19 @@ struct rwsem_waiter { #define RWSEM_WAITING_FOR_WRITE 0x00000002 }; +int rwsem_is_locked(struct rw_semaphore *sem) +{ + int ret = 1; + unsigned long flags; + + if (spin_trylock_irqsave(&sem->wait_lock, flags)) { + ret = (sem->activity != 0); + spin_unlock_irqrestore(&sem->wait_lock, flags); + } + return ret; +} +EXPORT_SYMBOL(rwsem_is_locked); + /* * initialise the semaphore */ @@ -34,6 +47,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name, spin_lock_init(&sem->wait_lock); INIT_LIST_HEAD(&sem->wait_list); } +EXPORT_SYMBOL(__init_rwsem); /* * handle the lock release when processes blocked on it that can now run @@ -305,12 +319,3 @@ void __downgrade_write(struct rw_semaphore *sem) spin_unlock_irqrestore(&sem->wait_lock, flags); } -EXPORT_SYMBOL(__init_rwsem); -EXPORT_SYMBOL(__down_read); -EXPORT_SYMBOL(__down_read_trylock); -EXPORT_SYMBOL(__down_write_nested); -EXPORT_SYMBOL(__down_write); -EXPORT_SYMBOL(__down_write_trylock); -EXPORT_SYMBOL(__up_read); -EXPORT_SYMBOL(__up_write); -EXPORT_SYMBOL(__downgrade_write); diff --git a/lib/show_mem.c b/lib/show_mem.c index 238e72a18ce..fdc77c82f92 100644 --- a/lib/show_mem.c +++ b/lib/show_mem.c @@ -15,7 +15,7 @@ void show_mem(void) unsigned long total = 0, reserved = 0, shared = 0, nonshared = 0, highmem = 0; - 
printk(KERN_INFO "Mem-Info:\n"); + printk("Mem-Info:\n"); show_free_areas(); for_each_online_pgdat(pgdat) { @@ -49,15 +49,15 @@ void show_mem(void) pgdat_resize_unlock(pgdat, &flags); } - printk(KERN_INFO "%lu pages RAM\n", total); + printk("%lu pages RAM\n", total); #ifdef CONFIG_HIGHMEM - printk(KERN_INFO "%lu pages HighMem\n", highmem); + printk("%lu pages HighMem\n", highmem); #endif - printk(KERN_INFO "%lu pages reserved\n", reserved); - printk(KERN_INFO "%lu pages shared\n", shared); - printk(KERN_INFO "%lu pages non-shared\n", nonshared); + printk("%lu pages reserved\n", reserved); + printk("%lu pages shared\n", shared); + printk("%lu pages non-shared\n", nonshared); #ifdef CONFIG_QUICKLIST - printk(KERN_INFO "%lu pages in pagetable cache\n", + printk("%lu pages in pagetable cache\n", quicklist_total_size()); #endif } diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c index 9c4b0256490..4755b98b6df 100644 --- a/lib/spinlock_debug.c +++ b/lib/spinlock_debug.c @@ -13,8 +13,8 @@ #include <linux/delay.h> #include <linux/module.h> -void __spin_lock_init(spinlock_t *lock, const char *name, - struct lock_class_key *key) +void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, + struct lock_class_key *key) { #ifdef CONFIG_DEBUG_LOCK_ALLOC /* @@ -23,13 +23,13 @@ void __spin_lock_init(spinlock_t *lock, const char *name, debug_check_no_locks_freed((void *)lock, sizeof(*lock)); lockdep_init_map(&lock->dep_map, name, key, 0); #endif - lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; + lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; lock->magic = SPINLOCK_MAGIC; lock->owner = SPINLOCK_OWNER_INIT; lock->owner_cpu = -1; } -EXPORT_SYMBOL(__spin_lock_init); +EXPORT_SYMBOL(__raw_spin_lock_init); void __rwlock_init(rwlock_t *lock, const char *name, struct lock_class_key *key) @@ -41,7 +41,7 @@ void __rwlock_init(rwlock_t *lock, const char *name, debug_check_no_locks_freed((void *)lock, sizeof(*lock)); lockdep_init_map(&lock->dep_map, name, key, 0); #endif - lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED; + lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED; lock->magic = RWLOCK_MAGIC; lock->owner = SPINLOCK_OWNER_INIT; lock->owner_cpu = -1; @@ -49,7 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name, EXPORT_SYMBOL(__rwlock_init); -static void spin_bug(spinlock_t *lock, const char *msg) +static void spin_bug(raw_spinlock_t *lock, const char *msg) { struct task_struct *owner = NULL; @@ -73,7 +73,7 @@ static void spin_bug(spinlock_t *lock, const char *msg) #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg) static inline void -debug_spin_lock_before(spinlock_t *lock) +debug_spin_lock_before(raw_spinlock_t *lock) { SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); SPIN_BUG_ON(lock->owner == current, lock, "recursion"); @@ -81,16 +81,16 @@ debug_spin_lock_before(spinlock_t *lock) lock, "cpu recursion"); } -static inline void debug_spin_lock_after(spinlock_t *lock) +static inline void debug_spin_lock_after(raw_spinlock_t *lock) { lock->owner_cpu = raw_smp_processor_id(); lock->owner = current; } -static inline void debug_spin_unlock(spinlock_t *lock) +static inline void debug_spin_unlock(raw_spinlock_t *lock) { SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); - SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked"); + SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked"); SPIN_BUG_ON(lock->owner != current, lock, "wrong owner"); SPIN_BUG_ON(lock->owner_cpu != 
raw_smp_processor_id(), lock, "wrong CPU"); @@ -98,7 +98,7 @@ static inline void debug_spin_unlock(spinlock_t *lock) lock->owner_cpu = -1; } -static void __spin_lock_debug(spinlock_t *lock) +static void __spin_lock_debug(raw_spinlock_t *lock) { u64 i; u64 loops = loops_per_jiffy * HZ; @@ -106,7 +106,7 @@ static void __spin_lock_debug(spinlock_t *lock) for (;;) { for (i = 0; i < loops; i++) { - if (__raw_spin_trylock(&lock->raw_lock)) + if (arch_spin_trylock(&lock->raw_lock)) return; __delay(1); } @@ -125,17 +125,17 @@ static void __spin_lock_debug(spinlock_t *lock) } } -void _raw_spin_lock(spinlock_t *lock) +void do_raw_spin_lock(raw_spinlock_t *lock) { debug_spin_lock_before(lock); - if (unlikely(!__raw_spin_trylock(&lock->raw_lock))) + if (unlikely(!arch_spin_trylock(&lock->raw_lock))) __spin_lock_debug(lock); debug_spin_lock_after(lock); } -int _raw_spin_trylock(spinlock_t *lock) +int do_raw_spin_trylock(raw_spinlock_t *lock) { - int ret = __raw_spin_trylock(&lock->raw_lock); + int ret = arch_spin_trylock(&lock->raw_lock); if (ret) debug_spin_lock_after(lock); @@ -148,10 +148,10 @@ int _raw_spin_trylock(spinlock_t *lock) return ret; } -void _raw_spin_unlock(spinlock_t *lock) +void do_raw_spin_unlock(raw_spinlock_t *lock) { debug_spin_unlock(lock); - __raw_spin_unlock(&lock->raw_lock); + arch_spin_unlock(&lock->raw_lock); } static void rwlock_bug(rwlock_t *lock, const char *msg) @@ -176,7 +176,7 @@ static void __read_lock_debug(rwlock_t *lock) for (;;) { for (i = 0; i < loops; i++) { - if (__raw_read_trylock(&lock->raw_lock)) + if (arch_read_trylock(&lock->raw_lock)) return; __delay(1); } @@ -193,15 +193,15 @@ static void __read_lock_debug(rwlock_t *lock) } #endif -void _raw_read_lock(rwlock_t *lock) +void do_raw_read_lock(rwlock_t *lock) { RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); - __raw_read_lock(&lock->raw_lock); + arch_read_lock(&lock->raw_lock); } -int _raw_read_trylock(rwlock_t *lock) +int do_raw_read_trylock(rwlock_t *lock) { - int ret = __raw_read_trylock(&lock->raw_lock); + int ret = arch_read_trylock(&lock->raw_lock); #ifndef CONFIG_SMP /* @@ -212,10 +212,10 @@ int _raw_read_trylock(rwlock_t *lock) return ret; } -void _raw_read_unlock(rwlock_t *lock) +void do_raw_read_unlock(rwlock_t *lock) { RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); - __raw_read_unlock(&lock->raw_lock); + arch_read_unlock(&lock->raw_lock); } static inline void debug_write_lock_before(rwlock_t *lock) @@ -251,7 +251,7 @@ static void __write_lock_debug(rwlock_t *lock) for (;;) { for (i = 0; i < loops; i++) { - if (__raw_write_trylock(&lock->raw_lock)) + if (arch_write_trylock(&lock->raw_lock)) return; __delay(1); } @@ -268,16 +268,16 @@ static void __write_lock_debug(rwlock_t *lock) } #endif -void _raw_write_lock(rwlock_t *lock) +void do_raw_write_lock(rwlock_t *lock) { debug_write_lock_before(lock); - __raw_write_lock(&lock->raw_lock); + arch_write_lock(&lock->raw_lock); debug_write_lock_after(lock); } -int _raw_write_trylock(rwlock_t *lock) +int do_raw_write_trylock(rwlock_t *lock) { - int ret = __raw_write_trylock(&lock->raw_lock); + int ret = arch_write_trylock(&lock->raw_lock); if (ret) debug_write_lock_after(lock); @@ -290,8 +290,8 @@ int _raw_write_trylock(rwlock_t *lock) return ret; } -void _raw_write_unlock(rwlock_t *lock) +void do_raw_write_unlock(rwlock_t *lock) { debug_write_unlock(lock); - __raw_write_unlock(&lock->raw_lock); + arch_write_unlock(&lock->raw_lock); } diff --git a/lib/string.c b/lib/string.c index e96421ab9a9..f71bead1be3 100644 --- 
a/lib/string.c +++ b/lib/string.c @@ -36,25 +36,21 @@ int strnicmp(const char *s1, const char *s2, size_t len) /* Yes, Virginia, it had better be unsigned */ unsigned char c1, c2; - c1 = c2 = 0; - if (len) { - do { - c1 = *s1; - c2 = *s2; - s1++; - s2++; - if (!c1) - break; - if (!c2) - break; - if (c1 == c2) - continue; - c1 = tolower(c1); - c2 = tolower(c2); - if (c1 != c2) - break; - } while (--len); - } + if (!len) + return 0; + + do { + c1 = *s1++; + c2 = *s2++; + if (!c1 || !c2) + break; + if (c1 == c2) + continue; + c1 = tolower(c1); + c2 = tolower(c2); + if (c1 != c2) + break; + } while (--len); return (int)c1 - (int)c2; } EXPORT_SYMBOL(strnicmp); @@ -338,20 +334,34 @@ EXPORT_SYMBOL(strnchr); #endif /** - * strstrip - Removes leading and trailing whitespace from @s. + * skip_spaces - Removes leading whitespace from @str. + * @str: The string to be stripped. + * + * Returns a pointer to the first non-whitespace character in @str. + */ +char *skip_spaces(const char *str) +{ + while (isspace(*str)) + ++str; + return (char *)str; +} +EXPORT_SYMBOL(skip_spaces); + +/** + * strim - Removes leading and trailing whitespace from @s. * @s: The string to be stripped. * * Note that the first trailing whitespace is replaced with a %NUL-terminator * in the given string @s. Returns a pointer to the first non-whitespace * character in @s. */ -char *strstrip(char *s) +char *strim(char *s) { size_t size; char *end; + s = skip_spaces(s); size = strlen(s); - if (!size) return s; @@ -360,12 +370,9 @@ char *strstrip(char *s) end--; *(end + 1) = '\0'; - while (*s && isspace(*s)) - s++; - return s; } -EXPORT_SYMBOL(strstrip); +EXPORT_SYMBOL(strim); #ifndef __HAVE_ARCH_STRLEN /** @@ -656,7 +663,7 @@ EXPORT_SYMBOL(memscan); */ char *strstr(const char *s1, const char *s2) { - int l1, l2; + size_t l1, l2; l2 = strlen(s2); if (!l2) @@ -673,6 +680,31 @@ char *strstr(const char *s1, const char *s2) EXPORT_SYMBOL(strstr); #endif +#ifndef __HAVE_ARCH_STRNSTR +/** + * strnstr - Find the first substring in a length-limited string + * @s1: The string to be searched + * @s2: The string to search for + * @len: the maximum number of characters to search + */ +char *strnstr(const char *s1, const char *s2, size_t len) +{ + size_t l2; + + l2 = strlen(s2); + if (!l2) + return (char *)s1; + while (len >= l2) { + len--; + if (!memcmp(s1, s2, l2)) + return (char *)s1; + s1++; + } + return NULL; +} +EXPORT_SYMBOL(strnstr); +#endif + #ifndef __HAVE_ARCH_MEMCHR /** * memchr - Find a character in an area of memory. diff --git a/lib/swiotlb.c b/lib/swiotlb.c index ac25cd28e80..437eedb5a53 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -97,6 +97,8 @@ static phys_addr_t *io_tlb_orig_addr; */ static DEFINE_SPINLOCK(io_tlb_lock); +static int late_alloc; + static int __init setup_io_tlb_npages(char *str) { @@ -109,6 +111,7 @@ setup_io_tlb_npages(char *str) ++str; if (!strcmp(str, "force")) swiotlb_force = 1; + return 1; } __setup("swiotlb=", setup_io_tlb_npages); @@ -121,8 +124,9 @@ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev, return phys_to_dma(hwdev, virt_to_phys(address)); } -static void swiotlb_print_info(unsigned long bytes) +void swiotlb_print_info(void) { + unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT; phys_addr_t pstart, pend; pstart = virt_to_phys(io_tlb_start); @@ -140,7 +144,7 @@ static void swiotlb_print_info(unsigned long bytes) * structures for the software IO TLB used to implement the DMA API. 
*/ void __init -swiotlb_init_with_default_size(size_t default_size) +swiotlb_init_with_default_size(size_t default_size, int verbose) { unsigned long i, bytes; @@ -176,14 +180,14 @@ swiotlb_init_with_default_size(size_t default_size) io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow); if (!io_tlb_overflow_buffer) panic("Cannot allocate SWIOTLB overflow buffer!\n"); - - swiotlb_print_info(bytes); + if (verbose) + swiotlb_print_info(); } void __init -swiotlb_init(void) +swiotlb_init(int verbose) { - swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */ + swiotlb_init_with_default_size(64 * (1<<20), verbose); /* default to 64MB */ } /* @@ -260,7 +264,9 @@ swiotlb_late_init_with_default_size(size_t default_size) if (!io_tlb_overflow_buffer) goto cleanup4; - swiotlb_print_info(bytes); + swiotlb_print_info(); + + late_alloc = 1; return 0; @@ -281,6 +287,32 @@ cleanup1: return -ENOMEM; } +void __init swiotlb_free(void) +{ + if (!io_tlb_overflow_buffer) + return; + + if (late_alloc) { + free_pages((unsigned long)io_tlb_overflow_buffer, + get_order(io_tlb_overflow)); + free_pages((unsigned long)io_tlb_orig_addr, + get_order(io_tlb_nslabs * sizeof(phys_addr_t))); + free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs * + sizeof(int))); + free_pages((unsigned long)io_tlb_start, + get_order(io_tlb_nslabs << IO_TLB_SHIFT)); + } else { + free_bootmem_late(__pa(io_tlb_overflow_buffer), + io_tlb_overflow); + free_bootmem_late(__pa(io_tlb_orig_addr), + io_tlb_nslabs * sizeof(phys_addr_t)); + free_bootmem_late(__pa(io_tlb_list), + io_tlb_nslabs * sizeof(int)); + free_bootmem_late(__pa(io_tlb_start), + io_tlb_nslabs << IO_TLB_SHIFT); + } +} + static int is_swiotlb_buffer(phys_addr_t paddr) { return paddr >= virt_to_phys(io_tlb_start) && @@ -453,7 +485,7 @@ do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir) /* * Return the buffer to the free list by setting the corresponding - * entries to indicate the number of contigous entries available. + * entries to indicate the number of contiguous entries available. * While returning the entries to the free list, we merge the entries * with slots below and above the pool being returned. */ @@ -517,7 +549,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, dma_mask = hwdev->coherent_dma_mask; ret = (void *)__get_free_pages(flags, order); - if (ret && swiotlb_virt_to_bus(hwdev, ret) + size > dma_mask) { + if (ret && swiotlb_virt_to_bus(hwdev, ret) + size - 1 > dma_mask) { /* * The allocated memory isn't reachable by the device. 
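	 * (dev_addr + size - 1 is the address of the allocation's last
	 * byte; comparing that, rather than dev_addr + size, keeps a
	 * buffer that ends exactly at dma_mask usable.)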
*/ @@ -539,7 +571,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, dev_addr = swiotlb_virt_to_bus(hwdev, ret); /* Confirm address can be DMA'd by device */ - if (dev_addr + size > dma_mask) { + if (dev_addr + size - 1 > dma_mask) { printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n", (unsigned long long)dma_mask, (unsigned long long)dev_addr); diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 33bed5e67a2..af4aaa6c36f 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -9,7 +9,7 @@ * Wirzenius wrote this portably, Torvalds fucked it up :-) */ -/* +/* * Fri Jul 13 2001 Crutcher Dunnavant <crutcher+kernel@datastacks.com> * - changed to provide snprintf and vsnprintf functions * So Feb 1 16:51:32 CET 2004 Juergen Quade <quade@hsnr.de> @@ -47,14 +47,14 @@ static unsigned int simple_guess_base(const char *cp) } /** - * simple_strtoul - convert a string to an unsigned long + * simple_strtoull - convert a string to an unsigned long long * @cp: The start of the string * @endp: A pointer to the end of the parsed string will be placed here * @base: The number base to use */ -unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base) +unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base) { - unsigned long result = 0; + unsigned long long result = 0; if (!base) base = simple_guess_base(cp); @@ -71,58 +71,39 @@ unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base) result = result * base + value; cp++; } - if (endp) *endp = (char *)cp; + return result; } -EXPORT_SYMBOL(simple_strtoul); +EXPORT_SYMBOL(simple_strtoull); /** - * simple_strtol - convert a string to a signed long + * simple_strtoul - convert a string to an unsigned long * @cp: The start of the string * @endp: A pointer to the end of the parsed string will be placed here * @base: The number base to use */ -long simple_strtol(const char *cp, char **endp, unsigned int base) +unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base) { - if(*cp == '-') - return -simple_strtoul(cp + 1, endp, base); - return simple_strtoul(cp, endp, base); + return simple_strtoull(cp, endp, base); } -EXPORT_SYMBOL(simple_strtol); +EXPORT_SYMBOL(simple_strtoul); /** - * simple_strtoull - convert a string to an unsigned long long + * simple_strtol - convert a string to a signed long * @cp: The start of the string * @endp: A pointer to the end of the parsed string will be placed here * @base: The number base to use */ -unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base) +long simple_strtol(const char *cp, char **endp, unsigned int base) { - unsigned long long result = 0; - - if (!base) - base = simple_guess_base(cp); - - if (base == 16 && cp[0] == '0' && TOLOWER(cp[1]) == 'x') - cp += 2; - - while (isxdigit(*cp)) { - unsigned int value; - - value = isdigit(*cp) ? 
*cp - '0' : TOLOWER(*cp) - 'a' + 10; - if (value >= base) - break; - result = result * base + value; - cp++; - } + if (*cp == '-') + return -simple_strtoul(cp + 1, endp, base); - if (endp) - *endp = (char *)cp; - return result; + return simple_strtoul(cp, endp, base); } -EXPORT_SYMBOL(simple_strtoull); +EXPORT_SYMBOL(simple_strtol); /** * simple_strtoll - convert a string to a signed long long @@ -132,8 +113,9 @@ EXPORT_SYMBOL(simple_strtoull); */ long long simple_strtoll(const char *cp, char **endp, unsigned int base) { - if(*cp=='-') + if (*cp == '-') return -simple_strtoull(cp + 1, endp, base); + return simple_strtoull(cp, endp, base); } @@ -173,6 +155,7 @@ int strict_strtoul(const char *cp, unsigned int base, unsigned long *res) val = simple_strtoul(cp, &tail, base); if (tail == cp) return -EINVAL; + if ((*tail == '\0') || ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) { *res = val; @@ -285,10 +268,11 @@ EXPORT_SYMBOL(strict_strtoll); static int skip_atoi(const char **s) { - int i=0; + int i = 0; while (isdigit(**s)) i = i*10 + *((*s)++) - '0'; + return i; } @@ -302,7 +286,7 @@ static int skip_atoi(const char **s) /* Formats correctly any integer in [0,99999]. * Outputs from one to five digits depending on input. * On i386 gcc 4.1.2 -O2: ~250 bytes of code. */ -static char* put_dec_trunc(char *buf, unsigned q) +static char *put_dec_trunc(char *buf, unsigned q) { unsigned d3, d2, d1, d0; d1 = (q>>4) & 0xf; @@ -331,14 +315,15 @@ static char* put_dec_trunc(char *buf, unsigned q) d3 = d3 - 10*q; *buf++ = d3 + '0'; /* next digit */ if (q != 0) - *buf++ = q + '0'; /* most sign. digit */ + *buf++ = q + '0'; /* most sign. digit */ } } } + return buf; } /* Same with if's removed. Always emits five digits */ -static char* put_dec_full(char *buf, unsigned q) +static char *put_dec_full(char *buf, unsigned q) { /* BTW, if q is in [0,9999], 8-bit ints will be enough, */ /* but anyway, gcc produces better code with full-sized ints */ @@ -347,14 +332,15 @@ static char* put_dec_full(char *buf, unsigned q) d2 = (q>>8) & 0xf; d3 = (q>>12); - /* Possible ways to approx. divide by 10 */ - /* gcc -O2 replaces multiply with shifts and adds */ - // (x * 0xcd) >> 11: 11001101 - shorter code than * 0x67 (on i386) - // (x * 0x67) >> 10: 1100111 - // (x * 0x34) >> 9: 110100 - same - // (x * 0x1a) >> 8: 11010 - same - // (x * 0x0d) >> 7: 1101 - same, shortest code (on i386) - + /* + * Possible ways to approx. 
divide by 10 + * gcc -O2 replaces multiply with shifts and adds + * (x * 0xcd) >> 11: 11001101 - shorter code than * 0x67 (on i386) + * (x * 0x67) >> 10: 1100111 + * (x * 0x34) >> 9: 110100 - same + * (x * 0x1a) >> 8: 11010 - same + * (x * 0x0d) >> 7: 1101 - same, shortest code (on i386) + */ d0 = 6*(d3 + d2 + d1) + (q & 0xf); q = (d0 * 0xcd) >> 11; d0 = d0 - 10*q; @@ -375,10 +361,11 @@ static char* put_dec_full(char *buf, unsigned q) d3 = d3 - 10*q; *buf++ = d3 + '0'; *buf++ = q + '0'; + return buf; } /* No inlining helps gcc to use registers better */ -static noinline char* put_dec(char *buf, unsigned long long num) +static noinline char *put_dec(char *buf, unsigned long long num) { while (1) { unsigned rem; @@ -448,9 +435,9 @@ static char *number(char *buf, char *end, unsigned long long num, spec.flags &= ~ZEROPAD; sign = 0; if (spec.flags & SIGN) { - if ((signed long long) num < 0) { + if ((signed long long)num < 0) { sign = '-'; - num = - (signed long long) num; + num = -(signed long long)num; spec.field_width--; } else if (spec.flags & PLUS) { sign = '+'; @@ -478,7 +465,9 @@ static char *number(char *buf, char *end, unsigned long long num, else if (spec.base != 10) { /* 8 or 16 */ int mask = spec.base - 1; int shift = 3; - if (spec.base == 16) shift = 4; + + if (spec.base == 16) + shift = 4; do { tmp[i++] = (digits[((unsigned char)num) & mask] | locase); num >>= shift; @@ -493,7 +482,7 @@ static char *number(char *buf, char *end, unsigned long long num, /* leading space padding */ spec.field_width -= spec.precision; if (!(spec.flags & (ZEROPAD+LEFT))) { - while(--spec.field_width >= 0) { + while (--spec.field_width >= 0) { if (buf < end) *buf = ' '; ++buf; @@ -543,15 +532,16 @@ static char *number(char *buf, char *end, unsigned long long num, *buf = ' '; ++buf; } + return buf; } -static char *string(char *buf, char *end, char *s, struct printf_spec spec) +static char *string(char *buf, char *end, const char *s, struct printf_spec spec) { int len, i; if ((unsigned long)s < PAGE_SIZE) - s = "<NULL>"; + s = "(null)"; len = strnlen(s, spec.precision); @@ -572,6 +562,7 @@ static char *string(char *buf, char *end, char *s, struct printf_spec spec) *buf = ' '; ++buf; } + return buf; } @@ -585,47 +576,101 @@ static char *symbol_string(char *buf, char *end, void *ptr, sprint_symbol(sym, value); else kallsyms_lookup(value, NULL, NULL, NULL, sym); + return string(buf, end, sym, spec); #else - spec.field_width = 2*sizeof(void *); + spec.field_width = 2 * sizeof(void *); spec.flags |= SPECIAL | SMALL | ZEROPAD; spec.base = 16; + return number(buf, end, value, spec); #endif } static char *resource_string(char *buf, char *end, struct resource *res, - struct printf_spec spec) + struct printf_spec spec, const char *fmt) { #ifndef IO_RSRC_PRINTK_SIZE -#define IO_RSRC_PRINTK_SIZE 4 +#define IO_RSRC_PRINTK_SIZE 6 #endif #ifndef MEM_RSRC_PRINTK_SIZE -#define MEM_RSRC_PRINTK_SIZE 8 +#define MEM_RSRC_PRINTK_SIZE 10 #endif - struct printf_spec num_spec = { + struct printf_spec hex_spec = { .base = 16, .precision = -1, .flags = SPECIAL | SMALL | ZEROPAD, }; - /* room for the actual numbers, the two "0x", -, [, ] and the final zero */ - char sym[4*sizeof(resource_size_t) + 8]; + struct printf_spec dec_spec = { + .base = 10, + .precision = -1, + .flags = 0, + }; + struct printf_spec str_spec = { + .field_width = -1, + .precision = 10, + .flags = LEFT, + }; + struct printf_spec flag_spec = { + .base = 16, + .precision = -1, + .flags = SPECIAL | SMALL, + }; + + /* 32-bit res (sizeof==4): 10 chars in dec, 10 in 
hex ("0x" + 8) + * 64-bit res (sizeof==8): 20 chars in dec, 18 in hex ("0x" + 16) */ +#define RSRC_BUF_SIZE ((2 * sizeof(resource_size_t)) + 4) +#define FLAG_BUF_SIZE (2 * sizeof(res->flags)) +#define DECODED_BUF_SIZE sizeof("[mem - 64bit pref disabled]") +#define RAW_BUF_SIZE sizeof("[mem - flags 0x]") + char sym[max(2*RSRC_BUF_SIZE + DECODED_BUF_SIZE, + 2*RSRC_BUF_SIZE + FLAG_BUF_SIZE + RAW_BUF_SIZE)]; + char *p = sym, *pend = sym + sizeof(sym); - int size = -1; + int size = -1, addr = 0; + int decode = (fmt[0] == 'R') ? 1 : 0; - if (res->flags & IORESOURCE_IO) + if (res->flags & IORESOURCE_IO) { size = IO_RSRC_PRINTK_SIZE; - else if (res->flags & IORESOURCE_MEM) + addr = 1; + } else if (res->flags & IORESOURCE_MEM) { size = MEM_RSRC_PRINTK_SIZE; + addr = 1; + } *p++ = '['; - num_spec.field_width = size; - p = number(p, pend, res->start, num_spec); - *p++ = '-'; - p = number(p, pend, res->end, num_spec); + if (res->flags & IORESOURCE_IO) + p = string(p, pend, "io ", str_spec); + else if (res->flags & IORESOURCE_MEM) + p = string(p, pend, "mem ", str_spec); + else if (res->flags & IORESOURCE_IRQ) + p = string(p, pend, "irq ", str_spec); + else if (res->flags & IORESOURCE_DMA) + p = string(p, pend, "dma ", str_spec); + else { + p = string(p, pend, "??? ", str_spec); + decode = 0; + } + hex_spec.field_width = size; + p = number(p, pend, res->start, addr ? hex_spec : dec_spec); + if (res->start != res->end) { + *p++ = '-'; + p = number(p, pend, res->end, addr ? hex_spec : dec_spec); + } + if (decode) { + if (res->flags & IORESOURCE_MEM_64) + p = string(p, pend, " 64bit", str_spec); + if (res->flags & IORESOURCE_PREFETCH) + p = string(p, pend, " pref", str_spec); + if (res->flags & IORESOURCE_DISABLED) + p = string(p, pend, " disabled", str_spec); + } else { + p = string(p, pend, " flags ", str_spec); + p = number(p, pend, res->flags, flag_spec); + } *p++ = ']'; - *p = 0; + *p = '\0'; return string(buf, end, sym, spec); } @@ -636,24 +681,55 @@ static char *mac_address_string(char *buf, char *end, u8 *addr, char mac_addr[sizeof("xx:xx:xx:xx:xx:xx")]; char *p = mac_addr; int i; + char separator; + + if (fmt[1] == 'F') { /* FDDI canonical format */ + separator = '-'; + } else { + separator = ':'; + } for (i = 0; i < 6; i++) { p = pack_hex_byte(p, addr[i]); if (fmt[0] == 'M' && i != 5) - *p++ = ':'; + *p++ = separator; } *p = '\0'; return string(buf, end, mac_addr, spec); } -static char *ip4_string(char *p, const u8 *addr, bool leading_zeros) +static char *ip4_string(char *p, const u8 *addr, const char *fmt) { int i; - + bool leading_zeros = (fmt[0] == 'i'); + int index; + int step; + + switch (fmt[2]) { + case 'h': +#ifdef __BIG_ENDIAN + index = 0; + step = 1; +#else + index = 3; + step = -1; +#endif + break; + case 'l': + index = 3; + step = -1; + break; + case 'n': + case 'b': + default: + index = 0; + step = 1; + break; + } for (i = 0; i < 4; i++) { char temp[3]; /* hold each IP quad in reverse order */ - int digits = put_dec_trunc(temp, addr[i]) - temp; + int digits = put_dec_trunc(temp, addr[index]) - temp; if (leading_zeros) { if (digits < 3) *p++ = '0'; @@ -665,23 +741,21 @@ static char *ip4_string(char *p, const u8 *addr, bool leading_zeros) *p++ = temp[digits]; if (i < 3) *p++ = '.'; + index += step; } - *p = '\0'; + return p; } static char *ip6_compressed_string(char *p, const char *addr) { - int i; - int j; - int range; + int i, j, range; unsigned char zerolength[8]; int longest = 1; int colonpos = -1; u16 word; - u8 hi; - u8 lo; + u8 hi, lo; bool needcolon = false; bool useIPv4; 
struct in6_addr in6; @@ -735,8 +809,9 @@ static char *ip6_compressed_string(char *p, const char *addr) p = pack_hex_byte(p, hi); else *p++ = hex_asc_lo(hi); + p = pack_hex_byte(p, lo); } - if (hi || lo > 0x0f) + else if (lo > 0x0f) p = pack_hex_byte(p, lo); else *p++ = hex_asc_lo(lo); @@ -746,24 +821,25 @@ static char *ip6_compressed_string(char *p, const char *addr) if (useIPv4) { if (needcolon) *p++ = ':'; - p = ip4_string(p, &in6.s6_addr[12], false); + p = ip4_string(p, &in6.s6_addr[12], "I4"); } - *p = '\0'; + return p; } static char *ip6_string(char *p, const char *addr, const char *fmt) { int i; + for (i = 0; i < 8; i++) { p = pack_hex_byte(p, *addr++); p = pack_hex_byte(p, *addr++); if (fmt[0] == 'I' && i != 7) *p++ = ':'; } - *p = '\0'; + return p; } @@ -785,11 +861,57 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr, { char ip4_addr[sizeof("255.255.255.255")]; - ip4_string(ip4_addr, addr, fmt[0] == 'i'); + ip4_string(ip4_addr, addr, fmt); return string(buf, end, ip4_addr, spec); } +static char *uuid_string(char *buf, char *end, const u8 *addr, + struct printf_spec spec, const char *fmt) +{ + char uuid[sizeof("xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx")]; + char *p = uuid; + int i; + static const u8 be[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}; + static const u8 le[16] = {3,2,1,0,5,4,7,6,8,9,10,11,12,13,14,15}; + const u8 *index = be; + bool uc = false; + + switch (*(++fmt)) { + case 'L': + uc = true; /* fall-through */ + case 'l': + index = le; + break; + case 'B': + uc = true; + break; + } + + for (i = 0; i < 16; i++) { + p = pack_hex_byte(p, addr[index[i]]); + switch (i) { + case 3: + case 5: + case 7: + case 9: + *p++ = '-'; + break; + } + } + + *p = 0; + + if (uc) { + p = uuid; + do { + *p = toupper(*p); + } while (*(++p)); + } + + return string(buf, end, uuid, spec); +} + /* * Show a '%p' thing. 
A kernel extension is that the '%p' is followed * by an extra set of alphanumeric characters that are extended format @@ -801,19 +923,34 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr, * - 'f' For simple symbolic function names without offset * - 'S' For symbolic direct pointers with offset * - 's' For symbolic direct pointers without offset - * - 'R' For a struct resource pointer, it prints the range of - * addresses (not the name nor the flags) + * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref] + * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201] * - 'M' For a 6-byte MAC address, it prints the address in the * usual colon-separated hex notation * - 'm' For a 6-byte MAC address, it prints the hex address without colons + * - 'MF' For a 6-byte MAC FDDI address, it prints the address + * with a dash-separated hex notation * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way * IPv4 uses dot-separated decimal without leading 0's (1.2.3.4) * IPv6 uses colon separated network-order 16 bit hex with leading 0's * - 'i' [46] for 'raw' IPv4/IPv6 addresses * IPv6 omits the colons (01020304...0f) * IPv4 uses dot-separated decimal with leading 0's (010.123.045.006) + * - '[Ii]4[hnbl]' IPv4 addresses in host, network, big or little endian order * - 'I6c' for IPv6 addresses printed as specified by - * http://www.ietf.org/id/draft-kawamura-ipv6-text-representation-03.txt + * http://tools.ietf.org/html/draft-ietf-6man-text-addr-representation-00 + * - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form + * "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + * Options for %pU are: + * b big endian lower case hex (default) + * B big endian UPPER case hex + * l little endian lower case hex + * L little endian UPPER case hex + * big endian output byte order is: + * [0][1][2][3]-[4][5]-[6][7]-[8][9]-[10][11][12][13][14][15] + * little endian output byte order is: + * [3][2][1][0]-[5][4]-[7][6]-[8][9]-[10][11][12][13][14][15] + * * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 * function pointers are really function descriptors, which contain a * pointer to the real address. 
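 *
 * Example calls (a sketch; the pointers and printed values here are made
 * up for illustration):
 *
 *	printk("%pR\n", &res);	-> [mem 0x0-0x1f 64bit pref]
 *	printk("%pM\n", mac);	-> 00:01:02:03:04:05
 *	printk("%pI4\n", &ip4);	-> 1.2.3.4
 *	printk("%pU\n", uuid);	-> 0a1b2c3d-4e5f-6071-8293-a4b5c6d7e8f9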
@@ -828,14 +965,16 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr, case 'F': case 'f': ptr = dereference_function_descriptor(ptr); - case 's': /* Fallthrough */ case 'S': + case 's': return symbol_string(buf, end, ptr, spec, *fmt); case 'R': - return resource_string(buf, end, ptr, spec); + case 'r': + return resource_string(buf, end, ptr, spec, fmt); case 'M': /* Colon separated: 00:01:02:03:04:05 */ case 'm': /* Contiguous: 000102030405 */ + /* [mM]F (FDDI, bit reversed) */ return mac_address_string(buf, end, ptr, spec, fmt); case 'I': /* Formatted IP supported * 4: 1.2.3.4 @@ -853,6 +992,8 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr, return ip4_addr_string(buf, end, ptr, spec, fmt); } break; + case 'U': + return uuid_string(buf, end, ptr, spec, fmt); } spec.flags |= SMALL; if (spec.field_width == -1) { @@ -970,8 +1111,8 @@ precision: qualifier: /* get the conversion qualifier */ spec->qualifier = -1; - if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || - *fmt == 'Z' || *fmt == 'z' || *fmt == 't') { + if (*fmt == 'h' || TOLOWER(*fmt) == 'l' || + TOLOWER(*fmt) == 'z' || *fmt == 't') { spec->qualifier = *fmt++; if (unlikely(spec->qualifier == *fmt)) { if (spec->qualifier == 'l') { @@ -1038,7 +1179,7 @@ qualifier: spec->type = FORMAT_TYPE_LONG; else spec->type = FORMAT_TYPE_ULONG; - } else if (spec->qualifier == 'Z' || spec->qualifier == 'z') { + } else if (TOLOWER(spec->qualifier) == 'z') { spec->type = FORMAT_TYPE_SIZE_T; } else if (spec->qualifier == 't') { spec->type = FORMAT_TYPE_PTRDIFF; @@ -1074,7 +1215,18 @@ qualifier: * %ps output the name of a text symbol without offset * %pF output the name of a function pointer with its offset * %pf output the name of a function pointer without its offset - * %pR output the address range in a struct resource + * %pR output the address range in a struct resource with decoded flags + * %pr output the address range in a struct resource with raw flags + * %pM output a 6-byte MAC address with colons + * %pm output a 6-byte MAC address without colons + * %pI4 print an IPv4 address without leading zeros + * %pi4 print an IPv4 address with leading zeros + * %pI6 print an IPv6 address with colons + * %pi6 print an IPv6 address without colons + * %pI6c print an IPv6 address as specified by + * http://tools.ietf.org/html/draft-ietf-6man-text-addr-representation-00 + * %pU[bBlL] print a UUID/GUID in big or little endian using lower or upper + * case. * %n is ignored * * The return value is the number of characters which would @@ -1091,8 +1243,7 @@ qualifier: int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) { unsigned long long num; - char *str, *end, c; - int read; + char *str, *end; struct printf_spec spec = {0}; /* Reject out-of-range values early. 
Large positive sizes are @@ -1111,8 +1262,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) while (*fmt) { const char *old_fmt = fmt; - - read = format_decode(fmt, &spec); + int read = format_decode(fmt, &spec); fmt += read; @@ -1136,7 +1286,9 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) spec.precision = va_arg(args, int); break; - case FORMAT_TYPE_CHAR: + case FORMAT_TYPE_CHAR: { + char c; + if (!(spec.flags & LEFT)) { while (--spec.field_width > 0) { if (str < end) @@ -1155,6 +1307,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) ++str; } break; + } case FORMAT_TYPE_STR: str = string(str, end, va_arg(args, char *), spec); @@ -1185,8 +1338,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) if (qualifier == 'l') { long *ip = va_arg(args, long *); *ip = (str - buf); - } else if (qualifier == 'Z' || - qualifier == 'z') { + } else if (TOLOWER(qualifier) == 'z') { size_t *ip = va_arg(args, size_t *); *ip = (str - buf); } else { @@ -1269,7 +1421,8 @@ int vscnprintf(char *buf, size_t size, const char *fmt, va_list args) { int i; - i=vsnprintf(buf,size,fmt,args); + i = vsnprintf(buf, size, fmt, args); + return (i >= size) ? (size - 1) : i; } EXPORT_SYMBOL(vscnprintf); @@ -1288,14 +1441,15 @@ EXPORT_SYMBOL(vscnprintf); * * See the vsnprintf() documentation for format string extensions over C99. */ -int snprintf(char * buf, size_t size, const char *fmt, ...) +int snprintf(char *buf, size_t size, const char *fmt, ...) { va_list args; int i; va_start(args, fmt); - i=vsnprintf(buf,size,fmt,args); + i = vsnprintf(buf, size, fmt, args); va_end(args); + return i; } EXPORT_SYMBOL(snprintf); @@ -1311,7 +1465,7 @@ EXPORT_SYMBOL(snprintf); * the trailing '\0'. If @size is <= 0 the function returns 0. */ -int scnprintf(char * buf, size_t size, const char *fmt, ...) +int scnprintf(char *buf, size_t size, const char *fmt, ...) { va_list args; int i; @@ -1319,6 +1473,7 @@ int scnprintf(char * buf, size_t size, const char *fmt, ...) va_start(args, fmt); i = vsnprintf(buf, size, fmt, args); va_end(args); + return (i >= size) ? (size - 1) : i; } EXPORT_SYMBOL(scnprintf); @@ -1356,14 +1511,15 @@ EXPORT_SYMBOL(vsprintf); * * See the vsnprintf() documentation for format string extensions over C99. */ -int sprintf(char * buf, const char *fmt, ...) +int sprintf(char *buf, const char *fmt, ...) 
@@ -1356,14 +1511,15 @@ EXPORT_SYMBOL(vsprintf);
 *
 * See the vsnprintf() documentation for format string extensions over C99.
 */
-int sprintf(char * buf, const char *fmt, ...)
+int sprintf(char *buf, const char *fmt, ...)
 {
 	va_list args;
 	int i;
 
 	va_start(args, fmt);
-	i=vsnprintf(buf, INT_MAX, fmt, args);
+	i = vsnprintf(buf, INT_MAX, fmt, args);
 	va_end(args);
+
 	return i;
 }
 EXPORT_SYMBOL(sprintf);
@@ -1396,7 +1552,6 @@ int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args)
 {
 	struct printf_spec spec = {0};
 	char *str, *end;
-	int read;
 
 	str = (char *)bin_buf;
 	end = (char *)(bin_buf + size);
@@ -1421,14 +1576,15 @@ do { \
 	str += sizeof(type);		\
 } while (0)
 
 	while (*fmt) {
-		read = format_decode(fmt, &spec);
+		int read = format_decode(fmt, &spec);
 
 		fmt += read;
 
 		switch (spec.type) {
 		case FORMAT_TYPE_NONE:
+		case FORMAT_TYPE_INVALID:
+		case FORMAT_TYPE_PERCENT_CHAR:
 			break;
 
 		case FORMAT_TYPE_WIDTH:
@@ -1443,13 +1599,14 @@ do { \
 		case FORMAT_TYPE_STR: {
 			const char *save_str = va_arg(args, char *);
 			size_t len;
+
 			if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
 					|| (unsigned long)save_str < PAGE_SIZE)
-				save_str = "<NULL>";
-			len = strlen(save_str);
-			if (str + len + 1 < end)
-				memcpy(str, save_str, len + 1);
-			str += len + 1;
+				save_str = "(null)";
+			len = strlen(save_str) + 1;
+			if (str + len < end)
+				memcpy(str, save_str, len);
+			str += len;
 			break;
 		}
@@ -1460,19 +1617,13 @@ do { \
 			fmt++;
 			break;
 
-		case FORMAT_TYPE_PERCENT_CHAR:
-			break;
-
-		case FORMAT_TYPE_INVALID:
-			break;
-
 		case FORMAT_TYPE_NRCHARS: {
 			/* skip %n 's argument */
 			int qualifier = spec.qualifier;
 			void *skip_arg;
			if (qualifier == 'l')
 				skip_arg = va_arg(args, long *);
-			else if (qualifier == 'Z' || qualifier == 'z')
+			else if (TOLOWER(qualifier) == 'z')
 				skip_arg = va_arg(args, size_t *);
 			else
 				skip_arg = va_arg(args, int *);
@@ -1508,8 +1659,8 @@ do { \
 			}
 		}
 	}
-	return (u32 *)(PTR_ALIGN(str, sizeof(u32))) - bin_buf;
 
+	return (u32 *)(PTR_ALIGN(str, sizeof(u32))) - bin_buf;
 #undef save_arg
 }
 EXPORT_SYMBOL_GPL(vbin_printf);
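The pointer check added to FORMAT_TYPE_STR above treats anything in the first or last virtual page as a bad string pointer. A standalone sketch of the same test, with an illustrative value that is not part of the patch:

	const char *save_str = (const char *)16;	/* lies inside page 0 */

	if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE ||
	    (unsigned long)save_str < PAGE_SIZE)
		save_str = "(null)";	/* likely an errno or NULL; never dereferenced */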
@@ -1538,11 +1689,9 @@ EXPORT_SYMBOL_GPL(vbin_printf);
 */
 int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
 {
-	unsigned long long num;
-	char *str, *end, c;
-	const char *args = (const char *)bin_buf;
 	struct printf_spec spec = {0};
+	char *str, *end;
+	const char *args = (const char *)bin_buf;
 
 	if (WARN_ON_ONCE((int) size < 0))
 		return 0;
@@ -1572,10 +1721,8 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
 	}
 
 	while (*fmt) {
-		int read;
 		const char *old_fmt = fmt;
-
-		read = format_decode(fmt, &spec);
+		int read = format_decode(fmt, &spec);
 
 		fmt += read;
@@ -1599,7 +1746,9 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
 			spec.precision = get_arg(int);
 			break;
 
-		case FORMAT_TYPE_CHAR:
+		case FORMAT_TYPE_CHAR: {
+			char c;
+
 			if (!(spec.flags & LEFT)) {
 				while (--spec.field_width > 0) {
 					if (str < end)
@@ -1617,11 +1766,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
 				++str;
 			}
 			break;
+		}
 
 		case FORMAT_TYPE_STR: {
 			const char *str_arg = args;
-			size_t len = strlen(str_arg);
-			args += len + 1;
+			args += strlen(str_arg) + 1;
 			str = string(str, end, (char *)str_arg, spec);
 			break;
 		}
@@ -1633,11 +1782,6 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
 			break;
 
 		case FORMAT_TYPE_PERCENT_CHAR:
-			if (str < end)
-				*str = '%';
-			++str;
-			break;
-
 		case FORMAT_TYPE_INVALID:
 			if (str < end)
 				*str = '%';
@@ -1648,15 +1792,15 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
 			/* skip */
 			break;
 
-		default:
+		default: {
+			unsigned long long num;
+
 			switch (spec.type) {
 
 			case FORMAT_TYPE_LONG_LONG:
 				num = get_arg(long long);
 				break;
 			case FORMAT_TYPE_ULONG:
-				num = get_arg(unsigned long);
-				break;
 			case FORMAT_TYPE_LONG:
 				num = get_arg(unsigned long);
 				break;
@@ -1686,8 +1830,9 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
 			}
 
 			str = number(str, end, num, spec);
-		}
-	}
+		} /* default: */
+		} /* switch(spec.type) */
+	} /* while(*fmt) */
 
 	if (size > 0) {
 		if (str < end)
@@ -1721,6 +1866,7 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...)
 	va_start(args, fmt);
 	ret = vbin_printf(bin_buf, size, fmt, args);
 	va_end(args);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(bprintf);
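Taken together, bprintf() and bstr_printf() form a pack-now/format-later pair. A minimal illustrative round trip (buffer sizes are arbitrary; not part of the patch):

	u32 bin[64];
	char out[64];

	/* Pack the arguments in binary form on the hot path... */
	bprintf(bin, ARRAY_SIZE(bin), "%d %s", 42, "late");

	/* ...then render the text later, with the same format string. */
	bstr_printf(out, sizeof(out), "%d %s", bin);	/* out = "42 late" */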
@@ -1733,27 +1879,23 @@ EXPORT_SYMBOL_GPL(bprintf);
 * @fmt:	format of buffer
 * @args:	arguments
 */
-int vsscanf(const char * buf, const char * fmt, va_list args)
+int vsscanf(const char *buf, const char *fmt, va_list args)
 {
 	const char *str = buf;
 	char *next;
 	char digit;
 	int num = 0;
-	int qualifier;
-	int base;
-	int field_width;
-	int is_sign = 0;
+	int qualifier, base, field_width;
+	bool is_sign;
 
-	while(*fmt && *str) {
+	while (*fmt && *str) {
 		/* skip any white space in format */
 		/* white space in format matchs any amount of
 		 * white space, including none, in the input.
 		 */
 		if (isspace(*fmt)) {
-			while (isspace(*fmt))
-				++fmt;
-			while (isspace(*str))
-				++str;
+			fmt = skip_spaces(++fmt);
+			str = skip_spaces(str);
 		}
@@ -1766,7 +1908,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
 		if (!*fmt)
 			break;
 		++fmt;
-		
+
 		/* skip this conversion.
 		 * advance both strings to next white space
 		 */
@@ -1785,8 +1927,8 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
 		/* get conversion qualifier */
 		qualifier = -1;
-		if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
-		    *fmt == 'Z' || *fmt == 'z') {
+		if (*fmt == 'h' || TOLOWER(*fmt) == 'l' ||
+		    TOLOWER(*fmt) == 'z') {
 			qualifier = *fmt++;
 			if (unlikely(qualifier == *fmt)) {
 				if (qualifier == 'h') {
@@ -1798,16 +1940,17 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
 				}
 			}
 		}
-		base = 10;
-		is_sign = 0;
 
 		if (!*fmt || !*str)
 			break;
 
-		switch(*fmt++) {
+		base = 10;
+		is_sign = 0;
+
+		switch (*fmt++) {
 		case 'c':
 		{
-			char *s = (char *) va_arg(args,char*);
+			char *s = (char *)va_arg(args, char*);
 			if (field_width == -1)
 				field_width = 1;
 			do {
@@ -1818,17 +1961,15 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
 		continue;
 		case 's':
 		{
-			char *s = (char *) va_arg(args, char *);
-			if(field_width == -1)
+			char *s = (char *)va_arg(args, char *);
+			if (field_width == -1)
 				field_width = INT_MAX;
 			/* first, skip leading white space in buffer */
-			while (isspace(*str))
-				str++;
+			str = skip_spaces(str);
 			/* now copy until next white space */
-			while (*str && !isspace(*str) && field_width--) {
+			while (*str && !isspace(*str) && field_width--)
 				*s++ = *str++;
-			}
 			*s = '\0';
 			num++;
 		}
@@ -1836,7 +1977,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
 		case 'n':
 			/* return number of characters read so far */
 		{
-			int *i = (int *)va_arg(args,int*);
+			int *i = (int *)va_arg(args, int*);
 			*i = str - buf;
 		}
 		continue;
@@ -1848,14 +1989,14 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
 			base = 16;
 			break;
 		case 'i':
-			base = 0;
+			base = 0;
 		case 'd':
 			is_sign = 1;
 		case 'u':
 			break;
 		case '%':
 			/* looking for '%' in str */
-			if (*str++ != '%') 
+			if (*str++ != '%')
 				return num;
 			continue;
 		default:
@@ -1866,71 +2007,70 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
 		/* have some sort of integer conversion.
 		 * first, skip white space in buffer.
 		 */
-		while (isspace(*str))
-			str++;
+		str = skip_spaces(str);
 
 		digit = *str;
 		if (is_sign && digit == '-')
 			digit = *(str + 1);
 
 		if (!digit
-			|| (base == 16 && !isxdigit(digit))
-			|| (base == 10 && !isdigit(digit))
-			|| (base == 8 && (!isdigit(digit) || digit > '7'))
-			|| (base == 0 && !isdigit(digit)))
-			break;
+		    || (base == 16 && !isxdigit(digit))
+		    || (base == 10 && !isdigit(digit))
+		    || (base == 8 && (!isdigit(digit) || digit > '7'))
+		    || (base == 0 && !isdigit(digit)))
+			break;
 
-		switch(qualifier) {
+		switch (qualifier) {
 		case 'H':	/* that's 'hh' in format */
 			if (is_sign) {
-				signed char *s = (signed char *) va_arg(args,signed char *);
-				*s = (signed char) simple_strtol(str,&next,base);
+				signed char *s = (signed char *)va_arg(args, signed char *);
+				*s = (signed char)simple_strtol(str, &next, base);
 			} else {
-				unsigned char *s = (unsigned char *) va_arg(args, unsigned char *);
-				*s = (unsigned char) simple_strtoul(str, &next, base);
+				unsigned char *s = (unsigned char *)va_arg(args, unsigned char *);
+				*s = (unsigned char)simple_strtoul(str, &next, base);
 			}
 			break;
 		case 'h':
 			if (is_sign) {
-				short *s = (short *) va_arg(args,short *);
-				*s = (short) simple_strtol(str,&next,base);
+				short *s = (short *)va_arg(args, short *);
+				*s = (short)simple_strtol(str, &next, base);
 			} else {
-				unsigned short *s = (unsigned short *) va_arg(args, unsigned short *);
-				*s = (unsigned short) simple_strtoul(str, &next, base);
+				unsigned short *s = (unsigned short *)va_arg(args, unsigned short *);
+				*s = (unsigned short)simple_strtoul(str, &next, base);
 			}
 			break;
 		case 'l':
 			if (is_sign) {
-				long *l = (long *) va_arg(args,long *);
-				*l = simple_strtol(str,&next,base);
+				long *l = (long *)va_arg(args, long *);
+				*l = simple_strtol(str, &next, base);
 			} else {
-				unsigned long *l = (unsigned long*) va_arg(args,unsigned long*);
-				*l = simple_strtoul(str,&next,base);
+				unsigned long *l = (unsigned long *)va_arg(args, unsigned long *);
+				*l = simple_strtoul(str, &next, base);
 			}
 			break;
 		case 'L':
 			if (is_sign) {
-				long long *l = (long long*) va_arg(args,long long *);
-				*l = simple_strtoll(str,&next,base);
+				long long *l = (long long *)va_arg(args, long long *);
+				*l = simple_strtoll(str, &next, base);
 			} else {
-				unsigned long long *l = (unsigned long long*) va_arg(args,unsigned long long*);
-				*l = simple_strtoull(str,&next,base);
+				unsigned long long *l = (unsigned long long *)va_arg(args, unsigned long long *);
+				*l = simple_strtoull(str, &next, base);
 			}
 			break;
 		case 'Z':
 		case 'z':
 		{
-			size_t *s = (size_t*) va_arg(args,size_t*);
-			*s = (size_t) simple_strtoul(str,&next,base);
+			size_t *s = (size_t *)va_arg(args, size_t *);
+			*s = (size_t)simple_strtoul(str, &next, base);
 		}
 		break;
 		default:
 			if (is_sign) {
-				int *i = (int *) va_arg(args, int*);
-				*i = (int) simple_strtol(str,&next,base);
+				int *i = (int *)va_arg(args, int *);
+				*i = (int)simple_strtol(str, &next, base);
 			} else {
-				unsigned int *i = (unsigned int*) va_arg(args, unsigned int*);
-				*i = (unsigned int) simple_strtoul(str,&next,base);
+				unsigned int *i = (unsigned int *)va_arg(args, unsigned int*);
+				*i = (unsigned int)simple_strtoul(str, &next, base);
 			}
 			break;
 		}
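The qualifier switch above gives the kernel's sscanf() the usual width modifiers; a small illustrative call (not part of the patch):

	unsigned char hh;
	size_t z;
	long long ll;

	sscanf("7 4096 123456789012", "%hhu %zu %llu", &hh, &z, &ll);
	/* 'hh' is folded to qualifier 'H', 'z' maps to the size_t case,
	 * and 'll' is folded to 'L' for the long long case. */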
@@ -1961,14 +2101,15 @@ EXPORT_SYMBOL(vsscanf);
 * @fmt:	formatting of buffer
 * @...:	resulting arguments
 */
-int sscanf(const char * buf, const char * fmt, ...)
+int sscanf(const char *buf, const char *fmt, ...)
 {
 	va_list args;
 	int i;
 
-	va_start(args,fmt);
-	i = vsscanf(buf,fmt,args);
+	va_start(args, fmt);
+	i = vsscanf(buf, fmt, args);
 	va_end(args);
+
 	return i;
 }
 EXPORT_SYMBOL(sscanf);
diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c
index 8550b0c05d0..215447c5526 100644
--- a/lib/zlib_inflate/inffast.c
+++ b/lib/zlib_inflate/inffast.c
@@ -8,6 +8,21 @@
 #include "inflate.h"
 #include "inffast.h"
 
+/* Only do the unaligned "Faster" variant when
+ * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is set
+ *
+ * On powerpc, it won't be as we don't include autoconf.h
+ * automatically for the boot wrapper, which is intended as
+ * we run in an environment where we may not be able to deal
+ * with (even rare) alignment faults. In addition, we do not
+ * define __KERNEL__ for arch/powerpc/boot unlike x86
+ */
+
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#include <asm/unaligned.h>
+#include <asm/byteorder.h>
+#endif
+
 #ifndef ASMINF
 
 /* Allow machine dependent optimization for post-increment or pre-increment.
@@ -24,9 +39,11 @@
 #ifdef POSTINC
 #  define OFF 0
 #  define PUP(a) *(a)++
+#  define UP_UNALIGNED(a) get_unaligned((a)++)
 #else
 #  define OFF 1
 #  define PUP(a) *++(a)
+#  define UP_UNALIGNED(a) get_unaligned(++(a))
 #endif
 
 /*
@@ -239,18 +256,62 @@ void inflate_fast(z_streamp strm, unsigned start)
                 }
             }
             else {
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+                unsigned short *sout;
+                unsigned long loops;
+
+                from = out - dist;      /* copy direct from output */
+                /* minimum length is three */
+                /* Align out addr */
+                if (!((long)(out - 1 + OFF) & 1)) {
+                    PUP(out) = PUP(from);
+                    len--;
+                }
+                sout = (unsigned short *)(out - OFF);
+                if (dist > 2) {
+                    unsigned short *sfrom;
+
+                    sfrom = (unsigned short *)(from - OFF);
+                    loops = len >> 1;
+                    do
+                        PUP(sout) = UP_UNALIGNED(sfrom);
+                    while (--loops);
+                    out = (unsigned char *)sout + OFF;
+                    from = (unsigned char *)sfrom + OFF;
+                } else { /* dist == 1 or dist == 2 */
+                    unsigned short pat16;
+
+                    pat16 = *(sout-2+2*OFF);
+                    if (dist == 1)
+#if defined(__BIG_ENDIAN)
+                        pat16 = (pat16 & 0xff) | ((pat16 & 0xff) << 8);
+#elif defined(__LITTLE_ENDIAN)
+                        pat16 = (pat16 & 0xff00) | ((pat16 & 0xff00) >> 8);
+#else
+#error __BIG_ENDIAN nor __LITTLE_ENDIAN is defined
+#endif
+                    loops = len >> 1;
+                    do
+                        PUP(sout) = pat16;
+                    while (--loops);
+                    out = (unsigned char *)sout + OFF;
+                }
+                if (len & 1)
+                    PUP(out) = PUP(from);
+#else /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
                 from = out - dist;          /* copy direct from output */
                 do {                        /* minimum length is three */
-                    PUP(out) = PUP(from);
-                    PUP(out) = PUP(from);
-                    PUP(out) = PUP(from);
-                    len -= 3;
+                    PUP(out) = PUP(from);
+                    PUP(out) = PUP(from);
+                    PUP(out) = PUP(from);
+                    len -= 3;
                 } while (len > 2);
                 if (len) {
-                    PUP(out) = PUP(from);
-                    if (len > 1)
-                        PUP(out) = PUP(from);
+                    PUP(out) = PUP(from);
+                    if (len > 1)
+                        PUP(out) = PUP(from);
                 }
+#endif /* !CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
             }
         }
         else if ((op & 64) == 0) {          /* 2nd level distance code */
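The dist == 1 branch above exploits the fact that a one-byte LZ77 run can be widened into a 16-bit pattern, halving the number of stores. A standalone, user-space model of the same idea (illustrative only, with hypothetical names):

	#include <stdint.h>

	/* Replicate the last output byte over a run of 'len' bytes,
	 * two bytes per loop iteration, as the patched inflate_fast()
	 * does for distance-1 matches. */
	static void fill_run(uint8_t *out, uint8_t last, unsigned int len)
	{
		uint16_t pat16 = (uint16_t)(last | (last << 8));
		unsigned int loops = len >> 1;

		while (loops--) {
			out[0] = (uint8_t)pat16;
			out[1] = (uint8_t)(pat16 >> 8);
			out += 2;
		}
		if (len & 1)		/* odd trailing byte */
			*out = last;
	}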