From 3527fb326f07bc8e85cf66d4f987ebeea24e8e4a Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Thu, 5 Jun 2008 22:46:19 -0700 Subject: lib: export bitrev16 Bluetooth will be able to use this. Signed-off-by: Harvey Harrison Cc: Marcel Holtmann Cc: Dave Young Cc: Akinobu Mita Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/bitrev.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/bitrev.c b/lib/bitrev.c index 989aff73f88..3956203456d 100644 --- a/lib/bitrev.c +++ b/lib/bitrev.c @@ -42,10 +42,11 @@ const u8 byte_rev_table[256] = { }; EXPORT_SYMBOL_GPL(byte_rev_table); -static __always_inline u16 bitrev16(u16 x) +u16 bitrev16(u16 x) { return (bitrev8(x & 0xff) << 8) | bitrev8(x >> 8); } +EXPORT_SYMBOL(bitrev16); /** * bitrev32 - reverse the order of bits in a u32 value -- cgit v1.2.3-70-g09d2 From f595ec964daf7f99668039d7303ddedd09a75142 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 12 Jun 2008 10:47:56 +0200 Subject: common implementation of iterative div/mod We have a few instances of the open-coded iterative div/mod loop, used when we don't expect the dividend to be much bigger than the divisor. Unfortunately modern gcc's have the tendency to strength "reduce" this into a full mod operation, which isn't necessarily any faster, and even if it were, doesn't exist if gcc implements it in libgcc. The workaround is to put a dummy asm statement in the loop to prevent gcc from performing the transformation. This patch creates a single implementation of this loop, and uses it to replace the open-coded versions I know about. Signed-off-by: Jeremy Fitzhardinge Cc: Andrew Morton Cc: john stultz Cc: Segher Boessenkool Cc: Christian Kujau Cc: Robert Hancock Signed-off-by: Ingo Molnar --- arch/x86/xen/time.c | 13 +++---------- include/linux/math64.h | 2 ++ include/linux/time.h | 11 ++--------- lib/div64.c | 23 +++++++++++++++++++++++ 4 files changed, 30 insertions(+), 19 deletions(-) (limited to 'lib') diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index c39e1a5aa24..52b2e385698 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -150,11 +151,7 @@ static void do_stolen_accounting(void) if (stolen < 0) stolen = 0; - ticks = 0; - while (stolen >= NS_PER_TICK) { - ticks++; - stolen -= NS_PER_TICK; - } + ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen); __get_cpu_var(residual_stolen) = stolen; account_steal_time(NULL, ticks); @@ -166,11 +163,7 @@ static void do_stolen_accounting(void) if (blocked < 0) blocked = 0; - ticks = 0; - while (blocked >= NS_PER_TICK) { - ticks++; - blocked -= NS_PER_TICK; - } + ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked); __get_cpu_var(residual_blocked) = blocked; account_steal_time(idle_task(smp_processor_id()), ticks); } diff --git a/include/linux/math64.h b/include/linux/math64.h index c1a5f81501f..177785e1e4a 100644 --- a/include/linux/math64.h +++ b/include/linux/math64.h @@ -81,4 +81,6 @@ static inline s64 div_s64(s64 dividend, s32 divisor) } #endif +u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder); + #endif /* _LINUX_MATH64_H */ diff --git a/include/linux/time.h b/include/linux/time.h index d32ef0ad4c0..05f9517a8ed 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -6,6 +6,7 @@ #ifdef __KERNEL__ # include # include +# include #endif #ifndef _STRUCT_TIMESPEC @@ -172,15 +173,7 @@ extern struct timeval ns_to_timeval(const s64 nsec); */ static inline void
timespec_add_ns(struct timespec *a, u64 ns) { - ns += a->tv_nsec; - while(unlikely(ns >= NSEC_PER_SEC)) { - /* The following asm() prevents the compiler from - * optimising this loop into a modulo operation. */ - asm("" : "+r"(ns)); - - ns -= NSEC_PER_SEC; - a->tv_sec++; - } + a->tv_sec += iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns); a->tv_nsec = ns; } #endif /* __KERNEL__ */ diff --git a/lib/div64.c b/lib/div64.c index bb5bd0c0f03..76c01542d3e 100644 --- a/lib/div64.c +++ b/lib/div64.c @@ -98,3 +98,26 @@ EXPORT_SYMBOL(div64_u64); #endif #endif /* BITS_PER_LONG == 32 */ + +/* + * Iterative div/mod for use when dividend is not expected to be much + * bigger than divisor. + */ +u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder) +{ + u32 ret = 0; + + while (dividend >= divisor) { + /* The following asm() prevents the compiler from + optimising this loop into a modulo operation. */ + asm("" : "+rm"(dividend)); + + dividend -= divisor; + ret++; + } + + *remainder = dividend; + + return ret; +} +EXPORT_SYMBOL(iter_div_u64_rem); -- cgit v1.2.3-70-g09d2 From d5e181f78ac753893eb930868a52a4488cd3de0a Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 12 Jun 2008 10:47:58 +0200 Subject: add an inlined version of iter_div_u64_rem iter_div_u64_rem is used in the x86-64 vdso, which cannot call other kernel code. For this case, provide the always_inlined version, __iter_div_u64_rem. Signed-off-by: Jeremy Fitzhardinge Signed-off-by: Ingo Molnar --- include/linux/math64.h | 19 +++++++++++++++++++ lib/div64.c | 15 +-------------- 2 files changed, 20 insertions(+), 14 deletions(-) (limited to 'lib') diff --git a/include/linux/math64.h b/include/linux/math64.h index 177785e1e4a..c87f1528703 100644 --- a/include/linux/math64.h +++ b/include/linux/math64.h @@ -83,4 +83,23 @@ static inline s64 div_s64(s64 dividend, s32 divisor) u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder); +static __always_inline u32 +__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder) +{ + u32 ret = 0; + + while (dividend >= divisor) { + /* The following asm() prevents the compiler from + optimising this loop into a modulo operation. */ + asm("" : "+rm"(dividend)); + + dividend -= divisor; + ret++; + } + + *remainder = dividend; + + return ret; +} + #endif /* _LINUX_MATH64_H */ diff --git a/lib/div64.c b/lib/div64.c index 76c01542d3e..a111eb8de9c 100644 --- a/lib/div64.c +++ b/lib/div64.c @@ -105,19 +105,6 @@ EXPORT_SYMBOL(div64_u64); */ u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder) { - u32 ret = 0; - - while (dividend >= divisor) { - /* The following asm() prevents the compiler from - optimising this loop into a modulo operation. */ - asm("" : "+rm"(dividend)); - - dividend -= divisor; - ret++; - } - - *remainder = dividend; - - return ret; + return __iter_div_u64_rem(dividend, divisor, remainder); } EXPORT_SYMBOL(iter_div_u64_rem); -- cgit v1.2.3-70-g09d2 From 643b52b9c0b4e959436b4b551ebf4060d06d5ae8 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Thu, 12 Jun 2008 15:21:52 -0700 Subject: radix-tree: fix small lockless radix-tree bug We shrink a radix tree when its root node has only one child, in the left most slot. The child becomes the new root node. To perform this operation in a manner compatible with concurrent lockless lookups, we atomically switch the root pointer from the parent to its child. However a concurrent lockless lookup may now have loaded a pointer to the parent (and is presently deciding what to do next). 
For this reason, we also have to keep the parent node in a valid state after shrinking the tree, until the next RCU grace period -- otherwise this lookup with the parent pointer may not do the right thing. Notably, we need to keep the child in the left most slot there in case that is requested by the lookup. This is all pretty standard RCU stuff. It is worth repeating because in my eagerness to obey the radix tree node constructor scheme, I had broken it by zeroing the radix tree node before the grace period. What could happen is that a lookup can load the parent pointer, then decide it wants to follow the left most child slot, only to find the slot contained NULL due to the concurrent shrinker having zeroed the parent node before waiting for a grace period. The lookup would return a false negative as a result. Fix it by doing that clearing in the RCU callback. I would normally want to rip out the constructor entirely, but radix tree nodes are one of those places where they make sense (only few cachelines will be touched soon after allocation). This was never actually found in any lockless pagecache testing or by the test harness, but by seeing the odd problem with my scalable vmap rewrite. I have not tickled the test harness into reproducing it yet, but I'll keep working at it. Fortunately, it is not a problem anywhere lockless pagecache is used in mainline kernels (pagecache probe is not a guarantee, and brd does not have concurrent lookups and deletes). Signed-off-by: Nick Piggin Acked-by: Peter Zijlstra Cc: "Paul E. McKenney" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/radix-tree.c | 120 ++++++++++++++++++++++++++++--------------------------- 1 file changed, 62 insertions(+), 58 deletions(-) (limited to 'lib') diff --git a/lib/radix-tree.c b/lib/radix-tree.c index bd521716ab1..169a2f8dabc 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -88,6 +88,57 @@ static inline gfp_t root_gfp_mask(struct radix_tree_root *root) return root->gfp_mask & __GFP_BITS_MASK; } +static inline void tag_set(struct radix_tree_node *node, unsigned int tag, + int offset) +{ + __set_bit(offset, node->tags[tag]); +} + +static inline void tag_clear(struct radix_tree_node *node, unsigned int tag, + int offset) +{ + __clear_bit(offset, node->tags[tag]); +} + +static inline int tag_get(struct radix_tree_node *node, unsigned int tag, + int offset) +{ + return test_bit(offset, node->tags[tag]); +} + +static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag) +{ + root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT)); +} + +static inline void root_tag_clear(struct radix_tree_root *root, unsigned int tag) +{ + root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT)); +} + +static inline void root_tag_clear_all(struct radix_tree_root *root) +{ + root->gfp_mask &= __GFP_BITS_MASK; +} + +static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag) +{ + return (__force unsigned)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT)); +} + +/* + * Returns 1 if any slot in the node has this tag set. + * Otherwise returns 0. + */ +static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag) +{ + int idx; + for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) { + if (node->tags[tag][idx]) + return 1; + } + return 0; +} /* * This assumes that the caller has performed appropriate preallocation, and * that the caller has pinned this thread of control to the current CPU. 
@@ -124,6 +175,17 @@ static void radix_tree_node_rcu_free(struct rcu_head *head) { struct radix_tree_node *node = container_of(head, struct radix_tree_node, rcu_head); + + /* + * must only free zeroed nodes into the slab. radix_tree_shrink + * can leave us with a non-NULL entry in the first slot, so clear + * that here to make sure. + */ + tag_clear(node, 0, 0); + tag_clear(node, 1, 0); + node->slots[0] = NULL; + node->count = 0; + kmem_cache_free(radix_tree_node_cachep, node); } @@ -165,59 +227,6 @@ out: } EXPORT_SYMBOL(radix_tree_preload); -static inline void tag_set(struct radix_tree_node *node, unsigned int tag, - int offset) -{ - __set_bit(offset, node->tags[tag]); -} - -static inline void tag_clear(struct radix_tree_node *node, unsigned int tag, - int offset) -{ - __clear_bit(offset, node->tags[tag]); -} - -static inline int tag_get(struct radix_tree_node *node, unsigned int tag, - int offset) -{ - return test_bit(offset, node->tags[tag]); -} - -static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag) -{ - root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT)); -} - - -static inline void root_tag_clear(struct radix_tree_root *root, unsigned int tag) -{ - root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT)); -} - -static inline void root_tag_clear_all(struct radix_tree_root *root) -{ - root->gfp_mask &= __GFP_BITS_MASK; -} - -static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag) -{ - return (__force unsigned)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT)); -} - -/* - * Returns 1 if any slot in the node has this tag set. - * Otherwise returns 0. - */ -static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag) -{ - int idx; - for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) { - if (node->tags[tag][idx]) - return 1; - } - return 0; -} - /* * Return the maximum key which can be store into a * radix tree with height HEIGHT. @@ -930,11 +939,6 @@ static inline void radix_tree_shrink(struct radix_tree_root *root) newptr = radix_tree_ptr_to_indirect(newptr); root->rnode = newptr; root->height--; - /* must only free zeroed nodes into the slab */ - tag_clear(to_free, 0, 0); - tag_clear(to_free, 1, 0); - to_free->slots[0] = NULL; - to_free->count = 0; radix_tree_node_free(to_free); } } -- cgit v1.2.3-70-g09d2 From 50db04dd9c74178e68a981a7127c37252ffb3242 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Sun, 15 Jun 2008 00:47:36 +0200 Subject: debugobjects: fix lockdep warning Daniel J Blueman reported: | ======================================================= | [ INFO: possible circular locking dependency detected ] | 2.6.26-rc5-201c #1 | ------------------------------------------------------- | nscd/3669 is trying to acquire lock: | (&n->list_lock){.+..}, at: [] deactivate_slab+0x173/0x1e0 | | but task is already holding lock: | (&obj_hash[i].lock){++..}, at: [] | __debug_object_init+0x2f/0x350 | | which lock already depends on the new lock. There are two locks involved here; the first is a SLUB-local lock, and the second is a debugobjects-local lock. They are basically taken in two different orders: 1. SLUB { debugobjects { ... } } 2. debugobjects { SLUB { ... } } This patch changes pattern #2 by trying to fill the memory pool (e.g. the call into SLUB/kmalloc()) outside the debugobjects lock, so now the two patterns look like this: 1. SLUB { debugobjects { ... } } 2. SLUB { } debugobjects { ... 
} [ daniel.blueman@gmail.com: pool_lock needs to be taken irq safe in fill_pool ] Reported-by: Daniel J Blueman Signed-off-by: Vegard Nossum Signed-off-by: Thomas Gleixner --- lib/debugobjects.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) (limited to 'lib') diff --git a/lib/debugobjects.c b/lib/debugobjects.c index a76a5e122ae..85b18d79be8 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -68,6 +68,7 @@ static int fill_pool(void) { gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN; struct debug_obj *new; + unsigned long flags; if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL)) return obj_pool_free; @@ -81,10 +82,10 @@ static int fill_pool(void) if (!new) return obj_pool_free; - spin_lock(&pool_lock); + spin_lock_irqsave(&pool_lock, flags); hlist_add_head(&new->node, &obj_pool); obj_pool_free++; - spin_unlock(&pool_lock); + spin_unlock_irqrestore(&pool_lock, flags); } return obj_pool_free; } @@ -110,16 +111,13 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b) } /* - * Allocate a new object. If the pool is empty and no refill possible, - * switch off the debugger. + * Allocate a new object. If the pool is empty, switch off the debugger. */ static struct debug_obj * alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) { struct debug_obj *obj = NULL; - int retry = 0; -repeat: spin_lock(&pool_lock); if (obj_pool.first) { obj = hlist_entry(obj_pool.first, typeof(*obj), node); @@ -141,9 +139,6 @@ repeat: } spin_unlock(&pool_lock); - if (fill_pool() && !obj && !retry++) - goto repeat; - return obj; } @@ -261,6 +256,8 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack) struct debug_obj *obj; unsigned long flags; + fill_pool(); + db = get_bucket((unsigned long) addr); spin_lock_irqsave(&db->lock, flags); -- cgit v1.2.3-70-g09d2 From aebb6a849cfe7d89bcacaaecc20a480dfc1180e7 Mon Sep 17 00:00:00 2001 From: Joonwoo Park Date: Mon, 30 Jun 2008 12:42:23 -0700 Subject: textsearch: fix Boyer-Moore text search bug The current logic has a bug which cannot find matching pattern, if the pattern is matched from the first character of target string. for example: pattern=abc, string=abcdefg pattern=a, string=abcdefg Searching algorithm should return 0 for those things. Signed-off-by: Joonwoo Park Signed-off-by: Patrick McHardy Signed-off-by: David S. Miller --- lib/ts_bm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/ts_bm.c b/lib/ts_bm.c index d90822c378a..4a7fce72898 100644 --- a/lib/ts_bm.c +++ b/lib/ts_bm.c @@ -63,7 +63,7 @@ static unsigned int bm_find(struct ts_config *conf, struct ts_state *state) struct ts_bm *bm = ts_config_priv(conf); unsigned int i, text_len, consumed = state->offset; const u8 *text; - int shift = bm->patlen, bs; + int shift = bm->patlen - 1, bs; for (;;) { text_len = conf->get_next_block(consumed, &text, conf, state); -- cgit v1.2.3-70-g09d2 From cde53535991fbb5c34a1566f25955297c1487b8d Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Fri, 4 Jul 2008 09:59:22 -0700 Subject: Christoph has moved Remove all clameter@sgi.com addresses from the kernel tree since they will become invalid on June 27th. Change my maintainer email address for the slab allocators to cl@linux-foundation.org (which will be the new email address for the future). 
Signed-off-by: Christoph Lameter Signed-off-by: Christoph Lameter Cc: Pekka Enberg Cc: Stephen Rothwell Cc: Matt Mackall Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/vm/slabinfo.c | 4 ++-- Documentation/vm/slub.txt | 2 +- MAINTAINERS | 2 +- include/asm-generic/atomic.h | 2 +- include/linux/slab.h | 2 +- include/linux/slub_def.h | 2 +- kernel/workqueue.c | 2 +- lib/radix-tree.c | 2 +- mm/allocpercpu.c | 2 +- mm/migrate.c | 2 +- mm/slub.c | 2 +- mm/sparse-vmemmap.c | 2 +- 12 files changed, 13 insertions(+), 13 deletions(-) (limited to 'lib') diff --git a/Documentation/vm/slabinfo.c b/Documentation/vm/slabinfo.c index e4230ed16ee..df3227605d5 100644 --- a/Documentation/vm/slabinfo.c +++ b/Documentation/vm/slabinfo.c @@ -1,7 +1,7 @@ /* * Slabinfo: Tool to get reports about slabs * - * (C) 2007 sgi, Christoph Lameter + * (C) 2007 sgi, Christoph Lameter * * Compile by: * @@ -99,7 +99,7 @@ void fatal(const char *x, ...) void usage(void) { - printf("slabinfo 5/7/2007. (c) 2007 sgi. clameter@sgi.com\n\n" + printf("slabinfo 5/7/2007. (c) 2007 sgi.\n\n" "slabinfo [-ahnpvtsz] [-d debugopts] [slab-regexp]\n" "-a|--aliases Show aliases\n" "-A|--activity Most active slabs first\n" diff --git a/Documentation/vm/slub.txt b/Documentation/vm/slub.txt index 7c13f22a0c9..bb1f5c6e28b 100644 --- a/Documentation/vm/slub.txt +++ b/Documentation/vm/slub.txt @@ -266,4 +266,4 @@ of other objects. slub_debug=FZ,dentry -Christoph Lameter, , May 30, 2007 +Christoph Lameter, May 30, 2007 diff --git a/MAINTAINERS b/MAINTAINERS index 460e699fd28..13b7b19692e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3672,7 +3672,7 @@ S: Maintained SLAB ALLOCATOR P: Christoph Lameter -M: clameter@sgi.com +M: cl@linux-foundation.org P: Pekka Enberg M: penberg@cs.helsinki.fi P: Matt Mackall diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h index 85fd0aa27a8..4ec0a296bde 100644 --- a/include/asm-generic/atomic.h +++ b/include/asm-generic/atomic.h @@ -2,7 +2,7 @@ #define _ASM_GENERIC_ATOMIC_H /* * Copyright (C) 2005 Silicon Graphics, Inc. - * Christoph Lameter + * Christoph Lameter * * Allows to provide arch independent atomic definitions without the need to * edit all arch specific atomic.h files. diff --git a/include/linux/slab.h b/include/linux/slab.h index c2ad3501659..9aa90a6f20e 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -1,7 +1,7 @@ /* * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk). * - * (C) SGI 2006, Christoph Lameter + * (C) SGI 2006, Christoph Lameter * Cleaned up and restructured to ease the addition of alternative * implementations of SLAB allocators. */ diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index cef6f8fddd7..d117ea2825a 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -4,7 +4,7 @@ /* * SLUB : A Slab allocator without object queues. * - * (C) 2007 SGI, Christoph Lameter + * (C) 2007 SGI, Christoph Lameter */ #include #include diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 29fc39f1029..ce7799540c9 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -13,7 +13,7 @@ * Kai Petzke * Theodore Ts'o * - * Made to use alloc_percpu by Christoph Lameter . + * Made to use alloc_percpu by Christoph Lameter. 
*/ #include diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 169a2f8dabc..56ec21a7f73 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -1,7 +1,7 @@ /* * Copyright (C) 2001 Momchil Velikov * Portions Copyright (C) 2001 Christoph Hellwig - * Copyright (C) 2005 SGI, Christoph Lameter + * Copyright (C) 2005 SGI, Christoph Lameter * Copyright (C) 2006 Nick Piggin * * This program is free software; you can redistribute it and/or diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c index f4026bae6ee..05f2b4009cc 100644 --- a/mm/allocpercpu.c +++ b/mm/allocpercpu.c @@ -1,7 +1,7 @@ /* * linux/mm/allocpercpu.c * - * Separated from slab.c August 11, 2006 Christoph Lameter + * Separated from slab.c August 11, 2006 Christoph Lameter */ #include #include diff --git a/mm/migrate.c b/mm/migrate.c index 112bcaeaa10..55bd355d170 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -9,7 +9,7 @@ * IWAMOTO Toshihiro * Hirokazu Takahashi * Dave Hansen - * Christoph Lameter + * Christoph Lameter */ #include diff --git a/mm/slub.c b/mm/slub.c index 2c9a62d1f42..1a427c0ae83 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -5,7 +5,7 @@ * The allocator synchronizes using per slab locks and only * uses a centralized lock to manage a pool of partial slabs. * - * (C) 2007 SGI, Christoph Lameter + * (C) 2007 SGI, Christoph Lameter */ #include diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c index 99c4f36eb8a..a91b5f8fcaf 100644 --- a/mm/sparse-vmemmap.c +++ b/mm/sparse-vmemmap.c @@ -1,7 +1,7 @@ /* * Virtual Memory Map support * - * (C) 2007 sgi. Christoph Lameter . + * (C) 2007 sgi. Christoph Lameter. * * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn, * virt_to_page, page_address() to be implemented as a base offset -- cgit v1.2.3-70-g09d2 From da9eac8990dc614ab4756f2a3d84870b675f1f1e Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 4 Jul 2008 09:59:36 -0700 Subject: lib: taint kernel in common report_bug() WARN path. Commit 95b570c9cef3b12356454c7112571b7e406b4b51 ("Taint kernel after WARN_ON(condition)") introduced a TAINT_WARN that was implemented for all architectures using the generic warn_on_slowpath(), which excluded any architecture that set HAVE_ARCH_WARN_ON. As all of the architectures that implement their own WARN_ON() all go through the report_bug() path (specifically handling BUG_TRAP_TYPE_WARN), taint the kernel there as well for consistency. Tested on avr32 and sh. Also relevant for s390, parisc, and powerpc. Signed-off-by: Haavard Skinnemoen Signed-off-by: Paul Mundt Acked-by: Kyle McMartin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/bug.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'lib') diff --git a/lib/bug.c b/lib/bug.c index 530f38f5578..bfeafd60ee9 100644 --- a/lib/bug.c +++ b/lib/bug.c @@ -37,6 +37,7 @@ */ #include #include +#include #include #include @@ -149,6 +150,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs) (void *)bugaddr); show_regs(regs); + add_taint(TAINT_WARN); return BUG_TRAP_TYPE_WARN; } -- cgit v1.2.3-70-g09d2 From 0f9bfa569d46f2346a53a940b2b9e49a38635732 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 6 Jul 2008 16:06:25 -0700 Subject: vsprintf: split out '%s' handling logic The actual code is the same, just split out into a helper function. This makes it easier to read, and allows for future sharing of the string code. 
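As a rough illustration of the '%s' semantics the new string() helper centralizes (a userspace sketch relying on standard C printf behaviour, not the kernel code itself): precision caps how many characters of the argument are copied, field width pads the result, and the LEFT flag (set by '-') selects left justification.

#include <stdio.h>

/* Userspace sketch of the %s width/precision rules implemented by the
 * kernel's string() helper: ".3" truncates the argument, "10" pads the
 * result to ten columns, and "-" left-justifies instead of padding on
 * the left. */
int main(void)
{
        printf("[%10.3s]\n", "kernel");   /* prints "[       ker]" */
        printf("[%-10.3s]\n", "kernel");  /* prints "[ker       ]" */
        printf("[%.2s]\n", "kernel");     /* prints "[ke]" */
        return 0;
}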
Signed-off-by: Linus Torvalds --- lib/vsprintf.c | 57 +++++++++++++++++++++++++++++++-------------------------- 1 file changed, 31 insertions(+), 26 deletions(-) (limited to 'lib') diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 6021757a449..926c7e00e2d 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -482,6 +482,35 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int return buf; } +static char *string(char *buf, char *end, char *s, int field_width, int precision, int flags) +{ + int len, i; + + if ((unsigned long)s < PAGE_SIZE) + s = ""; + + len = strnlen(s, precision); + + if (!(flags & LEFT)) { + while (len < field_width--) { + if (buf < end) + *buf = ' '; + ++buf; + } + } + for (i = 0; i < len; ++i) { + if (buf < end) + *buf = *s; + ++buf; ++s; + } + while (len < field_width--) { + if (buf < end) + *buf = ' '; + ++buf; + } + return buf; +} + /** * vsnprintf - Format a string and place it in a buffer * @buf: The buffer to place the result into @@ -502,11 +531,9 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int */ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) { - int len; unsigned long long num; - int i, base; + int base; char *str, *end, c; - const char *s; int flags; /* flags to number() */ @@ -622,29 +649,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) continue; case 's': - s = va_arg(args, char *); - if ((unsigned long)s < PAGE_SIZE) - s = ""; - - len = strnlen(s, precision); - - if (!(flags & LEFT)) { - while (len < field_width--) { - if (str < end) - *str = ' '; - ++str; - } - } - for (i = 0; i < len; ++i) { - if (str < end) - *str = *s; - ++str; ++s; - } - while (len < field_width--) { - if (str < end) - *str = ' '; - ++str; - } + str = string(str, end, va_arg(args, char *), field_width, precision, flags); continue; case 'p': -- cgit v1.2.3-70-g09d2 From 78a8bf69b32980879975f7e31d30386c50bfe851 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 6 Jul 2008 16:16:15 -0700 Subject: vsprintf: split out '%p' handling logic The actual code is the same, just split out into a helper function. This makes it easier to read, and allows for simple future extension of %p handling. Signed-off-by: Linus Torvalds --- lib/vsprintf.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) (limited to 'lib') diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 926c7e00e2d..f569feb7662 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -511,6 +511,16 @@ static char *string(char *buf, char *end, char *s, int field_width, int precisio return buf; } +static char *pointer(char *buf, char *end, void *ptr, int field_width, int precision, int flags) +{ + flags |= SMALL; + if (field_width == -1) { + field_width = 2*sizeof(void *); + flags |= ZEROPAD; + } + return number(buf, end, (unsigned long) ptr, 16, field_width, precision, flags); +} + /** * vsnprintf - Format a string and place it in a buffer * @buf: The buffer to place the result into @@ -653,17 +663,9 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) continue; case 'p': - flags |= SMALL; - if (field_width == -1) { - field_width = 2*sizeof(void *); - flags |= ZEROPAD; - } - str = number(str, end, - (unsigned long) va_arg(args, void *), - 16, field_width, precision, flags); + str = pointer(str, end, va_arg(args, void *), field_width, precision, flags); continue; - case 'n': /* FIXME: * What does C99 say about the overflow case here? 
*/ -- cgit v1.2.3-70-g09d2 From 4d8a743cdd2690c0bc8d1b8cbd02cffb1ead849f Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 6 Jul 2008 16:24:57 -0700 Subject: vsprintf: add infrastructure support for extended '%p' specifiers This expands the kernel '%p' handling with an arbitrary alphanumeric specifier extension string immediately following the '%p'. Right now it's just being ignored, but the next commit will start adding some specific pointer type extensions. NOTE! The reason the extension is appended to the '%p' is to allow minimal gcc type checking: gcc will still see the '%p' and will check that the argument passed in is indeed a pointer, and yet will not complain about the extended information that gcc doesn't understand about (on the other hand, it also won't actually check that the pointer type and the extension are compatible). Alphanumeric characters were chosen because there is no sane existing use for a string format with a hex pointer representation immediately followed by alphanumerics (which is what such a format string would have traditionally resulted in). Signed-off-by: Linus Torvalds --- lib/vsprintf.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) (limited to 'lib') diff --git a/lib/vsprintf.c b/lib/vsprintf.c index f569feb7662..5d6f0718b6d 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -511,7 +511,14 @@ static char *string(char *buf, char *end, char *s, int field_width, int precisio return buf; } -static char *pointer(char *buf, char *end, void *ptr, int field_width, int precision, int flags) +/* + * Show a '%p' thing. A kernel extension is that the '%p' is followed + * by an extra set of alphanumeric characters that are extended format + * specifiers. + * + * Right now don't actually handle any such, but we will.. + */ +static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field_width, int precision, int flags) { flags |= SMALL; if (field_width == -1) { @@ -663,7 +670,12 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) continue; case 'p': - str = pointer(str, end, va_arg(args, void *), field_width, precision, flags); + str = pointer(fmt+1, str, end, + va_arg(args, void *), + field_width, precision, flags); + /* Skip all alphanumeric pointer suffixes */ + while (isalnum(fmt[1])) + fmt++; continue; case 'n': -- cgit v1.2.3-70-g09d2 From 0fe1ef24f7bd0020f29ffe287dfdb9ead33ca0b2 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 6 Jul 2008 16:43:12 -0700 Subject: vsprintf: add support for '%pS' and '%pF' pointer formats They print out a pointer in symbolic format, if possible (ie using symbolic KALLSYMS information). The '%pS' format is for regular direct pointers (which can point to data or code and that you find on the stack during backtraces etc), while '%pF' is for C function pointer types. On most architectures, the two mean exactly the same thing, but some architectures use an indirect pointer for C function pointers, where the function pointer points to a function descriptor (which in turn contains the actual pointer to the code). The '%pF' code automatically does the appropriate function descriptor dereference on such architectures.
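A hypothetical caller (not part of these patches; the function name and its handler argument are made up for illustration) would use the new extensions like this:

#include <linux/kernel.h>

/* Sketch of %pS/%pF usage: %pS resolves a plain code/data address to a
 * symbol name, while %pF additionally dereferences the function
 * descriptor that ia64/ppc64 use for C function pointers. */
static void example_report(void (*handler)(void))
{
        printk(KERN_DEBUG "called from %pS\n",
               __builtin_return_address(0));
        printk(KERN_DEBUG "handler is %pF\n", handler);
}

On x86 both lines print the same kind of symbol+offset/size output that sprint_symbol() produces; on descriptor-based architectures only %pF looks up the code address behind the descriptor.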
Signed-off-by: Linus Torvalds --- lib/vsprintf.c | 41 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 40 insertions(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 5d6f0718b6d..1dc2d1d18fa 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -22,6 +22,8 @@ #include #include #include +#include +#include #include /* for PAGE_SIZE */ #include @@ -511,15 +513,52 @@ static char *string(char *buf, char *end, char *s, int field_width, int precisio return buf; } +static inline void *dereference_function_descriptor(void *ptr) +{ +#if defined(CONFIG_IA64) || defined(CONFIG_PPC64) + void *p; + if (!probe_kernel_address(ptr, p)) + ptr = p; +#endif + return ptr; +} + +static char *symbol_string(char *buf, char *end, void *ptr, int field_width, int precision, int flags) +{ + unsigned long value = (unsigned long) ptr; +#ifdef CONFIG_KALLSYMS + char sym[KSYM_SYMBOL_LEN]; + sprint_symbol(sym, value); + return string(buf, end, sym, field_width, precision, flags); +#else + field_width = 2*sizeof(void *); + flags |= SPECIAL | SMALL | ZEROPAD; + return number(buf, end, value, 16, field_width, precision, flags); +#endif +} + /* * Show a '%p' thing. A kernel extension is that the '%p' is followed * by an extra set of alphanumeric characters that are extended format * specifiers. * - * Right now don't actually handle any such, but we will.. + * Right now we just handle 'F' (for symbolic Function descriptor pointers) + * and 'S' (for Symbolic direct pointers), but this can easily be + * extended in the future (network address types etc). + * + * The difference between 'S' and 'F' is that on ia64 and ppc64 function + * pointers are really function descriptors, which contain a pointer the + * real address. */ static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field_width, int precision, int flags) { + switch (*fmt) { + case 'F': + ptr = dereference_function_descriptor(ptr); + /* Fallthrough */ + case 'S': + return symbol_string(buf, end, ptr, field_width, precision, flags); + } flags |= SMALL; if (field_width == -1) { field_width = 2*sizeof(void *); -- cgit v1.2.3-70-g09d2
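Tying back to the iter_div_u64_rem patches at the top of this log, a minimal standalone sketch of the same pattern (userspace, GCC-style inline asm assumed; not the kernel source itself) shows why the empty asm() is there: without it, an optimizing compiler may strength-reduce the subtraction loop back into a full 64-bit modulo, which on 32-bit targets becomes a libgcc call the kernel cannot rely on.

#include <stdint.h>
#include <stdio.h>

/* Repeated subtraction instead of a 64-bit division, for dividends
 * expected to be only slightly larger than the divisor.  The empty
 * asm() keeps the compiler from turning the loop into a u64 modulo. */
static uint32_t iter_div_u64_rem(uint64_t dividend, uint32_t divisor,
                                 uint64_t *remainder)
{
        uint32_t ret = 0;

        while (dividend >= divisor) {
                asm("" : "+rm"(dividend));
                dividend -= divisor;
                ret++;
        }
        *remainder = dividend;
        return ret;
}

int main(void)
{
        uint64_t rem;
        uint32_t ticks = iter_div_u64_rem(2500000123ULL, 1000000000U, &rem);

        /* prints "2 ticks, 500000123 ns left over" */
        printf("%u ticks, %llu ns left over\n",
               (unsigned)ticks, (unsigned long long)rem);
        return 0;
}

Compiled with optimization, the loop survives as repeated subtraction; dropping the asm("") line typically lets the compiler emit the modulo (and, on 32-bit, the corresponding libgcc helper call) instead.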