Diffstat (limited to 'lib')
-rw-r--r-- | lib/Kconfig          |   6
-rw-r--r-- | lib/Kconfig.debug    |  31
-rw-r--r-- | lib/Makefile         |   4
-rw-r--r-- | lib/bitmap.c         | 158
-rw-r--r-- | lib/debugobjects.c   | 890
-rw-r--r-- | lib/devres.c         |   4
-rw-r--r-- | lib/div64.c          |  35
-rw-r--r-- | lib/find_next_bit.c  |  69
-rw-r--r-- | lib/idr.c            |  12
-rw-r--r-- | lib/inflate.c        |   3
-rw-r--r-- | lib/iomap.c          |   2
-rw-r--r-- | lib/klist.c          | 235
-rw-r--r-- | lib/kobject.c        |  44
-rw-r--r-- | lib/kobject_uevent.c |  10
-rw-r--r-- | lib/lmb.c            |  99
-rw-r--r-- | lib/percpu_counter.c |   1
-rw-r--r-- | lib/proportions.c    |  38
-rw-r--r-- | lib/radix-tree.c     |   9
-rw-r--r-- | lib/ratelimit.c      |  51
-rw-r--r-- | lib/string.c         |  27
-rw-r--r-- | lib/swiotlb.c        | 149
21 files changed, 1614 insertions, 263 deletions
diff --git a/lib/Kconfig b/lib/Kconfig index 2d53dc092e8..8cc8e8722a3 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -7,6 +7,12 @@ menu "Library routines" config BITREVERSE tristate +config GENERIC_FIND_FIRST_BIT + def_bool n + +config GENERIC_FIND_NEXT_BIT + def_bool n + config CRC_CCITT tristate "CRC-CCITT functions" help diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 754cc0027f2..d2099f41aa1 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -194,6 +194,37 @@ config TIMER_STATS (it defaults to deactivated on bootup and will only be activated if some application like powertop activates it explicitly). +config DEBUG_OBJECTS + bool "Debug object operations" + depends on DEBUG_KERNEL + help + If you say Y here, additional code will be inserted into the + kernel to track the life time of various objects and validate + the operations on those objects. + +config DEBUG_OBJECTS_SELFTEST + bool "Debug objects selftest" + depends on DEBUG_OBJECTS + help + This enables the selftest of the object debug code. + +config DEBUG_OBJECTS_FREE + bool "Debug objects in freed memory" + depends on DEBUG_OBJECTS + help + This enables checks whether a k/v free operation frees an area + which contains an object which has not been deactivated + properly. This can make kmalloc/kfree-intensive workloads + much slower. + +config DEBUG_OBJECTS_TIMERS + bool "Debug timer objects" + depends on DEBUG_OBJECTS + help + If you say Y here, additional code will be inserted into the + timer routines to track the life time of timer objects and + validate the timer operations. + config DEBUG_SLAB bool "Debug slab memory allocations" depends on DEBUG_KERNEL && SLAB diff --git a/lib/Makefile b/lib/Makefile index bf8000fc7d4..74b0cfb1fcc 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -6,7 +6,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ rbtree.o radix-tree.o dump_stack.o \ idr.o int_sqrt.o extable.o prio_tree.o \ sha1.o irq_regs.o reciprocal_div.o argv_split.o \ - proportions.o prio_heap.o + proportions.o prio_heap.o ratelimit.o lib-$(CONFIG_MMU) += ioremap.o lib-$(CONFIG_SMP) += cpumask.o @@ -29,12 +29,14 @@ obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o +lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o obj-$(CONFIG_PLIST) += plist.o obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o obj-$(CONFIG_DEBUG_LIST) += list_debug.o +obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o ifneq ($(CONFIG_HAVE_DEC_LOCK),y) lib-y += dec_and_lock.o diff --git a/lib/bitmap.c b/lib/bitmap.c index a6939e18d7b..c4cb48f77f0 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -714,6 +714,164 @@ int bitmap_bitremap(int oldbit, const unsigned long *old, } EXPORT_SYMBOL(bitmap_bitremap); +/** + * bitmap_onto - translate one bitmap relative to another + * @dst: resulting translated bitmap + * @orig: original untranslated bitmap + * @relmap: bitmap relative to which translated + * @bits: number of bits in each of these bitmaps + * + * Set the n-th bit of @dst iff there exists some m such that the + * n-th bit of @relmap is set, the m-th bit of @orig is set, and + * the n-th bit of @relmap is also the m-th _set_ bit of @relmap. 
+ * (If you understood the previous sentence the first time your + * read it, you're overqualified for your current job.) + * + * In other words, @orig is mapped onto (surjectively) @dst, + * using the the map { <n, m> | the n-th bit of @relmap is the + * m-th set bit of @relmap }. + * + * Any set bits in @orig above bit number W, where W is the + * weight of (number of set bits in) @relmap are mapped nowhere. + * In particular, if for all bits m set in @orig, m >= W, then + * @dst will end up empty. In situations where the possibility + * of such an empty result is not desired, one way to avoid it is + * to use the bitmap_fold() operator, below, to first fold the + * @orig bitmap over itself so that all its set bits x are in the + * range 0 <= x < W. The bitmap_fold() operator does this by + * setting the bit (m % W) in @dst, for each bit (m) set in @orig. + * + * Example [1] for bitmap_onto(): + * Let's say @relmap has bits 30-39 set, and @orig has bits + * 1, 3, 5, 7, 9 and 11 set. Then on return from this routine, + * @dst will have bits 31, 33, 35, 37 and 39 set. + * + * When bit 0 is set in @orig, it means turn on the bit in + * @dst corresponding to whatever is the first bit (if any) + * that is turned on in @relmap. Since bit 0 was off in the + * above example, we leave off that bit (bit 30) in @dst. + * + * When bit 1 is set in @orig (as in the above example), it + * means turn on the bit in @dst corresponding to whatever + * is the second bit that is turned on in @relmap. The second + * bit in @relmap that was turned on in the above example was + * bit 31, so we turned on bit 31 in @dst. + * + * Similarly, we turned on bits 33, 35, 37 and 39 in @dst, + * because they were the 4th, 6th, 8th and 10th set bits + * set in @relmap, and the 4th, 6th, 8th and 10th bits of + * @orig (i.e. bits 3, 5, 7 and 9) were also set. + * + * When bit 11 is set in @orig, it means turn on the bit in + * @dst corresponding to whatever is the twelth bit that is + * turned on in @relmap. In the above example, there were + * only ten bits turned on in @relmap (30..39), so that bit + * 11 was set in @orig had no affect on @dst. + * + * Example [2] for bitmap_fold() + bitmap_onto(): + * Let's say @relmap has these ten bits set: + * 40 41 42 43 45 48 53 61 74 95 + * (for the curious, that's 40 plus the first ten terms of the + * Fibonacci sequence.) + * + * Further lets say we use the following code, invoking + * bitmap_fold() then bitmap_onto, as suggested above to + * avoid the possitility of an empty @dst result: + * + * unsigned long *tmp; // a temporary bitmap's bits + * + * bitmap_fold(tmp, orig, bitmap_weight(relmap, bits), bits); + * bitmap_onto(dst, tmp, relmap, bits); + * + * Then this table shows what various values of @dst would be, for + * various @orig's. I list the zero-based positions of each set bit. + * The tmp column shows the intermediate result, as computed by + * using bitmap_fold() to fold the @orig bitmap modulo ten + * (the weight of @relmap). + * + * @orig tmp @dst + * 0 0 40 + * 1 1 41 + * 9 9 95 + * 10 0 40 (*) + * 1 3 5 7 1 3 5 7 41 43 48 61 + * 0 1 2 3 4 0 1 2 3 4 40 41 42 43 45 + * 0 9 18 27 0 9 8 7 40 61 74 95 + * 0 10 20 30 0 40 + * 0 11 22 33 0 1 2 3 40 41 42 43 + * 0 12 24 36 0 2 4 6 40 42 45 53 + * 78 102 211 1 2 8 41 42 74 (*) + * + * (*) For these marked lines, if we hadn't first done bitmap_fold() + * into tmp, then the @dst result would have been empty. + * + * If either of @orig or @relmap is empty (no set bits), then @dst + * will be returned empty. 
+ * + * If (as explained above) the only set bits in @orig are in positions + * m where m >= W, (where W is the weight of @relmap) then @dst will + * once again be returned empty. + * + * All bits in @dst not set by the above rule are cleared. + */ +void bitmap_onto(unsigned long *dst, const unsigned long *orig, + const unsigned long *relmap, int bits) +{ + int n, m; /* same meaning as in above comment */ + + if (dst == orig) /* following doesn't handle inplace mappings */ + return; + bitmap_zero(dst, bits); + + /* + * The following code is a more efficient, but less + * obvious, equivalent to the loop: + * for (m = 0; m < bitmap_weight(relmap, bits); m++) { + * n = bitmap_ord_to_pos(orig, m, bits); + * if (test_bit(m, orig)) + * set_bit(n, dst); + * } + */ + + m = 0; + for (n = find_first_bit(relmap, bits); + n < bits; + n = find_next_bit(relmap, bits, n + 1)) { + /* m == bitmap_pos_to_ord(relmap, n, bits) */ + if (test_bit(m, orig)) + set_bit(n, dst); + m++; + } +} +EXPORT_SYMBOL(bitmap_onto); + +/** + * bitmap_fold - fold larger bitmap into smaller, modulo specified size + * @dst: resulting smaller bitmap + * @orig: original larger bitmap + * @sz: specified size + * @bits: number of bits in each of these bitmaps + * + * For each bit oldbit in @orig, set bit oldbit mod @sz in @dst. + * Clear all other bits in @dst. See further the comment and + * Example [2] for bitmap_onto() for why and how to use this. + */ +void bitmap_fold(unsigned long *dst, const unsigned long *orig, + int sz, int bits) +{ + int oldbit; + + if (dst == orig) /* following doesn't handle inplace mappings */ + return; + bitmap_zero(dst, bits); + + for (oldbit = find_first_bit(orig, bits); + oldbit < bits; + oldbit = find_next_bit(orig, bits, oldbit + 1)) + set_bit(oldbit % sz, dst); +} +EXPORT_SYMBOL(bitmap_fold); + /* * Common code for bitmap_*_region() routines. * bitmap: array of unsigned longs corresponding to the bitmap diff --git a/lib/debugobjects.c b/lib/debugobjects.c new file mode 100644 index 00000000000..a76a5e122ae --- /dev/null +++ b/lib/debugobjects.c @@ -0,0 +1,890 @@ +/* + * Generic infrastructure for lifetime debugging of objects. 
+ * + * Started by Thomas Gleixner + * + * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de> + * + * For licencing details see kernel-base/COPYING + */ +#include <linux/debugobjects.h> +#include <linux/interrupt.h> +#include <linux/seq_file.h> +#include <linux/debugfs.h> +#include <linux/hash.h> + +#define ODEBUG_HASH_BITS 14 +#define ODEBUG_HASH_SIZE (1 << ODEBUG_HASH_BITS) + +#define ODEBUG_POOL_SIZE 512 +#define ODEBUG_POOL_MIN_LEVEL 256 + +#define ODEBUG_CHUNK_SHIFT PAGE_SHIFT +#define ODEBUG_CHUNK_SIZE (1 << ODEBUG_CHUNK_SHIFT) +#define ODEBUG_CHUNK_MASK (~(ODEBUG_CHUNK_SIZE - 1)) + +struct debug_bucket { + struct hlist_head list; + spinlock_t lock; +}; + +static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE]; + +static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE]; + +static DEFINE_SPINLOCK(pool_lock); + +static HLIST_HEAD(obj_pool); + +static int obj_pool_min_free = ODEBUG_POOL_SIZE; +static int obj_pool_free = ODEBUG_POOL_SIZE; +static int obj_pool_used; +static int obj_pool_max_used; +static struct kmem_cache *obj_cache; + +static int debug_objects_maxchain __read_mostly; +static int debug_objects_fixups __read_mostly; +static int debug_objects_warnings __read_mostly; +static int debug_objects_enabled __read_mostly; +static struct debug_obj_descr *descr_test __read_mostly; + +static int __init enable_object_debug(char *str) +{ + debug_objects_enabled = 1; + return 0; +} +early_param("debug_objects", enable_object_debug); + +static const char *obj_states[ODEBUG_STATE_MAX] = { + [ODEBUG_STATE_NONE] = "none", + [ODEBUG_STATE_INIT] = "initialized", + [ODEBUG_STATE_INACTIVE] = "inactive", + [ODEBUG_STATE_ACTIVE] = "active", + [ODEBUG_STATE_DESTROYED] = "destroyed", + [ODEBUG_STATE_NOTAVAILABLE] = "not available", +}; + +static int fill_pool(void) +{ + gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN; + struct debug_obj *new; + + if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL)) + return obj_pool_free; + + if (unlikely(!obj_cache)) + return obj_pool_free; + + while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) { + + new = kmem_cache_zalloc(obj_cache, gfp); + if (!new) + return obj_pool_free; + + spin_lock(&pool_lock); + hlist_add_head(&new->node, &obj_pool); + obj_pool_free++; + spin_unlock(&pool_lock); + } + return obj_pool_free; +} + +/* + * Lookup an object in the hash bucket. + */ +static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b) +{ + struct hlist_node *node; + struct debug_obj *obj; + int cnt = 0; + + hlist_for_each_entry(obj, node, &b->list, node) { + cnt++; + if (obj->object == addr) + return obj; + } + if (cnt > debug_objects_maxchain) + debug_objects_maxchain = cnt; + + return NULL; +} + +/* + * Allocate a new object. If the pool is empty and no refill possible, + * switch off the debugger. 
+ */ +static struct debug_obj * +alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) +{ + struct debug_obj *obj = NULL; + int retry = 0; + +repeat: + spin_lock(&pool_lock); + if (obj_pool.first) { + obj = hlist_entry(obj_pool.first, typeof(*obj), node); + + obj->object = addr; + obj->descr = descr; + obj->state = ODEBUG_STATE_NONE; + hlist_del(&obj->node); + + hlist_add_head(&obj->node, &b->list); + + obj_pool_used++; + if (obj_pool_used > obj_pool_max_used) + obj_pool_max_used = obj_pool_used; + + obj_pool_free--; + if (obj_pool_free < obj_pool_min_free) + obj_pool_min_free = obj_pool_free; + } + spin_unlock(&pool_lock); + + if (fill_pool() && !obj && !retry++) + goto repeat; + + return obj; +} + +/* + * Put the object back into the pool or give it back to kmem_cache: + */ +static void free_object(struct debug_obj *obj) +{ + unsigned long idx = (unsigned long)(obj - obj_static_pool); + + if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) { + spin_lock(&pool_lock); + hlist_add_head(&obj->node, &obj_pool); + obj_pool_free++; + obj_pool_used--; + spin_unlock(&pool_lock); + } else { + spin_lock(&pool_lock); + obj_pool_used--; + spin_unlock(&pool_lock); + kmem_cache_free(obj_cache, obj); + } +} + +/* + * We run out of memory. That means we probably have tons of objects + * allocated. + */ +static void debug_objects_oom(void) +{ + struct debug_bucket *db = obj_hash; + struct hlist_node *node, *tmp; + struct debug_obj *obj; + unsigned long flags; + int i; + + printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n"); + + for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { + spin_lock_irqsave(&db->lock, flags); + hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) { + hlist_del(&obj->node); + free_object(obj); + } + spin_unlock_irqrestore(&db->lock, flags); + } +} + +/* + * We use the pfn of the address for the hash. That way we can check + * for freed objects simply by checking the affected bucket. + */ +static struct debug_bucket *get_bucket(unsigned long addr) +{ + unsigned long hash; + + hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS); + return &obj_hash[hash]; +} + +static void debug_print_object(struct debug_obj *obj, char *msg) +{ + static int limit; + + if (limit < 5 && obj->descr != descr_test) { + limit++; + printk(KERN_ERR "ODEBUG: %s %s object type: %s\n", msg, + obj_states[obj->state], obj->descr->name); + WARN_ON(1); + } + debug_objects_warnings++; +} + +/* + * Try to repair the damage, so we have a better chance to get useful + * debug output. 
+ */ +static void +debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state), + void * addr, enum debug_obj_state state) +{ + if (fixup) + debug_objects_fixups += fixup(addr, state); +} + +static void debug_object_is_on_stack(void *addr, int onstack) +{ + void *stack = current->stack; + int is_on_stack; + static int limit; + + if (limit > 4) + return; + + is_on_stack = (addr >= stack && addr < (stack + THREAD_SIZE)); + + if (is_on_stack == onstack) + return; + + limit++; + if (is_on_stack) + printk(KERN_WARNING + "ODEBUG: object is on stack, but not annotated\n"); + else + printk(KERN_WARNING + "ODEBUG: object is not on stack, but annotated\n"); + WARN_ON(1); +} + +static void +__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack) +{ + enum debug_obj_state state; + struct debug_bucket *db; + struct debug_obj *obj; + unsigned long flags; + + db = get_bucket((unsigned long) addr); + + spin_lock_irqsave(&db->lock, flags); + + obj = lookup_object(addr, db); + if (!obj) { + obj = alloc_object(addr, db, descr); + if (!obj) { + debug_objects_enabled = 0; + spin_unlock_irqrestore(&db->lock, flags); + debug_objects_oom(); + return; + } + debug_object_is_on_stack(addr, onstack); + } + + switch (obj->state) { + case ODEBUG_STATE_NONE: + case ODEBUG_STATE_INIT: + case ODEBUG_STATE_INACTIVE: + obj->state = ODEBUG_STATE_INIT; + break; + + case ODEBUG_STATE_ACTIVE: + debug_print_object(obj, "init"); + state = obj->state; + spin_unlock_irqrestore(&db->lock, flags); + debug_object_fixup(descr->fixup_init, addr, state); + return; + + case ODEBUG_STATE_DESTROYED: + debug_print_object(obj, "init"); + break; + default: + break; + } + + spin_unlock_irqrestore(&db->lock, flags); +} + +/** + * debug_object_init - debug checks when an object is initialized + * @addr: address of the object + * @descr: pointer to an object specific debug description structure + */ +void debug_object_init(void *addr, struct debug_obj_descr *descr) +{ + if (!debug_objects_enabled) + return; + + __debug_object_init(addr, descr, 0); +} + +/** + * debug_object_init_on_stack - debug checks when an object on stack is + * initialized + * @addr: address of the object + * @descr: pointer to an object specific debug description structure + */ +void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr) +{ + if (!debug_objects_enabled) + return; + + __debug_object_init(addr, descr, 1); +} + +/** + * debug_object_activate - debug checks when an object is activated + * @addr: address of the object + * @descr: pointer to an object specific debug description structure + */ +void debug_object_activate(void *addr, struct debug_obj_descr *descr) +{ + enum debug_obj_state state; + struct debug_bucket *db; + struct debug_obj *obj; + unsigned long flags; + + if (!debug_objects_enabled) + return; + + db = get_bucket((unsigned long) addr); + + spin_lock_irqsave(&db->lock, flags); + + obj = lookup_object(addr, db); + if (obj) { + switch (obj->state) { + case ODEBUG_STATE_INIT: + case ODEBUG_STATE_INACTIVE: + obj->state = ODEBUG_STATE_ACTIVE; + break; + + case ODEBUG_STATE_ACTIVE: + debug_print_object(obj, "activate"); + state = obj->state; + spin_unlock_irqrestore(&db->lock, flags); + debug_object_fixup(descr->fixup_activate, addr, state); + return; + + case ODEBUG_STATE_DESTROYED: + debug_print_object(obj, "activate"); + break; + default: + break; + } + spin_unlock_irqrestore(&db->lock, flags); + return; + } + + spin_unlock_irqrestore(&db->lock, flags); + /* + * This happens when a static object is 
activated. We + * let the type specific code decide whether this is + * true or not. + */ + debug_object_fixup(descr->fixup_activate, addr, + ODEBUG_STATE_NOTAVAILABLE); +} + +/** + * debug_object_deactivate - debug checks when an object is deactivated + * @addr: address of the object + * @descr: pointer to an object specific debug description structure + */ +void debug_object_deactivate(void *addr, struct debug_obj_descr *descr) +{ + struct debug_bucket *db; + struct debug_obj *obj; + unsigned long flags; + + if (!debug_objects_enabled) + return; + + db = get_bucket((unsigned long) addr); + + spin_lock_irqsave(&db->lock, flags); + + obj = lookup_object(addr, db); + if (obj) { + switch (obj->state) { + case ODEBUG_STATE_INIT: + case ODEBUG_STATE_INACTIVE: + case ODEBUG_STATE_ACTIVE: + obj->state = ODEBUG_STATE_INACTIVE; + break; + + case ODEBUG_STATE_DESTROYED: + debug_print_object(obj, "deactivate"); + break; + default: + break; + } + } else { + struct debug_obj o = { .object = addr, + .state = ODEBUG_STATE_NOTAVAILABLE, + .descr = descr }; + + debug_print_object(&o, "deactivate"); + } + + spin_unlock_irqrestore(&db->lock, flags); +} + +/** + * debug_object_destroy - debug checks when an object is destroyed + * @addr: address of the object + * @descr: pointer to an object specific debug description structure + */ +void debug_object_destroy(void *addr, struct debug_obj_descr *descr) +{ + enum debug_obj_state state; + struct debug_bucket *db; + struct debug_obj *obj; + unsigned long flags; + + if (!debug_objects_enabled) + return; + + db = get_bucket((unsigned long) addr); + + spin_lock_irqsave(&db->lock, flags); + + obj = lookup_object(addr, db); + if (!obj) + goto out_unlock; + + switch (obj->state) { + case ODEBUG_STATE_NONE: + case ODEBUG_STATE_INIT: + case ODEBUG_STATE_INACTIVE: + obj->state = ODEBUG_STATE_DESTROYED; + break; + case ODEBUG_STATE_ACTIVE: + debug_print_object(obj, "destroy"); + state = obj->state; + spin_unlock_irqrestore(&db->lock, flags); + debug_object_fixup(descr->fixup_destroy, addr, state); + return; + + case ODEBUG_STATE_DESTROYED: + debug_print_object(obj, "destroy"); + break; + default: + break; + } +out_unlock: + spin_unlock_irqrestore(&db->lock, flags); +} + +/** + * debug_object_free - debug checks when an object is freed + * @addr: address of the object + * @descr: pointer to an object specific debug description structure + */ +void debug_object_free(void *addr, struct debug_obj_descr *descr) +{ + enum debug_obj_state state; + struct debug_bucket *db; + struct debug_obj *obj; + unsigned long flags; + + if (!debug_objects_enabled) + return; + + db = get_bucket((unsigned long) addr); + + spin_lock_irqsave(&db->lock, flags); + + obj = lookup_object(addr, db); + if (!obj) + goto out_unlock; + + switch (obj->state) { + case ODEBUG_STATE_ACTIVE: + debug_print_object(obj, "free"); + state = obj->state; + spin_unlock_irqrestore(&db->lock, flags); + debug_object_fixup(descr->fixup_free, addr, state); + return; + default: + hlist_del(&obj->node); + free_object(obj); + break; + } +out_unlock: + spin_unlock_irqrestore(&db->lock, flags); +} + +#ifdef CONFIG_DEBUG_OBJECTS_FREE +static void __debug_check_no_obj_freed(const void *address, unsigned long size) +{ + unsigned long flags, oaddr, saddr, eaddr, paddr, chunks; + struct hlist_node *node, *tmp; + struct debug_obj_descr *descr; + enum debug_obj_state state; + struct debug_bucket *db; + struct debug_obj *obj; + int cnt; + + saddr = (unsigned long) address; + eaddr = saddr + size; + paddr = saddr & ODEBUG_CHUNK_MASK; + 
chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1)); + chunks >>= ODEBUG_CHUNK_SHIFT; + + for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) { + db = get_bucket(paddr); + +repeat: + cnt = 0; + spin_lock_irqsave(&db->lock, flags); + hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) { + cnt++; + oaddr = (unsigned long) obj->object; + if (oaddr < saddr || oaddr >= eaddr) + continue; + + switch (obj->state) { + case ODEBUG_STATE_ACTIVE: + debug_print_object(obj, "free"); + descr = obj->descr; + state = obj->state; + spin_unlock_irqrestore(&db->lock, flags); + debug_object_fixup(descr->fixup_free, + (void *) oaddr, state); + goto repeat; + default: + hlist_del(&obj->node); + free_object(obj); + break; + } + } + spin_unlock_irqrestore(&db->lock, flags); + if (cnt > debug_objects_maxchain) + debug_objects_maxchain = cnt; + } +} + +void debug_check_no_obj_freed(const void *address, unsigned long size) +{ + if (debug_objects_enabled) + __debug_check_no_obj_freed(address, size); +} +#endif + +#ifdef CONFIG_DEBUG_FS + +static int debug_stats_show(struct seq_file *m, void *v) +{ + seq_printf(m, "max_chain :%d\n", debug_objects_maxchain); + seq_printf(m, "warnings :%d\n", debug_objects_warnings); + seq_printf(m, "fixups :%d\n", debug_objects_fixups); + seq_printf(m, "pool_free :%d\n", obj_pool_free); + seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free); + seq_printf(m, "pool_used :%d\n", obj_pool_used); + seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used); + return 0; +} + +static int debug_stats_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, debug_stats_show, NULL); +} + +static const struct file_operations debug_stats_fops = { + .open = debug_stats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init debug_objects_init_debugfs(void) +{ + struct dentry *dbgdir, *dbgstats; + + if (!debug_objects_enabled) + return 0; + + dbgdir = debugfs_create_dir("debug_objects", NULL); + if (!dbgdir) + return -ENOMEM; + + dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL, + &debug_stats_fops); + if (!dbgstats) + goto err; + + return 0; + +err: + debugfs_remove(dbgdir); + + return -ENOMEM; +} +__initcall(debug_objects_init_debugfs); + +#else +static inline void debug_objects_init_debugfs(void) { } +#endif + +#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST + +/* Random data structure for the self test */ +struct self_test { + unsigned long dummy1[6]; + int static_init; + unsigned long dummy2[3]; +}; + +static __initdata struct debug_obj_descr descr_type_test; + +/* + * fixup_init is called when: + * - an active object is initialized + */ +static int __init fixup_init(void *addr, enum debug_obj_state state) +{ + struct self_test *obj = addr; + + switch (state) { + case ODEBUG_STATE_ACTIVE: + debug_object_deactivate(obj, &descr_type_test); + debug_object_init(obj, &descr_type_test); + return 1; + default: + return 0; + } +} + +/* + * fixup_activate is called when: + * - an active object is activated + * - an unknown object is activated (might be a statically initialized object) + */ +static int __init fixup_activate(void *addr, enum debug_obj_state state) +{ + struct self_test *obj = addr; + + switch (state) { + case ODEBUG_STATE_NOTAVAILABLE: + if (obj->static_init == 1) { + debug_object_init(obj, &descr_type_test); + debug_object_activate(obj, &descr_type_test); + /* + * Real code should return 0 here ! This is + * not a fixup of some bad behaviour. 
We + * merily call the debug_init function to keep + * track of the object. + */ + return 1; + } else { + /* Real code needs to emit a warning here */ + } + return 0; + + case ODEBUG_STATE_ACTIVE: + debug_object_deactivate(obj, &descr_type_test); + debug_object_activate(obj, &descr_type_test); + return 1; + + default: + return 0; + } +} + +/* + * fixup_destroy is called when: + * - an active object is destroyed + */ +static int __init fixup_destroy(void *addr, enum debug_obj_state state) +{ + struct self_test *obj = addr; + + switch (state) { + case ODEBUG_STATE_ACTIVE: + debug_object_deactivate(obj, &descr_type_test); + debug_object_destroy(obj, &descr_type_test); + return 1; + default: + return 0; + } +} + +/* + * fixup_free is called when: + * - an active object is freed + */ +static int __init fixup_free(void *addr, enum debug_obj_state state) +{ + struct self_test *obj = addr; + + switch (state) { + case ODEBUG_STATE_ACTIVE: + debug_object_deactivate(obj, &descr_type_test); + debug_object_free(obj, &descr_type_test); + return 1; + default: + return 0; + } +} + +static int +check_results(void *addr, enum debug_obj_state state, int fixups, int warnings) +{ + struct debug_bucket *db; + struct debug_obj *obj; + unsigned long flags; + int res = -EINVAL; + + db = get_bucket((unsigned long) addr); + + spin_lock_irqsave(&db->lock, flags); + + obj = lookup_object(addr, db); + if (!obj && state != ODEBUG_STATE_NONE) { + printk(KERN_ERR "ODEBUG: selftest object not found\n"); + WARN_ON(1); + goto out; + } + if (obj && obj->state != state) { + printk(KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n", + obj->state, state); + WARN_ON(1); + goto out; + } + if (fixups != debug_objects_fixups) { + printk(KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n", + fixups, debug_objects_fixups); + WARN_ON(1); + goto out; + } + if (warnings != debug_objects_warnings) { + printk(KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n", + warnings, debug_objects_warnings); + WARN_ON(1); + goto out; + } + res = 0; +out: + spin_unlock_irqrestore(&db->lock, flags); + if (res) + debug_objects_enabled = 0; + return res; +} + +static __initdata struct debug_obj_descr descr_type_test = { + .name = "selftest", + .fixup_init = fixup_init, + .fixup_activate = fixup_activate, + .fixup_destroy = fixup_destroy, + .fixup_free = fixup_free, +}; + +static __initdata struct self_test obj = { .static_init = 0 }; + +static void __init debug_objects_selftest(void) +{ + int fixups, oldfixups, warnings, oldwarnings; + unsigned long flags; + + local_irq_save(flags); + + fixups = oldfixups = debug_objects_fixups; + warnings = oldwarnings = debug_objects_warnings; + descr_test = &descr_type_test; + + debug_object_init(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings)) + goto out; + debug_object_activate(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings)) + goto out; + debug_object_activate(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings)) + goto out; + debug_object_deactivate(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings)) + goto out; + debug_object_destroy(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings)) + goto out; + debug_object_init(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings)) + goto out; + debug_object_activate(&obj, &descr_type_test); + if (check_results(&obj, 
ODEBUG_STATE_DESTROYED, fixups, ++warnings)) + goto out; + debug_object_deactivate(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings)) + goto out; + debug_object_free(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings)) + goto out; + + obj.static_init = 1; + debug_object_activate(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, warnings)) + goto out; + debug_object_init(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings)) + goto out; + debug_object_free(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings)) + goto out; + +#ifdef CONFIG_DEBUG_OBJECTS_FREE + debug_object_init(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings)) + goto out; + debug_object_activate(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings)) + goto out; + __debug_check_no_obj_freed(&obj, sizeof(obj)); + if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings)) + goto out; +#endif + printk(KERN_INFO "ODEBUG: selftest passed\n"); + +out: + debug_objects_fixups = oldfixups; + debug_objects_warnings = oldwarnings; + descr_test = NULL; + + local_irq_restore(flags); +} +#else +static inline void debug_objects_selftest(void) { } +#endif + +/* + * Called during early boot to initialize the hash buckets and link + * the static object pool objects into the poll list. After this call + * the object tracker is fully operational. + */ +void __init debug_objects_early_init(void) +{ + int i; + + for (i = 0; i < ODEBUG_HASH_SIZE; i++) + spin_lock_init(&obj_hash[i].lock); + + for (i = 0; i < ODEBUG_POOL_SIZE; i++) + hlist_add_head(&obj_static_pool[i].node, &obj_pool); +} + +/* + * Called after the kmem_caches are functional to setup a dedicated + * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag + * prevents that the debug code is called on kmem_cache_free() for the + * debug tracker objects to avoid recursive calls. + */ +void __init debug_objects_mem_init(void) +{ + if (!debug_objects_enabled) + return; + + obj_cache = kmem_cache_create("debug_objects_cache", + sizeof (struct debug_obj), 0, + SLAB_DEBUG_OBJECTS, NULL); + + if (!obj_cache) + debug_objects_enabled = 0; + else + debug_objects_selftest(); +} diff --git a/lib/devres.c b/lib/devres.c index edc27a5d1b7..26c87c49d77 100644 --- a/lib/devres.c +++ b/lib/devres.c @@ -20,7 +20,7 @@ static int devm_ioremap_match(struct device *dev, void *res, void *match_data) * * Managed ioremap(). Map is automatically unmapped on driver detach. */ -void __iomem *devm_ioremap(struct device *dev, unsigned long offset, +void __iomem *devm_ioremap(struct device *dev, resource_size_t offset, unsigned long size) { void __iomem **ptr, *addr; @@ -49,7 +49,7 @@ EXPORT_SYMBOL(devm_ioremap); * Managed ioremap_nocache(). Map is automatically unmapped on driver * detach. */ -void __iomem *devm_ioremap_nocache(struct device *dev, unsigned long offset, +void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset, unsigned long size) { void __iomem **ptr, *addr; diff --git a/lib/div64.c b/lib/div64.c index b71cf93c529..bb5bd0c0f03 100644 --- a/lib/div64.c +++ b/lib/div64.c @@ -16,9 +16,8 @@ * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S. 
*/ -#include <linux/types.h> #include <linux/module.h> -#include <asm/div64.h> +#include <linux/math64.h> /* Not needed on 64bit architectures */ #if BITS_PER_LONG == 32 @@ -58,10 +57,31 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base) EXPORT_SYMBOL(__div64_32); +#ifndef div_s64_rem +s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder) +{ + u64 quotient; + + if (dividend < 0) { + quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder); + *remainder = -*remainder; + if (divisor > 0) + quotient = -quotient; + } else { + quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder); + if (divisor < 0) + quotient = -quotient; + } + return quotient; +} +EXPORT_SYMBOL(div_s64_rem); +#endif + /* 64bit divisor, dividend and result. dynamic precision */ -uint64_t div64_64(uint64_t dividend, uint64_t divisor) +#ifndef div64_u64 +u64 div64_u64(u64 dividend, u64 divisor) { - uint32_t high, d; + u32 high, d; high = divisor >> 32; if (high) { @@ -72,10 +92,9 @@ uint64_t div64_64(uint64_t dividend, uint64_t divisor) } else d = divisor; - do_div(dividend, d); - - return dividend; + return div_u64(dividend, d); } -EXPORT_SYMBOL(div64_64); +EXPORT_SYMBOL(div64_u64); +#endif #endif /* BITS_PER_LONG == 32 */ diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c index 78ccd73a884..24c59ded47a 100644 --- a/lib/find_next_bit.c +++ b/lib/find_next_bit.c @@ -16,14 +16,12 @@ #define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) -/** - * find_next_bit - find the next set bit in a memory region - * @addr: The address to base the search on - * @offset: The bitnumber to start searching at - * @size: The maximum size to search +#ifdef CONFIG_GENERIC_FIND_NEXT_BIT +/* + * Find the next set bit in a memory region. */ unsigned long find_next_bit(const unsigned long *addr, unsigned long size, - unsigned long offset) + unsigned long offset) { const unsigned long *p = addr + BITOP_WORD(offset); unsigned long result = offset & ~(BITS_PER_LONG-1); @@ -60,7 +58,6 @@ found_first: found_middle: return result + __ffs(tmp); } - EXPORT_SYMBOL(find_next_bit); /* @@ -68,7 +65,7 @@ EXPORT_SYMBOL(find_next_bit); * Linus' asm-alpha/bitops.h. */ unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, - unsigned long offset) + unsigned long offset) { const unsigned long *p = addr + BITOP_WORD(offset); unsigned long result = offset & ~(BITS_PER_LONG-1); @@ -105,8 +102,62 @@ found_first: found_middle: return result + ffz(tmp); } - EXPORT_SYMBOL(find_next_zero_bit); +#endif /* CONFIG_GENERIC_FIND_NEXT_BIT */ + +#ifdef CONFIG_GENERIC_FIND_FIRST_BIT +/* + * Find the first set bit in a memory region. + */ +unsigned long find_first_bit(const unsigned long *addr, unsigned long size) +{ + const unsigned long *p = addr; + unsigned long result = 0; + unsigned long tmp; + + while (size & ~(BITS_PER_LONG-1)) { + if ((tmp = *(p++))) + goto found; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + + tmp = (*p) & (~0UL >> (BITS_PER_LONG - size)); + if (tmp == 0UL) /* Are any bits set? */ + return result + size; /* Nope. */ +found: + return result + __ffs(tmp); +} +EXPORT_SYMBOL(find_first_bit); + +/* + * Find the first cleared bit in a memory region. 
+ */ +unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size) +{ + const unsigned long *p = addr; + unsigned long result = 0; + unsigned long tmp; + + while (size & ~(BITS_PER_LONG-1)) { + if (~(tmp = *(p++))) + goto found; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + + tmp = (*p) | (~0UL << size); + if (tmp == ~0UL) /* Are any bits zero? */ + return result + size; /* Nope. */ +found: + return result + ffz(tmp); +} +EXPORT_SYMBOL(find_first_zero_bit); +#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ #ifdef __BIG_ENDIAN diff --git a/lib/idr.c b/lib/idr.c index afbb0b1023d..7a02e173f02 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -385,8 +385,8 @@ void idr_remove(struct idr *idp, int id) while (idp->id_free_cnt >= IDR_FREE_MAX) { p = alloc_layer(idp); kmem_cache_free(idr_layer_cache, p); - return; } + return; } EXPORT_SYMBOL(idr_remove); @@ -585,12 +585,11 @@ static void idr_cache_ctor(struct kmem_cache *idr_layer_cache, void *idr_layer) memset(idr_layer, 0, sizeof(struct idr_layer)); } -static int init_id_cache(void) +void __init idr_init_cache(void) { - if (!idr_layer_cache) - idr_layer_cache = kmem_cache_create("idr_layer_cache", - sizeof(struct idr_layer), 0, 0, idr_cache_ctor); - return 0; + idr_layer_cache = kmem_cache_create("idr_layer_cache", + sizeof(struct idr_layer), 0, SLAB_PANIC, + idr_cache_ctor); } /** @@ -602,7 +601,6 @@ static int init_id_cache(void) */ void idr_init(struct idr *idp) { - init_id_cache(); memset(idp, 0, sizeof(struct idr)); spin_lock_init(&idp->lock); } diff --git a/lib/inflate.c b/lib/inflate.c index 845f91d3ac1..9762294be06 100644 --- a/lib/inflate.c +++ b/lib/inflate.c @@ -811,6 +811,9 @@ DEBG("<dyn"); ll = malloc(sizeof(*ll) * (286+30)); /* literal/length and distance code lengths */ #endif + if (ll == NULL) + return 1; + /* make local bit buffer */ b = bb; k = bk; diff --git a/lib/iomap.c b/lib/iomap.c index dd6ca48fe6b..37a3ea4cac9 100644 --- a/lib/iomap.c +++ b/lib/iomap.c @@ -257,7 +257,7 @@ EXPORT_SYMBOL(ioport_unmap); void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) { resource_size_t start = pci_resource_start(dev, bar); - unsigned long len = pci_resource_len(dev, bar); + resource_size_t len = pci_resource_len(dev, bar); unsigned long flags = pci_resource_flags(dev, bar); if (!len || !start) diff --git a/lib/klist.c b/lib/klist.c index 120bd175aa7..cca37f96faa 100644 --- a/lib/klist.c +++ b/lib/klist.c @@ -1,38 +1,37 @@ /* - * klist.c - Routines for manipulating klists. + * klist.c - Routines for manipulating klists. * + * Copyright (C) 2005 Patrick Mochel * - * This klist interface provides a couple of structures that wrap around - * struct list_head to provide explicit list "head" (struct klist) and - * list "node" (struct klist_node) objects. For struct klist, a spinlock - * is included that protects access to the actual list itself. struct - * klist_node provides a pointer to the klist that owns it and a kref - * reference count that indicates the number of current users of that node - * in the list. + * This file is released under the GPL v2. * - * The entire point is to provide an interface for iterating over a list - * that is safe and allows for modification of the list during the - * iteration (e.g. insertion and removal), including modification of the - * current node on the list. 
+ * This klist interface provides a couple of structures that wrap around + * struct list_head to provide explicit list "head" (struct klist) and list + * "node" (struct klist_node) objects. For struct klist, a spinlock is + * included that protects access to the actual list itself. struct + * klist_node provides a pointer to the klist that owns it and a kref + * reference count that indicates the number of current users of that node + * in the list. * - * It works using a 3rd object type - struct klist_iter - that is declared - * and initialized before an iteration. klist_next() is used to acquire the - * next element in the list. It returns NULL if there are no more items. - * Internally, that routine takes the klist's lock, decrements the reference - * count of the previous klist_node and increments the count of the next - * klist_node. It then drops the lock and returns. + * The entire point is to provide an interface for iterating over a list + * that is safe and allows for modification of the list during the + * iteration (e.g. insertion and removal), including modification of the + * current node on the list. * - * There are primitives for adding and removing nodes to/from a klist. - * When deleting, klist_del() will simply decrement the reference count. - * Only when the count goes to 0 is the node removed from the list. - * klist_remove() will try to delete the node from the list and block - * until it is actually removed. This is useful for objects (like devices) - * that have been removed from the system and must be freed (but must wait - * until all accessors have finished). + * It works using a 3rd object type - struct klist_iter - that is declared + * and initialized before an iteration. klist_next() is used to acquire the + * next element in the list. It returns NULL if there are no more items. + * Internally, that routine takes the klist's lock, decrements the + * reference count of the previous klist_node and increments the count of + * the next klist_node. It then drops the lock and returns. * - * Copyright (C) 2005 Patrick Mochel - * - * This file is released under the GPL v2. + * There are primitives for adding and removing nodes to/from a klist. + * When deleting, klist_del() will simply decrement the reference count. + * Only when the count goes to 0 is the node removed from the list. + * klist_remove() will try to delete the node from the list and block until + * it is actually removed. This is useful for objects (like devices) that + * have been removed from the system and must be freed (but must wait until + * all accessors have finished). */ #include <linux/klist.h> @@ -40,10 +39,10 @@ /** - * klist_init - Initialize a klist structure. - * @k: The klist we're initializing. - * @get: The get function for the embedding object (NULL if none) - * @put: The put function for the embedding object (NULL if none) + * klist_init - Initialize a klist structure. + * @k: The klist we're initializing. + * @get: The get function for the embedding object (NULL if none) + * @put: The put function for the embedding object (NULL if none) * * Initialises the klist structure. If the klist_node structures are * going to be embedded in refcounted objects (necessary for safe @@ -51,8 +50,7 @@ * functions that take and release references on the embedding * objects. 
*/ - -void klist_init(struct klist * k, void (*get)(struct klist_node *), +void klist_init(struct klist *k, void (*get)(struct klist_node *), void (*put)(struct klist_node *)) { INIT_LIST_HEAD(&k->k_list); @@ -60,26 +58,23 @@ void klist_init(struct klist * k, void (*get)(struct klist_node *), k->get = get; k->put = put; } - EXPORT_SYMBOL_GPL(klist_init); - -static void add_head(struct klist * k, struct klist_node * n) +static void add_head(struct klist *k, struct klist_node *n) { spin_lock(&k->k_lock); list_add(&n->n_node, &k->k_list); spin_unlock(&k->k_lock); } -static void add_tail(struct klist * k, struct klist_node * n) +static void add_tail(struct klist *k, struct klist_node *n) { spin_lock(&k->k_lock); list_add_tail(&n->n_node, &k->k_list); spin_unlock(&k->k_lock); } - -static void klist_node_init(struct klist * k, struct klist_node * n) +static void klist_node_init(struct klist *k, struct klist_node *n) { INIT_LIST_HEAD(&n->n_node); init_completion(&n->n_removed); @@ -89,60 +84,83 @@ static void klist_node_init(struct klist * k, struct klist_node * n) k->get(n); } - /** - * klist_add_head - Initialize a klist_node and add it to front. - * @n: node we're adding. - * @k: klist it's going on. + * klist_add_head - Initialize a klist_node and add it to front. + * @n: node we're adding. + * @k: klist it's going on. */ - -void klist_add_head(struct klist_node * n, struct klist * k) +void klist_add_head(struct klist_node *n, struct klist *k) { klist_node_init(k, n); add_head(k, n); } - EXPORT_SYMBOL_GPL(klist_add_head); - /** - * klist_add_tail - Initialize a klist_node and add it to back. - * @n: node we're adding. - * @k: klist it's going on. + * klist_add_tail - Initialize a klist_node and add it to back. + * @n: node we're adding. + * @k: klist it's going on. */ - -void klist_add_tail(struct klist_node * n, struct klist * k) +void klist_add_tail(struct klist_node *n, struct klist *k) { klist_node_init(k, n); add_tail(k, n); } - EXPORT_SYMBOL_GPL(klist_add_tail); +/** + * klist_add_after - Init a klist_node and add it after an existing node + * @n: node we're adding. + * @pos: node to put @n after + */ +void klist_add_after(struct klist_node *n, struct klist_node *pos) +{ + struct klist *k = pos->n_klist; + + klist_node_init(k, n); + spin_lock(&k->k_lock); + list_add(&n->n_node, &pos->n_node); + spin_unlock(&k->k_lock); +} +EXPORT_SYMBOL_GPL(klist_add_after); + +/** + * klist_add_before - Init a klist_node and add it before an existing node + * @n: node we're adding. + * @pos: node to put @n after + */ +void klist_add_before(struct klist_node *n, struct klist_node *pos) +{ + struct klist *k = pos->n_klist; + + klist_node_init(k, n); + spin_lock(&k->k_lock); + list_add_tail(&n->n_node, &pos->n_node); + spin_unlock(&k->k_lock); +} +EXPORT_SYMBOL_GPL(klist_add_before); -static void klist_release(struct kref * kref) +static void klist_release(struct kref *kref) { - struct klist_node * n = container_of(kref, struct klist_node, n_ref); + struct klist_node *n = container_of(kref, struct klist_node, n_ref); list_del(&n->n_node); complete(&n->n_removed); n->n_klist = NULL; } -static int klist_dec_and_del(struct klist_node * n) +static int klist_dec_and_del(struct klist_node *n) { return kref_put(&n->n_ref, klist_release); } - /** - * klist_del - Decrement the reference count of node and try to remove. - * @n: node we're deleting. + * klist_del - Decrement the reference count of node and try to remove. + * @n: node we're deleting. 
*/ - -void klist_del(struct klist_node * n) +void klist_del(struct klist_node *n) { - struct klist * k = n->n_klist; + struct klist *k = n->n_klist; void (*put)(struct klist_node *) = k->put; spin_lock(&k->k_lock); @@ -152,48 +170,40 @@ void klist_del(struct klist_node * n) if (put) put(n); } - EXPORT_SYMBOL_GPL(klist_del); - /** - * klist_remove - Decrement the refcount of node and wait for it to go away. - * @n: node we're removing. + * klist_remove - Decrement the refcount of node and wait for it to go away. + * @n: node we're removing. */ - -void klist_remove(struct klist_node * n) +void klist_remove(struct klist_node *n) { klist_del(n); wait_for_completion(&n->n_removed); } - EXPORT_SYMBOL_GPL(klist_remove); - /** - * klist_node_attached - Say whether a node is bound to a list or not. - * @n: Node that we're testing. + * klist_node_attached - Say whether a node is bound to a list or not. + * @n: Node that we're testing. */ - -int klist_node_attached(struct klist_node * n) +int klist_node_attached(struct klist_node *n) { return (n->n_klist != NULL); } - EXPORT_SYMBOL_GPL(klist_node_attached); - /** - * klist_iter_init_node - Initialize a klist_iter structure. - * @k: klist we're iterating. - * @i: klist_iter we're filling. - * @n: node to start with. + * klist_iter_init_node - Initialize a klist_iter structure. + * @k: klist we're iterating. + * @i: klist_iter we're filling. + * @n: node to start with. * - * Similar to klist_iter_init(), but starts the action off with @n, - * instead of with the list head. + * Similar to klist_iter_init(), but starts the action off with @n, + * instead of with the list head. */ - -void klist_iter_init_node(struct klist * k, struct klist_iter * i, struct klist_node * n) +void klist_iter_init_node(struct klist *k, struct klist_iter *i, + struct klist_node *n) { i->i_klist = k; i->i_head = &k->k_list; @@ -201,66 +211,56 @@ void klist_iter_init_node(struct klist * k, struct klist_iter * i, struct klist_ if (n) kref_get(&n->n_ref); } - EXPORT_SYMBOL_GPL(klist_iter_init_node); - /** - * klist_iter_init - Iniitalize a klist_iter structure. - * @k: klist we're iterating. - * @i: klist_iter structure we're filling. + * klist_iter_init - Iniitalize a klist_iter structure. + * @k: klist we're iterating. + * @i: klist_iter structure we're filling. * - * Similar to klist_iter_init_node(), but start with the list head. + * Similar to klist_iter_init_node(), but start with the list head. */ - -void klist_iter_init(struct klist * k, struct klist_iter * i) +void klist_iter_init(struct klist *k, struct klist_iter *i) { klist_iter_init_node(k, i, NULL); } - EXPORT_SYMBOL_GPL(klist_iter_init); - /** - * klist_iter_exit - Finish a list iteration. - * @i: Iterator structure. + * klist_iter_exit - Finish a list iteration. + * @i: Iterator structure. * - * Must be called when done iterating over list, as it decrements the - * refcount of the current node. Necessary in case iteration exited before - * the end of the list was reached, and always good form. + * Must be called when done iterating over list, as it decrements the + * refcount of the current node. Necessary in case iteration exited before + * the end of the list was reached, and always good form. 
*/ - -void klist_iter_exit(struct klist_iter * i) +void klist_iter_exit(struct klist_iter *i) { if (i->i_cur) { klist_del(i->i_cur); i->i_cur = NULL; } } - EXPORT_SYMBOL_GPL(klist_iter_exit); - -static struct klist_node * to_klist_node(struct list_head * n) +static struct klist_node *to_klist_node(struct list_head *n) { return container_of(n, struct klist_node, n_node); } - /** - * klist_next - Ante up next node in list. - * @i: Iterator structure. + * klist_next - Ante up next node in list. + * @i: Iterator structure. * - * First grab list lock. Decrement the reference count of the previous - * node, if there was one. Grab the next node, increment its reference - * count, drop the lock, and return that next node. + * First grab list lock. Decrement the reference count of the previous + * node, if there was one. Grab the next node, increment its reference + * count, drop the lock, and return that next node. */ - -struct klist_node * klist_next(struct klist_iter * i) +struct klist_node *klist_next(struct klist_iter *i) { - struct list_head * next; - struct klist_node * lnode = i->i_cur; - struct klist_node * knode = NULL; + struct list_head *next; + struct klist_node *lnode = i->i_cur; + struct klist_node *knode = NULL; void (*put)(struct klist_node *) = i->i_klist->put; spin_lock(&i->i_klist->k_lock); @@ -281,7 +281,4 @@ struct klist_node * klist_next(struct klist_iter * i) put(lnode); return knode; } - EXPORT_SYMBOL_GPL(klist_next); - - diff --git a/lib/kobject.c b/lib/kobject.c index 2c649037092..718e5101c26 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -90,7 +90,7 @@ static void fill_kobj_path(struct kobject *kobj, char *path, int length) } pr_debug("kobject: '%s' (%p): %s: path = '%s'\n", kobject_name(kobj), - kobj, __FUNCTION__, path); + kobj, __func__, path); } /** @@ -181,7 +181,7 @@ static int kobject_add_internal(struct kobject *kobj) } pr_debug("kobject: '%s' (%p): %s: parent: '%s', set: '%s'\n", - kobject_name(kobj), kobj, __FUNCTION__, + kobject_name(kobj), kobj, __func__, parent ? kobject_name(parent) : "<NULL>", kobj->kset ? kobject_name(&kobj->kset->kobj) : "<NULL>"); @@ -196,10 +196,10 @@ static int kobject_add_internal(struct kobject *kobj) printk(KERN_ERR "%s failed for %s with " "-EEXIST, don't try to register things with " "the same name in the same directory.\n", - __FUNCTION__, kobject_name(kobj)); + __func__, kobject_name(kobj)); else printk(KERN_ERR "%s failed for %s (%d)\n", - __FUNCTION__, kobject_name(kobj), error); + __func__, kobject_name(kobj), error); dump_stack(); } else kobj->state_in_sysfs = 1; @@ -216,21 +216,12 @@ static int kobject_add_internal(struct kobject *kobj) static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list vargs) { - va_list aq; - char *name; - - va_copy(aq, vargs); - name = kvasprintf(GFP_KERNEL, fmt, vargs); - va_end(aq); - - if (!name) - return -ENOMEM; - /* Free the old name, if necessary. */ kfree(kobj->name); - /* Now, set the new name */ - kobj->name = name; + kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs); + if (!kobj->name) + return -ENOMEM; return 0; } @@ -246,12 +237,12 @@ static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, */ int kobject_set_name(struct kobject *kobj, const char *fmt, ...) 
{ - va_list args; + va_list vargs; int retval; - va_start(args, fmt); - retval = kobject_set_name_vargs(kobj, fmt, args); - va_end(args); + va_start(vargs, fmt); + retval = kobject_set_name_vargs(kobj, fmt, vargs); + va_end(vargs); return retval; } @@ -301,12 +292,9 @@ EXPORT_SYMBOL(kobject_init); static int kobject_add_varg(struct kobject *kobj, struct kobject *parent, const char *fmt, va_list vargs) { - va_list aq; int retval; - va_copy(aq, vargs); - retval = kobject_set_name_vargs(kobj, fmt, aq); - va_end(aq); + retval = kobject_set_name_vargs(kobj, fmt, vargs); if (retval) { printk(KERN_ERR "kobject: can not set name properly!\n"); return retval; @@ -540,7 +528,7 @@ static void kobject_cleanup(struct kobject *kobj) const char *name = kobj->name; pr_debug("kobject: '%s' (%p): %s\n", - kobject_name(kobj), kobj, __FUNCTION__); + kobject_name(kobj), kobj, __func__); if (t && !t->release) pr_debug("kobject: '%s' (%p): does not have a release() " @@ -600,7 +588,7 @@ void kobject_put(struct kobject *kobj) static void dynamic_kobj_release(struct kobject *kobj) { - pr_debug("kobject: (%p): %s\n", kobj, __FUNCTION__); + pr_debug("kobject: (%p): %s\n", kobj, __func__); kfree(kobj); } @@ -657,7 +645,7 @@ struct kobject *kobject_create_and_add(const char *name, struct kobject *parent) retval = kobject_add(kobj, parent, "%s", name); if (retval) { printk(KERN_WARNING "%s: kobject_add error: %d\n", - __FUNCTION__, retval); + __func__, retval); kobject_put(kobj); kobj = NULL; } @@ -765,7 +753,7 @@ static void kset_release(struct kobject *kobj) { struct kset *kset = container_of(kobj, struct kset, kobj); pr_debug("kobject: '%s' (%p): %s\n", - kobject_name(kobj), kobj, __FUNCTION__); + kobject_name(kobj), kobj, __func__); kfree(kset); } diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 9fb6b86cf6b..2fa545a6316 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -101,7 +101,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, int retval = 0; pr_debug("kobject: '%s' (%p): %s\n", - kobject_name(kobj), kobj, __FUNCTION__); + kobject_name(kobj), kobj, __func__); /* search the kset we belong to */ top_kobj = kobj; @@ -111,7 +111,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, if (!top_kobj->kset) { pr_debug("kobject: '%s' (%p): %s: attempted to send uevent " "without kset!\n", kobject_name(kobj), kobj, - __FUNCTION__); + __func__); return -EINVAL; } @@ -123,7 +123,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, if (!uevent_ops->filter(kset, kobj)) { pr_debug("kobject: '%s' (%p): %s: filter function " "caused the event to drop!\n", - kobject_name(kobj), kobj, __FUNCTION__); + kobject_name(kobj), kobj, __func__); return 0; } @@ -135,7 +135,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, if (!subsystem) { pr_debug("kobject: '%s' (%p): %s: unset subsystem caused the " "event to drop!\n", kobject_name(kobj), kobj, - __FUNCTION__); + __func__); return 0; } @@ -177,7 +177,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, if (retval) { pr_debug("kobject: '%s' (%p): %s: uevent() returned " "%d\n", kobject_name(kobj), kobj, - __FUNCTION__, retval); + __func__, retval); goto exit; } } diff --git a/lib/lmb.c b/lib/lmb.c index 207147ab25e..83287d3869a 100644 --- a/lib/lmb.c +++ b/lib/lmb.c @@ -46,14 +46,13 @@ void lmb_dump_all(void) #endif /* DEBUG */ } -static unsigned long __init lmb_addrs_overlap(u64 base1, u64 size1, - u64 base2, u64 size2) +static 
unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2, + u64 size2) { return ((base1 < (base2 + size2)) && (base2 < (base1 + size1))); } -static long __init lmb_addrs_adjacent(u64 base1, u64 size1, - u64 base2, u64 size2) +static long lmb_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2) { if (base2 == base1 + size1) return 1; @@ -63,7 +62,7 @@ static long __init lmb_addrs_adjacent(u64 base1, u64 size1, return 0; } -static long __init lmb_regions_adjacent(struct lmb_region *rgn, +static long lmb_regions_adjacent(struct lmb_region *rgn, unsigned long r1, unsigned long r2) { u64 base1 = rgn->region[r1].base; @@ -74,7 +73,7 @@ static long __init lmb_regions_adjacent(struct lmb_region *rgn, return lmb_addrs_adjacent(base1, size1, base2, size2); } -static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r) +static void lmb_remove_region(struct lmb_region *rgn, unsigned long r) { unsigned long i; @@ -86,7 +85,7 @@ static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r) } /* Assumption: base addr of region 1 < base addr of region 2 */ -static void __init lmb_coalesce_regions(struct lmb_region *rgn, +static void lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1, unsigned long r2) { rgn->region[r1].size += rgn->region[r2].size; @@ -118,7 +117,7 @@ void __init lmb_analyze(void) lmb.memory.size += lmb.memory.region[i].size; } -static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size) +static long lmb_add_region(struct lmb_region *rgn, u64 base, u64 size) { unsigned long coalesced = 0; long adjacent, i; @@ -182,7 +181,7 @@ static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size) return 0; } -long __init lmb_add(u64 base, u64 size) +long lmb_add(u64 base, u64 size) { struct lmb_region *_rgn = &lmb.memory; @@ -194,6 +193,55 @@ long __init lmb_add(u64 base, u64 size) } +long lmb_remove(u64 base, u64 size) +{ + struct lmb_region *rgn = &(lmb.memory); + u64 rgnbegin, rgnend; + u64 end = base + size; + int i; + + rgnbegin = rgnend = 0; /* supress gcc warnings */ + + /* Find the region where (base, size) belongs to */ + for (i=0; i < rgn->cnt; i++) { + rgnbegin = rgn->region[i].base; + rgnend = rgnbegin + rgn->region[i].size; + + if ((rgnbegin <= base) && (end <= rgnend)) + break; + } + + /* Didn't find the region */ + if (i == rgn->cnt) + return -1; + + /* Check to see if we are removing entire region */ + if ((rgnbegin == base) && (rgnend == end)) { + lmb_remove_region(rgn, i); + return 0; + } + + /* Check to see if region is matching at the front */ + if (rgnbegin == base) { + rgn->region[i].base = end; + rgn->region[i].size -= size; + return 0; + } + + /* Check to see if the region is matching at the end */ + if (rgnend == end) { + rgn->region[i].size -= size; + return 0; + } + + /* + * We need to split the entry - adjust the current one to the + * beginging of the hole and add the region after hole. + */ + rgn->region[i].size = base - rgn->region[i].base; + return lmb_add_region(rgn, end, rgnend - end); +} + long __init lmb_reserve(u64 base, u64 size) { struct lmb_region *_rgn = &lmb.reserved; @@ -426,3 +474,36 @@ int __init lmb_is_reserved(u64 addr) } return 0; } + +/* + * Given a <base, len>, find which memory regions belong to this range. + * Adjust the request and return a contiguous chunk. 
+ */ +int lmb_find(struct lmb_property *res) +{ + int i; + u64 rstart, rend; + + rstart = res->base; + rend = rstart + res->size - 1; + + for (i = 0; i < lmb.memory.cnt; i++) { + u64 start = lmb.memory.region[i].base; + u64 end = start + lmb.memory.region[i].size - 1; + + if (start > rend) + return -1; + + if ((end >= rstart) && (start < rend)) { + /* adjust the request */ + if (rstart < start) + rstart = start; + if (rend > end) + rend = end; + res->base = rstart; + res->size = rend - rstart + 1; + return 0; + } + } + return -1; +} diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c index 393a0e915c2..119174494cb 100644 --- a/lib/percpu_counter.c +++ b/lib/percpu_counter.c @@ -102,6 +102,7 @@ void percpu_counter_destroy(struct percpu_counter *fbc) return; free_percpu(fbc->counters); + fbc->counters = NULL; #ifdef CONFIG_HOTPLUG_CPU mutex_lock(&percpu_counters_lock); list_del(&fbc->list); diff --git a/lib/proportions.c b/lib/proportions.c index 9508d9a7af3..4f387a643d7 100644 --- a/lib/proportions.c +++ b/lib/proportions.c @@ -73,12 +73,6 @@ #include <linux/proportions.h> #include <linux/rcupdate.h> -/* - * Limit the time part in order to ensure there are some bits left for the - * cycle counter. - */ -#define PROP_MAX_SHIFT (3*BITS_PER_LONG/4) - int prop_descriptor_init(struct prop_descriptor *pd, int shift) { int err; @@ -268,6 +262,38 @@ void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl) } /* + * identical to __prop_inc_percpu, except that it limits this pl's fraction to + * @frac/PROP_FRAC_BASE by ignoring events when this limit has been exceeded. + */ +void __prop_inc_percpu_max(struct prop_descriptor *pd, + struct prop_local_percpu *pl, long frac) +{ + struct prop_global *pg = prop_get_global(pd); + + prop_norm_percpu(pg, pl); + + if (unlikely(frac != PROP_FRAC_BASE)) { + unsigned long period_2 = 1UL << (pg->shift - 1); + unsigned long counter_mask = period_2 - 1; + unsigned long global_count; + long numerator, denominator; + + numerator = percpu_counter_read_positive(&pl->events); + global_count = percpu_counter_read(&pg->events); + denominator = period_2 + (global_count & counter_mask); + + if (numerator > ((denominator * frac) >> PROP_FRAC_SHIFT)) + goto out_put; + } + + percpu_counter_add(&pl->events, 1); + percpu_counter_add(&pg->events, 1); + +out_put: + prop_put_global(pd, pg); +} + +/* * Obtain a fraction of this proportion * * p_{j} = x_{j} / (period/2 + t % period/2) diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 65f0e758ec3..bd521716ab1 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -114,8 +114,7 @@ radix_tree_node_alloc(struct radix_tree_root *root) } } if (ret == NULL) - ret = kmem_cache_alloc(radix_tree_node_cachep, - set_migrateflags(gfp_mask, __GFP_RECLAIMABLE)); + ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); BUG_ON(radix_tree_is_indirect_ptr(ret)); return ret; @@ -150,8 +149,7 @@ int radix_tree_preload(gfp_t gfp_mask) rtp = &__get_cpu_var(radix_tree_preloads); while (rtp->nr < ARRAY_SIZE(rtp->nodes)) { preempt_enable(); - node = kmem_cache_alloc(radix_tree_node_cachep, - set_migrateflags(gfp_mask, __GFP_RECLAIMABLE)); + node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); if (node == NULL) goto out; preempt_disable(); @@ -1098,7 +1096,8 @@ void __init radix_tree_init(void) { radix_tree_node_cachep = kmem_cache_create("radix_tree_node", sizeof(struct radix_tree_node), 0, - SLAB_PANIC, radix_tree_node_ctor); + SLAB_PANIC | SLAB_RECLAIM_ACCOUNT, + radix_tree_node_ctor); radix_tree_init_maxindex(); 
hotcpu_notifier(radix_tree_callback, 0); } diff --git a/lib/ratelimit.c b/lib/ratelimit.c new file mode 100644 index 00000000000..485e3040dcd --- /dev/null +++ b/lib/ratelimit.c @@ -0,0 +1,51 @@ +/* + * ratelimit.c - Do something with rate limit. + * + * Isolated from kernel/printk.c by Dave Young <hidave.darkstar@gmail.com> + * + * This file is released under the GPLv2. + * + */ + +#include <linux/kernel.h> +#include <linux/jiffies.h> +#include <linux/module.h> + +/* + * __ratelimit - rate limiting + * @ratelimit_jiffies: minimum time in jiffies between two callbacks + * @ratelimit_burst: number of callbacks we do before ratelimiting + * + * This enforces a rate limit: not more than @ratelimit_burst callbacks + * in every ratelimit_jiffies + */ +int __ratelimit(int ratelimit_jiffies, int ratelimit_burst) +{ + static DEFINE_SPINLOCK(ratelimit_lock); + static unsigned toks = 10 * 5 * HZ; + static unsigned long last_msg; + static int missed; + unsigned long flags; + unsigned long now = jiffies; + + spin_lock_irqsave(&ratelimit_lock, flags); + toks += now - last_msg; + last_msg = now; + if (toks > (ratelimit_burst * ratelimit_jiffies)) + toks = ratelimit_burst * ratelimit_jiffies; + if (toks >= ratelimit_jiffies) { + int lost = missed; + + missed = 0; + toks -= ratelimit_jiffies; + spin_unlock_irqrestore(&ratelimit_lock, flags); + if (lost) + printk(KERN_WARNING "%s: %d messages suppressed\n", + __func__, lost); + return 1; + } + missed++; + spin_unlock_irqrestore(&ratelimit_lock, flags); + return 0; +} +EXPORT_SYMBOL(__ratelimit); diff --git a/lib/string.c b/lib/string.c index 5efafed3d6b..b19b87af65a 100644 --- a/lib/string.c +++ b/lib/string.c @@ -493,6 +493,33 @@ char *strsep(char **s, const char *ct) EXPORT_SYMBOL(strsep); #endif +/** + * sysfs_streq - return true if strings are equal, modulo trailing newline + * @s1: one string + * @s2: another string + * + * This routine returns true iff two strings are equal, treating both + * NUL and newline-then-NUL as equivalent string terminations. It's + * geared for use with sysfs input strings, which generally terminate + * with newlines but are compared against values without newlines. + */ +bool sysfs_streq(const char *s1, const char *s2) +{ + while (*s1 && *s1 == *s2) { + s1++; + s2++; + } + + if (*s1 == *s2) + return true; + if (!*s1 && *s2 == '\n' && !s2[1]) + return true; + if (*s1 == '\n' && !s1[1] && !*s2) + return true; + return false; +} +EXPORT_SYMBOL(sysfs_streq); + #ifndef __HAVE_ARCH_MEMSET /** * memset - Fill a region of memory with the given value diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 025922807e6..d568894df8c 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -31,6 +31,7 @@ #include <linux/init.h> #include <linux/bootmem.h> +#include <linux/iommu-helper.h> #define OFFSET(val,align) ((unsigned long) \ ( (val) & ( (align) - 1))) @@ -282,15 +283,6 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr) return (addr & ~mask) != 0; } -static inline unsigned int is_span_boundary(unsigned int index, - unsigned int nslots, - unsigned long offset_slots, - unsigned long max_slots) -{ - unsigned long offset = (offset_slots + index) & (max_slots - 1); - return offset + nslots > max_slots; -} - /* * Allocates bounce buffer and returns its kernel virtual address. */ @@ -331,56 +323,53 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir) * request and allocate a buffer from that IO TLB pool. 
*/ spin_lock_irqsave(&io_tlb_lock, flags); - { - index = ALIGN(io_tlb_index, stride); - if (index >= io_tlb_nslabs) - index = 0; - wrap = index; - - do { - while (is_span_boundary(index, nslots, offset_slots, - max_slots)) { - index += stride; - if (index >= io_tlb_nslabs) - index = 0; - if (index == wrap) - goto not_found; - } - - /* - * If we find a slot that indicates we have 'nslots' - * number of contiguous buffers, we allocate the - * buffers from that slot and mark the entries as '0' - * indicating unavailable. - */ - if (io_tlb_list[index] >= nslots) { - int count = 0; - - for (i = index; i < (int) (index + nslots); i++) - io_tlb_list[i] = 0; - for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--) - io_tlb_list[i] = ++count; - dma_addr = io_tlb_start + (index << IO_TLB_SHIFT); - - /* - * Update the indices to avoid searching in - * the next round. - */ - io_tlb_index = ((index + nslots) < io_tlb_nslabs - ? (index + nslots) : 0); - - goto found; - } + index = ALIGN(io_tlb_index, stride); + if (index >= io_tlb_nslabs) + index = 0; + wrap = index; + + do { + while (iommu_is_span_boundary(index, nslots, offset_slots, + max_slots)) { index += stride; if (index >= io_tlb_nslabs) index = 0; - } while (index != wrap); + if (index == wrap) + goto not_found; + } - not_found: - spin_unlock_irqrestore(&io_tlb_lock, flags); - return NULL; - } - found: + /* + * If we find a slot that indicates we have 'nslots' number of + * contiguous buffers, we allocate the buffers from that slot + * and mark the entries as '0' indicating unavailable. + */ + if (io_tlb_list[index] >= nslots) { + int count = 0; + + for (i = index; i < (int) (index + nslots); i++) + io_tlb_list[i] = 0; + for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--) + io_tlb_list[i] = ++count; + dma_addr = io_tlb_start + (index << IO_TLB_SHIFT); + + /* + * Update the indices to avoid searching in the next + * round. + */ + io_tlb_index = ((index + nslots) < io_tlb_nslabs + ? (index + nslots) : 0); + + goto found; + } + index += stride; + if (index >= io_tlb_nslabs) + index = 0; + } while (index != wrap); + +not_found: + spin_unlock_irqrestore(&io_tlb_lock, flags); + return NULL; +found: spin_unlock_irqrestore(&io_tlb_lock, flags); /* @@ -566,7 +555,8 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic) * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed. */ dma_addr_t -swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir) +swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size, + int dir, struct dma_attrs *attrs) { dma_addr_t dev_addr = virt_to_bus(ptr); void *map; @@ -599,6 +589,13 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir) return dev_addr; } +EXPORT_SYMBOL(swiotlb_map_single_attrs); + +dma_addr_t +swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir) +{ + return swiotlb_map_single_attrs(hwdev, ptr, size, dir, NULL); +} /* * Unmap a single streaming mode DMA translation. The dma_addr and size must @@ -609,8 +606,8 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir) * whatever the device wrote there. 
*/ void -swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, - int dir) +swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr, + size_t size, int dir, struct dma_attrs *attrs) { char *dma_addr = bus_to_virt(dev_addr); @@ -620,7 +617,14 @@ swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, else if (dir == DMA_FROM_DEVICE) dma_mark_clean(dma_addr, size); } +EXPORT_SYMBOL(swiotlb_unmap_single_attrs); +void +swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, + int dir) +{ + return swiotlb_unmap_single_attrs(hwdev, dev_addr, size, dir, NULL); +} /* * Make physical memory consistent for a single streaming mode DMA translation * after a transfer. @@ -691,6 +695,8 @@ swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr, SYNC_FOR_DEVICE); } +void swiotlb_unmap_sg_attrs(struct device *, struct scatterlist *, int, int, + struct dma_attrs *); /* * Map a set of buffers described by scatterlist in streaming mode for DMA. * This is the scatter-gather version of the above swiotlb_map_single @@ -708,8 +714,8 @@ swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr, * same here. */ int -swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, - int dir) +swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, + int dir, struct dma_attrs *attrs) { struct scatterlist *sg; void *addr; @@ -727,7 +733,8 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, /* Don't panic here, we expect map_sg users to do proper error handling. */ swiotlb_full(hwdev, sg->length, dir, 0); - swiotlb_unmap_sg(hwdev, sgl, i, dir); + swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir, + attrs); sgl[0].dma_length = 0; return 0; } @@ -738,14 +745,22 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, } return nelems; } +EXPORT_SYMBOL(swiotlb_map_sg_attrs); + +int +swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, + int dir) +{ + return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL); +} /* * Unmap a set of streaming mode DMA translations. Again, cpu read rules * concerning calls here are the same as for swiotlb_unmap_single() above. */ void -swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, - int dir) +swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, + int nelems, int dir, struct dma_attrs *attrs) { struct scatterlist *sg; int i; @@ -760,6 +775,14 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length); } } +EXPORT_SYMBOL(swiotlb_unmap_sg_attrs); + +void +swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, + int dir) +{ + return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL); +} /* * Make physical memory consistent for a set of streaming mode DMA translations |
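
The kobject_add_varg() hunk earlier in this diff can drop its va_copy()/va_end() pair because kobject_set_name_vargs() is now the only consumer of the va_list; a copy is only needed when the same list must be traversed more than once. A minimal userspace sketch of that rule (illustrative only, not kernel API):

#include <stdarg.h>
#include <stdio.h>

/* A wrapper that walks its arguments twice must duplicate the va_list with
 * va_copy(); a wrapper that hands it to exactly one consumer, as
 * kobject_add_varg() now does, can pass it along unmodified. */
static void print_twice(const char *fmt, ...)
{
	va_list ap, aq;

	va_start(ap, fmt);
	va_copy(aq, ap);	/* the second traversal needs its own copy */
	vprintf(fmt, ap);
	vprintf(fmt, aq);
	va_end(aq);
	va_end(ap);
}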
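
The new lmb_remove() above handles four cases: the range matches a whole registered region (the entry is dropped), it abuts the front or the back (the entry is shrunk), or it falls in the middle (the entry is trimmed to the start of the hole and the tail is re-added via lmb_add_region()). A hypothetical call sequence, with made-up addresses, showing the split case; it assumes the lmb arrays have already been initialised:

static void __init lmb_hole_example(void)
{
	lmb_add(0x00000000, 0x10000000);	/* memory:  [0, 256 MiB)     */
	lmb_remove(0x04000000, 0x01000000);	/* hole at  [64 MiB, 80 MiB) */
	/* lmb.memory now lists [0, 64 MiB) and [80 MiB, 256 MiB) */
}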
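
lmb_find() clips a requested <base, size>, passed in a struct lmb_property, to the first registered memory region it overlaps, rewrites the request in place and returns 0, or returns -1 if no region overlaps at all. A hedged usage sketch with hypothetical values, assuming the struct lmb_property layout from <linux/lmb.h> (u64 base and size):

static int __init lmb_find_example(void)
{
	struct lmb_property res = {
		.base = 0x0ff00000,	/* hypothetical request base */
		.size = 0x00100000,	/* 1 MiB                     */
	};

	if (lmb_find(&res) != 0)
		return -ENOMEM;		/* nothing usable in that range */

	/* res.base/res.size now describe only the overlapping chunk */
	pr_debug("lmb_find: base %llx size %llx\n",
		 (unsigned long long)res.base, (unsigned long long)res.size);
	return 0;
}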
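
__ratelimit() in the new lib/ratelimit.c is a token bucket: tokens accrue at one per jiffy, are capped at ratelimit_burst * ratelimit_jiffies, and each permitted call spends ratelimit_jiffies of them, so at most ratelimit_burst calls get through per window; suppressed calls are counted and reported the next time one is allowed. Because the bucket state is static, every caller of __ratelimit() currently shares a single budget. A caller-side sketch; the 5 * HZ / 10 values mirror the traditional printk defaults and the surrounding function is made up:

static void report_error(int err)
{
	/* allow at most 10 warnings per 5-second window */
	if (__ratelimit(5 * HZ, 10))
		printk(KERN_WARNING "device error %d\n", err);
}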
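
sysfs_streq() lets sysfs store methods compare user input, which usually arrives as "value\n", against fixed tokens without stripping the newline first: NUL and newline-then-NUL count as equivalent terminators on either side. A hypothetical store handler; the attribute and the 'enabled' flag are invented for illustration:

static bool enabled;

static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	/* "enable" and "enable\n" both match here */
	if (sysfs_streq(buf, "enable"))
		enabled = true;
	else if (sysfs_streq(buf, "disable"))
		enabled = false;
	else
		return -EINVAL;

	return count;
}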
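
The swiotlb changes follow one pattern: each map/unmap entry point gains an *_attrs variant taking a struct dma_attrs pointer, and the old name becomes a thin wrapper that passes NULL, so existing callers build unchanged (the forward declaration of swiotlb_unmap_sg_attrs() exists only so the map path can unwind a partially mapped scatterlist). The open-coded is_span_boundary() helper is likewise replaced by iommu_is_span_boundary() from <linux/iommu-helper.h>, with the slot-search logic otherwise intact. A hypothetical caller of the new interface; DEFINE_DMA_ATTRS() and dma_set_attr() are assumed to come from the DMA-attributes API this is wired up to, so treat those exact names as an assumption:

static dma_addr_t map_with_barrier(struct device *hwdev, void *buf, size_t len)
{
	DEFINE_DMA_ATTRS(attrs);	/* assumed helper, <linux/dma-attrs.h> */

	/* request write-barrier semantics for this mapping */
	dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);

	return swiotlb_map_single_attrs(hwdev, buf, len, DMA_TO_DEVICE, &attrs);
}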