Diffstat (limited to 'lib')
 lib/Kconfig              |  12
 lib/Kconfig.debug        |  29
 lib/Makefile             |   6
 lib/cpu_rmap.c           |   6
 lib/crc-t10dif.c         |  83
 lib/crc32.c              |  17
 lib/debugobjects.c       |  20
 lib/decompress_inflate.c |   2
 lib/div64.c              |  40
 lib/dump_stack.c         |   4
 lib/dynamic_debug.c      |   2
 lib/earlycpio.c          |  27
 lib/genalloc.c           |  22
 lib/hexdump.c            |   2
 lib/kobject.c            |  34
 lib/lockref.c            | 183
 lib/lz4/lz4_decompress.c |   8
 lib/percpu_ida.c         | 335
 lib/radix-tree.c         |  41
 lib/raid6/.gitignore     |   1
 lib/raid6/Makefile       |  46
 lib/raid6/algos.c        |   9
 lib/raid6/neon.c         |  58
 lib/raid6/neon.uc        |  80
 lib/raid6/test/Makefile  |  35
 lib/raid6/tilegx.uc      |  86
 lib/rbtree.c             |  40
 lib/rbtree_test.c        |  12
 lib/swiotlb.c            |   8
 lib/vsprintf.c           |  82
30 files changed, 1221 insertions(+), 109 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 71d9f81f6ee..b3c8be0da17 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -48,6 +48,16 @@ config STMP_DEVICE
 config PERCPU_RWSEM
 	boolean
 
+config ARCH_USE_CMPXCHG_LOCKREF
+	bool
+
+config CMPXCHG_LOCKREF
+	def_bool y if ARCH_USE_CMPXCHG_LOCKREF
+	depends on SMP
+	depends on !GENERIC_LOCKBREAK
+	depends on !DEBUG_SPINLOCK
+	depends on !DEBUG_LOCK_ALLOC
+
 config CRC_CCITT
 	tristate "CRC-CCITT functions"
 	help
@@ -66,6 +76,8 @@ config CRC16
 
 config CRC_T10DIF
 	tristate "CRC calculation for the T10 Data Integrity Field"
+	select CRYPTO
+	select CRYPTO_CRCT10DIF
 	help
 	  This option is only needed if a module that's not in the
 	  kernel tree needs to calculate CRC checks for use with the
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 1501aa55322..06344d986eb 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -597,7 +597,7 @@ endmenu # "Memory Debugging"
 
 config DEBUG_SHIRQ
 	bool "Debug shared IRQ handlers"
-	depends on DEBUG_KERNEL && GENERIC_HARDIRQS
+	depends on DEBUG_KERNEL
 	help
 	  Enable this to generate a spurious interrupt as soon as a shared
 	  interrupt handler is registered, and just before one is deregistered.
@@ -908,7 +908,7 @@ config LOCKDEP
 	bool
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
 	select STACKTRACE
-	select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE
+	select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC
 	select KALLSYMS
 	select KALLSYMS_ALL
 
@@ -981,6 +981,25 @@ config DEBUG_KOBJECT
 	  If you say Y here, some extra kobject debugging messages will be sent
 	  to the syslog.
 
+config DEBUG_KOBJECT_RELEASE
+	bool "kobject release debugging"
+	depends on DEBUG_KERNEL
+	help
+	  kobjects are reference counted objects.  This means that their
+	  last reference count put is not predictable, and the kobject can
+	  live on past the point at which a driver decides to drop its
+	  initial reference to the kobject gained on allocation.  An
+	  example of this would be a struct device which has just been
+	  unregistered.
+
+	  However, some buggy drivers assume that after such an operation,
+	  the memory backing the kobject can be immediately freed.  This
+	  goes completely against the principles of a refcounted object.
+
+	  If you say Y here, the kernel will delay the release of kobjects
+	  on the last reference count to improve the visibility of this
+	  kind of kobject release bug.
+
 config HAVE_DEBUG_BUGVERBOSE
 	bool
 
@@ -1347,7 +1366,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
 	depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
 	depends on !X86_64
 	select STACKTRACE
-	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
+	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
 	help
 	  Provide stacktrace filter for fault-injection capabilities
 
@@ -1357,7 +1376,7 @@ config LATENCYTOP
 	depends on DEBUG_KERNEL
 	depends on STACKTRACE_SUPPORT
 	depends on PROC_FS
-	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
+	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
	select KALLSYMS
 	select KALLSYMS_ALL
 	select STACKTRACE
@@ -1442,7 +1461,7 @@ config BACKTRACE_SELF_TEST
 
 config RBTREE_TEST
 	tristate "Red-Black tree test"
-	depends on m && DEBUG_KERNEL
+	depends on DEBUG_KERNEL
 	help
 	  A benchmark measuring the performance of the rbtree library.
 	  Also includes rbtree invariant checks.
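The DEBUG_KOBJECT_RELEASE help text above describes the bug class only in prose. A minimal sketch of the pattern it is meant to expose follows; the names (my_obj, my_release, buggy_remove) are hypothetical and not taken from this patch:

    #include <linux/kobject.h>
    #include <linux/slab.h>

    struct my_obj {
        struct kobject kobj;
    };

    static void my_release(struct kobject *kobj)
    {
        /* correct: backing memory is freed only from release() */
        kfree(container_of(kobj, struct my_obj, kobj));
    }

    static void buggy_remove(struct my_obj *obj)
    {
        kobject_del(&obj->kobj);   /* unregister from sysfs */
        kobject_put(&obj->kobj);   /* drop the initial reference */
        /*
         * BUG: this assumes the put above dropped the *last* reference.
         * Any other holder will trigger my_release() later, and the
         * line below has already reused the object out from under it.
         */
        memset(obj, 0, sizeof(*obj));
    }

With the option enabled, the final release is deferred by a full HZ after the last put, so the premature reuse above is far more likely to hit still-live or poisoned memory and surface during testing.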
diff --git a/lib/Makefile b/lib/Makefile
index 7baccfd8a4e..f3bb2cb98ad 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -13,18 +13,20 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \
 	 proportions.o flex_proportions.o prio_heap.o ratelimit.o show_mem.o \
 	 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
-	 earlycpio.o percpu-refcount.o
+	 earlycpio.o percpu-refcount.o percpu_ida.o
 
 obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o
 
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
 
 lib-y	+= kobject.o klist.o
+obj-y	+= lockref.o
 
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
 	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \
-	 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o
+	 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
+	 percpu_ida.o
 
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
 obj-y += kstrtox.o
diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c
index 5fbed5caba6..4f134d8907a 100644
--- a/lib/cpu_rmap.c
+++ b/lib/cpu_rmap.c
@@ -8,9 +8,7 @@
  */
 
 #include <linux/cpu_rmap.h>
-#ifdef CONFIG_GENERIC_HARDIRQS
 #include <linux/interrupt.h>
-#endif
 #include <linux/export.h>
 
 /*
@@ -213,8 +211,6 @@ int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
 }
 EXPORT_SYMBOL(cpu_rmap_update);
 
-#ifdef CONFIG_GENERIC_HARDIRQS
-
 /* Glue between IRQ affinity notifiers and CPU rmaps */
 
 struct irq_glue {
@@ -309,5 +305,3 @@ int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
 	return rc;
 }
 EXPORT_SYMBOL(irq_cpu_rmap_add);
-
-#endif /* CONFIG_GENERIC_HARDIRQS */
diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c
index fbbd66ed86c..dfe6ec17c0a 100644
--- a/lib/crc-t10dif.c
+++ b/lib/crc-t10dif.c
@@ -11,57 +11,54 @@
 #include <linux/types.h>
 #include <linux/module.h>
 #include <linux/crc-t10dif.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <crypto/hash.h>
+#include <linux/static_key.h>
 
-/* Table generated using the following polynomium:
- * x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1
- * gt: 0x8bb7
- */
-static const __u16 t10_dif_crc_table[256] = {
-	0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B,
-	0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6,
-	0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6,
-	0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B,
-	0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1,
-	0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C,
-	0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C,
-	0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781,
-	0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8,
-	0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255,
-	0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925,
-	0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698,
-	0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472,
-	0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF,
-	0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 0x80BF,
-	0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02,
-	0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA,
-	0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067,
-	0x6E7C, 0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17,
-	0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA,
-	0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640,
-	0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD,
-	0xC7E6, 0x4C51, 0x5B3F, 0xD088, 0x75E3, 0xFE54, 0xE93A, 0x628D,
-	0x285B, 0xA3EC, 0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30,
-	0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759,
-	0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4,
-	0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394,
-	0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29,
-	0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3,
-	0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E,
-	0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E,
-	0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3
-};
+static struct crypto_shash *crct10dif_tfm;
+static struct static_key crct10dif_fallback __read_mostly;
 
 __u16 crc_t10dif(const unsigned char *buffer, size_t len)
 {
-	__u16 crc = 0;
-	unsigned int i;
+	struct {
+		struct shash_desc shash;
+		char ctx[2];
+	} desc;
+	int err;
+
+	if (static_key_false(&crct10dif_fallback))
+		return crc_t10dif_generic(0, buffer, len);
+
+	desc.shash.tfm = crct10dif_tfm;
+	desc.shash.flags = 0;
+	*(__u16 *)desc.ctx = 0;
 
-	for (i = 0 ; i < len ; i++)
-		crc = (crc << 8) ^ t10_dif_crc_table[((crc >> 8) ^ buffer[i]) & 0xff];
+	err = crypto_shash_update(&desc.shash, buffer, len);
+	BUG_ON(err);
 
-	return crc;
+	return *(__u16 *)desc.ctx;
 }
 EXPORT_SYMBOL(crc_t10dif);
 
+static int __init crc_t10dif_mod_init(void)
+{
+	crct10dif_tfm = crypto_alloc_shash("crct10dif", 0, 0);
+	if (IS_ERR(crct10dif_tfm)) {
+		static_key_slow_inc(&crct10dif_fallback);
+		crct10dif_tfm = NULL;
+	}
+	return 0;
+}
+
+static void __exit crc_t10dif_mod_fini(void)
+{
+	crypto_free_shash(crct10dif_tfm);
+}
+
+module_init(crc_t10dif_mod_init);
+module_exit(crc_t10dif_mod_fini);
+
 MODULE_DESCRIPTION("T10 DIF CRC calculation");
 MODULE_LICENSE("GPL");
+MODULE_SOFTDEP("pre: crct10dif");
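For reference, the caller-visible contract of crc_t10dif() is unchanged by the patch above. A usage sketch follows; the 512-byte sector buffer is illustrative, not from this patch:

    #include <linux/crc-t10dif.h>

    /* Compute the 16-bit T10 DIF guard tag over one 512-byte sector.
     * crc_t10dif() now dispatches through the "crct10dif" shash when one
     * was allocated at init time, and falls back to crc_t10dif_generic(),
     * the old table-driven loop, otherwise. */
    static __u16 sector_guard_tag(const unsigned char *sector)
    {
        return crc_t10dif(sector, 512);
    }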
diff --git a/lib/crc32.c b/lib/crc32.c
index 072fbd8234d..410093dbe51 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -131,11 +131,14 @@ crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
 #endif
 
 /**
- * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32
- * @crc: seed value for computation.  ~0 for Ethernet, sometimes 0 for
- *	other uses, or the previous crc32 value if computing incrementally.
- * @p: pointer to buffer over which CRC is run
+ * crc32_le_generic() - Calculate bitwise little-endian Ethernet AUTODIN II
+ *			CRC32/CRC32C
+ * @crc: seed value for computation.  ~0 for Ethernet, sometimes 0 for other
+ *	 uses, or the previous crc32/crc32c value if computing incrementally.
+ * @p: pointer to buffer over which CRC32/CRC32C is run
  * @len: length of buffer @p
+ * @tab: little-endian Ethernet table
+ * @polynomial: CRC32/CRC32c LE polynomial
  */
 static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p,
 					  size_t len, const u32 (*tab)[256],
@@ -201,11 +204,13 @@ EXPORT_SYMBOL(crc32_le);
 EXPORT_SYMBOL(__crc32c_le);
 
 /**
- * crc32_be() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
+ * crc32_be_generic() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
  * @crc: seed value for computation.  ~0 for Ethernet, sometimes 0 for
  *	other uses, or the previous crc32 value if computing incrementally.
- * @p: pointer to buffer over which CRC is run
+ * @p: pointer to buffer over which CRC32 is run
  * @len: length of buffer @p
+ * @tab: big-endian Ethernet table
+ * @polynomial: CRC32 BE polynomial
  */
 static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p,
 					  size_t len, const u32 (*tab)[256],
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 37061ede8b8..bf2c8b1043d 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -381,19 +381,21 @@ void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
+ * Returns 0 for success, -EINVAL for check failed.
 */
-void debug_object_activate(void *addr, struct debug_obj_descr *descr)
+int debug_object_activate(void *addr, struct debug_obj_descr *descr)
 {
 	enum debug_obj_state state;
 	struct debug_bucket *db;
 	struct debug_obj *obj;
 	unsigned long flags;
+	int ret;
 	struct debug_obj o = { .object = addr,
 			       .state = ODEBUG_STATE_NOTAVAILABLE,
 			       .descr = descr };
 
 	if (!debug_objects_enabled)
-		return;
+		return 0;
 
 	db = get_bucket((unsigned long) addr);
 
@@ -405,23 +407,26 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
 		case ODEBUG_STATE_INIT:
 		case ODEBUG_STATE_INACTIVE:
 			obj->state = ODEBUG_STATE_ACTIVE;
+			ret = 0;
 			break;
 
 		case ODEBUG_STATE_ACTIVE:
 			debug_print_object(obj, "activate");
 			state = obj->state;
 			raw_spin_unlock_irqrestore(&db->lock, flags);
-			debug_object_fixup(descr->fixup_activate, addr, state);
-			return;
+			ret = debug_object_fixup(descr->fixup_activate, addr, state);
+			return ret ? -EINVAL : 0;
 
 		case ODEBUG_STATE_DESTROYED:
 			debug_print_object(obj, "activate");
+			ret = -EINVAL;
 			break;
 		default:
+			ret = 0;
 			break;
 		}
 		raw_spin_unlock_irqrestore(&db->lock, flags);
-		return;
+		return ret;
 	}
 
 	raw_spin_unlock_irqrestore(&db->lock, flags);
@@ -431,8 +436,11 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
 	 * true or not.
 	 */
 	if (debug_object_fixup(descr->fixup_activate, addr,
-			       ODEBUG_STATE_NOTAVAILABLE))
+			       ODEBUG_STATE_NOTAVAILABLE)) {
 		debug_print_object(&o, "activate");
+		return -EINVAL;
+	}
+	return 0;
 }
 
 /**
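Since debug_object_activate() now reports failure, call sites can refuse to start an object whose state check failed instead of proceeding. A caller-side sketch; my_descr and my_timer_start are hypothetical names, not from this patch:

    #include <linux/debugobjects.h>

    extern struct debug_obj_descr my_descr;   /* hypothetical descriptor */

    static int my_timer_start(void *timer)
    {
        /* A non-zero return means the check failed, e.g. the object
         * was already active or has been destroyed. */
        if (debug_object_activate(timer, &my_descr))
            return -EINVAL;

        /* ... actually arm the timer ... */
        return 0;
    }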
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
index 19ff89e34ee..d619b28c456 100644
--- a/lib/decompress_inflate.c
+++ b/lib/decompress_inflate.c
@@ -48,7 +48,7 @@ STATIC int INIT gunzip(unsigned char *buf, int len,
 		out_len = 0x8000; /* 32 K */
 		out_buf = malloc(out_len);
 	} else {
-		out_len = 0x7fffffff; /* no limit */
+		out_len = ((size_t)~0) - (size_t)out_buf; /* no limit */
 	}
 	if (!out_buf) {
 		error("Out of memory while allocating output buffer");
diff --git a/lib/div64.c b/lib/div64.c
index a163b6caef7..4382ad77777 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -79,6 +79,46 @@ EXPORT_SYMBOL(div_s64_rem);
 #endif
 
 /**
+ * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
+ * @dividend:	64bit dividend
+ * @divisor:	64bit divisor
+ * @remainder:	64bit remainder
+ *
+ * This implementation is comparable to the algorithm used by div64_u64.
+ * But this operation, which includes math for calculating the remainder,
+ * is kept distinct to avoid slowing down the div64_u64 operation on 32bit
+ * systems.
+ */
+#ifndef div64_u64_rem
+u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
+{
+	u32 high = divisor >> 32;
+	u64 quot;
+
+	if (high == 0) {
+		u32 rem32;
+		quot = div_u64_rem(dividend, divisor, &rem32);
+		*remainder = rem32;
+	} else {
+		int n = 1 + fls(high);
+		quot = div_u64(dividend >> n, divisor >> n);
+
+		if (quot != 0)
+			quot--;
+
+		*remainder = dividend - quot * divisor;
+		if (*remainder >= divisor) {
+			quot++;
+			*remainder -= divisor;
+		}
+	}
+
+	return quot;
+}
+EXPORT_SYMBOL(div64_u64_rem);
+#endif
+
+/**
  * div64_u64 - unsigned 64bit divide with 64bit divisor
  * @dividend:	64bit dividend
  * @divisor:	64bit divisor
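To see why the quotient estimate in div64_u64_rem() above needs at most one correction, here is a standalone userspace model of the same steps; the input values are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace model of div64_u64_rem(): when the divisor needs more
     * than 32 bits, shift both operands right by n = 1 + fls(high) so
     * a narrower divide suffices; the raw estimate can be at most one
     * too high, and after the unconditional quot-- at most one too
     * low, which the remainder test repairs. */
    int main(void)
    {
        uint64_t dividend = 0x123456789abcdef0ULL;
        uint64_t divisor  = 0x00000089abcdef01ULL;  /* high 32 bits != 0 */

        uint32_t high = (uint32_t)(divisor >> 32);
        int n = 1 + (32 - __builtin_clz(high));     /* 1 + fls(high) */

        uint64_t quot = (dividend >> n) / (divisor >> n);
        if (quot != 0)
            quot--;                                 /* estimate may be 1 too high */

        uint64_t rem = dividend - quot * divisor;
        if (rem >= divisor) {                       /* at most one step short now */
            quot++;
            rem -= divisor;
        }

        printf("quot=%llu rem=%llu ok=%d\n",
               (unsigned long long)quot, (unsigned long long)rem,
               quot == dividend / divisor && rem == dividend % divisor);
        return 0;
    }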
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index c03154173cc..f23b63f0a1c 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -23,7 +23,7 @@ static void __dump_stack(void)
 #ifdef CONFIG_SMP
 static atomic_t dump_lock = ATOMIC_INIT(-1);
 
-void dump_stack(void)
+asmlinkage void dump_stack(void)
 {
 	int was_locked;
 	int old;
@@ -55,7 +55,7 @@ retry:
 	preempt_enable();
 }
 #else
-void dump_stack(void)
+asmlinkage void dump_stack(void)
 {
 	__dump_stack();
 }
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 99fec3ae405..c37aeacd765 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -309,7 +309,7 @@ static int ddebug_parse_query(char *words[], int nwords,
 			struct ddebug_query *query, const char *modname)
 {
 	unsigned int i;
-	int rc;
+	int rc = 0;
 
 	/* check we have an even number of words */
 	if (nwords % 2 != 0) {
diff --git a/lib/earlycpio.c b/lib/earlycpio.c
index 7aa7ce250c9..3eb3e4722b8 100644
--- a/lib/earlycpio.c
+++ b/lib/earlycpio.c
@@ -49,22 +49,23 @@ enum cpio_fields {
 
 /**
 * cpio_data find_cpio_data - Search for files in an uncompressed cpio
- * @path:   The directory to search for, including a slash at the end
- * @data:   Pointer to the the cpio archive or a header inside
- * @len:    Remaining length of the cpio based on data pointer
- * @offset: When a matching file is found, this is the offset to the
- *          beginning of the cpio. It can be used to iterate through
- *          the cpio to find all files inside of a directory path
+ * @path:    The directory to search for, including a slash at the end
+ * @data:    Pointer to the cpio archive or a header inside
+ * @len:     Remaining length of the cpio based on data pointer
+ * @nextoff: When a matching file is found, this is the offset from the
+ *           beginning of the cpio to the beginning of the next file, not the
+ *           matching file itself. It can be used to iterate through the cpio
+ *           to find all files inside of a directory path.
 *
- * @return: struct cpio_data containing the address, length and
- *          filename (with the directory path cut off) of the found file.
- *          If you search for a filename and not for files in a directory,
- *          pass the absolute path of the filename in the cpio and make sure
- *          the match returned an empty filename string.
+ * @return: struct cpio_data containing the address, length and
+ *          filename (with the directory path cut off) of the found file.
+ *          If you search for a filename and not for files in a directory,
+ *          pass the absolute path of the filename in the cpio and make sure
+ *          the match returned an empty filename string.
 */
 struct cpio_data find_cpio_data(const char *path, void *data,
-				size_t len, long *offset)
+				size_t len, long *nextoff)
 {
 	const size_t cpio_header_len = 8*C_NFIELDS - 2;
 	struct cpio_data cd = { NULL, 0, "" };
@@ -124,7 +125,7 @@ struct cpio_data find_cpio_data(const char *path, void *data,
 		if ((ch[C_MODE] & 0170000) == 0100000 &&
 		    ch[C_NAMESIZE] >= mypathsize &&
 		    !memcmp(p, path, mypathsize)) {
-			*offset = (long)nptr - (long)data;
+			*nextoff = (long)nptr - (long)data;
 			if (ch[C_NAMESIZE] - mypathsize >= MAX_CPIO_FILE_NAME) {
 				pr_warn(
 				"File %s exceeding MAX_CPIO_FILE_NAME [%d]\n",
diff --git a/lib/genalloc.c b/lib/genalloc.c
index b35cfa9bc3d..26cf20be72b 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -37,6 +37,11 @@
 #include <linux/of_address.h>
 #include <linux/of_device.h>
 
+static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
+{
+	return chunk->end_addr - chunk->start_addr + 1;
+}
+
 static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
 {
 	unsigned long val, nval;
@@ -182,13 +187,13 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
 	int nbytes = sizeof(struct gen_pool_chunk) +
 				BITS_TO_LONGS(nbits) * sizeof(long);
 
-	chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
+	chunk = kzalloc_node(nbytes, GFP_KERNEL, nid);
 	if (unlikely(chunk == NULL))
 		return -ENOMEM;
 
 	chunk->phys_addr = phys;
 	chunk->start_addr = virt;
-	chunk->end_addr = virt + size;
+	chunk->end_addr = virt + size - 1;
 	atomic_set(&chunk->avail, size);
 
 	spin_lock(&pool->lock);
@@ -213,7 +218,7 @@ phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
-		if (addr >= chunk->start_addr && addr < chunk->end_addr) {
+		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
 			paddr = chunk->phys_addr + (addr - chunk->start_addr);
 			break;
 		}
@@ -242,7 +247,7 @@ void gen_pool_destroy(struct gen_pool *pool)
 		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
 		list_del(&chunk->next_chunk);
 
-		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
+		end_bit = chunk_size(chunk) >> order;
 		bit = find_next_bit(chunk->bits, end_bit, 0);
 		BUG_ON(bit < end_bit);
 
@@ -283,7 +288,7 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
 		if (size > atomic_read(&chunk->avail))
 			continue;
 
-		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
+		end_bit = chunk_size(chunk) >> order;
 retry:
 		start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
 				pool->data);
@@ -330,8 +335,8 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
 	nbits = (size + (1UL << order) - 1) >> order;
 	rcu_read_lock();
 	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
-		if (addr >= chunk->start_addr && addr < chunk->end_addr) {
-			BUG_ON(addr + size > chunk->end_addr);
+		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
+			BUG_ON(addr + size - 1 > chunk->end_addr);
 			start_bit = (addr - chunk->start_addr) >> order;
 			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
 			BUG_ON(remain);
@@ -400,7 +405,7 @@ size_t gen_pool_size(struct gen_pool *pool)
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
-		size += chunk->end_addr - chunk->start_addr;
+		size += chunk_size(chunk);
 	rcu_read_unlock();
 	return size;
 }
@@ -519,7 +524,6 @@ struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
 /**
 * dev_get_gen_pool - Obtain the gen_pool (if any) for a device
 * @dev: device to retrieve the gen_pool from
- * @name: Optional name for the gen_pool, usually NULL
 *
 * Returns the gen_pool for the device if one is present, or NULL.
 */
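The end_addr changes in the genalloc diff above are an overflow fix: with an exclusive end, a chunk that runs to the very top of the address space computes end_addr == 0 and every containment test fails. A small standalone illustration, with 32-bit addresses and chunk placement chosen for brevity:

    #include <stdint.h>
    #include <stdio.h>

    /* Why genalloc now stores an inclusive end_addr: the old exclusive
     * convention wraps to 0 for a chunk ending at the top of the
     * address space, so no address ever tests as inside the chunk. */
    int main(void)
    {
        uint32_t start = 0xfffff000u;   /* last 4 KiB page */
        uint32_t size  = 0x1000u;

        uint32_t end_excl = start + size;       /* wraps to 0x00000000 */
        uint32_t end_incl = start + size - 1;   /* 0xffffffff, no wrap */

        uint32_t addr = 0xfffff800u;            /* inside the chunk */
        printf("exclusive test: %d\n", addr >= start && addr <  end_excl); /* 0: broken  */
        printf("inclusive test: %d\n", addr >= start && addr <= end_incl); /* 1: correct */
        return 0;
    }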
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 3f0494c9d57..8499c810909 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -14,6 +14,8 @@
 
 const char hex_asc[] = "0123456789abcdef";
 EXPORT_SYMBOL(hex_asc);
+const char hex_asc_upper[] = "0123456789ABCDEF";
+EXPORT_SYMBOL(hex_asc_upper);
 
 /**
 * hex_to_bin - convert a hex digit to its real value
diff --git a/lib/kobject.c b/lib/kobject.c
index 4a1f33d4354..669bf190d4f 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -545,8 +545,8 @@ static void kobject_cleanup(struct kobject *kobj)
 	struct kobj_type *t = get_ktype(kobj);
 	const char *name = kobj->name;
 
-	pr_debug("kobject: '%s' (%p): %s\n",
-		 kobject_name(kobj), kobj, __func__);
+	pr_debug("kobject: '%s' (%p): %s, parent %p\n",
+		 kobject_name(kobj), kobj, __func__, kobj->parent);
 
 	if (t && !t->release)
 		pr_debug("kobject: '%s' (%p): does not have a release() "
@@ -580,9 +580,25 @@ static void kobject_cleanup(struct kobject *kobj)
 	}
 }
 
+#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
+static void kobject_delayed_cleanup(struct work_struct *work)
+{
+	kobject_cleanup(container_of(to_delayed_work(work),
+				     struct kobject, release));
+}
+#endif
+
 static void kobject_release(struct kref *kref)
 {
-	kobject_cleanup(container_of(kref, struct kobject, kref));
+	struct kobject *kobj = container_of(kref, struct kobject, kref);
+#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
+	pr_debug("kobject: '%s' (%p): %s, parent %p (delayed)\n",
+		 kobject_name(kobj), kobj, __func__, kobj->parent);
+	INIT_DELAYED_WORK(&kobj->release, kobject_delayed_cleanup);
+	schedule_delayed_work(&kobj->release, HZ);
+#else
+	kobject_cleanup(kobj);
+#endif
 }
 
 /**
@@ -915,6 +931,18 @@ const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj)
 	return kobj_child_ns_ops(kobj->parent);
 }
 
+bool kobj_ns_current_may_mount(enum kobj_ns_type type)
+{
+	bool may_mount = true;
+
+	spin_lock(&kobj_ns_type_lock);
+	if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
+	    kobj_ns_ops_tbl[type])
+		may_mount = kobj_ns_ops_tbl[type]->current_may_mount();
+	spin_unlock(&kobj_ns_type_lock);
+
+	return may_mount;
+}
 
 void *kobj_ns_grab_current(enum kobj_ns_type type)
 {
diff --git a/lib/lockref.c b/lib/lockref.c
new file mode 100644
index 00000000000..6f9d434c152
--- /dev/null
+++ b/lib/lockref.c
@@ -0,0 +1,183 @@
+#include <linux/export.h>
+#include <linux/lockref.h>
+
+#ifdef CONFIG_CMPXCHG_LOCKREF
+
+/*
+ * Allow weakly-ordered memory architectures to provide barrier-less
+ * cmpxchg semantics for lockref updates.
+ */
+#ifndef cmpxchg64_relaxed
+# define cmpxchg64_relaxed cmpxchg64
+#endif
+
+/*
+ * Allow architectures to override the default cpu_relax() within CMPXCHG_LOOP.
+ * This is useful for architectures with an expensive cpu_relax().
+ */
+#ifndef arch_mutex_cpu_relax
+# define arch_mutex_cpu_relax() cpu_relax()
+#endif
+
+/*
+ * Note that the "cmpxchg()" reloads the "old" value for the
+ * failure case.
+ */
+#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
+	struct lockref old;							\
+	BUILD_BUG_ON(sizeof(old) != 8);						\
+	old.lock_count = ACCESS_ONCE(lockref->lock_count);			\
+	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
+		struct lockref new = old, prev = old;				\
+		CODE								\
+		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
+						   old.lock_count,		\
+						   new.lock_count);		\
+		if (likely(old.lock_count == prev.lock_count)) {		\
+			SUCCESS;						\
+		}								\
+		arch_mutex_cpu_relax();						\
+	}									\
+} while (0)
+
+#else
+
+#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)
+
+#endif
+
+/**
+ * lockref_get - Increments reference count unconditionally
+ * @lockref: pointer to lockref structure
+ *
+ * This operation is only valid if you already hold a reference
+ * to the object, so you know the count cannot be zero.
+ */
+void lockref_get(struct lockref *lockref)
+{
+	CMPXCHG_LOOP(
+		new.count++;
+	,
+		return;
+	);
+
+	spin_lock(&lockref->lock);
+	lockref->count++;
+	spin_unlock(&lockref->lock);
+}
+EXPORT_SYMBOL(lockref_get);
+
+/**
+ * lockref_get_not_zero - Increments count unless the count is 0
+ * @lockref: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if count was zero
+ */
+int lockref_get_not_zero(struct lockref *lockref)
+{
+	int retval;
+
+	CMPXCHG_LOOP(
+		new.count++;
+		if (!old.count)
+			return 0;
+	,
+		return 1;
+	);
+
+	spin_lock(&lockref->lock);
+	retval = 0;
+	if (lockref->count) {
+		lockref->count++;
+		retval = 1;
+	}
+	spin_unlock(&lockref->lock);
+	return retval;
+}
+EXPORT_SYMBOL(lockref_get_not_zero);
+
+/**
+ * lockref_get_or_lock - Increments count unless the count is 0
+ * @lockref: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if count was zero
+ * and we got the lock instead.
+ */
+int lockref_get_or_lock(struct lockref *lockref)
+{
+	CMPXCHG_LOOP(
+		new.count++;
+		if (!old.count)
+			break;
+	,
+		return 1;
+	);
+
+	spin_lock(&lockref->lock);
+	if (!lockref->count)
+		return 0;
+	lockref->count++;
+	spin_unlock(&lockref->lock);
+	return 1;
+}
+EXPORT_SYMBOL(lockref_get_or_lock);
+
+/**
+ * lockref_put_or_lock - decrements count unless count <= 1 before decrement
+ * @lockref: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
+ */
+int lockref_put_or_lock(struct lockref *lockref)
+{
+	CMPXCHG_LOOP(
+		new.count--;
+		if (old.count <= 1)
+			break;
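The CMPXCHG_LOOP above is easier to follow outside the kernel. Below is a standalone C11-atomics model of the same fast path: the spinlock word and the reference count share one 64-bit value, and the count is bumped only while the lock half reads as unlocked, retrying on contention. The packing layout (lock in the low half, count in the high half) is illustrative, not the kernel's exact struct layout:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Standalone model of the lockref fast path: lock in the low
     * 32 bits (0 == unlocked), count in the high 32 bits, both
     * updated together by one 64-bit compare-exchange. */
    static _Atomic uint64_t lock_count = 0;     /* unlocked, count 0 */

    static int lockref_get_fast(void)
    {
        uint64_t old = atomic_load_explicit(&lock_count, memory_order_relaxed);

        while ((uint32_t)old == 0) {            /* spinlock half unlocked? */
            uint64_t new = old + ((uint64_t)1 << 32);   /* count++ */
            /* On failure, compare_exchange reloads "old" for us, just
             * as the kernel comment notes for cmpxchg(). */
            if (atomic_compare_exchange_weak(&lock_count, &old, new))
                return 1;                       /* count bumped locklessly */
        }
        return 0;   /* lock was held; caller falls back to the spinlock */
    }

    int main(void)
    {
        lockref_get_fast();
        lockref_get_fast();
        printf("count = %u\n", (uint32_t)(atomic_load(&lock_count) >> 32));
        return 0;
    }

If the lock half is ever observed held, the loop exits and the caller takes the real spinlock, which is exactly what the slow paths after each CMPXCHG_LOOP in lockref.c do.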