Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug        18
-rw-r--r--  lib/Makefile              2
-rw-r--r--  lib/debugobjects.c        2
-rw-r--r--  lib/digsig.c              2
-rw-r--r--  lib/genalloc.c           28
-rw-r--r--  lib/kobject.c            90
-rw-r--r--  lib/locking-selftest.c    2
-rw-r--r--  lib/lockref.c             1
-rw-r--r--  lib/percpu_test.c       138
-rw-r--r--  lib/show_mem.c           39
-rw-r--r--  lib/smp_processor_id.c    3
-rw-r--r--  lib/vsprintf.c           35
12 files changed, 320 insertions(+), 40 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 094f3152ec2..db25707aa41 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -312,6 +312,15 @@ config MAGIC_SYSRQ
keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
unless you really know what this hack does.
+config MAGIC_SYSRQ_DEFAULT_ENABLE
+ hex "Enable magic SysRq key functions by default"
+ depends on MAGIC_SYSRQ
+ default 0x1
+ help
+ Specifies which SysRq key functions are enabled by default.
+ This may be set to 1 or 0 to enable or disable them all, or
+ to a bitmask as described in Documentation/sysrq.txt.
+
config DEBUG_KERNEL
bool "Kernel debugging"
help
@@ -1472,6 +1481,15 @@ config INTERVAL_TREE_TEST
help
A benchmark measuring the performance of the interval tree library
+config PERCPU_TEST
+ tristate "Per cpu operations test"
+ depends on m && DEBUG_KERNEL
+ help
+ Enable this option to build a test module which validates
+ per-cpu operations.
+
+ If unsure, say N.
+
config ATOMIC64_SELFTEST
bool "Perform an atomic64_t self-test at boot"
help
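For orientation, a hedged sketch of how a bitmask default like CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE is typically consumed; the variable and helper names below are illustrative, not part of this patch (the real consumer lives in drivers/tty/sysrq.c):

	/*
	 * Illustrative sketch only: a value of 1 enables every SysRq
	 * function; any other value is treated as a bitmask over the
	 * function groups listed in Documentation/sysrq.txt.
	 */
	static int sysrq_enabled = CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE;

	static bool sysrq_on_mask(int mask)
	{
		return sysrq_enabled == 1 || (sysrq_enabled & mask);
	}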
diff --git a/lib/Makefile b/lib/Makefile
index f3bb2cb98ad..bb016e116ba 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -157,6 +157,8 @@ obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o
interval_tree_test-objs := interval_tree_test_main.o interval_tree.o
+obj-$(CONFIG_PERCPU_TEST) += percpu_test.o
+
obj-$(CONFIG_ASN1) += asn1_decoder.o
obj-$(CONFIG_FONT_SUPPORT) += fonts/
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index bf2c8b1043d..e0731c3db70 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -196,7 +196,7 @@ static void free_object(struct debug_obj *obj)
* initialized:
*/
if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
- sched = keventd_up() && !work_pending(&debug_obj_work);
+ sched = keventd_up();
hlist_add_head(&obj->node, &obj_pool);
obj_pool_free++;
obj_pool_used--;
diff --git a/lib/digsig.c b/lib/digsig.c
index 2f31e6a45f0..8793aeda30c 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -209,7 +209,7 @@ int digsig_verify(struct key *keyring, const char *sig, int siglen,
kref = keyring_search(make_key_ref(keyring, 1UL),
&key_type_user, name);
if (IS_ERR(kref))
- key = ERR_PTR(PTR_ERR(kref));
+ key = ERR_CAST(kref);
else
key = key_ref_to_ptr(kref);
} else {
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 26cf20be72b..dda31168844 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -313,6 +313,34 @@ retry:
EXPORT_SYMBOL(gen_pool_alloc);
/**
+ * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ * @dma: dma-view physical address
+ *
+ * Allocate the requested number of bytes from the specified pool.
+ * Uses the pool allocation function (with first-fit algorithm by default).
+ * Cannot be used in an NMI handler on architectures without an
+ * NMI-safe cmpxchg implementation.
+ */
+void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
+{
+ unsigned long vaddr;
+
+ if (!pool)
+ return NULL;
+
+ vaddr = gen_pool_alloc(pool, size);
+ if (!vaddr)
+ return NULL;
+
+ *dma = gen_pool_virt_to_phys(pool, vaddr);
+
+ return (void *)vaddr;
+}
+EXPORT_SYMBOL(gen_pool_dma_alloc);
+
+/**
* gen_pool_free - free allocated special memory back to the pool
* @pool: pool to free to
* @addr: starting address of memory to free back to pool
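A minimal usage sketch for the new gen_pool_dma_alloc() helper; the pool and function names here are assumptions for illustration:

	#include <linux/genalloc.h>
	#include <linux/dma-mapping.h>

	/* Hypothetical driver carving a buffer out of a pre-mapped SRAM pool. */
	static void *sram_buf_alloc(struct gen_pool *sram_pool, size_t len,
				    dma_addr_t *dma)
	{
		/*
		 * Returns the CPU virtual address and fills *dma with the bus
		 * address derived via gen_pool_virt_to_phys(); returns NULL for
		 * a NULL or exhausted pool.
		 */
		return gen_pool_dma_alloc(sram_pool, len, dma);
	}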
diff --git a/lib/kobject.c b/lib/kobject.c
index 084f7b18d0c..5b4b8886435 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -13,11 +13,30 @@
*/
#include <linux/kobject.h>
+#include <linux/kobj_completion.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/stat.h>
#include <linux/slab.h>
+/**
+ * kobject_namespace - return @kobj's namespace tag
+ * @kobj: kobject in question
+ *
+ * Returns namespace tag of @kobj if its parent has namespace ops enabled
+ * and thus @kobj should have a namespace tag associated with it. Returns
+ * %NULL otherwise.
+ */
+const void *kobject_namespace(struct kobject *kobj)
+{
+ const struct kobj_ns_type_operations *ns_ops = kobj_ns_ops(kobj);
+
+ if (!ns_ops || ns_ops->type == KOBJ_NS_TYPE_NONE)
+ return NULL;
+
+ return kobj->ktype->namespace(kobj);
+}
+
/*
* populate_dir - populate directory with attributes.
* @kobj: object we're working on.
@@ -46,13 +65,21 @@ static int populate_dir(struct kobject *kobj)
static int create_dir(struct kobject *kobj)
{
- int error = 0;
- error = sysfs_create_dir(kobj);
+ int error;
+
+ error = sysfs_create_dir_ns(kobj, kobject_namespace(kobj));
if (!error) {
error = populate_dir(kobj);
if (error)
sysfs_remove_dir(kobj);
}
+
+ /*
+ * @kobj->sd may be deleted by an ancestor going away. Hold an
+ * extra reference so that it stays until @kobj is gone.
+ */
+ sysfs_get(kobj->sd);
+
return error;
}
@@ -428,7 +455,7 @@ int kobject_rename(struct kobject *kobj, const char *new_name)
goto out;
}
- error = sysfs_rename_dir(kobj, new_name);
+ error = sysfs_rename_dir_ns(kobj, new_name, kobject_namespace(kobj));
if (error)
goto out;
@@ -472,6 +499,7 @@ int kobject_move(struct kobject *kobj, struct kobject *new_parent)
if (kobj->kset)
new_parent = kobject_get(&kobj->kset->kobj);
}
+
/* old object path */
devpath = kobject_get_path(kobj, GFP_KERNEL);
if (!devpath) {
@@ -486,7 +514,7 @@ int kobject_move(struct kobject *kobj, struct kobject *new_parent)
sprintf(devpath_string, "DEVPATH_OLD=%s", devpath);
envp[0] = devpath_string;
envp[1] = NULL;
- error = sysfs_move_dir(kobj, new_parent);
+ error = sysfs_move_dir_ns(kobj, new_parent, kobject_namespace(kobj));
if (error)
goto out;
old_parent = kobj->parent;
@@ -508,10 +536,15 @@ out:
*/
void kobject_del(struct kobject *kobj)
{
+ struct sysfs_dirent *sd;
+
if (!kobj)
return;
+ sd = kobj->sd;
sysfs_remove_dir(kobj);
+ sysfs_put(sd);
+
kobj->state_in_sysfs = 0;
kobj_kset_leave(kobj);
kobject_put(kobj->parent);
@@ -727,6 +760,55 @@ const struct sysfs_ops kobj_sysfs_ops = {
};
/**
+ * kobj_completion_init - initialize a kobj_completion object.
+ * @kc: kobj_completion
+ * @ktype: type of kobject to initialize
+ *
+ * kobj_completion structures can be embedded within structures with different
+ * lifetime rules. During the release of the enclosing object, we can
+ * wait on the release of the kobject so that we don't free it while it's
+ * still busy.
+ */
+void kobj_completion_init(struct kobj_completion *kc, struct kobj_type *ktype)
+{
+ init_completion(&kc->kc_unregister);
+ kobject_init(&kc->kc_kobj, ktype);
+}
+EXPORT_SYMBOL_GPL(kobj_completion_init);
+
+/**
+ * kobj_completion_release - release a kobj_completion object
+ * @kobj: kobject embedded in kobj_completion
+ *
+ * Used with kobject_release to notify waiters that the kobject has been
+ * released.
+ */
+void kobj_completion_release(struct kobject *kobj)
+{
+ struct kobj_completion *kc = kobj_to_kobj_completion(kobj);
+ complete(&kc->kc_unregister);
+}
+EXPORT_SYMBOL_GPL(kobj_completion_release);
+
+/**
+ * kobj_completion_del_and_wait - release the kobject and wait for it
+ * @kc: kobj_completion object to release
+ *
+ * Delete the kobject from sysfs and drop the reference count. Then wait
+ * until any other outstanding references are also dropped. This routine
+ * is only necessary once other references may have been taken on the
+ * kobject. Typically this happens when the kobject has been published
+ * to sysfs via kobject_add.
+ */
+void kobj_completion_del_and_wait(struct kobj_completion *kc)
+{
+ kobject_del(&kc->kc_kobj);
+ kobject_put(&kc->kc_kobj);
+ wait_for_completion(&kc->kc_unregister);
+}
+EXPORT_SYMBOL_GPL(kobj_completion_del_and_wait);
+
+/**
* kset_register - initialize and add a kset.
* @k: kset.
*/
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 6dc09d8f4c2..872a15a2a63 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -1002,7 +1002,7 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
* Some tests (e.g. double-unlock) might corrupt the preemption
* count, so restore it:
*/
- preempt_count() = saved_preempt_count;
+ preempt_count_set(saved_preempt_count);
#ifdef CONFIG_TRACE_IRQFLAGS
if (softirq_count())
current->softirqs_enabled = 0;
diff --git a/lib/lockref.c b/lib/lockref.c
index 6f9d434c152..af6e95d0bed 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -153,6 +153,7 @@ void lockref_mark_dead(struct lockref *lockref)
assert_spin_locked(&lockref->lock);
lockref->count = -128;
}
+EXPORT_SYMBOL(lockref_mark_dead);
/**
* lockref_get_not_dead - Increments count unless the ref is dead
diff --git a/lib/percpu_test.c b/lib/percpu_test.c
new file mode 100644
index 00000000000..0b5d14dadd1
--- /dev/null
+++ b/lib/percpu_test.c
@@ -0,0 +1,138 @@
+#include <linux/module.h>
+
+/* validate @native and @pcp counter values match @expected */
+#define CHECK(native, pcp, expected) \
+ do { \
+ WARN((native) != (expected), \
+ "raw %ld (0x%lx) != expected %lld (0x%llx)", \
+ (native), (native), \
+ (long long)(expected), (long long)(expected)); \
+ WARN(__this_cpu_read(pcp) != (expected), \
+ "pcp %ld (0x%lx) != expected %lld (0x%llx)", \
+ __this_cpu_read(pcp), __this_cpu_read(pcp), \
+ (long long)(expected), (long long)(expected)); \
+ } while (0)
+
+static DEFINE_PER_CPU(long, long_counter);
+static DEFINE_PER_CPU(unsigned long, ulong_counter);
+
+static int __init percpu_test_init(void)
+{
+ /*
+ * volatile prevents the compiler from optimizing its uses, otherwise the
+ * +ui_one/-ui_one operations below would be replaced with inc/dec
+ * instructions.
+ */
+ volatile unsigned int ui_one = 1;
+ long l = 0;
+ unsigned long ul = 0;
+
+ pr_info("percpu test start\n");
+
+ preempt_disable();
+
+ l += -1;
+ __this_cpu_add(long_counter, -1);
+ CHECK(l, long_counter, -1);
+
+ l += 1;
+ __this_cpu_add(long_counter, 1);
+ CHECK(l, long_counter, 0);
+
+ ul = 0;
+ __this_cpu_write(ulong_counter, 0);
+
+ ul += 1UL;
+ __this_cpu_add(ulong_counter, 1UL);
+ CHECK(ul, ulong_counter, 1);
+
+ ul += -1UL;
+ __this_cpu_add(ulong_counter, -1UL);
+ CHECK(ul, ulong_counter, 0);
+
+ ul += -(unsigned long)1;
+ __this_cpu_add(ulong_counter, -(unsigned long)1);
+ CHECK(ul, ulong_counter, -1);
+
+ ul = 0;
+ __this_cpu_write(ulong_counter, 0);
+
+ ul -= 1;
+ __this_cpu_dec(ulong_counter);
+ CHECK(ul, ulong_counter, -1);
+ CHECK(ul, ulong_counter, ULONG_MAX);
+
+ l += -ui_one;
+ __this_cpu_add(long_counter, -ui_one);
+ CHECK(l, long_counter, 0xffffffff);
+
+ l += ui_one;
+ __this_cpu_add(long_counter, ui_one);
+ CHECK(l, long_counter, (long)0x100000000LL);
+
+
+ l = 0;
+ __this_cpu_write(long_counter, 0);
+
+ l -= ui_one;
+ __this_cpu_sub(long_counter, ui_one);
+ CHECK(l, long_counter, -1);
+
+ l = 0;
+ __this_cpu_write(long_counter, 0);
+
+ l += ui_one;
+ __this_cpu_add(long_counter, ui_one);
+ CHECK(l, long_counter, 1);
+
+ l += -ui_one;
+ __this_cpu_add(long_counter, -ui_one);
+ CHECK(l, long_counter, (long)0x100000000LL);
+
+ l = 0;
+ __this_cpu_write(long_counter, 0);
+
+ l -= ui_one;
+ this_cpu_sub(long_counter, ui_one);
+ CHECK(l, long_counter, -1);
+ CHECK(l, long_counter, ULONG_MAX);
+
+ ul = 0;
+ __this_cpu_write(ulong_counter, 0);
+
+ ul += ui_one;
+ __this_cpu_add(ulong_counter, ui_one);
+ CHECK(ul, ulong_counter, 1);
+
+ ul = 0;
+ __this_cpu_write(ulong_counter, 0);
+
+ ul -= ui_one;
+ __this_cpu_sub(ulong_counter, ui_one);
+ CHECK(ul, ulong_counter, -1);
+ CHECK(ul, ulong_counter, ULONG_MAX);
+
+ ul = 3;
+ __this_cpu_write(ulong_counter, 3);
+
+ ul = this_cpu_sub_return(ulong_counter, ui_one);
+ CHECK(ul, ulong_counter, 2);
+
+ ul = __this_cpu_sub_return(ulong_counter, ui_one);
+ CHECK(ul, ulong_counter, 1);
+
+ preempt_enable();
+
+ pr_info("percpu test done\n");
+ return -EAGAIN; /* failing init directly unloads the module */
+}
+
+static void __exit percpu_test_exit(void)
+{
+}
+
+module_init(percpu_test_init);
+module_exit(percpu_test_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Greg Thelen");
+MODULE_DESCRIPTION("percpu operations test");
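The odd-looking expected values above (0xffffffff, 0x100000000) fall out of C's usual arithmetic conversions: negating an unsigned int stays unsigned int, and the result zero-extends when added to a 64-bit long. A stand-alone userspace sketch, assuming an LP64 target:

	#include <stdio.h>

	int main(void)
	{
		volatile unsigned int ui_one = 1;
		long l = 0;

		l += -ui_one;		/* -ui_one == 0xffffffffU, zero-extended */
		printf("0x%lx\n", l);	/* prints 0xffffffff, not -1 */

		l += -(long)ui_one;	/* widen first: a genuine -1 */
		printf("0x%lx\n", l);	/* prints 0xfffffffe */
		return 0;
	}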
diff --git a/lib/show_mem.c b/lib/show_mem.c
index b7c72311ad0..5847a4921b8 100644
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -12,8 +12,7 @@
void show_mem(unsigned int filter)
{
pg_data_t *pgdat;
- unsigned long total = 0, reserved = 0, shared = 0,
- nonshared = 0, highmem = 0;
+ unsigned long total = 0, reserved = 0, highmem = 0;
printk("Mem-Info:\n");
show_free_areas(filter);
@@ -22,43 +21,27 @@ void show_mem(unsigned int filter)
return;
for_each_online_pgdat(pgdat) {
- unsigned long i, flags;
+ unsigned long flags;
+ int zoneid;
pgdat_resize_lock(pgdat, &flags);
- for (i = 0; i < pgdat->node_spanned_pages; i++) {
- struct page *page;
- unsigned long pfn = pgdat->node_start_pfn + i;
-
- if (unlikely(!(i % MAX_ORDER_NR_PAGES)))
- touch_nmi_watchdog();
-
- if (!pfn_valid(pfn))
+ for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
+ struct zone *zone = &pgdat->node_zones[zoneid];
+ if (!populated_zone(zone))
continue;
- page = pfn_to_page(pfn);
-
- if (PageHighMem(page))
- highmem++;
+ total += zone->present_pages;
+ reserved += zone->present_pages - zone->managed_pages;
- if (PageReserved(page))
- reserved++;
- else if (page_count(page) == 1)
- nonshared++;
- else if (page_count(page) > 1)
- shared += page_count(page) - 1;
-
- total++;
+ if (is_highmem_idx(zoneid))
+ highmem += zone->present_pages;
}
pgdat_resize_unlock(pgdat, &flags);
}
printk("%lu pages RAM\n", total);
-#ifdef CONFIG_HIGHMEM
- printk("%lu pages HighMem\n", highmem);
-#endif
+ printk("%lu pages HighMem/MovableOnly\n", highmem);
printk("%lu pages reserved\n", reserved);
- printk("%lu pages shared\n", shared);
- printk("%lu pages non-shared\n", nonshared);
#ifdef CONFIG_QUICKLIST
printk("%lu pages in pagetable cache\n",
quicklist_total_size());
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 4c0d0e51d49..04abe53f12a 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -9,10 +9,9 @@
notrace unsigned int debug_smp_processor_id(void)
{
- unsigned long preempt_count = preempt_count();
int this_cpu = raw_smp_processor_id();
- if (likely(preempt_count))
+ if (likely(preempt_count()))
goto out;
if (irqs_disabled())
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 26559bdb4c4..48586ac3a62 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -27,6 +27,7 @@
#include <linux/uaccess.h>
#include <linux/ioport.h>
#include <linux/dcache.h>
+#include <linux/cred.h>
#include <net/addrconf.h>
#include <asm/page.h> /* for PAGE_SIZE */
@@ -1218,6 +1219,8 @@ int kptr_restrict __read_mostly;
* The maximum supported length is 64 bytes of the input. Consider
* to use print_hex_dump() for the larger input.
* - 'a' For a phys_addr_t type and its derivative types (passed by reference)
+ * - 'd[234]' For a dentry name (optionally 2-4 last components)
+ * - 'D[234]' Same as 'd' but for a struct file
*
* Note: The difference between 'S' and 'F' is that on ia64 and ppc64
* function pointers are really function descriptors, which contain a
@@ -1312,11 +1315,37 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
spec.field_width = default_width;
return string(buf, end, "pK-error", spec);
}
- if (!((kptr_restrict == 0) ||
- (kptr_restrict == 1 &&
- has_capability_noaudit(current, CAP_SYSLOG))))
+
+ switch (kptr_restrict) {
+ case 0:
+ /* Always print %pK values */
+ break;
+ case 1: {
+ /*
+ * Only print the real pointer value if the current
+ * process has CAP_SYSLOG and is running with the
+ * same credentials it started with. This is because
+ * access to files is checked at open() time, but %pK
+ * checks permission at read() time. We don't want to
+ * leak pointer values if a binary opens a file that
+ * prints %pK values and then elevates privileges
+ * before reading it.
+ */
+ const struct cred *cred = current_cred();
+
+ if (!has_capability_noaudit(current, CAP_SYSLOG) ||
+ !uid_eq(cred->euid, cred->uid) ||
+ !gid_eq(cred->egid, cred->gid))
+ ptr = NULL;
+ break;
+ }
+ case 2:
+ default:
+ /* Always print 0's for %pK */
ptr = NULL;
+ break;
+ }
break;
+
case 'N':
switch (fmt[1]) {
case 'F':
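Finally, a hedged sketch of what the tightened kptr_restrict == 1 path means for a %pK consumer; the seq_file handler below is hypothetical:

	#include <linux/seq_file.h>

	static int widget_show(struct seq_file *m, void *v)
	{
		/*
		 * With kptr_restrict == 1 the real address is printed only
		 * when the reader holds CAP_SYSLOG *and* its effective
		 * uid/gid match its real uid/gid; otherwise vsprintf
		 * substitutes NULL and the output is all zeroes.
		 * kptr_restrict == 2 always prints zeroes.
		 */
		seq_printf(m, "state at %pK\n", m->private);
		return 0;
	}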