Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c                     |   8
-rw-r--r--  kernel/cpu.c                        |  10
-rw-r--r--  kernel/cred.c                       |   2
-rw-r--r--  kernel/fork.c                       |  15
-rw-r--r--  kernel/futex.c                      |  57
-rw-r--r--  kernel/hw_breakpoint.c              |  58
-rw-r--r--  kernel/kexec.c                      |   4
-rw-r--r--  kernel/kfifo.c                      | 111
-rw-r--r--  kernel/kgdb.c                       |   9
-rw-r--r--  kernel/kmod.c                       |  12
-rw-r--r--  kernel/lockdep.c                    |   2
-rw-r--r--  kernel/module.c                     |  17
-rw-r--r--  kernel/panic.c                      |   3
-rw-r--r--  kernel/perf_event.c                 |  16
-rw-r--r--  kernel/printk.c                     |   1
-rw-r--r--  kernel/sched.c                      |  44
-rw-r--r--  kernel/sched_fair.c                 |   2
-rw-r--r--  kernel/signal.c                     |   3
-rw-r--r--  kernel/smp.c                        |   2
-rw-r--r--  kernel/softirq.c                    |  15
-rw-r--r--  kernel/softlockup.c                 |  15
-rw-r--r--  kernel/sys.c                        |   2
-rw-r--r--  kernel/time/clockevents.c           |   3
-rw-r--r--  kernel/time/clocksource.c           |  18
-rw-r--r--  kernel/time/timekeeping.c           |   2
-rw-r--r--  kernel/timer.c                      |   3
-rw-r--r--  kernel/trace/Kconfig                |   4
-rw-r--r--  kernel/trace/ftrace.c               |   6
-rw-r--r--  kernel/trace/ring_buffer.c          |  28
-rw-r--r--  kernel/trace/trace.c                |   5
-rw-r--r--  kernel/trace/trace_events_filter.c  |  29
-rw-r--r--  kernel/trace/trace_kprobe.c         |   2
-rw-r--r--  kernel/trace/trace_stack.c          |  24
33 files changed, 372 insertions(+), 160 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 0249f4be9b5..aa3bee56644 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2468,7 +2468,6 @@ static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
/* make sure l doesn't vanish out from under us */
down_write(&l->mutex);
mutex_unlock(&cgrp->pidlist_mutex);
- l->use_count++;
return l;
}
}
@@ -2937,14 +2936,17 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
for_each_subsys(root, ss) {
struct cgroup_subsys_state *css = ss->create(ss, cgrp);
+
if (IS_ERR(css)) {
err = PTR_ERR(css);
goto err_destroy;
}
init_cgroup_css(css, ss, cgrp);
- if (ss->use_id)
- if (alloc_css_id(ss, parent, cgrp))
+ if (ss->use_id) {
+ err = alloc_css_id(ss, parent, cgrp);
+ if (err)
goto err_destroy;
+ }
/* At error, ->destroy() callback has to free assigned ID. */
}
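
The second cgroup.c hunk fixes a silent error path: the old code jumped to err_destroy without recording why alloc_css_id() failed, so the caller could observe a stale error value. A minimal sketch of the pattern the fix enforces (hypothetical helper name, not kernel code):

        int err;

        err = do_setup();               /* hypothetical callee returning -errno */
        if (err)
                goto err_destroy;       /* err now carries the real reason */
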
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 1c8ddd6ee94..677f25376a3 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -151,13 +151,13 @@ static inline void check_for_tasks(int cpu)
write_lock_irq(&tasklist_lock);
for_each_process(p) {
- if (task_cpu(p) == cpu &&
+ if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
(!cputime_eq(p->utime, cputime_zero) ||
!cputime_eq(p->stime, cputime_zero)))
- printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d\
- (state = %ld, flags = %x) \n",
- p->comm, task_pid_nr(p), cpu,
- p->state, p->flags);
+ printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
+ "(state = %ld, flags = %x)\n",
+ p->comm, task_pid_nr(p), cpu,
+ p->state, p->flags);
}
write_unlock_irq(&tasklist_lock);
}
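
The cpu.c change leans on C's compile-time concatenation of adjacent string literals, the idiomatic way to split a long printk() format; the removed version had a backslash line continuation inside the literal, which pulled the next line's indentation into the message. A minimal illustration:

        /* The two literals merge into one format string at compile time. */
        printk(KERN_INFO "first half of a long message "
                         "second half\n");
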
diff --git a/kernel/cred.c b/kernel/cred.c
index dd76cfe5f5b..1ed8ca18790 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -224,7 +224,7 @@ struct cred *cred_alloc_blank(void)
#ifdef CONFIG_KEYS
new->tgcred = kzalloc(sizeof(*new->tgcred), GFP_KERNEL);
if (!new->tgcred) {
- kfree(new);
+ kmem_cache_free(cred_jar, new);
return NULL;
}
atomic_set(&new->tgcred->usage, 1);
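
The cred.c fix matters because blank creds come from a dedicated slab cache; returning a kmem_cache object with kfree() corrupts the allocator's bookkeeping. A hedged sketch of the pairing rule, with a hypothetical cache standing in for cred_jar:

        static struct kmem_cache *obj_jar;      /* hypothetical, like cred_jar */

        struct obj *o = kmem_cache_zalloc(obj_jar, GFP_KERNEL);
        if (!o)
                return NULL;
        if (obj_init(o)) {                      /* hypothetical init step */
                kmem_cache_free(obj_jar, o);    /* must match the allocator */
                return NULL;
        }
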
diff --git a/kernel/fork.c b/kernel/fork.c
index 5b2959b3ffc..f88bd984df3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1241,21 +1241,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
/* Need tasklist lock for parent etc handling! */
write_lock_irq(&tasklist_lock);
- /*
- * The task hasn't been attached yet, so its cpus_allowed mask will
- * not be changed, nor will its assigned CPU.
- *
- * The cpus_allowed mask of the parent may have changed after it was
- * copied first time - so re-copy it here, then check the child's CPU
- * to ensure it is on a valid CPU (and if not, just force it back to
- * parent's CPU). This avoids alot of nasty races.
- */
- p->cpus_allowed = current->cpus_allowed;
- p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
- if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
- !cpu_online(task_cpu(p))))
- set_task_cpu(p, smp_processor_id());
-
/* CLONE_PARENT re-uses the old parent */
if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
p->real_parent = current->real_parent;
diff --git a/kernel/futex.c b/kernel/futex.c
index 8e3c3ffe1b9..e7a35f1039e 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -203,8 +203,6 @@ static void drop_futex_key_refs(union futex_key *key)
* @uaddr: virtual address of the futex
* @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
* @key: address where result is stored.
- * @rw: mapping needs to be read/write (values: VERIFY_READ,
- * VERIFY_WRITE)
*
* Returns a negative error code or 0
* The key words are stored in *key on success.
@@ -216,7 +214,7 @@ static void drop_futex_key_refs(union futex_key *key)
* lock_page() might sleep, the caller should not hold a spinlock.
*/
static int
-get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
+get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
{
unsigned long address = (unsigned long)uaddr;
struct mm_struct *mm = current->mm;
@@ -239,7 +237,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
* but access_ok() should be faster than find_vma()
*/
if (!fshared) {
- if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
+ if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
return -EFAULT;
key->private.mm = mm;
key->private.address = address;
@@ -248,7 +246,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
}
again:
- err = get_user_pages_fast(address, 1, rw == VERIFY_WRITE, &page);
+ err = get_user_pages_fast(address, 1, 1, &page);
if (err < 0)
return err;
@@ -532,8 +530,25 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
return -EINVAL;
WARN_ON(!atomic_read(&pi_state->refcount));
- WARN_ON(pid && pi_state->owner &&
- pi_state->owner->pid != pid);
+
+ /*
+ * When pi_state->owner is NULL then the owner died
+ * and another waiter is on the fly. pi_state->owner
+ * is fixed up by the task which acquires
+ * pi_state->rt_mutex.
+ *
+ * We do not check for pid == 0 which can happen when
+ * the owner died and robust_list_exit() cleared the
+ * TID.
+ */
+ if (pid && pi_state->owner) {
+ /*
+ * Bail out if user space manipulated the
+ * futex value.
+ */
+ if (pid != task_pid_vnr(pi_state->owner))
+ return -EINVAL;
+ }
atomic_inc(&pi_state->refcount);
*ps = pi_state;
@@ -760,6 +775,13 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
if (!pi_state)
return -EINVAL;
+ /*
+ * If current does not own the pi_state then the futex is
+ * inconsistent and user space fiddled with the futex value.
+ */
+ if (pi_state->owner != current)
+ return -EINVAL;
+
raw_spin_lock(&pi_state->pi_mutex.wait_lock);
new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
@@ -867,7 +889,7 @@ static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
if (!bitset)
return -EINVAL;
- ret = get_futex_key(uaddr, fshared, &key, VERIFY_READ);
+ ret = get_futex_key(uaddr, fshared, &key);
if (unlikely(ret != 0))
goto out;
@@ -913,10 +935,10 @@ futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
int ret, op_ret;
retry:
- ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
+ ret = get_futex_key(uaddr1, fshared, &key1);
if (unlikely(ret != 0))
goto out;
- ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
+ ret = get_futex_key(uaddr2, fshared, &key2);
if (unlikely(ret != 0))
goto out_put_key1;
@@ -1175,11 +1197,10 @@ retry:
pi_state = NULL;
}
- ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
+ ret = get_futex_key(uaddr1, fshared, &key1);
if (unlikely(ret != 0))
goto out;
- ret = get_futex_key(uaddr2, fshared, &key2,
- requeue_pi ? VERIFY_WRITE : VERIFY_READ);
+ ret = get_futex_key(uaddr2, fshared, &key2);
if (unlikely(ret != 0))
goto out_put_key1;
@@ -1738,7 +1759,7 @@ static int futex_wait_setup(u32 __user *uaddr, u32 val, int fshared,
*/
retry:
q->key = FUTEX_KEY_INIT;
- ret = get_futex_key(uaddr, fshared, &q->key, VERIFY_READ);
+ ret = get_futex_key(uaddr, fshared, &q->key);
if (unlikely(ret != 0))
return ret;
@@ -1904,7 +1925,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
q.requeue_pi_key = NULL;
retry:
q.key = FUTEX_KEY_INIT;
- ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_WRITE);
+ ret = get_futex_key(uaddr, fshared, &q.key);
if (unlikely(ret != 0))
goto out;
@@ -1974,7 +1995,7 @@ retry_private:
/* Unqueue and drop the lock */
unqueue_me_pi(&q);
- goto out;
+ goto out_put_key;
out_unlock_put_key:
queue_unlock(&q, hb);
@@ -2023,7 +2044,7 @@ retry:
if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
return -EPERM;
- ret = get_futex_key(uaddr, fshared, &key, VERIFY_WRITE);
+ ret = get_futex_key(uaddr, fshared, &key);
if (unlikely(ret != 0))
goto out;
@@ -2215,7 +2236,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
rt_waiter.task = NULL;
key2 = FUTEX_KEY_INIT;
- ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
+ ret = get_futex_key(uaddr2, fshared, &key2);
if (unlikely(ret != 0))
goto out;
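
With every call site updated above, get_futex_key() no longer takes an access-mode hint: it always checks the address with VERIFY_WRITE and always faults the page in writable (the get_user_pages_fast() write argument is hard-coded to 1). A sketch of the simplified caller contract, mirroring the call sites in this patch:

        union futex_key key = FUTEX_KEY_INIT;
        int ret;

        ret = get_futex_key(uaddr, fshared, &key);      /* no VERIFY_* argument */
        if (unlikely(ret != 0))
                return ret;
        /* ... look up the hash bucket, queue the waiter, etc. ... */
        put_futex_key(fshared, &key);
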
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index 50dbd599958..967e66143e1 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -243,38 +243,70 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
* ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
* + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
*/
-int reserve_bp_slot(struct perf_event *bp)
+static int __reserve_bp_slot(struct perf_event *bp)
{
struct bp_busy_slots slots = {0};
- int ret = 0;
-
- mutex_lock(&nr_bp_mutex);
fetch_bp_busy_slots(&slots, bp);
/* Flexible counters need to keep at least one slot */
- if (slots.pinned + (!!slots.flexible) == HBP_NUM) {
- ret = -ENOSPC;
- goto end;
- }
+ if (slots.pinned + (!!slots.flexible) == HBP_NUM)
+ return -ENOSPC;
toggle_bp_slot(bp, true);
-end:
+ return 0;
+}
+
+int reserve_bp_slot(struct perf_event *bp)
+{
+ int ret;
+
+ mutex_lock(&nr_bp_mutex);
+
+ ret = __reserve_bp_slot(bp);
+
mutex_unlock(&nr_bp_mutex);
return ret;
}
+static void __release_bp_slot(struct perf_event *bp)
+{
+ toggle_bp_slot(bp, false);
+}
+
void release_bp_slot(struct perf_event *bp)
{
mutex_lock(&nr_bp_mutex);
- toggle_bp_slot(bp, false);
+ __release_bp_slot(bp);
mutex_unlock(&nr_bp_mutex);
}
+/*
+ * Allow the kernel debugger to reserve breakpoint slots without
+ * taking a lock, using the dbg_* variants of the reserve and
+ * release breakpoint slots.
+ */
+int dbg_reserve_bp_slot(struct perf_event *bp)
+{
+ if (mutex_is_locked(&nr_bp_mutex))
+ return -1;
+
+ return __reserve_bp_slot(bp);
+}
+
+int dbg_release_bp_slot(struct perf_event *bp)
+{
+ if (mutex_is_locked(&nr_bp_mutex))
+ return -1;
+
+ __release_bp_slot(bp);
+
+ return 0;
+}
int register_perf_hw_breakpoint(struct perf_event *bp)
{
@@ -296,6 +328,10 @@ int register_perf_hw_breakpoint(struct perf_event *bp)
if (!bp->attr.disabled || !bp->overflow_handler)
ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task);
+ /* if arch_validate_hwbkpt_settings() fails then release bp slot */
+ if (ret)
+ release_bp_slot(bp);
+
return ret;
}
@@ -324,8 +360,8 @@ EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
u64 old_addr = bp->attr.bp_addr;
+ u64 old_len = bp->attr.bp_len;
int old_type = bp->attr.bp_type;
- int old_len = bp->attr.bp_len;
int err = 0;
perf_event_disable(bp);
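
Factoring the slot accounting into __reserve_bp_slot()/__release_bp_slot() lets the debugger path test nr_bp_mutex instead of sleeping on it. A sketch of how an atomic debugger context might use the new dbg_* variants (error handling illustrative):

        /* Debugger context: cannot sleep, so bail out if the mutex is busy. */
        if (dbg_reserve_bp_slot(bp) < 0)
                return -EBUSY;          /* slot bookkeeping is unavailable */
        /* ... arm the breakpoint through the arch hooks ... */
        dbg_release_bp_slot(bp);        /* same lock-free rule on teardown */
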
diff --git a/kernel/kexec.c b/kernel/kexec.c
index a9a93d9ee7a..ef077fb7315 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -32,6 +32,7 @@
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
+#include <linux/kmsg_dump.h>
#include <asm/page.h>
#include <asm/uaccess.h>
@@ -1074,6 +1075,9 @@ void crash_kexec(struct pt_regs *regs)
if (mutex_trylock(&kexec_mutex)) {
if (kexec_crash_image) {
struct pt_regs fixed_regs;
+
+ kmsg_dump(KMSG_DUMP_KEXEC);
+
crash_setup_regs(&fixed_regs, regs);
crash_save_vmcoreinfo();
machine_crash_shutdown(&fixed_regs);
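
KMSG_DUMP_KEXEC gives registered dumpers one last look at the log buffer before the crash kernel takes over. A hedged sketch of the consumer side of the kmsg_dump API of this era (the two-segment callback reflects the ring buffer's wrap; storage is left abstract):

        #include <linux/kmsg_dump.h>

        static void my_dump(struct kmsg_dumper *dumper,
                            enum kmsg_dump_reason reason,
                            const char *s1, unsigned long l1,
                            const char *s2, unsigned long l2)
        {
                /* s1/l1 and s2/l2 are the two halves of the log buffer;
                 * persist them to flash/NVRAM here. */
        }

        static struct kmsg_dumper my_dumper = { .dump = my_dump };

        /* at driver init: */
        kmsg_dump_register(&my_dumper);
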
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
index e92d519f93b..35edbe22e9a 100644
--- a/kernel/kfifo.c
+++ b/kernel/kfifo.c
@@ -28,7 +28,7 @@
#include <linux/log2.h>
#include <linux/uaccess.h>
-static void _kfifo_init(struct kfifo *fifo, unsigned char *buffer,
+static void _kfifo_init(struct kfifo *fifo, void *buffer,
unsigned int size)
{
fifo->buffer = buffer;
@@ -41,10 +41,10 @@ static void _kfifo_init(struct kfifo *fifo, unsigned char *buffer,
* kfifo_init - initialize a FIFO using a preallocated buffer
* @fifo: the fifo to assign the buffer
* @buffer: the preallocated buffer to be used.
- * @size: the size of the internal buffer, this have to be a power of 2.
+ * @size: the size of the internal buffer, this has to be a power of 2.
*
*/
-void kfifo_init(struct kfifo *fifo, unsigned char *buffer, unsigned int size)
+void kfifo_init(struct kfifo *fifo, void *buffer, unsigned int size)
{
/* size must be a power of 2 */
BUG_ON(!is_power_of_2(size));
@@ -80,7 +80,7 @@ int kfifo_alloc(struct kfifo *fifo, unsigned int size, gfp_t gfp_mask)
buffer = kmalloc(size, gfp_mask);
if (!buffer) {
- _kfifo_init(fifo, 0, 0);
+ _kfifo_init(fifo, NULL, 0);
return -ENOMEM;
}
@@ -97,6 +97,7 @@ EXPORT_SYMBOL(kfifo_alloc);
void kfifo_free(struct kfifo *fifo)
{
kfree(fifo->buffer);
+ _kfifo_init(fifo, NULL, 0);
}
EXPORT_SYMBOL(kfifo_free);
@@ -159,8 +160,9 @@ static inline void __kfifo_out_data(struct kfifo *fifo,
memcpy(to + l, fifo->buffer, len - l);
}
-static inline unsigned int __kfifo_from_user_data(struct kfifo *fifo,
- const void __user *from, unsigned int len, unsigned int off)
+static inline int __kfifo_from_user_data(struct kfifo *fifo,
+ const void __user *from, unsigned int len, unsigned int off,
+ unsigned *lenout)
{
unsigned int l;
int ret;
@@ -177,16 +179,20 @@ static inline unsigned int __kfifo_from_user_data(struct kfifo *fifo,
/* first put the data starting from fifo->in to buffer end */
l = min(len, fifo->size - off);
ret = copy_from_user(fifo->buffer + off, from, l);
-
- if (unlikely(ret))
- return ret + len - l;
+ if (unlikely(ret)) {
+ *lenout = ret;
+ return -EFAULT;
+ }
+ *lenout = l;
/* then put the rest (if any) at the beginning of the buffer */
- return copy_from_user(fifo->buffer, from + l, len - l);
+ ret = copy_from_user(fifo->buffer, from + l, len - l);
+ *lenout += ret ? ret : len - l;
+ return ret ? -EFAULT : 0;
}
-static inline unsigned int __kfifo_to_user_data(struct kfifo *fifo,
- void __user *to, unsigned int len, unsigned int off)
+static inline int __kfifo_to_user_data(struct kfifo *fifo,
+ void __user *to, unsigned int len, unsigned int off, unsigned *lenout)
{
unsigned int l;
int ret;
@@ -203,12 +209,21 @@ static inline unsigned int __kfifo_to_user_data(struct kfifo *fifo,
/* first get the data from fifo->out until the end of the buffer */
l = min(len, fifo->size - off);
ret = copy_to_user(to, fifo->buffer + off, l);
-
- if (unlikely(ret))
- return ret + len - l;
+ *lenout = l;
+ if (unlikely(ret)) {
+ *lenout -= ret;
+ return -EFAULT;
+ }
/* then get the rest (if any) from the beginning of the buffer */
- return copy_to_user(to + l, fifo->buffer, len - l);
+ len -= l;
+ ret = copy_to_user(to + l, fifo->buffer, len);
+ if (unlikely(ret)) {
+ *lenout += len - ret;
+ return -EFAULT;
+ }
+ *lenout += len;
+ return 0;
}
unsigned int __kfifo_in_n(struct kfifo *fifo,
@@ -235,7 +250,7 @@ EXPORT_SYMBOL(__kfifo_in_n);
* Note that with only one concurrent reader and one concurrent
* writer, you don't need extra locking to use these functions.
*/
-unsigned int kfifo_in(struct kfifo *fifo, const unsigned char *from,
+unsigned int kfifo_in(struct kfifo *fifo, const void *from,
unsigned int len)
{
len = min(kfifo_avail(fifo), len);
@@ -277,7 +292,7 @@ EXPORT_SYMBOL(__kfifo_out_n);
* Note that with only one concurrent reader and one concurrent
* writer, you don't need extra locking to use these functions.
*/
-unsigned int kfifo_out(struct kfifo *fifo, unsigned char *to, unsigned int len)
+unsigned int kfifo_out(struct kfifo *fifo, void *to, unsigned int len)
{
len = min(kfifo_len(fifo), len);
@@ -288,6 +303,27 @@ unsigned int kfifo_out(struct kfifo *fifo, unsigned char *to, unsigned int len)
}
EXPORT_SYMBOL(kfifo_out);
+/**
+ * kfifo_out_peek - copy some data from the FIFO, but do not remove it
+ * @fifo: the fifo to be used.
+ * @to: where the data must be copied.
+ * @len: the size of the destination buffer.
+ * @offset: offset into the fifo
+ *
+ * This function copies at most @len bytes at @offset from the FIFO
+ * into the @to buffer and returns the number of copied bytes.
+ * The data is not removed from the FIFO.
+ */
+unsigned int kfifo_out_peek(struct kfifo *fifo, void *to, unsigned int len,
+ unsigned offset)
+{
+ len = min(kfifo_len(fifo), len + offset);
+
+ __kfifo_out_data(fifo, to, len, offset);
+ return len;
+}
+EXPORT_SYMBOL(kfifo_out_peek);
+
unsigned int __kfifo_out_generic(struct kfifo *fifo,
void *to, unsigned int len, unsigned int recsize,
unsigned int *total)
@@ -299,10 +335,13 @@ EXPORT_SYMBOL(__kfifo_out_generic);
unsigned int __kfifo_from_user_n(struct kfifo *fifo,
const void __user *from, unsigned int len, unsigned int recsize)
{
+ unsigned total;
+
if (kfifo_avail(fifo) < len + recsize)
return len + 1;
- return __kfifo_from_user_data(fifo, from, len, recsize);
+ __kfifo_from_user_data(fifo, from, len, recsize, &total);
+ return total;
}
EXPORT_SYMBOL(__kfifo_from_user_n);
@@ -311,20 +350,24 @@ EXPORT_SYMBOL(__kfifo_from_user_n);
* @fifo: the fifo to be used.
* @from: pointer to the data to be added.
* @len: the length of the data to be added.
+ * @total: the actual returned data length.
*
* This function copies at most @len bytes from the @from into the
- * FIFO depending and returns the number of copied bytes.
+ * FIFO, depending on the available space, and returns -EFAULT/0.
*
* Note that with only one concurrent reader and one concurrent
* writer, you don't need extra locking to use these functions.
*/
-unsigned int kfifo_from_user(struct kfifo *fifo,
- const void __user *from, unsigned int len)
+int kfifo_from_user(struct kfifo *fifo,
+ const void __user *from, unsigned int len, unsigned *total)
{
+ int ret;
len = min(kfifo_avail(fifo), len);
- len -= __kfifo_from_user_data(fifo, from, len, 0);
+ ret = __kfifo_from_user_data(fifo, from, len, 0, total);
+ if (ret)
+ return ret;
__kfifo_add_in(fifo, len);
- return len;
+ return 0;
}
EXPORT_SYMBOL(kfifo_from_user);
@@ -339,17 +382,17 @@ unsigned int __kfifo_to_user_n(struct kfifo *fifo,
void __user *to, unsigned int len, unsigned int reclen,
unsigned int recsize)
{
- unsigned int ret;
+ unsigned int ret, total;
if (kfifo_len(fifo) < reclen + recsize)
return len;
- ret = __kfifo_to_user_data(fifo, to, reclen, recsize);
+ ret = __kfifo_to_user_data(fifo, to, reclen, recsize, &total);
if (likely(ret == 0))
__kfifo_add_out(fifo, reclen + recsize);
- return ret;
+ return total;
}
EXPORT_SYMBOL(__kfifo_to_user_n);
@@ -358,20 +401,22 @@ EXPORT_SYMBOL(__kfifo_to_user_n);
* @fifo: the fifo to be used.
* @to: where the data must be copied.
* @len: the size of the destination buffer.
+ * @lenout: pointer to output variable with copied data
*
* This function copies at most @len bytes from the FIFO into the
- * @to buffer and returns the number of copied bytes.
+ * @to buffer and returns 0 or -EFAULT.
*
* Note that with only one concurrent reader and one concurrent
* writer, you don't need extra locking to use these functions.
*/
-unsigned int kfifo_to_user(struct kfifo *fifo,
- void __user *to, unsigned int len)
+int kfifo_to_user(struct kfifo *fifo,
+ void __user *to, unsigned int len, unsigned *lenout)
{
+ int ret;
len = min(kfifo_len(fifo), len);
- len -= __kfifo_to_user_data(fifo, to, len, 0);
- __kfifo_add_out(fifo, len);
- return len;
+ ret = __kfifo_to_user_data(fifo, to, len, 0, lenout);
+ __kfifo_add_out(fifo, *lenout);
+ return ret;
}
EXPORT_SYMBOL(kfifo_to_user);
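
The user-copy entry points change calling convention: instead of returning a byte count that conflates success with partial faults, kfifo_from_user() and kfifo_to_user() now return 0 or -EFAULT and report the transferred bytes through an output parameter. A sketch of a driver read/write pair under the new contract (illustrative):

        unsigned int copied;
        int ret;

        /* write path */
        ret = kfifo_from_user(&fifo, ubuf, len, &copied);
        if (ret)
                return ret;             /* -EFAULT from copy_from_user() */

        /* read path */
        ret = kfifo_to_user(&fifo, ubuf, len, &copied);
        return ret ? ret : copied;      /* bytes actually transferred */
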
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index 2eb517e2351..761fdd2b303 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -583,6 +583,9 @@ static void kgdb_wait(struct pt_regs *regs)
smp_wmb();
atomic_set(&cpu_in_kgdb[cpu], 1);
+ /* Disable any cpu specific hw breakpoints */
+ kgdb_disable_hw_debug(regs);
+
/* Wait till primary CPU is done with debugging */
while (atomic_read(&passive_cpu_wait[cpu]))
cpu_relax();
@@ -596,7 +599,7 @@ static void kgdb_wait(struct pt_regs *regs)
/* Signal the primary CPU that we are done: */
atomic_set(&cpu_in_kgdb[cpu], 0);
- touch_softlockup_watchdog();
+ touch_softlockup_watchdog_sync();
clocksource_touch_watchdog();
local_irq_restore(flags);
}
@@ -1450,7 +1453,7 @@ acquirelock:
(kgdb_info[cpu].task &&
kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
atomic_set(&kgdb_active, -1);
- touch_softlockup_watchdog();
+ touch_softlockup_watchdog_sync();
clocksource_touch_watchdog();
local_irq_restore(flags);
@@ -1550,7 +1553,7 @@ kgdb_restore:
}
/* Free kgdb_active */
atomic_set(&kgdb_active, -1);
- touch_softlockup_watchdog();
+ touch_softlockup_watchdog_sync();
clocksource_touch_watchdog();
local_irq_restore(flags);
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 25b10319036..bf0e231d970 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -520,13 +520,15 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp,
return -ENOMEM;
ret = call_usermodehelper_stdinpipe(sub_info, filp);
- if (ret < 0)
- goto out;
+ if (ret < 0) {
+ call_usermodehelper_freeinfo(sub_info);
+ return ret;
+ }
- return call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC);
+ ret = call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC);
+ if (ret < 0) /* Failed to execute helper, close pipe */
+ filp_close(*filp, NULL);
- out:
- call_usermodehelper_freeinfo(sub_info);
return ret;
}
EXPORT_SYMBOL(call_usermodehelper_pipe);
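
After this fix, call_usermodehelper_pipe() owns its cleanup on every failure: the helper info is freed when the pipe cannot be set up, and the pipe is closed when the exec fails, so callers no longer have to guess which resources survived. The resulting caller view (illustrative):

        struct file *out;
        int ret;

        ret = call_usermodehelper_pipe(path, argv, envp, &out);
        if (ret < 0)
                return ret;     /* nothing to undo; the callee cleaned up */
        /* ... write to 'out', then filp_close(out, NULL) when finished ... */
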
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 5feaddcdbe4..c62ec14609b 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2147,7 +2147,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
return ret;
return print_irq_inversion_bug(curr, &root, target_entry,
- this, 1, irqclass);
+ this, 0, irqclass);
}
void print_irqtrace_events(struct task_struct *curr)
diff --git a/kernel/module.c b/kernel/module.c
index e96b8ed1cb6..f82386bd9ee 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1010,6 +1010,12 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
* J. Corbet <corbet@lwn.net>
*/
#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
+
+static inline bool sect_empty(const Elf_Shdr *sect)
+{
+ return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
+}
+
struct module_sect_attr
{
struct module_attribute mattr;
@@ -1051,8 +1057,7 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect,
/* Count loaded sections and allocate structures */
for (i = 0; i < nsect; i++)
- if (sechdrs[i].sh_flags & SHF_ALLOC
- && sechdrs[i].sh_size)
+ if (!sect_empty(&sechdrs[i]))
nloaded++;
size[0] = ALIGN(sizeof(*sect_attrs)
+ nloaded * sizeof(sect_attrs->attrs[0]),
@@ -1070,9 +1075,7 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect,
sattr = &sect_attrs->attrs[0];
gattr = &sect_attrs->grp.attrs[0];
for (i = 0; i < nsect; i++) {
- if (! (sechdrs[i].sh_flags & SHF_ALLOC))
- continue;
- if (!sechdrs[i].sh_size)
+ if (sect_empty(&sechdrs[i]))
continue;
sattr->address = sechdrs[i].sh_addr;
sattr->name = kstrdup(secstrings + sechdrs[i].sh_name,
@@ -1156,7 +1159,7 @@ static void add_notes_attrs(struct module *mod, unsigned int nsect,
/* Count notes sections and allocate structures. */
notes = 0;
for (i = 0; i < nsect; i++)
- if ((sechdrs[i].sh_flags & SHF_ALLOC) &&
+ if (!sect_empty(&sechdrs[i]) &&
(sechdrs[i].sh_type == SHT_NOTE))
++notes;
@@ -1172,7 +1175,7 @@ static void add_notes_attrs(struct module *mod, unsigned int nsect,
notes_attrs->notes = notes;
nattr = &notes_attrs->attrs[0];
for (loaded = i = 0; i < nsect; ++i) {
- if (!(sechdrs[i].sh_flags & SHF_ALLOC))
+ if (sect_empty(&sechdrs[i]))
continue;
if (sechdrs[i].sh_type == SHT_NOTE) {
nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
diff --git a/kernel/panic.c b/kernel/panic.c
index 5827f7b9725..c787333282b 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -75,7 +75,6 @@ NORET_TYPE void panic(const char * fmt, ...)
dump_stack();
#endif
- kmsg_dump(KMSG_DUMP_PANIC);
/*
* If we have crashed and we have a crash kernel loaded let it handle
* everything else.
@@ -83,6 +82,8 @@ NORET_TYPE void panic(const char * fmt, ...)
*/
crash_kexec(NULL);
+ kmsg_dump(KMSG_DUMP_PANIC);
+
/*
* Note smp_send_stop is the usual smp shutdown function, which
* unfortunately means it may not be hardened to work in a panic
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 603c0d8b5df..2ae7409bf38 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -3259,8 +3259,6 @@ static void perf_event_task_output(struct perf_event *event,
task_event->event_id.tid = perf_event_tid(event, task);
task_event->event_id.ptid = perf_event_tid(event, current);
- task_event->event_id.time = perf_clock();
-
perf_output_put(&handle, task_event->event_id);