Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile | 1
-rw-r--r--  kernel/async.c | 18
-rw-r--r--  kernel/audit.c | 9
-rw-r--r--  kernel/audit_tree.c | 2
-rw-r--r--  kernel/auditfilter.c | 16
-rw-r--r--  kernel/auditsc.c | 34
-rw-r--r--  kernel/cgroup.c | 435
-rw-r--r--  kernel/cgroup_debug.c | 2
-rw-r--r--  kernel/cpu.c | 6
-rw-r--r--  kernel/cpuset.c | 254
-rw-r--r--  kernel/exec_domain.c | 23
-rw-r--r--  kernel/exit.c | 250
-rw-r--r--  kernel/extable.c | 29
-rw-r--r--  kernel/fork.c | 72
-rw-r--r--  kernel/futex.c | 201
-rw-r--r--  kernel/hrtimer.c | 55
-rw-r--r--  kernel/irq/Makefile | 1
-rw-r--r--  kernel/irq/chip.c | 7
-rw-r--r--  kernel/irq/devres.c | 16
-rw-r--r--  kernel/irq/handle.c | 86
-rw-r--r--  kernel/irq/internals.h | 3
-rw-r--r--  kernel/irq/manage.c | 412
-rw-r--r--  kernel/irq/numa_migrate.c | 11
-rw-r--r--  kernel/irq/pm.c | 79
-rw-r--r--  kernel/irq/spurious.c | 14
-rw-r--r--  kernel/kallsyms.c | 19
-rw-r--r--  kernel/kexec.c | 22
-rw-r--r--  kernel/kmod.c | 12
-rw-r--r--  kernel/kprobes.c | 281
-rw-r--r--  kernel/kthread.c | 4
-rw-r--r--  kernel/latencytop.c | 83
-rw-r--r--  kernel/lockdep.c | 26
-rw-r--r--  kernel/module.c | 296
-rw-r--r--  kernel/ns_cgroup.c | 14
-rw-r--r--  kernel/panic.c | 115
-rw-r--r--  kernel/params.c | 26
-rw-r--r--  kernel/pid.c | 33
-rw-r--r--  kernel/pid_namespace.c | 15
-rw-r--r--  kernel/posix-cpu-timers.c | 3
-rw-r--r--  kernel/power/disk.c | 143
-rw-r--r--  kernel/power/main.c | 55
-rw-r--r--  kernel/power/snapshot.c | 9
-rw-r--r--  kernel/power/swsusp.c | 18
-rw-r--r--  kernel/printk.c | 26
-rw-r--r--  kernel/ptrace.c | 103
-rw-r--r--  kernel/rcuclassic.c | 23
-rw-r--r--  kernel/rcupdate.c | 44
-rw-r--r--  kernel/rcupreempt.c | 48
-rw-r--r--  kernel/rcutorture.c | 25
-rw-r--r--  kernel/rcutree.c | 20
-rw-r--r--  kernel/rcutree.h | 10
-rw-r--r--  kernel/rcutree_trace.c | 2
-rw-r--r--  kernel/relay.c | 10
-rw-r--r--  kernel/sched.c | 1063
-rw-r--r--  kernel/sched_clock.c | 31
-rw-r--r--  kernel/sched_cpupri.h | 2
-rw-r--r--  kernel/sched_debug.c | 8
-rw-r--r--  kernel/sched_fair.c | 59
-rw-r--r--  kernel/sched_features.h | 3
-rw-r--r--  kernel/sched_rt.c | 537
-rw-r--r--  kernel/sched_stats.h | 7
-rw-r--r--  kernel/signal.c | 71
-rw-r--r--  kernel/slow-work.c | 640
-rw-r--r--  kernel/smp.c | 432
-rw-r--r--  kernel/softirq.c | 4
-rw-r--r--  kernel/spinlock.c | 18
-rw-r--r--  kernel/stop_machine.c | 2
-rw-r--r--  kernel/sys.c | 5
-rw-r--r--  kernel/sysctl.c | 51
-rw-r--r--  kernel/sysctl_check.c | 1
-rw-r--r--  kernel/time/Makefile | 2
-rw-r--r--  kernel/time/clockevents.c | 20
-rw-r--r--  kernel/time/clocksource.c | 76
-rw-r--r--  kernel/time/ntp.c | 444
-rw-r--r--  kernel/time/timecompare.c | 191
-rw-r--r--  kernel/timer.c | 110
-rw-r--r--  kernel/trace/Kconfig | 9
-rw-r--r--  kernel/trace/ftrace.c | 4
-rw-r--r--  kernel/trace/kmemtrace.c | 319
-rw-r--r--  kernel/trace/ring_buffer.c | 1
-rw-r--r--  kernel/trace/trace.h | 6
-rw-r--r--  kernel/user.c | 2
-rw-r--r--  kernel/utsname_sysctl.c | 2
-rw-r--r--  kernel/workqueue.c | 47
84 files changed, 5398 insertions, 2290 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index e4791b3ba55..bab1dffe37e 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -93,6 +93,7 @@ obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
obj-$(CONFIG_FUNCTION_TRACER) += trace/
obj-$(CONFIG_TRACING) += trace/
obj-$(CONFIG_SMP) += sched_cpupri.o
+obj-$(CONFIG_SLOW_WORK) += slow-work.o
ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
# According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
diff --git a/kernel/async.c b/kernel/async.c
index f565891f2c9..968ef9457d4 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -49,6 +49,7 @@ asynchronous and synchronous parts of the kernel.
*/
#include <linux/async.h>
+#include <linux/bug.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/sched.h>
@@ -387,20 +388,11 @@ static int async_manager_thread(void *unused)
static int __init async_init(void)
{
- if (async_enabled)
- if (IS_ERR(kthread_run(async_manager_thread, NULL,
- "async/mgr")))
- async_enabled = 0;
- return 0;
-}
+ async_enabled =
+ !IS_ERR(kthread_run(async_manager_thread, NULL, "async/mgr"));
-static int __init setup_async(char *str)
-{
- async_enabled = 1;
- return 1;
+ WARN_ON(!async_enabled);
+ return 0;
}
-__setup("fastboot", setup_async);
-
-
core_initcall(async_init);
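
The rewritten async_init() above leans on the kernel's ERR_PTR convention: kthread_run() hands back either a real task_struct pointer or an errno value encoded into the pointer, which is why a single IS_ERR() test can replace the old nested check. The standalone sketch below is not part of the patch; fake_kthread_run() and the re-implemented ERR_PTR/IS_ERR/PTR_ERR helpers are illustrative stand-ins that mirror include/linux/err.h.

/*
 * Standalone illustration of the ERR_PTR/IS_ERR convention used above.
 * The helpers mirror include/linux/err.h. Build with: cc -o errptr errptr.c
 */
#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)
{
    /* A negative errno lands in the topmost (never mapped) page. */
    return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
    return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
    /* Values in [-MAX_ERRNO, -1] are error codes, not real pointers. */
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Invented stand-in for kthread_run(): fails with -ENOMEM on request. */
static void *fake_kthread_run(int fail)
{
    static int dummy_task;

    return fail ? ERR_PTR(-ENOMEM) : (void *)&dummy_task;
}

int main(void)
{
    void *task = fake_kthread_run(0);
    int async_enabled = !IS_ERR(task);      /* same shape as async_init() */

    printf("success path: async_enabled=%d\n", async_enabled);

    task = fake_kthread_run(1);
    async_enabled = !IS_ERR(task);
    printf("failure path: async_enabled=%d, err=%ld\n",
           async_enabled, IS_ERR(task) ? PTR_ERR(task) : 0L);
    return 0;
}

With this shape, a failed kthread_run() simply leaves async_enabled at 0 and trips the WARN_ON(), so would-be-asynchronous calls fall back to running synchronously instead of aborting boot.
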
diff --git a/kernel/audit.c b/kernel/audit.c
index ce6d8ea3131..9442c3533ba 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -766,6 +766,9 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
audit_log_format(ab, " msg=");
size = nlmsg_len(nlh);
+ if (size > 0 &&
+ ((unsigned char *)data)[size - 1] == '\0')
+ size--;
audit_log_n_untrustedstring(ab, data, size);
}
audit_set_pid(ab, pid);
@@ -1382,7 +1385,7 @@ void audit_log_n_string(struct audit_buffer *ab, const char *string,
int audit_string_contains_control(const char *string, size_t len)
{
const unsigned char *p;
- for (p = string; p < (const unsigned char *)string + len && *p; p++) {
+ for (p = string; p < (const unsigned char *)string + len; p++) {
if (*p == '"' || *p < 0x21 || *p > 0x7e)
return 1;
}
@@ -1437,13 +1440,13 @@ void audit_log_d_path(struct audit_buffer *ab, const char *prefix,
/* We will allow 11 spaces for ' (deleted)' to be appended */
pathname = kmalloc(PATH_MAX+11, ab->gfp_mask);
if (!pathname) {
- audit_log_format(ab, "<no memory>");
+ audit_log_string(ab, "<no_memory>");
return;
}
p = d_path(path, pathname, PATH_MAX+11);
if (IS_ERR(p)) { /* Should never happen since we send PATH_MAX */
/* FIXME: can we save some information here? */
- audit_log_format(ab, "<too long>");
+ audit_log_string(ab, "<too_long>");
} else
audit_log_untrustedstring(ab, p);
kfree(pathname);
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 8ad9545b8db..917ab952556 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -385,6 +385,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
mutex_lock(&inode->inotify_mutex);
if (inotify_clone_watch(&old->watch, &chunk->watch) < 0) {
mutex_unlock(&inode->inotify_mutex);
+ put_inotify_watch(&old->watch);
free_chunk(chunk);
return -ENOSPC;
}
@@ -394,6 +395,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
chunk->dead = 1;
inotify_evict_watch(&chunk->watch);
mutex_unlock(&inode->inotify_mutex);
+ put_inotify_watch(&old->watch);
put_inotify_watch(&chunk->watch);
return 0;
}
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index fbf24d121d9..a6fe71fd5d1 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -135,18 +135,18 @@ static void audit_remove_watch(struct audit_watch *watch)
static inline void audit_free_rule(struct audit_entry *e)
{
int i;
-
+ struct audit_krule *erule = &e->rule;
/* some rules don't have associated watches */
- if (e->rule.watch)
- audit_put_watch(e->rule.watch);
- if (e->rule.fields)
- for (i = 0; i < e->rule.field_count; i++) {
- struct audit_field *f = &e->rule.fields[i];
+ if (erule->watch)
+ audit_put_watch(erule->watch);
+ if (erule->fields)
+ for (i = 0; i < erule->field_count; i++) {
+ struct audit_field *f = &erule->fields[i];
kfree(f->lsm_str);
security_audit_rule_free(f->lsm_rule);
}
- kfree(e->rule.fields);
- kfree(e->rule.filterkey);
+ kfree(erule->fields);
+ kfree(erule->filterkey);
kfree(e);
}
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 8cbddff6c28..7d6ac7c1f41 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -66,6 +66,7 @@
#include <linux/syscalls.h>
#include <linux/inotify.h>
#include <linux/capability.h>
+#include <linux/fs_struct.h>
#include "audit.h"
@@ -328,6 +329,14 @@ static int audit_match_filetype(struct audit_context *ctx, int which)
*/
#ifdef CONFIG_AUDIT_TREE
+static void audit_set_auditable(struct audit_context *ctx)
+{
+ if (!ctx->prio) {
+ ctx->prio = 1;
+ ctx->current_state = AUDIT_RECORD_CONTEXT;
+ }
+}
+
static int put_tree_ref(struct audit_context *ctx, struct audit_chunk *chunk)
{
struct audit_tree_refs *p = ctx->trees;
@@ -741,17 +750,9 @@ void audit_filter_inodes(struct task_struct *tsk, struct audit_context *ctx)
rcu_read_unlock();
}
-static void audit_set_auditable(struct audit_context *ctx)
-{
- if (!ctx->prio) {
- ctx->prio = 1;
- ctx->current_state = AUDIT_RECORD_CONTEXT;
- }
-}
-
static inline struct audit_context *audit_get_context(struct task_struct *tsk,
int return_valid,
- int return_code)
+ long return_code)
{
struct audit_context *context = tsk->audit_context;
@@ -1023,7 +1024,7 @@ static int audit_log_single_execve_arg(struct audit_context *context,
{
char arg_num_len_buf[12];
const char __user *tmp_p = p;
- /* how many digits are in arg_num? 3 is the length of a=\n */
+ /* how many digits are in arg_num? 3 is the length of " a=" */
size_t arg_num_len = snprintf(arg_num_len_buf, 12, "%d", arg_num) + 3;
size_t len, len_left, to_send;
size_t max_execve_audit_len = MAX_EXECVE_AUDIT_LEN;
@@ -1109,7 +1110,7 @@ static int audit_log_single_execve_arg(struct audit_context *context,
* so we can be sure nothing was lost.
*/
if ((i == 0) && (too_long))
- audit_log_format(*ab, "a%d_len=%zu ", arg_num,
+ audit_log_format(*ab, " a%d_len=%zu", arg_num,
has_cntl ? 2*len : len);
/*
@@ -1129,7 +1130,7 @@ static int audit_log_single_execve_arg(struct audit_context *context,
buf[to_send] = '\0';
/* actually log it */
- audit_log_format(*ab, "a%d", arg_num);
+ audit_log_format(*ab, " a%d", arg_num);
if (too_long)
audit_log_format(*ab, "[%d]", i);
audit_log_format(*ab, "=");
@@ -1137,7 +1138,6 @@ static int audit_log_single_execve_arg(struct audit_context *context,
audit_log_n_hex(*ab, buf, to_send);
else
audit_log_format(*ab, "\"%s\"", buf);
- audit_log_format(*ab, "\n");
p += to_send;
len_left -= to_send;
@@ -1165,7 +1165,7 @@ static void audit_log_execve_info(struct audit_context *context,
p = (const char __user *)axi->mm->arg_start;
- audit_log_format(*ab, "argc=%d ", axi->argc);
+ audit_log_format(*ab, "argc=%d", axi->argc);
/*
* we need some kernel buffer to hold the userspace args. Just
@@ -1478,7 +1478,7 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
case 0:
/* name was specified as a relative path and the
* directory component is the cwd */
- audit_log_d_path(ab, " name=", &context->pwd);
+ audit_log_d_path(ab, "name=", &context->pwd);
break;
default:
/* log the name's directory component */
@@ -2149,7 +2149,7 @@ int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
* __audit_mq_open - record audit data for a POSIX MQ open
* @oflag: open flag
* @mode: mode bits
- * @u_attr: queue attributes
+ * @attr: queue attributes
*
*/
void __audit_mq_open(int oflag, mode_t mode, struct mq_attr *attr)
@@ -2196,7 +2196,7 @@ void __audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio,
/**
* __audit_mq_notify - record audit data for a POSIX MQ notify
* @mqdes: MQ descriptor
- * @u_notification: Notification event
+ * @notification: Notification event
*
*/
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 9edb5c4b79b..382109b5bae 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -94,7 +94,6 @@ struct cgroupfs_root {
char release_agent_path[PATH_MAX];
};
-
/*
* The "rootnode" hierarchy is the "dummy hierarchy", reserved for the
* subsystems that are otherwise unattached - it never has more than a
@@ -102,6 +101,39 @@ struct cgroupfs_root {
*/
static struct cgroupfs_root rootnode;
+/*
+ * CSS ID -- ID per subsys's Cgroup Subsys State(CSS). used only when
+ * cgroup_subsys->use_id != 0.
+ */
+#define CSS_ID_MAX (65535)
+struct css_id {
+ /*
+ * The css to which this ID points. This pointer is set to valid value
+ * after cgroup is populated. If cgroup is removed, this will be NULL.
+ * This pointer is expected to be RCU-safe because destroy()
+ * is called after synchronize_rcu(). But for safe use, css_is_removed()
+ * css_tryget() should be used for avoiding race.
+ */
+ struct cgroup_subsys_state *css;
+ /*
+ * ID of this css.
+ */
+ unsigned short id;
+ /*
+ * Depth in hierarchy which this ID belongs to.
+ */
+ unsigned short depth;
+ /*
+ * ID is freed by RCU. (and lookup routine is RCU safe.)
+ */
+ struct rcu_head rcu_head;
+ /*
+ * Hierarchy of CSS ID belongs to.
+ */
+ unsigned short stack[0]; /* Array of Length (depth+1) */
+};
+
+
/* The list of hierarchy roots */
static LIST_HEAD(roots);
@@ -185,6 +217,8 @@ struct cg_cgroup_link {
static struct css_set init_css_set;
static struct cg_cgroup_link init_css_set_link;
+static int cgroup_subsys_init_idr(struct cgroup_subsys *ss);
+
/* css_set_lock protects the list of css_set objects, and the
* chain of tasks off each css_set. Nests outside task->alloc_lock
* due to cgroup_iter_start() */
@@ -567,6 +601,9 @@ static struct backing_dev_info cgroup_backing_dev_info = {
.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
+static int alloc_css_id(struct cgroup_subsys *ss,
+ struct cgroup *parent, struct cgroup *child);
+
static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb)
{
struct inode *inode = new_inode(sb);
@@ -585,13 +622,18 @@ static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb)
* Call subsys's pre_destroy handler.
* This is called before css refcnt check.
*/
-static void cgroup_call_pre_destroy(struct cgroup *cgrp)
+static int cgroup_call_pre_destroy(struct cgroup *cgrp)
{
struct cgroup_subsys *ss;
+ int ret = 0;
+
for_each_subsys(cgrp->root, ss)
- if (ss->pre_destroy)
- ss->pre_destroy(ss, cgrp);
- return;
+ if (ss->pre_destroy) {
+ ret = ss->pre_destroy(ss, cgrp);
+ if (ret)
+ break;
+ }
+ return ret;
}
static void free_cgroup_rcu(struct rcu_head *obj)
@@ -685,6 +727,22 @@ static void cgroup_d_remove_dir(struct dentry *dentry)
remove_dir(dentry);
}
+/*
+ * A queue for waiters to do rmdir() cgroup. A tasks will sleep when
+ * cgroup->count == 0 && list_empty(&cgroup->children) && subsys has some
+ * reference to css->refcnt. In general, this refcnt is expected to goes down
+ * to zero, soon.
+ *
+ * CGRP_WAIT_ON_RMDIR flag is modified under cgroup's inode->i_mutex;
+ */
+DECLARE_WAIT_QUEUE_HEAD(cgroup_rmdir_waitq);
+
+static void cgroup_wakeup_rmdir_waiters(const struct cgroup *cgrp)
+{
+ if (unlikely(test_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags)))
+ wake_up_all(&cgroup_rmdir_waitq);
+}
+
static int rebind_subsystems(struct cgroupfs_root *root,
unsigned long final_bits)
{
@@ -857,16 +915,16 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data)
}
ret = rebind_subsystems(root, opts.subsys_bits);
+ if (ret)
+ goto out_unlock;
/* (re)populate subsystem files */
- if (!ret)
- cgroup_populate_dir(cgrp);
+ cgroup_populate_dir(cgrp);
if (opts.release_agent)
strcpy(root->release_agent_path, opts.release_agent);
out_unlock:
- if (opts.release_agent)
- kfree(opts.release_agent);
+ kfree(opts.release_agent);
mutex_unlock(&cgroup_mutex);
mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
return ret;
@@ -969,15 +1027,13 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
/* First find the desired set of subsystems */
ret = parse_cgroupfs_options(data, &opts);
if (ret) {
- if (opts.release_agent)
- kfree(opts.release_agent);
+ kfree(opts.release_agent);
return ret;
}
root = kzalloc(sizeof(*root), GFP_KERNEL);
if (!root) {
- if (opts.release_agent)
- kfree(opts.release_agent);
+ kfree(opts.release_agent);
return -ENOMEM;
}
@@ -1071,7 +1127,8 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
mutex_unlock(&cgroup_mutex);
}
- return simple_set_mnt(mnt, sb);
+ simple_set_mnt(mnt, sb);
+ return 0;
free_cg_links:
free_cg_links(&tmp_cg_links);
@@ -1279,6 +1336,12 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
synchronize_rcu();
put_css_set(cg);
+
+ /*
+ * wake up rmdir() waiter. the rmdir should fail since the cgroup
+ * is no longer empty.
+ */
+ cgroup_wakeup_rmdir_waiters(cgrp);
return 0;
}
@@ -1624,10 +1687,10 @@ static struct inode_operations cgroup_dir_inode_operations = {
.rename = cgroup_rename,
};
-static int cgroup_create_file(struct dentry *dentry, int mode,
+static int cgroup_create_file(struct dentry *dentry, mode_t mode,
struct super_block *sb)
{
- static struct dentry_operations cgroup_dops = {
+ static const struct dentry_operations cgroup_dops = {
.d_iput = cgroup_diput,
};
@@ -1670,7 +1733,7 @@ static int cgroup_create_file(struct dentry *dentry, int mode,
* @mode: mode to set on new directory.
*/
static int cgroup_create_dir(struct cgroup *cgrp, struct dentry *dentry,
- int mode)
+ mode_t mode)
{
struct dentry *parent;
int error = 0;
@@ -1688,6 +1751,33 @@ static int cgroup_create_dir(struct cgroup *cgrp, struct dentry *dentry,
return error;
}
+/**
+ * cgroup_file_mode - deduce file mode of a control file
+ * @cft: the control file in question
+ *
+ * returns cft->mode if ->mode is not 0
+ * returns S_IRUGO|S_IWUSR if it has both a read and a write handler
+ * returns S_IRUGO if it has only a read handler
+ * returns S_IWUSR if it has only a write hander
+ */
+static mode_t cgroup_file_mode(const struct cftype *cft)
+{
+ mode_t mode = 0;
+
+ if (cft->mode)
+ return cft->mode;
+
+ if (cft->read || cft->read_u64 || cft->read_s64 ||
+ cft->read_map || cft->read_seq_string)
+ mode |= S_IRUGO;
+
+ if (cft->write || cft->write_u64 || cft->write_s64 ||
+ cft->write_string || cft->trigger)
+ mode |= S_IWUSR;
+
+ return mode;
+}
+
int cgroup_add_file(struct cgroup *cgrp,
struct cgroup_subsys *subsys,
const struct cftype *cft)
@@ -1695,6 +1785,7 @@ int cgroup_add_file(struct cgroup *cgrp,
struct dentry *dir = cgrp->dentry;
struct dentry *dentry;
int error;
+ mode_t mode;
char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 };
if (subsys && !test_bit(ROOT_NOPREFIX, &cgrp->root->flags)) {
@@ -1705,7 +1796,8 @@ int cgroup_add_file(struct cgroup *cgrp,
BUG_ON(!mutex_is_locked(&dir->d_inode->i_mutex));
dentry = lookup_one_len(name, dir, strlen(name));
if (!IS_ERR(dentry)) {
- error = cgroup_create_file(dentry, 0644 | S_IFREG,
+ mode = cgroup_file_mode(cft);
+ error = cgroup_create_file(dentry, mode | S_IFREG,
cgrp->root->sb);
if (!error)
dentry->d_fsdata = (void *)cft;
@@ -2287,6 +2379,7 @@ static struct cftype files[] = {
.write_u64 = cgroup_tasks_write,
.release = cgroup_tasks_release,
.private = FILE_TASKLIST,
+ .mode = S_IRUGO | S_IWUSR,
},
{
@@ -2326,6 +2419,17 @@ static int cgroup_populate_dir(struct cgroup *cgrp)
if (ss->populate && (err = ss->populate(ss, cgrp)) < 0)
return err;
}
+ /* This cgroup is ready now */
+ for_each_subsys(cgrp->root, ss) {
+ struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
+ /*
+ * Update id->css pointer and make this css visible from
+ * CSS ID functions. This pointer will be dereferened
+ * from RCU-read-side without locks.
+ */
+ if (css->id)
+ rcu_assign_pointer(css->id->css, css);
+ }
return 0;
}
@@ -2337,6 +2441,7 @@ static void init_cgroup_css(struct cgroup_subsys_state *css,
css->cgroup = cgrp;
atomic_set(&css->refcnt, 1);
css->flags = 0;
+ css->id = NULL;
if (cgrp == dummytop)
set_bit(CSS_ROOT, &css->flags);
BUG_ON(cgrp->subsys[ss->subsys_id]);
@@ -2375,7 +2480,7 @@ static void cgroup_unlock_hierarchy(struct cgroupfs_root *root)
* Must be called with the mutex on the parent inode held
*/
static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
- int mode)
+ mode_t mode)
{
struct cgroup *cgrp;
struct cgroupfs_root *root = parent->root;
@@ -2412,6 +2517,10 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
goto err_destroy;
}
init_cgroup_css(css, ss, cgrp);
+ if (ss->use_id)
+ if (alloc_css_id(ss, parent, cgrp))
+ goto err_destroy;
+ /* At error, ->destroy() callback has to free assigned ID. */
}
cgroup_lock_hierarchy(root);
@@ -2554,9 +2663,11 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
struct cgroup *cgrp = dentry->d_fsdata;
struct dentry *d;
struct cgroup *parent;
+ DEFINE_WAIT(wait);
+ int ret;
/* the vfs holds both inode->i_mutex already */
-
+again:
mutex_lock(&cgroup_mutex);
if (atomic_read(&cgrp->count) != 0) {
mutex_unlock(&cgroup_mutex);
@@ -2572,17 +2683,39 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
* Call pre_destroy handlers of subsys. Notify subsystems
* that rmdir() request comes.
*/
- cgroup_call_pre_destroy(cgrp);
+ ret = cgroup_call_pre_destroy(cgrp);
+ if (ret)
+ return ret;
mutex_lock(&cgroup_mutex);
parent = cgrp->parent;
-
- if (atomic_read(&cgrp->count)
- || !list_empty(&cgrp->children)
- || !cgroup_clear_css_refs(cgrp)) {
+ if (atomic_read(&cgrp->count) || !list_empty(&cgrp->children)) {
mutex_unlock(&cgroup_mutex);
return -EBUSY;
}
+ /*
+ * css_put/get is provided for subsys to grab refcnt to css. In typical
+ * case, subsystem has no reference after pre_destroy(). But, under
+ * hierarchy management, some *temporal* refcnt can be hold.
+ * To avoid returning -EBUSY to a user, waitqueue is used. If subsys
+ * is really busy, it should return -EBUSY at pre_destroy(). wake_up
+ * is called when css_put() is called and refcnt goes down to 0.
+ */
+ set_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
+ prepare_to_wait(&cgroup_rmdir_waitq, &wait, TASK_INTERRUPTIBLE);
+
+ if (!cgroup_clear_css_refs(cgrp)) {
+ mutex_unlock(&cgroup_mutex);
+ schedule();
+ finish_wait(&cgroup_rmdir_waitq, &wait);
+ clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
+ if (signal_pending(current))
+ return -EINTR;
+ goto again;
+ }
+ /* NO css_tryget() can success after here. */
+ finish_wait(&cgroup_rmdir_waitq, &wait);
+ clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
spin_lock(&release_list_lock);
set_bit(CGRP_REMOVED, &cgrp->flags);
@@ -2707,6 +2840,8 @@ int __init cgroup_init(void)
struct cgroup_subsys *ss = subsys[i];
if (!ss->early_init)
cgroup_init_subsys(ss);
+ if (ss->use_id)
+ cgroup_subsys_init_idr(ss);
}
/* Add init_css_set to the hash table */
@@ -3083,18 +3218,19 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
}
/**
- * cgroup_is_descendant - see if @cgrp is a descendant of current task's cgrp
+ * cgroup_is_descendant - see if @cgrp is a descendant of @task's cgrp
* @cgrp: the cgroup in question
+ * @task: the task in question
*
- * See if @cgrp is a descendant of the current task's cgroup in
- * the appropriate hierarchy.
+ * See if @cgrp is a descendant of @task's cgroup in the appropriate
+ * hierarchy.
*
* If we are sending in dummytop, then presumably we are creating
* the top cgroup in the subsystem.
*
* Called only by the ns (nsproxy) cgroup.
*/
-int cgroup_is_descendant(const struct cgroup *cgrp)
+int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task)
{
int ret;
struct cgroup *target;
@@ -3104,7 +3240,7 @@ int cgroup_is_descendant(const struct cgroup *cgrp)
return 1;
get_first_subsys(cgrp, NULL, &subsys_id);
- target = task_cgroup(current, subsys_id);
+ target = task_cgroup(task, subsys_id);
while (cgrp != target && cgrp!= cgrp->top_cgroup)
cgrp = cgrp->parent;
ret = (cgrp == target);
@@ -3137,10 +3273,12 @@ void __css_put(struct cgroup_subsys_state *css)
{
struct cgroup *cgrp = css->cgroup;
rcu_read_lock();
- if ((atomic_dec_return(&css->refcnt) == 1) &&
- notify_on_release(cgrp)) {
- set_bit(CGRP_RELEASABLE, &cgrp->flags);
- check_for_release(cgrp);
+ if (atomic_dec_return(&css->refcnt) == 1) {
+ if (notify_on_release(cgrp)) {
+ set_bit(CGRP_RELEASABLE, &cgrp->flags);
+ check_for_release(cgrp);
+ }
+ cgroup_wakeup_rmdir_waiters(cgrp);
}
rcu_read_unlock();
}
@@ -3240,3 +3378,232 @@ static int __init cgroup_disable(char *str)
return 1;
}
__setup("cgroup_disable=", cgroup_disable);
+
+/*
+ * Functons for CSS ID.
+ */
+
+/*
+ *To get ID other than 0, this should be called when !cgroup_is_removed().
+ */
+unsigned short css_id(struct cgroup_subsys_state *css)
+{
+ struct css_id *cssid = rcu_dereference(css->id);
+
+ if (cssid)
+ return cssid->id;
+ return 0;
+}
+
+unsigned short css_depth(struct cgroup_subsys_state *css)
+{
+ struct css_id *cssid = rcu_dereference(css->id);
+
+ if (cssid)
+ return cssid->depth;
+ return 0;
+}
+
+bool css_is_ancestor(struct cgroup_subsys_state *child,
+ const struct cgroup_subsys_state *root)
+{
+ struct css_id *child_id = rcu_dereference(child->id);
+ struct css_id *root_id = rcu_dereference(root->id);
+
+ if (!child_id || !root_id || (child_id->depth < root_id->depth))
+ return false;
+ return child_id->stack[root_id->depth] == root_id->id;
+}
+
+static void __free_css_id_cb(struct rcu_head *head)
+{
+ struct css_id *id;
+
+ id = container_of(head, struct css_id, rcu_head);
+ kfree(id);
+}
+
+void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css)
+{
+ struct css_id *id = css->id;
+ /* When this is called before css_id initialization, id can be NULL */
+ if (!id)
+ return;
+
+ BUG_ON(!ss->use_id);
+
+ rcu_assign_pointer(id->css, NULL);
+ rcu_assign_pointer(css->id, NULL);
+ spin_lock(&ss->id_lock);
+ idr_remove(&ss->idr, id->id);
+ spin_unlock(&ss->id_lock);
+ call_rcu(&id->rcu_head, __free_css_id_cb);
+}
+
+/*
+ * This is called by init or create(). Then, calls to this function are
+ * always serialized (By cgroup_mutex() at create()).
+ */
+
+static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
+{
+ struct css_id *newid;
+ int myid, error, size;
+
+ BUG_ON(!ss->use_id);
+
+ size = sizeof(*newid) + sizeof(unsigned short) * (depth + 1);
+ newid = kzalloc(size, GFP_KERNEL);
+ if (!newid)
+ return ERR_PTR(-ENOMEM);
+ /* get id */
+ if (unlikely(!idr_pre_get(&ss->idr, GFP_KERNEL))) {
+ error = -ENOMEM;
+ goto err_out;
+ }
+ spin_lock(&ss->id_lock);
+ /* Don't use 0. allocates an ID of 1-65535 */
+ error = idr_get_new_above(&ss->idr, newid, 1, &myid);
+ spin_unlock(&ss->id_lock);
+
+ /* Returns error when there are no free spaces for new ID.*/
+ if (error) {
+ error = -ENOSPC;
+ goto err_out;
+ }
+ if (myid > CSS_ID_MAX)
+ goto remove_idr;
+
+ newid->id = myid;
+ newid->depth = depth;
+ return newid;
+remove_idr:
+ error = -ENOSPC;
+ spin_lock(&ss->id_lock);
+ idr_remove(&ss->idr, myid);
+ spin_unlock(&ss->id_lock);
+err_out:
+ kfree(newid);
+ return ERR_PTR(error);
+
+}
+
+static int __init cgroup_subsys_init_idr(struct cgroup_subsys *ss)
+{
+ struct css_id *newid;
+ struct cgroup_subsys_state *rootcss;
+
+ spin_lock_init(&ss->id_lock);
+ idr_init(&ss->idr);
+
+ rootcss = init_css_set.subsys[ss->subsys_id];
+ newid = get_new_cssid(ss, 0);
+ if (IS_ERR(newid))
+ return PTR_ERR(newid);
+
+ newid->stack[0] = newid->id;
+ newid->css = rootcss;
+ rootcss->id = newid;
+ return 0;
+}
+
+static int alloc_css_id(struct cgroup_subsys *ss, struct cgroup *parent,
+ struct cgroup *child)
+{
+ int subsys_id, i, depth = 0;
+ struct cgroup_subsys_state *parent_css, *child_css;
+ struct css_id *child_id, *parent_id = NULL;
+
+ subsys_id = ss->subsys_id;
+ parent_css = parent->subsys[subsys_id];
+ child_css = child->subsys[subsys_id];
+ depth = css_depth(parent_css) + 1;
+ parent_id = parent_css->id;
+
+ child_id = get_new_cssid(ss, depth);
+ if (IS_ERR(child_id))
+ return PTR_ERR(child_id);
+
+ for (i = 0; i < depth; i++)
+ child_id->stack[i] = parent_id->stack[i];
+ child_id->stack[depth] = child_id->id;
+ /*
+ * child_id->css pointer will be set after this cgroup is available
+ * see cgroup_populate_dir()
+ */
+ rcu_assign_pointer(child_css->id, child_id);
+
+ return 0;
+}
+
+/**
+ * css_lookup - lookup css by id
+ * @ss: cgroup subsys to be looked into.
+ * @id: the id
+ *
+ * Returns pointer to cgroup_subsys_state if there is valid one with id.
+ * NULL if not. Should be called under rcu_read_lock()
+ */
+struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id)
+{
+ struct css_id *cssid = NULL;
+
+ BUG_ON(!ss->use_id);
+ cssid = idr_find(&ss->idr, id);
+
+ if (unlikely(!cssid))
+ return NULL;
+
+ return rcu_dereference(cssid->css);
+}
+
+/**
+ * css_get_next - lookup next cgroup under specified hierarchy.
+ * @ss: pointer to subsystem
+ * @id: current position of iteration.
+ * @root: pointer to css. search tree under this.
+ * @foundid: position of found object.
+ *
+ * Search next css under the specified hierarchy of rootid. Calling under
+ * rcu_read_lock() is necessary. Returns NULL if it reaches the end.
+ */
+struct cgroup_subsys_state *
+css_get_next(struct cgroup_subsys *ss, int id,
+ struct cgroup_subsys_state *root, int *foundid)
+{
+ struct cgroup_subsys_state *ret = NULL;
+ struct css_id *tmp;
+ int tmpid;
+ int rootid = css_id(root);
+ int depth = css_depth(root);
+
+ if (!rootid)
+ return NULL;
+
+ BUG_ON(!ss->use_id);
+ /* fill start point for scan */
+ tmpid = id;
+ while (1) {
+ /*
+ * scan next entry from bitmap(tree), tmpid is updated after
+ * idr_get_next().
+ */
+ spin_lock(&ss->id_lock);
+ tmp = idr_get_next(&ss->idr, &tmpid);
+ spin_unlock(&ss->id_lock);
+
+ if (!tmp)
+ break;
+ if (tmp->depth >= depth && tmp->stack[depth] == rootid) {
+ ret = rcu_dereference(tmp->css);
+ if (ret) {
+ *foundid = tmpid;
+ break;
+ }
+ }
+ /* continue to scan from next id */
+ tmpid = tmpid + 1;
+ }
+ return ret;
+}
+
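
The CSS ID code added above answers ancestry queries in O(1): every css_id carries, in stack[], the ID of each of its ancestors indexed by depth, so css_is_ancestor() only has to compare child->stack[root->depth] with root->id. The userspace sketch below is illustrative only; struct toy_id, new_child() and is_ancestor() are invented stand-ins, and the real code's flexible array member, IDR allocation and RCU handling are omitted.

/*
 * Toy model of the css_id ancestor stack: stack[d] holds the ID of the
 * ancestor at depth d, and stack[depth] is the node's own ID.
 * Build with: cc -o cssid cssid.c
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_id {
    unsigned short id;
    unsigned short depth;
    unsigned short stack[8];    /* fixed size instead of a flexible array */
};

/* Mirrors alloc_css_id(): copy the parent's stack, then record our own ID. */
static struct toy_id *new_child(const struct toy_id *parent, unsigned short id)
{
    struct toy_id *c = calloc(1, sizeof(*c));

    if (!c)
        abort();
    c->depth = parent ? parent->depth + 1 : 0;
    if (parent)
        memcpy(c->stack, parent->stack,
               (parent->depth + 1) * sizeof(c->stack[0]));
    c->id = id;
    c->stack[c->depth] = id;
    return c;
}

/*
 * Mirrors css_is_ancestor(): root is an ancestor of child exactly when the
 * entry recorded at root's depth in child's stack is root's own ID.
 */
static int is_ancestor(const struct toy_id *child, const struct toy_id *root)
{
    if (child->depth < root->depth)
        return 0;
    return child->stack[root->depth] == root->id;
}

int main(void)
{
    struct toy_id *a = new_child(NULL, 1);  /* root, depth 0         */
    struct toy_id *b = new_child(a, 7);     /* depth 1, under a      */
    struct toy_id *c = new_child(b, 9);     /* depth 2, under b      */
    struct toy_id *d = new_child(a, 4);     /* depth 1, sibling of b */

    printf("a ancestor of c: %d\n", is_ancestor(c, a));    /* prints 1 */
    printf("d ancestor of c: %d\n", is_ancestor(c, d));    /* prints 0 */
    free(a); free(b); free(c); free(d);
    return 0;
}
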
diff --git a/kernel/cgroup_debug.c b/kernel/cgroup_debug.c
index daca6209202..0c92d797baa 100644
--- a/kernel/cgroup_debug.c
+++ b/kernel/cgroup_debug.c
@@ -40,9 +40,7 @@ static u64 taskcount_read(struct cgroup *cont, struct cftype *cft)
{
u64 count;
- cgroup_lock();
count = cgroup_task_count(cont);
- cgroup_unlock();
return count;
}
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 79e40f00dcb..395b6974dc8 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -281,7 +281,7 @@ int __ref cpu_down(unsigned int cpu)
goto out;
}
- cpu_clear(cpu, cpu_active_map);
+ set_cpu_active(cpu, false);
/*
* Make sure the all cpus did the reschedule and are not
@@ -296,7 +296,7 @@ int __ref cpu_down(unsigned int cpu)
err = _cpu_down(cpu, 0);
if (cpu_online(cpu))
- cpu_set(cpu, cpu_active_map);
+ set_cpu_active(cpu, true);
out:
cpu_maps_update_done();
@@ -333,7 +333,7 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
goto out_notify;
BUG_ON(!cpu_online(cpu));
- cpu_set(cpu, cpu_active_map);
+ set_cpu_active(cpu, true);
/* Now call notifier in preparation. */
raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index f76db9dcaa0..026faccca86 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -128,10 +128,6 @@ static inline struct cpuset *task_cs(struct task_struct *task)
return container_of(task_subsys_state(task, cpuset_subsys_id),
struct cpuset, css);
}
-struct cpuset_hotplug_scanner {
- struct cgroup_scanner scan;
- struct cgroup *to;
-};
/* bits in struct cpuset flags field */
typedef enum {
@@ -521,6 +517,7 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
return 0;
}
+#ifdef CONFIG_SMP
/*
* Helper routine for generate_sched_domains().
* Do cpusets a, b have overlapping cpus_allowed masks?
@@ -815,6 +812,18 @@ static void do_rebuild_sched_domains(struct work_struct *unused)
put_online_cpus();
}
+#else /* !CONFIG_SMP */
+static void do_rebuild_sched_domains(struct work_struct *unused)
+{
+}
+
+static int generate_sched_domains(struct cpumask **domains,
+ struct sched_domain_attr **attributes)
+{
+ *domains = NULL;
+ return 1;
+}
+#endif /* CONFIG_SMP */
static DECLARE_WORK(rebuild_sched_domains_work, do_rebuild_sched_domains);
@@ -1026,101 +1035,70 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
mutex_unlock(&callback_mutex);
}
+/*
+ * Rebind task's vmas to cpuset's new mems_allowed, and migrate pages to new
+ * nodes if memory_migrate flag is set. Called with cgroup_mutex held.
+ */
+static void cpuset_change_nodemask(struct task_struct *p,
+ struct cgroup_scanner *scan)
+{
+ struct mm_struct *mm;
+ struct cpuset *cs;
+ int migrate;
+ const nodemask_t *oldmem = scan->data;
+
+ mm = get_task_mm(p);
+ if (!mm)
+ return;
+
+ cs = cgroup_cs(scan->cg);
+ migrate = is_memory_migrate(cs);
+
+ mpol_rebind_mm(mm, &cs->mems_allowed);
+ if (migrate)
+ cpuset_migrate_mm(mm, oldmem, &cs->mems_allowed);
+ mmput(mm);
+}
+
static void *cpuset_being_rebound;
/**
* update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
* @cs: the cpuset in which each task's mems_allowed mask needs to be changed
* @oldmem: old mems_allowed of cpuset cs
+ * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
*
* Called with cgroup_mutex held
- * Return 0 if successful, -errno if not.
+ * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
+ * if @heap != NULL.
*/
-static int update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem)
+static void update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem,
+ struct ptr_heap *heap)
{
- struct task_struct *p;
- struct mm_struct **mmarray;
- int i, n, ntasks;
- int migrate;
- int fudge;
- struct cgroup_iter it;
- int retval;
+ struct cgroup_scanner scan;
cpuset_being_rebound = cs; /* causes mpol_dup() rebind */
- fudge = 10; /* spare mmarray[] slots */
- fudge += cpumask_weight(cs->cpus_allowed);/* imagine 1 fork-bomb/cpu */
- retval = -ENOMEM;
-
- /*
- * Allocate mmarray[] to hold mm reference for each task
- * in cpuset cs. Can't kmalloc GFP_KERNEL while holding
- * tasklist_lock. We could use GFP_ATOMIC, but with a
- * few more lines of code, we can retry until we get a big
- * enough mmarray[] w/o using GFP_ATOMIC.
- */
- while (1) {
- ntasks = cgroup_task_count(cs->css.cgroup); /* guess */
- ntasks += fudge;
- mmarray = kmalloc(ntasks * sizeof(*mmarray), GFP_KERNEL);
- if (!mmarray)
- goto done;
- read_lock(&tasklist_lock); /* block fork */
- if (cgroup_task_count(cs->css.cgroup) <= ntasks)
- break; /* got enough */
- read_unlock(&tasklist_lock); /* try again */
- kfree(mmarray);
- }
-
- n = 0;
-
- /* Load up mmarray[] with mm reference for each task in cpuset. */
- cgroup_iter_start(cs->css.cgroup, &it);
- while ((p = cgroup_iter_next(cs->css.cgroup, &it))) {
- struct mm_struct *mm;
-
- if (n >= ntasks) {
- printk(KERN_WARNING
- "Cpuset mempolicy rebind incomplete.\n");
- break;
- }
- mm = get_task_mm(p);
- if (!mm)
- continue;
- mmarray[n++] = mm;
- }
- cgroup_iter_end(cs->css.cgroup, &it);
- read_unlock(&tasklist_lock);
+ scan.cg = cs->css.cgroup;
+ scan.test_task = NULL;
+ scan.process_task = cpuset_change_nodemask;
+ scan.heap = heap;
+ scan.data = (nodemask_t *)oldmem;
/*
- * Now that we've dropped the tasklist spinlock, we can
- * rebind the vma mempolicies of each mm in mmarray[] to their
- * new cpuset, and release that mm. The mpol_rebind_mm()
- * call takes mmap_sem, which we couldn't take while holding
- * tasklist_lock. Forks can happen again now - the mpol_dup()
- * cpuset_being_rebound check will catch such forks, and rebind
- * their vma mempolicies too. Because we still hold the global
- * cgroup_mutex, we know that no other rebind effort will
- * be contending for the global variable cpuset_being_rebound.
+ * The mpol_rebind_mm() call takes mmap_sem, which we couldn't
+ * take while holding tasklist_lock. Forks can happen - the
+ * mpol_dup() cpuset_being_rebound check will catch such forks,
+ * and rebind their vma mempolicies too. Because we still hold
+ * the global cgroup_mutex, we know that no other rebind effort
+ * will be contending for the global variable cpuset_being_rebound.
* It's ok if we rebind the same mm twice; mpol_rebind_mm()
* is idempotent. Also migrate pages in each mm to new nodes.
*/
- migrate = is_memory_migrate(cs);
- for (i = 0; i < n; i++) {
- struct mm_struct *mm = mmarray[i];
-
- mpol_rebind_mm(mm, &cs->mems_allowed);
- if (migrate)
- cpuset_migrate_mm(mm, oldmem, &cs->mems_allowed);
- mmput(mm);
- }
+ cgroup_scan_tasks(&scan);
/* We're done rebinding vmas to this cpuset's new mems_allowed. */
- kfree(mmarray);
cpuset_being_rebound = NULL;
- retval = 0;
-done:
- return retval;
}
/*
@@ -1141,6 +1119,7 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
{
nodemask_t oldmem;
int retval;
+ struct ptr_heap heap;
/*
* top_cpuset.mems_allowed tracks node_stats[N_HIGH_MEMORY];
@@ -1175,12 +1154,18 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
if (retval < 0)
goto done;
+ retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
+ if (retval < 0)
+ goto done;
+
mutex_lock(&callback_mutex);
cs->mems_allowed = trialcs->mems_allowed;
cs->mems_generation = cpuset_mems_generation++;
mutex_unlock(&callback_mutex);
- retval = update_tasks_nodemask(cs, &oldmem);
+ update_tasks_nodemask(cs, &oldmem, &heap);
+
+ heap_free(&heap);
done:
return retval;
}
@@ -1192,8 +1177,10 @@ int current_cpuset_is_being_rebound(void)
static int update_relax_domain_level(struct cpuset *cs, s64 val)
{
+#ifdef CONFIG_SMP
if (val < -1 || val >= SD_LV_MAX)
return -EINVAL;
+#endif
if (val != cs->relax_domain_level) {
cs->relax_domain_level = val;
@@ -1355,19 +1342,22 @@ static int cpuset_can_attach(struct cgroup_subsys *ss,
struct cgroup *cont, struct task_struct *tsk)
{
struct cpuset *cs = cgroup_cs(cont);
- int ret = 0;
if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
return -ENOSPC;
- if (tsk->flags & PF_THREAD_BOUND) {
- mutex_lock(&callback_mutex);
- if (!cpumask_equal(&tsk->cpus_allowed, cs->cpus_allowed))
- ret = -EINVAL;
- mutex_unlock(&callback_mutex);
- }
+ /*
+ * Kthreads bound to specific cpus cannot be moved to a new cpuset; we
+ * cannot change their cpu affinity and isolating such threads by their
+ * set of allowed nodes is unnecessary. Thus, cpusets are not
+ * applicable for such threads. This prevents checking for success of
+ * set_cpus_allowed_ptr() on all attached tasks before cpus_allowed may
+ * be changed.
+ */
+ if (tsk->flags & PF_THREAD_BOUND)
+ return -EINVAL;
- return ret < 0 ? ret : security_task_setscheduler(tsk, 0, NULL);
+ return security_task_setscheduler(tsk, 0, NULL);
}
static void cpuset_attach(struct cgroup_subsys *ss,
@@ -1706,6 +1696,7 @@ static struct cftype files[] = {
.read_u64 = cpuset_read_u64,
.write_u64 = cpuset_write_u64,
.private = FILE_MEMORY_PRESSURE,
+ .mode = S_IRUGO,
},
{
@@ -1913,10 +1904,9 @@ int __init cpuset_init(void)
static void cpuset_do_move_task(struct task_struct *tsk,
struct cgroup_scanner *scan)
{
- struct cpuset_hotplug_scanner *chsp;
+ struct cgroup *new_cgroup = scan->data;
- chsp = container_of(scan, struct cpuset_hotplug_scanner, scan);
- cgroup_attach_task(chsp->to, tsk);
+ cgroup_attach_task(new_cgroup, tsk);
}
/**
@@ -1932,15 +1922,15 @@ static void cpuset_do_move_task(struct task_struct *tsk,
*/
static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to)
{
- struct cpuset_hotplug_scanner scan;
+ struct cgroup_scanner scan;
- scan.scan.cg = from->css.cgroup;
- scan.scan.test_task = NULL; /* select all tasks in cgroup */
- scan.scan.process_task = cpuset_do_move_task;
- scan.scan.heap = NULL;
- scan.to = to->css.cgroup;
+ scan.cg = from->css.cgroup;
+ scan.test_task = NULL; /* select all tasks in cgroup */
+ scan.process_task = cpuset_do_move_task;
+ scan.heap = NULL;
+ scan.data = to->css.cgroup;
- if (cgroup_scan_tasks(&scan.scan))
+ if (cgroup_scan_tasks(&scan))
printk(KERN_ERR "move_member_tasks_to_cpuset: "
"cgroup_scan_tasks failed\n");
}
@@ -2033,7 +2023,7 @@ static void scan_for_empty_cpusets(struct cpuset *root)
remove_tasks_in_empty_cpuset(cp);
else {
update_tasks_cpumask(cp, NULL);
- update_tasks_nodemask(cp, &oldmems);
+ update_tasks_nodemask(cp, &oldmems, NULL);
}
}
}
@@ -2069,7 +2059,9 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
}
cgroup_lock();
+ mutex_lock(&callback_mutex);
cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask);
+ mutex_unlock(&callback_mutex);
scan_for_empty_cpusets(&top_cpuset);
ndoms = generate_sched_domains(&doms, &attr);
cgroup_unlock();
@@ -2092,11 +2084,12 @@ static int cpuset_track_online_nodes(struct notifier_block *self,
cgroup_lock();
switch (action) {
case MEM_ONLINE:
- top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
- break;
case MEM_OFFLINE:
+ mutex_lock(&callback_mutex);
top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
- scan_for_empty_cpusets(&top_cpuset);
+ mutex_unlock(&callback_mutex);
+ if (action == MEM_OFFLINE)
+ scan_for_empty_cpusets(&top_cpuset);
break;
default:
break;
@@ -2206,26 +2199,24 @@ static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
}
/**
- * cpuset_zone_allowed_softwall - Can we allocate on zone z's memory node?
- * @z: is this zone on an allowed node?
+ * cpuset_node_allowed_softwall - Can we allocate on a memory node?
+ * @node: is this an allowed node?
* @gfp_mask: memory allocation flags
*
- * If we're in interrupt, yes, we can always allocate. If
- * __GFP_THISNODE is set, yes, we can always allocate. If zone
- * z's node is in our tasks mems_allowed, yes. If it's not a
- * __GFP_HARDWALL request and this zone's nodes is in the nearest
- * hardwalled cpuset ancestor to this tasks cpuset, yes.
- * If the task has been OOM killed and has access to memory reserves
- * as specified by the TIF_MEMDIE flag, yes.
+ * If we're in interrupt, yes, we can always allocate. If __GFP_THISNODE is
+ * set, yes, we can always allocate. If node is in our task's mems_allowed,
+ * yes. If it's not a __GFP_HARDWALL request and this node is in the nearest
+ * hardwalled cpuset ancestor to this task's cpuset, yes. If the task has been
+ * OOM killed and has access to memory reserves as specified by the TIF_MEMDIE
+ * flag, yes.
* Otherwise, no.
*
- * If __GFP_HARDWALL is set, cpuset_zone_allowed_softwall()
- * reduces to cpuset_zone_allowed_hardwall(). Otherwise,
- * cpuset_zone_allowed_softwall() might sleep, and might allow a zone
- * from an enclosing cpuset.
+ * If __GFP_HARDWALL is set, cpuset_node_allowed_softwall() reduces to
+ * cpuset_node_allowed_hardwall(). Otherwise, cpuset_node_allowed_softwall()
+ * might sleep, and might allow a node from an enclosing cpuset.
*
- * cpuset_zone_allowed_hardwall() only handles the simpler case of
- * hardwall cpusets, and never sleeps.
+ * cpuset_node_allowed_hardwall() only handles the simpler case of hardwall
+ * cpusets, and never sleeps.
*
* The __GFP_THISNODE placement logic is really handled elsewhere,
* by forcibly using a zonelist starting at a specified node, and by
@@ -2264,20 +2255,17 @@ static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
* GFP_USER - only nodes in current tasks mems allowed ok.
*
* Rule:
- * Don't call cpuset_zone_allowed_softwall if you can't sleep, unless you
+ * Don't call cpuset_node_allowed_softwall if you can't sleep, unless you
* pass in the __GFP_HARDWALL flag set in gfp_flag, which disables
* the code that might scan up ancestor cpusets and sleep.
*/
-
-int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
+int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
- int node; /* node that zone z is on */
const struct cpuset *cs; /* current cpuset ancestors */
int allowed; /* is allocation in zone z allowed? */
if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
return 1;
- node = zone_to_nid(z);
might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
if (node_isset(node, current->mems_allowed))
return 1;
@@ -2306,15 +2294,15 @@ int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
}
/*
- * cpuset_zone_allowed_hardwall - Can we allocate on zone z's memory node?
- * @z: is this zone on an allowed node?
+ * cpuset_node_allowed_hardwall - Can we allocate on a memory node?
+ * @node: is this an allowed node?
* @gfp_mask: memory allocation flags
*
- * If we're in interrupt, yes, we can always allocate.
- * If __GFP_THISNODE is set, yes, we can always allocate. If zone
- * z's node is in our tasks mems_allowed, yes. If the task has been
- * OOM killed and has access to memory reserves as specified by the
- * TIF_MEMDIE flag, yes. Otherwise, no.
+ * If we're in interrupt, yes, we can always allocate. If __GFP_THISNODE is
+ * set, yes, we can always allocate. If node is in our task's mems_allowed,
+ * yes. If the task has been OOM killed and has access to memory reserves as
+ * specified by the TIF_MEMDIE flag, yes.
+ * Otherwise, no.
*
* The __GFP_THISNODE placement logic is really handled elsewhere,
* by forcibly using a zonelist starting at a specified node, and by
@@ -2322,20 +2310,16 @@ int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
* any node on the zonelist except the first. By the time any such
* calls get to this routine, we should just shut up and say 'yes'.
*
- * Unlike the cpuset_zone_allowed_softwall() variant, above,
- * this variant requires that the zone be in the current tasks
+ * Unlike the cpuset_node_allowed_softwall() variant, above,
+ * this variant requires that the node be in the current task's
* mems_allowed or that we're in interrupt. It does not scan up the
* cpuset hierarchy for the nearest enclosing mem_exclusive cpuset.
* It never sleeps.
*/
-
-int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
+int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
- int node; /* node that zone z is on */
-
if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
return 1;
- node = zone_to_nid(z);
if (node_isset(node, current->mems_allowed))
return 1;
/*
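
Both cpuset hunks that touch struct cgroup_scanner follow the same refactor: rather than embedding the scanner in a wrapper (the removed cpuset_hotplug_scanner) and recovering the wrapper with container_of(), per-scan context now travels in the scanner's generic data field. The standalone sketch below contrasts the two styles; struct scanner, move_task_embedded() and move_task_data() are simplified, invented stand-ins for the cgroup_scanner API.

/*
 * Two ways to hand per-scan context to a per-task callback.
 * Build with: cc -o scanner scanner.c
 */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct task { const char *name; };

struct scanner {
    void (*process_task)(struct task *t, struct scanner *s);
    void *data;                     /* generic context, as in scan->data */
};

/* Old style: embed the scanner and use container_of() to find the wrapper. */
struct move_scanner {
    struct scanner scan;
    const char *destination;
};

static void move_task_embedded(struct task *t, struct scanner *s)
{
    struct move_scanner *ms = container_of(s, struct move_scanner, scan);

    printf("moving %s to %s (container_of style)\n", t->name,
           ms->destination);
}

/* New style: the destination travels in the generic data pointer. */
static void move_task_data(struct task *t, struct scanner *s)
{
    const char *destination = s->data;

    printf("moving %s to %s (scan->data style)\n", t->name, destination);
}

static void scan_tasks(struct scanner *s, struct task *tasks, int n)
{
    for (int i = 0; i < n; i++)
        s->process_task(&tasks[i], s);
}

int main(void)
{
    struct task tasks[] = { { "init" }, { "kswapd" } };
    struct move_scanner ms = {
        .scan = { .process_task = move_task_embedded },
        .destination = "cpuset_B",
    };
    struct scanner s = {
        .process_task = move_task_data,
        .data = (void *)"cpuset_B",
    };

    scan_tasks(&ms.scan, tasks, 2);
    scan_tasks(&s, tasks, 2);
    return 0;
}

The generic data field is what lets cpuset_change_nodemask() receive the old nodemask and cpuset_do_move_task() receive the destination cgroup without each caller defining its own wrapper type.
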
diff --git a/kernel/exec_domain.c b/kernel/exec_domain.c
index 667c841c295..c35452cadde 100644
--- a/kernel/exec_domain.c
+++ b/kernel/exec_domain.c
@@ -18,6 +18,7 @@
#include <linux/syscalls.h>
#include <linux/sysctl.h>
#include <linux/types.h>
+#include <linux/fs_struct.h>
static void default_handler(int, struct pt_regs *);
@@ -145,28 +146,6 @@ __set_personality(u_long personality)
return 0;
}
- if (atomic_read(&current->fs->count) != 1) {
- struct fs_struct *fsp, *ofsp;
-
- fsp = copy_fs_struct(current->fs);
- if (fsp == NULL) {
- module_put(ep->module);
- return -ENOMEM;
- }
-
- task_lock(current);
- ofsp = current->fs;
- current->fs = fsp;
- task_unlock(current);
-
- put_fs_struct(ofsp);
- }
-
- /*
- * At that point we are guaranteed to be the sole owner of
- * current->fs.
- */
-
current->personality = personality;
oep = current_thread_info()->exec_domain;
current_thread_info()->exec_domain = ep;
diff --git a/kernel/exit.c b/kernel/exit.c
index 167e1e3ad7c..abf9cf3b95c 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -46,6 +46,7 @@
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/tracehook.h>
+#include <linux/fs_struct.h>
#include <linux/init_task.h>
#include <trace/sched.h>
@@ -61,11 +62,6 @@ DEFINE_TRACE(sched_process_wait);
static void exit_mm(struct task_struct * tsk);
-static inline int task_detached(struct task_struct *p)
-{
- return p->exit_signal == -1;
-}
-
static void __unhash_process(struct task_struct *p)
{
nr_threads--;
@@ -362,16 +358,12 @@ static void reparent_to_kthreadd(void)
void __set_special_pids(struct pid *pid)
{
struct task_struct *curr = current->group_leader;
- pid_t nr = pid_nr(pid);
- if (task_session(curr) != pid) {
+ if (task_session(curr) != pid)
change_pid(curr, PIDTYPE_SID, pid);
- set_task_session(curr, nr);
- }
- if (task_pgrp(curr) != pid) {
+
+ if (task_pgrp(curr) != pid)
change_pid(curr, PIDTYPE_PGID, pid);
- set_task_pgrp(curr, nr);
- }
}
static void set_special_pids(struct pid *pid)
@@ -429,7 +421,6 @@ EXPORT_SYMBOL(disallow_signal);
void daemonize(const char *name, ...)
{
va_list args;
- struct fs_struct *fs;
sigset_t blocked;
va_start(args, name);
@@ -462,11 +453,7 @@ void daemonize(const char *name, ...)
/* Become as one with the init task */
- exit_fs(current); /* current->fs->count--; */
- fs = init_task.fs;
- current->fs = fs;
- atomic_inc(&fs->count);
-
+ daemonize_fs_struct();
exit_files(current);
current->files = init_task.files;
atomic_inc(&current->files->count);
@@ -565,30 +552,6 @@ void exit_files(struct task_struct *tsk)
}
}
-void put_fs_struct(struct fs_struct *fs)
-{
- /* No need to hold fs->lock if we are killing it */
- if (atomic_dec_and_test(&fs->count)) {
- path_put(&fs->root);
- path_put(&fs->pwd);
- kmem_cache_free(fs_cachep, fs);
- }
-}
-
-void exit_fs(struct task_struct *tsk)
-{
- struct fs_struct * fs = tsk->fs;
-
- if (fs) {
- task_lock(tsk);
- tsk->fs = NULL;
- task_unlock(tsk);
- put_fs_struct(fs);
- }
-}
-
-EXPORT_SYMBOL_GPL(exit_fs);
-
#ifdef CONFIG_MM_OWNER
/*
* Task p is exiting and it owned mm, lets find a new owner for it
@@ -732,119 +695,6 @@ static void exit_mm(struct task_struct * tsk)
}
/*
- * Return nonzero if @parent's children should reap themselves.
- *
- * Called with write_lock_irq(&tasklist_lock) held.
- */
-static int ignoring_children(struct task_struct *parent)
-{
- int ret;
- struct sighand_struct *psig = parent->sighand;
- unsigned long flags;
- spin_lock_irqsave(&psig->siglock, flags);
- ret = (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
- (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT));
- spin_unlock_irqrestore(&psig->siglock, flags);
- return ret;
-}
-
-/*
- * Detach all tasks we were using ptrace on.
- * Any that need to be release_task'd are put on the @dead list.
- *
- * Called with write_lock(&tasklist_lock) held.
- */
-static void ptrace_exit(struct task_struct *parent, struct list_head *dead)
-{
- struct task_struct *p, *n;
- int ign = -1;
-
- list_for_each_entry_safe(p, n, &parent->ptraced, ptrace_entry) {
- __ptrace_unlink(p);
-
- if (p->exit_state != EXIT_ZOMBIE)
- continue;
-
- /*
- * If it's a zombie, our attachedness prevented normal
- * parent notification or self-reaping. Do notification
- * now if it would have happened earlier. If it should
- * reap itself, add it to the @dead list. We can't call
- * release_task() here because we already hold tasklist_lock.
- *
- * If it's our own child, there is no notification to do.
- * But if our normal children self-reap, then this child
- * was prevented by ptrace and we must reap it now.
- */
- if (!task_detached(p) && thread_group_empty(p)) {
- if (!same_thread_group(p->real_parent, parent))
- do_notify_parent(p, p->exit_signal);
- else {
- if (ign < 0)
- ign = ignoring_children(parent);
- if (ign)
- p->exit_signal = -1;
- }
- }
-
- if (task_detached(p)) {
- /*
- * Mark it as in the process of being reaped.
- */
- p->exit_state = EXIT_DEAD;
- list_add(&p->ptrace_entry, dead);
- }
- }
-}
-
-/*
- * Finish up exit-time ptrace cleanup.
- *
- * Called without locks.
- */
-static void ptrace_exit_finish(struct task_struct *parent,
- struct list_head *dead)
-{
- struct task_struct *p, *n;
-
- BUG_ON(!list_empty(&parent->ptraced));
-
- list_for_each_entry_safe(p, n, dead, ptrace_entry) {
- list_del_init(&p->ptrace_entry);
- release_task(p);
- }
-}
-
-static void reparent_thread(struct task_struct *p, struct task_struct *father)
-{
- if (p->pdeath_signal)
- /* We already hold the tasklist_lock here. */
- group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);
-
- list_move_tail(&p->sibling, &p->real_parent->children);
-
- /* If this is a threaded reparent there is no need to
- * notify anyone anything has happened.
- */
- if (same_thread_group(p->real_parent, father))
- return;
-
- /* We don't want people slaying init. */
- if (!task_detached(p))
- p->exit_signal = SIGCHLD;
-
- /* If we'd notified the old parent about this child's death,
- * also notify the new parent.
- */
- if (!ptrace_reparented(p) &&
- p->exit_state == EXIT_ZOMBIE &&
- !task_detached(p) && thread_group_empty(p))
- do_notify_parent(p, p->exit_signal);
-
- kill_orphaned_pgrp(p, father);
-}
-
-/*
* When we die, we re-parent all our children.
* Try to give them to another thread in our thread
* group, and if no such member exists, give it to
@@ -883,17 +733,51 @@ static struct task_struct *find_new_reaper(struct task_struct *father)
return pid_ns->child_reaper;
}
+/*
+* Any that need to be release_task'd are put on the @dead list.
+ */
+static void reparent_thread(struct task_struct *father, struct task_struct *p,
+ struct list_head *dead)
+{
+ if (p->pdeath_signal)
+ group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);
+
+ list_move_tail(&p->sibling, &p->real_parent->children);
+
+ if (task_detached(p))
+ return;
+ /*
+ * If this is a threaded reparent there is no need to
+ * notify anyone anything has happened.
+ */
+ if (same_thread_group(p->real_parent, father))
+ return;
+
+ /* We don't want people slaying init. */
+ p->exit_signal = SIGCHLD;
+
+ /* If it has exited notify the new parent about this child's death. */
+ if (!p->ptrace &&
+ p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
+ do_notify_parent(p, p->exit_signal);
+ if (task_detached(p)) {
+ p->exit_state = EXIT_DEAD;
+ list_move_tail(&p->sibling, dead);
+ }
+ }
+
+ kill_orphaned_pgrp(p, father);
+}
+
static void forget_original_parent(struct task_struct *father)
{
struct task_struct *p, *n, *reaper;
- LIST_HEAD(ptrace_dead);
+ LIST_HEAD(dead_children);
+
+ exit_ptrace(father);
write_lock_irq(&tasklist_lock);
reaper = find_new_reaper(father);
- /*
- * First clean up ptrace if we were using it.
- */
- ptrace_exit(father, &ptrace_dead);
list_for_each_entry_safe(p, n, &father->children, sibling) {
p->real_parent = reaper;
@@ -901,13 +785,16 @@ static void forget_original_parent(struct task_struct *father)
BUG_ON(p->ptrace);
p->parent = p->real_parent;
}
- reparent_thread(p, father);
+ reparent_thread(father, p, &dead_children);
}
-
write_unlock_irq(&tasklist_lock);
+
BUG_ON(!list_empty(&father->children));
- ptrace_exit_finish(father, &ptrace_dead);
+ list_for_each_entry_safe(p, n, &dead_children, sibling) {
+ list_del_init(&p->sibling);
+ release_task(p);
+ }
}
/*
@@ -950,8 +837,7 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
*/
if (tsk->exit_signal != SIGCHLD && !task_detached(tsk) &&
(tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
- tsk->self_exec_id != tsk->parent_exec_id) &&
- !capable(CAP_KILL))
+ tsk->self_exec_id != tsk->parent_exec_id))
tsk->exit_signal = SIGCHLD;
signal = tracehook_notify_death(tsk, &cookie, group_dead);
@@ -1037,6 +923,8 @@ NORET_TYPE void do_exit(long code)
schedule();
}
+ exit_irq_thread();
+
exit_signals(tsk); /* sets PF_EXITING */
/*
* tsk->flags are checked in the futex code to protect against
@@ -1417,6 +1305,18 @@ static int wait_task_zombie(struct task_struct *p, int options,
return retval;
}
+static int *task_stopped_code(struct task_struct *p, bool ptrace)
+{
+ if (ptrace) {
+ if (task_is_stopped_or_traced(p))
+ return &p->exit_code;
+ } else {
+ if (p->signal->flags & SIGNAL_STOP_STOPPED)
+ return &p->signal->group_exit_code;
+ }
+ return NULL;
+}
+
/*
* Handle sys_wait4 work for one task in state TASK_STOPPED. We hold
* read_lock(&tasklist_lock) on entry. If we return zero, we still hold
@@ -1427,7 +1327,7 @@ static int wait_task_stopped(int ptrace, struct task_struct *p,
int options, struct siginfo __user *infop,
int __user *stat_addr, struct rusage __user *ru)
{
- int retval, exit_code, why;
+ int retval, exit_code, *p_code, why;
uid_t uid = 0; /* unneeded, required by compiler */
pid_t pid;
@@ -1437,22 +1337,16 @@ static int wait_task_stopped(int ptrace, struct task_struct *p,
exit_code = 0;
spin_lock_irq(&p->sighand->siglock);
- if (unlikely(!task_is_stopped_or_traced(p)))
- goto unlock_sig;
-
- if (!ptrace && p->signal->group_stop_count > 0)
- /*
- * A group stop is in progress and this is the group leader.
- * We won't report until all threads have stopped.
- */
+ p_code = task_stopped_code(p, ptrace);
+ if (unlikely(!p_code))
goto unlock_sig;
- exit_code = p->exit_code;
+ exit_code = *p_code;
if (!exit_code)
goto unlock_sig;
if (!unlikely(options & WNOWAIT))
- p->exit_code = 0;
+ *p_code = 0;
/* don't need the RCU readlock here as we're holding a spinlock */
uid = __task_cred(p)->uid;
@@ -1608,7 +1502,7 @@ static int wait_consider_task(struct task_struct *parent, int ptrace,
*/
*notask_error = 0;
- if (task_is_stopped_or_traced(p))
+ if (task_stopped_code(p, ptrace))
return wait_task_stopped(ptrace, p, options,
infop, stat_addr, ru);
@@ -1812,7 +1706,7 @@ SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
pid = find_get_pid(-upid);
} else if (upid == 0) {
type = PIDTYPE_PGID;
- pid = get_pid(task_pgrp(current));
+ pid = get_task_pid(current, PIDTYPE_PGID);
} else /* upid > 0 */ {
type = PIDTYPE_PID;
pid = find_get_pid(upid);
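
The task_stopped_code() helper introduced above lets wait_task_stopped() serve both cases through one code path: for a ptrace stop it returns a pointer to the per-task exit_code, for a group stop a pointer to the shared group_exit_code, and NULL when there is nothing to report; the caller then reads and, unless WNOWAIT was passed, clears the code through that pointer. A minimal userspace sketch of the pattern follows; struct toy_task, struct toy_signal and report_stop() are invented for illustration.

/*
 * Selecting which "stop code" field applies, then reading and clearing it
 * through one pointer, as wait_task_stopped() does after this patch.
 * Build with: cc -o stopcode stopcode.c
 */
#include <stdio.h>
#include <stdbool.h>

struct toy_signal {
    bool group_stopped;
    int group_exit_code;        /* shared by the whole thread group */
};

struct toy_task {
    bool stopped_or_traced;
    int exit_code;              /* per-task, reported to a ptracer */
    struct toy_signal *signal;
};

/* Mirrors task_stopped_code(): pick the field that applies, or NULL. */
static int *task_stopped_code(struct toy_task *p, bool ptrace)
{
    if (ptrace) {
        if (p->stopped_or_traced)
            return &p->exit_code;
    } else {
        if (p->signal->group_stopped)
            return &p->signal->group_exit_code;
    }
    return NULL;
}

static void report_stop(struct toy_task *p, bool ptrace, bool nowait)
{
    int *p_code = task_stopped_code(p, ptrace);

    if (!p_code || !*p_code) {
        printf("nothing to report (ptrace=%d)\n", ptrace);
        return;
    }
    printf("reporting stop code %d (ptrace=%d)\n", *p_code, ptrace);
    if (!nowait)
        *p_code = 0;            /* consume the event, as !WNOWAIT does */
}

int main(void)
{
    struct toy_signal sig = { .group_stopped = true, .group_exit_code = 19 };
    struct toy_task t = {
        .stopped_or_traced = true,
        .exit_code = 5,
        .signal = &sig,
    };

    report_stop(&t, true, false);   /* ptracer sees per-task exit_code */
    report_stop(&t, false, false);  /* parent sees group_exit_code     */
    report_stop(&t, false, false);  /* already consumed -> nothing     */
    return 0;
}
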
diff --git a/kernel/extable.c b/kernel/extable.c
index b54a6017b6b..7f8f263f852 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -52,6 +52,14 @@ const struct exception_table_entry *search_exception_tables(unsigned long addr)
return e;
}
+static inline int init_kernel_text(unsigned long addr)
+{
+ if (addr >= (unsigned long)_sinittext &&
+ addr <= (unsigned long)_einittext)
+ return 1;
+ return 0;
+}
+
int core_kernel_text(unsigned long addr)
{
if (addr >= (unsigned long)_stext &&
@@ -59,8 +67,7 @@ int core_kernel_text(unsigned long addr)
return 1;
if (system_state == SYSTEM_BOOTING &&
- addr >= (unsigned long)_sinittext &&
- addr <= (unsigned long)_einittext)
+ init_kernel_text(addr))
return 1;
return 0;
}
@@ -69,14 +76,26 @@ int __kernel_text_address(unsigned long addr)
{
if (core_kernel_text(addr))
return 1;
- return __module_text_address(addr) != NULL;
+ if (is_module_text_address(addr))
+ return 1;
+ /*
+ * There might be init symbols in saved stacktraces.
+ * Give those symbols a chance to be printed in
+ * backtraces (such as lockdep traces).
+ *
+ * Since we are after the module-symbols check, there's
+ * no danger of address overlap:
+ */
+ if (init_kernel_text(addr))
+ return 1;
+ return 0;
}
int kernel_text_address(unsigned long addr)
{
if (core_kernel_text(addr))
return 1;
- return module_text_address(addr) != NULL;
+ return is_module_text_address(addr);
}
/*
@@ -92,5 +111,5 @@ int func_ptr_is_kernel_text(void *ptr)
addr = (unsigned long) dereference_function_descriptor(ptr);
if (core_kernel_text(addr))
return 1;
- return module_text_address(addr) != NULL;
+ return is_module_text_address(addr);
}
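
Usage sketch (not part of the patch): after this change kernel_text_address() covers core text plus module text via is_module_text_address(), while __kernel_text_address() additionally admits init text for saved backtraces. A hypothetical caller symbolizing a stored return address might look like the following; dump_saved_ip() is an illustrative name, not an existing helper.

#include <linux/kernel.h>
#include <linux/kallsyms.h>

static void dump_saved_ip(unsigned long ip)
{
	/* Only try to symbolize addresses that map to kernel or module text. */
	if (!kernel_text_address(ip)) {
		printk(KERN_DEBUG "ip %#lx is not in kernel text\n", ip);
		return;
	}
	print_ip_sym(ip);	/* prints "[<address>] symbol+offset/size" */
}
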
diff --git a/kernel/fork.c b/kernel/fork.c
index 6715ebc3761..660c2b8765b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -60,6 +60,7 @@
#include <linux/tty.h>
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
+#include <linux/fs_struct.h>
#include <trace/sched.h>
#include <linux/magic.h>
@@ -284,7 +285,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
mm->free_area_cache = oldmm->mmap_base;
mm->cached_hole_size = ~0UL;
mm->map_count = 0;
- cpus_clear(mm->cpu_vm_mask);
+ cpumask_clear(mm_cpumask(mm));
mm->mm_rb = RB_ROOT;
rb_link = &mm->mm_rb.rb_node;
rb_parent = NULL;
@@ -681,38 +682,21 @@ fail_nomem:
return retval;
}
-static struct fs_struct *__copy_fs_struct(struct fs_struct *old)
-{
- struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
- /* We don't need to lock fs - think why ;-) */
- if (fs) {
- atomic_set(&fs->count, 1);
- rwlock_init(&fs->lock);
- fs->umask = old->umask;
- read_lock(&old->lock);
- fs->root = old->root;
- path_get(&old->root);
- fs->pwd = old->pwd;
- path_get(&old->pwd);
- read_unlock(&old->lock);
- }
- return fs;
-}
-
-struct fs_struct *copy_fs_struct(struct fs_struct *old)
-{
- return __copy_fs_struct(old);
-}
-
-EXPORT_SYMBOL_GPL(copy_fs_struct);
-
static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
{
+ struct fs_struct *fs = current->fs;
if (clone_flags & CLONE_FS) {
- atomic_inc(&current->fs->count);
+ /* tsk->fs is already what we want */
+ write_lock(&fs->lock);
+ if (fs->in_exec) {
+ write_unlock(&fs->lock);
+ return -EAGAIN;
+ }
+ fs->users++;
+ write_unlock(&fs->lock);
return 0;
}
- tsk->fs = __copy_fs_struct(current->fs);
+ tsk->fs = copy_fs_struct(fs);
if (!tsk->fs)
return -ENOMEM;
return 0;
@@ -841,6 +825,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
atomic_set(&sig->live, 1);
init_waitqueue_head(&sig->wait_chldexit);
sig->flags = 0;
+ if (clone_flags & CLONE_NEWPID)
+ sig->flags |= SIGNAL_UNKILLABLE;
sig->group_exit_code = 0;
sig->group_exit_task = NULL;
sig->group_stop_count = 0;
@@ -1125,7 +1111,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
goto bad_fork_cleanup_mm;
if ((retval = copy_io(clone_flags, p)))
goto bad_fork_cleanup_namespaces;
- retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
+ retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
if (retval)
goto bad_fork_cleanup_io;
@@ -1263,8 +1249,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->signal->leader_pid = pid;
tty_kref_put(p->signal->tty);
p->signal->tty = tty_kref_get(current->signal->tty);
- set_task_pgrp(p, task_pgrp_nr(current));
- set_task_session(p, task_session_nr(current));
attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
attach_pid(p, PIDTYPE_SID, task_session(current));
list_add_tail_rcu(&p->tasks, &init_task.tasks);
@@ -1488,6 +1472,7 @@ void __init proc_caches_init(void)
mm_cachep = kmem_cache_create("mm_struct",
sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
mmap_init();
}
@@ -1543,12 +1528,16 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
struct fs_struct *fs = current->fs;
- if ((unshare_flags & CLONE_FS) &&
- (fs && atomic_read(&fs->count) > 1)) {
- *new_fsp = __copy_fs_struct(current->fs);
- if (!*new_fsp)
- return -ENOMEM;
- }
+ if (!(unshare_flags & CLONE_FS) || !fs)
+ return 0;
+
+ /* don't need lock here; in the worst case we'll do useless copy */
+ if (fs->users == 1)
+ return 0;
+
+ *new_fsp = copy_fs_struct(fs);
+ if (!*new_fsp)
+ return -ENOMEM;
return 0;
}
@@ -1664,8 +1653,13 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
if (new_fs) {
fs = current->fs;
+ write_lock(&fs->lock);
current->fs = new_fs;
- new_fs = fs;
+ if (--fs->users)
+ new_fs = NULL;
+ else
+ new_fs = fs;
+ write_unlock(&fs->lock);
}
if (new_mm) {
@@ -1704,7 +1698,7 @@ bad_unshare_cleanup_sigh:
bad_unshare_cleanup_fs:
if (new_fs)
- put_fs_struct(new_fs);
+ free_fs_struct(new_fs);
bad_unshare_cleanup_thread:
bad_unshare_out:
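
For context, a hedged userspace-side sketch (not part of the patch): the fs->users counting that copy_fs()/unshare_fs() now rely on is what backs clone(CLONE_FS) sharing and its undoing via unshare(2). A minimal illustration:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/*
	 * Drop out of the shared fs_struct: the kernel side above either
	 * keeps the current one (users == 1) or calls copy_fs_struct().
	 */
	if (unshare(CLONE_FS) != 0) {
		perror("unshare(CLONE_FS)");
		return 1;
	}
	/* This chdir() no longer affects tasks we used to share fs with. */
	if (chdir("/tmp") != 0)
		perror("chdir");
	return 0;
}
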
diff --git a/kernel/futex.c b/kernel/futex.c
index 438701adce2..6b50a024bca 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -114,7 +114,9 @@ struct futex_q {
};
/*
- * Split the global futex_lock into every hash list lock.
+ * Hash buckets are shared by all the futex_keys that hash to the same
+ * location. Each key may have multiple futex_q structures, one for each task
+ * waiting on a futex.
*/
struct futex_hash_bucket {
spinlock_t lock;
@@ -189,8 +191,7 @@ static void drop_futex_key_refs(union futex_key *key)
/**
* get_futex_key - Get parameters which are the keys for a futex.
* @uaddr: virtual address of the futex
- * @shared: NULL for a PROCESS_PRIVATE futex,
- * &current->mm->mmap_sem for a PROCESS_SHARED futex
+ * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
* @key: address where result is stored.
*
* Returns a negative error code or 0
@@ -200,9 +201,7 @@ static void drop_futex_key_refs(union futex_key *key)
* offset_within_page). For private mappings, it's (uaddr, current->mm).
* We can usually work out the index without swapping in the page.
*
- * fshared is NULL for PROCESS_PRIVATE futexes
- * For other futexes, it points to &current->mm->mmap_sem and
- * caller must have taken the reader lock. but NOT any spinlocks.
+ * lock_page() might sleep, the caller should not hold a spinlock.
*/
static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
{
@@ -299,41 +298,6 @@ static int get_futex_value_locked(u32 *dest, u32 __user *from)
return ret ? -EFAULT : 0;
}
-/*
- * Fault handling.
- */
-static int futex_handle_fault(unsigned long address, int attempt)
-{
- struct vm_area_struct * vma;
- struct mm_struct *mm = current->mm;
- int ret = -EFAULT;
-
- if (attempt > 2)
- return ret;
-
- down_read(&mm->mmap_sem);
- vma = find_vma(mm, address);
- if (vma && address >= vma->vm_start &&
- (vma->vm_flags & VM_WRITE)) {
- int fault;
- fault = handle_mm_fault(mm, vma, address, 1);
- if (unlikely((fault & VM_FAULT_ERROR))) {
-#if 0
- /* XXX: let's do this when we verify it is OK */
- if (ret & VM_FAULT_OOM)
- ret = -ENOMEM;
-#endif
- } else {
- ret = 0;
- if (fault & VM_FAULT_MAJOR)
- current->maj_flt++;
- else
- current->min_flt++;
- }
- }
- up_read(&mm->mmap_sem);
- return ret;
-}
/*
* PI code:
@@ -589,10 +553,9 @@ static void wake_futex(struct futex_q *q)
* The waiting task can free the futex_q as soon as this is written,
* without taking any locks. This must come last.
*
- * A memory barrier is required here to prevent the following store
- * to lock_ptr from getting ahead of the wakeup. Clearing the lock
- * at the end of wake_up_all() does not prevent this store from
- * moving.
+ * A memory barrier is required here to prevent the following store to
+ * lock_ptr from getting ahead of the wakeup. Clearing the lock at the
+ * end of wake_up() does not prevent this store from moving.
*/
smp_wmb();
q->lock_ptr = NULL;
@@ -692,9 +655,16 @@ double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
}
}
+static inline void
+double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
+{
+ spin_unlock(&hb1->lock);
+ if (hb1 != hb2)
+ spin_unlock(&hb2->lock);
+}
+
/*
- * Wake up all waiters hashed on the physical page that is mapped
- * to this virtual address:
+ * Wake up waiters matching bitset queued on this futex (uaddr).
*/
static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
{
@@ -750,9 +720,9 @@ futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
struct futex_hash_bucket *hb1, *hb2;
struct plist_head *head;
struct futex_q *this, *next;
- int ret, op_ret, attempt = 0;
+ int ret, op_ret;
-retryfull:
+retry:
ret = get_futex_key(uaddr1, fshared, &key1);
if (unlikely(ret != 0))
goto out;
@@ -763,16 +733,13 @@ retryfull:
hb1 = hash_futex(&key1);
hb2 = hash_futex(&key2);
-retry:
double_lock_hb(hb1, hb2);
-
+retry_private:
op_ret = futex_atomic_op_inuser(op, uaddr2);
if (unlikely(op_ret < 0)) {
u32 dummy;
- spin_unlock(&hb1->lock);
- if (hb1 != hb2)
- spin_unlock(&hb2->lock);
+ double_unlock_hb(hb1, hb2);
#ifndef CONFIG_MMU
/*
@@ -788,26 +755,16 @@ retry:
goto out_put_keys;
}
- /*
- * futex_atomic_op_inuser needs to both read and write
- * *(int __user *)uaddr2, but we can't modify it
- * non-atomically. Therefore, if get_user below is not
- * enough, we need to handle the fault ourselves, while
- * still holding the mmap_sem.
- */
- if (attempt++) {
- ret = futex_handle_fault((unsigned long)uaddr2,
- attempt);
- if (ret)
- goto out_put_keys;
- goto retry;
- }
-
ret = get_user(dummy, uaddr2);
if (ret)
- return ret;
+ goto out_put_keys;
+
+ if (!fshared)
+ goto retry_private;
- goto retryfull;
+ put_futex_key(fshared, &key2);
+ put_futex_key(fshared, &key1);
+ goto retry;
}
head = &hb1->chain;
@@ -834,9 +791,7 @@ retry:
ret += op_ret;
}
- spin_unlock(&hb1->lock);
- if (hb1 != hb2)
- spin_unlock(&hb2->lock);
+ double_unlock_hb(hb1, hb2);
out_put_keys:
put_futex_key(fshared, &key2);
out_put_key1:
@@ -869,6 +824,7 @@ retry:
hb1 = hash_futex(&key1);
hb2 = hash_futex(&key2);
+retry_private:
double_lock_hb(hb1, hb2);
if (likely(cmpval != NULL)) {
@@ -877,16 +833,18 @@ retry:
ret = get_futex_value_locked(&curval, uaddr1);
if (unlikely(ret)) {
- spin_unlock(&hb1->lock);
- if (hb1 != hb2)
- spin_unlock(&hb2->lock);
+ double_unlock_hb(hb1, hb2);
ret = get_user(curval, uaddr1);
+ if (ret)
+ goto out_put_keys;
- if (!ret)
- goto retry;
+ if (!fshared)
+ goto retry_private;
- goto out_put_keys;
+ put_futex_key(fshared, &key2);
+ put_futex_key(fshared, &key1);
+ goto retry;
}
if (curval != *cmpval) {
ret = -EAGAIN;
@@ -923,9 +881,7 @@ retry:
}
out_unlock:
- spin_unlock(&hb1->lock);
- if (hb1 != hb2)
- spin_unlock(&hb2->lock);
+ double_unlock_hb(hb1, hb2);
/* drop_futex_key_refs() must be called outside the spinlocks. */
while (--drop_count >= 0)
@@ -1063,7 +1019,7 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
struct futex_pi_state *pi_state = q->pi_state;
struct task_struct *oldowner = pi_state->owner;
u32 uval, curval, newval;
- int ret, attempt = 0;
+ int ret;
/* Owner died? */
if (!pi_state->owner)
@@ -1076,11 +1032,9 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
* in the user space variable. This must be atomic as we have
* to preserve the owner died bit here.
*
- * Note: We write the user space value _before_ changing the
- * pi_state because we can fault here. Imagine swapped out
- * pages or a fork, which was running right before we acquired
- * mmap_sem, that marked all the anonymous memory readonly for
- * cow.
+ * Note: We write the user space value _before_ changing the pi_state
+ * because we can fault here. Imagine swapped out pages or a fork
+ * that marked all the anonymous memory readonly for cow.
*
* Modifying pi_state _before_ the user space value would
* leave the pi_state in an inconsistent state when we fault
@@ -1136,7 +1090,7 @@ retry:
handle_fault:
spin_unlock(q->lock_ptr);
- ret = futex_handle_fault((unsigned long)uaddr, attempt++);
+ ret = get_user(uval, uaddr);
spin_lock(q->lock_ptr);
@@ -1185,10 +1139,11 @@ retry:
if (unlikely(ret != 0))
goto out;
+retry_private:
hb = queue_lock(&q);
/*
- * Access the page AFTER the futex is queued.
+ * Access the page AFTER the hash-bucket is locked.
* Order is important:
*
* Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
@@ -1204,20 +1159,23 @@ retry:
* a wakeup when *uaddr != val on entry to the syscall. This is
* rare, but normal.
*
- * for shared futexes, we hold the mmap semaphore, so the mapping
+ * For shared futexes, we hold the mmap semaphore, so the mapping
* cannot have changed since we looked it up in get_futex_key.
*/
ret = get_futex_value_locked(&uval, uaddr);
if (unlikely(ret)) {
queue_unlock(&q, hb);
- put_futex_key(fshared, &q.key);
ret = get_user(uval, uaddr);
+ if (ret)
+ goto out_put_key;
- if (!ret)
- goto retry;
- goto out;
+ if (!fshared)
+ goto retry_private;
+
+ put_futex_key(fshared, &q.key);
+ goto retry;
}
ret = -EWOULDBLOCK;
if (unlikely(uval != val)) {
@@ -1248,16 +1206,13 @@ retry:
if (!abs_time)
schedule();
else {
- unsigned long slack;
- slack = current->timer_slack_ns;
- if (rt_task(current))
- slack = 0;
hrtimer_init_on_stack(&t.timer,
clockrt ? CLOCK_REALTIME :
CLOCK_MONOTONIC,
HRTIMER_MODE_ABS);
hrtimer_init_sleeper(&t, current);
- hrtimer_set_expires_range_ns(&t.timer, *abs_time, slack);
+ hrtimer_set_expires_range_ns(&t.timer, *abs_time,
+ current->timer_slack_ns);
hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS);
if (!hrtimer_active(&t.timer))
@@ -1354,7 +1309,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
struct futex_hash_bucket *hb;
u32 uval, newval, curval;
struct futex_q q;
- int ret, lock_taken, ownerdied = 0, attempt = 0;
+ int ret, lock_taken, ownerdied = 0;
if (refill_pi_state_cache())
return -ENOMEM;
@@ -1374,7 +1329,7 @@ retry:
if (unlikely(ret != 0))
goto out;
-retry_unlocked:
+retry_private:
hb = queue_lock(&q);
retry_locked:
@@ -1458,6 +1413,7 @@ retry_locked:
* exit to complete.
*/
queue_unlock(&q, hb);
+ put_futex_key(fshared, &q.key);
cond_resched();
goto retry;
@@ -1564,6 +1520,13 @@ retry_locked:
}
}
+ /*
+ * If fixup_pi_state_owner() faulted and was unable to handle the
+ * fault, unlock it and return the fault to userspace.
+ */
+ if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
+ rt_mutex_unlock(&q.pi_state->pi_mutex);
+
/* Unqueue and drop the lock */
unqueue_me_pi(&q);
@@ -1591,22 +1554,18 @@ uaddr_faulted:
*/
queue_unlock(&q, hb);
- if (attempt++) {
- ret = futex_handle_fault((unsigned long)uaddr, attempt);
- if (ret)
- goto out_put_key;
- goto retry_unlocked;
- }
-
ret = get_user(uval, uaddr);
- if (!ret)
- goto retry;
+ if (ret)
+ goto out_put_key;
- if (to)
- destroy_hrtimer_on_stack(&to->timer);
- return ret;
+ if (!fshared)
+ goto retry_private;
+
+ put_futex_key(fshared, &q.key);
+ goto retry;
}
+
/*
* Userspace attempted a TID -> 0 atomic transition, and failed.
* This is the in-kernel slowpath: we look up the PI state (if any),
@@ -1619,7 +1578,7 @@ static int futex_unlock_pi(u32 __user *uaddr, int fshared)
u32 uval;
struct plist_head *head;
union futex_key key = FUTEX_KEY_INIT;
- int ret, attempt = 0;
+ int ret;
retry:
if (get_user(uval, uaddr))
@@ -1635,7 +1594,6 @@ retry:
goto out;
hb = hash_futex(&key);
-retry_unlocked:
spin_lock(&hb->lock);
/*
@@ -1700,14 +1658,7 @@ pi_faulted:
* we have to drop the mmap_sem in order to call get_user().
*/
spin_unlock(&hb->lock);
-
- if (attempt++) {
- ret = futex_handle_fault((unsigned long)uaddr, attempt);
- if (ret)
- goto out;
- uval = 0;
- goto retry_unlocked;
- }
+ put_futex_key(fshared, &key);
ret = get_user(uval, uaddr);
if (!ret)
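
Userspace-side sketch (not part of the patch): the fshared flag threaded through the functions above corresponds to the caller picking the FUTEX_*_PRIVATE opcodes, which let get_futex_key() key on (mm, address) and skip the page lookup. Illustrative wrappers, error handling omitted:

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/* fshared == 0 path in the kernel: the key is (current->mm, uaddr). */
static long futex_wait_private(int *uaddr, int expected)
{
	return syscall(SYS_futex, uaddr, FUTEX_WAIT_PRIVATE, expected,
		       NULL, NULL, 0);
}

/* fshared == 1 path: works across processes sharing the mapping. */
static long futex_wake_shared(int *uaddr, int nr_wake)
{
	return syscall(SYS_futex, uaddr, FUTEX_WAKE, nr_wake,
		       NULL, NULL, 0);
}
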
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index f394d2a42ca..cb8a15c1958 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -651,14 +651,20 @@ static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
* and expiry check is done in the hrtimer_interrupt or in the softirq.
*/
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
- struct hrtimer_clock_base *base)
+ struct hrtimer_clock_base *base,
+ int wakeup)
{
if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
- spin_unlock(&base->cpu_base->lock);
- raise_softirq_irqoff(HRTIMER_SOFTIRQ);
- spin_lock(&base->cpu_base->lock);
+ if (wakeup) {
+ spin_unlock(&base->cpu_base->lock);
+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+ spin_lock(&base->cpu_base->lock);
+ } else
+ __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+
return 1;
}
+
return 0;
}
@@ -703,7 +709,8 @@ static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline int hrtimer_switch_to_hres(void) { return 0; }
static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { }
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
- struct hrtimer_clock_base *base)
+ struct hrtimer_clock_base *base,
+ int wakeup)
{
return 0;
}
@@ -886,20 +893,9 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
return 0;
}
-/**
- * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
- * @timer: the timer to be added
- * @tim: expiry time
- * @delta_ns: "slack" range for the timer
- * @mode: expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
- *
- * Returns:
- * 0 on success
- * 1 when the timer was active
- */
-int
-hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_ns,
- const enum hrtimer_mode mode)
+int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ unsigned long delta_ns, const enum hrtimer_mode mode,
+ int wakeup)
{
struct hrtimer_clock_base *base, *new_base;
unsigned long flags;
@@ -940,12 +936,29 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_n
* XXX send_remote_softirq() ?
*/
if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
- hrtimer_enqueue_reprogram(timer, new_base);
+ hrtimer_enqueue_reprogram(timer, new_base, wakeup);
unlock_hrtimer_base(timer, &flags);
return ret;
}
+
+/**
+ * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
+ * @timer: the timer to be added
+ * @tim: expiry time
+ * @delta_ns: "slack" range for the timer
+ * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or relative (HRTIMER_MODE_REL)
+ *
+ * Returns:
+ * 0 on success
+ * 1 when the timer was active
+ */
+int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ unsigned long delta_ns, const enum hrtimer_mode mode)
+{
+ return __hrtimer_start_range_ns(timer, tim, delta_ns, mode, 1);
+}
EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
/**
@@ -961,7 +974,7 @@ EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
int
hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
{
- return hrtimer_start_range_ns(timer, tim, 0, mode);
+ return __hrtimer_start_range_ns(timer, tim, 0, mode, 1);
}
EXPORT_SYMBOL_GPL(hrtimer_start);
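
Caller-side sketch (not part of the patch): the exported hrtimer_start_range_ns()/hrtimer_start() keep their behaviour and simply pass wakeup=1 to __hrtimer_start_range_ns(); wakeup=0 is for callers that cannot safely take the softirq-raise-with-wakeup path. A typical slack-aware user looks like this; the demo_* names are illustrative.

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer demo_timer;

static enum hrtimer_restart demo_timer_fn(struct hrtimer *timer)
{
	/* work to do on expiry */
	return HRTIMER_NORESTART;
}

static void demo_timer_arm(void)
{
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	demo_timer.function = demo_timer_fn;
	/* expire 500us from now, allowing 100us of slack for coalescing */
	hrtimer_start_range_ns(&demo_timer, ktime_set(0, 500 * 1000),
			       100 * 1000, HRTIMER_MODE_REL);
}
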
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index 4dd5b1edac9..3394f8f5296 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
obj-$(CONFIG_NUMA_MIGRATE_IRQ_DESC) += numa_migrate.o
+obj-$(CONFIG_PM_SLEEP) += pm.o
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 122fef4b0bd..c687ba4363f 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -81,6 +81,7 @@ void dynamic_irq_cleanup(unsigned int irq)
desc->handle_irq = handle_bad_irq;
desc->chip = &no_irq_chip;
desc->name = NULL;
+ clear_kstat_irqs(desc);
spin_unlock_irqrestore(&desc->lock, flags);
}
@@ -293,7 +294,8 @@ static inline void mask_ack_irq(struct irq_desc *desc, int irq)
desc->chip->mask_ack(irq);
else {
desc->chip->mask(irq);
- desc->chip->ack(irq);
+ if (desc->chip->ack)
+ desc->chip->ack(irq);
}
}
@@ -479,7 +481,8 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
kstat_incr_irqs_this_cpu(irq, desc);
/* Start handling the irq */
- desc->chip->ack(irq);
+ if (desc->chip->ack)
+ desc->chip->ack(irq);
desc = irq_remap_to_desc(irq, desc);
/* Mark the IRQ currently in progress.*/
diff --git a/kernel/irq/devres.c b/kernel/irq/devres.c
index 38a25b8d8bf..d06df9c41cb 100644
--- a/kernel/irq/devres.c
+++ b/kernel/irq/devres.c
@@ -26,10 +26,12 @@ static int devm_irq_match(struct device *dev, void *res, void *data)
}
/**
- * devm_request_irq - allocate an interrupt line for a managed device
+ * devm_request_threaded_irq - allocate an interrupt line for a managed device
* @dev: device to request interrupt for
* @irq: Interrupt line to allocate
* @handler: Function to be called when the IRQ occurs
+ * @thread_fn: function to be called in a threaded interrupt context. NULL
+ * for devices which handle everything in @handler
* @irqflags: Interrupt type flags
* @devname: An ascii name for the claiming device
* @dev_id: A cookie passed back to the handler function
@@ -42,9 +44,10 @@ static int devm_irq_match(struct device *dev, void *res, void *data)
* If an IRQ allocated with this function needs to be freed
* separately, dev_free_irq() must be used.
*/
-int devm_request_irq(struct device *dev, unsigned int irq,
- irq_handler_t handler, unsigned long irqflags,
- const char *devname, void *dev_id)
+int devm_request_threaded_irq(struct device *dev, unsigned int irq,
+ irq_handler_t handler, irq_handler_t thread_fn,
+ unsigned long irqflags, const char *devname,
+ void *dev_id)
{
struct irq_devres *dr;
int rc;
@@ -54,7 +57,8 @@ int devm_request_irq(struct device *dev, unsigned int irq,
if (!dr)
return -ENOMEM;
- rc = request_irq(irq, handler, irqflags, devname, dev_id);
+ rc = request_threaded_irq(irq, handler, thread_fn, irqflags, devname,
+ dev_id);
if (rc) {
devres_free(dr);
return rc;
@@ -66,7 +70,7 @@ int devm_request_irq(struct device *dev, unsigned int irq,
return 0;
}
-EXPORT_SYMBOL(devm_request_irq);
+EXPORT_SYMBOL(devm_request_threaded_irq);
/**
* devm_free_irq - free an interrupt
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 412370ab9a3..d82142be8dd 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -83,19 +83,21 @@ static struct irq_desc irq_desc_init = {
void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
{
- unsigned long bytes;
- char *ptr;
int node;
-
- /* Compute how many bytes we need per irq and allocate them */
- bytes = nr * sizeof(unsigned int);
+ void *ptr;
node = cpu_to_node(cpu);
- ptr = kzalloc_node(bytes, GFP_ATOMIC, node);
- printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n", cpu, node);
+ ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), GFP_ATOMIC, node);
- if (ptr)
- desc->kstat_irqs = (unsigned int *)ptr;
+ /*
+	 * don't overwrite if we cannot get a new one;
+	 * init_copy_kstat_irqs() could still use the old one
+ */
+ if (ptr) {
+ printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n",
+ cpu, node);
+ desc->kstat_irqs = ptr;
+ }
}
static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
@@ -238,6 +240,7 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
}
};
+static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
int __init early_irq_init(void)
{
struct irq_desc *desc;
@@ -254,6 +257,7 @@ int __init early_irq_init(void)
for (i = 0; i < count; i++) {
desc[i].irq = i;
init_alloc_desc_masks(&desc[i], 0, true);
+ desc[i].kstat_irqs = kstat_irqs_all[i];
}
return arch_early_irq_init();
}
@@ -269,6 +273,11 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
}
#endif /* !CONFIG_SPARSE_IRQ */
+void clear_kstat_irqs(struct irq_desc *desc)
+{
+ memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
+}
+
/*
* What should we do if we get a hw irq event on an illegal vector?
* Each architecture has to answer this themself.
@@ -330,6 +339,15 @@ irqreturn_t no_action(int cpl, void *dev_id)
return IRQ_NONE;
}
+static void warn_no_thread(unsigned int irq, struct irqaction *action)
+{
+ if (test_and_set_bit(IRQTF_WARNED, &action->thread_flags))
+ return;
+
+ printk(KERN_WARNING "IRQ %d device %s returned IRQ_WAKE_THREAD "
+ "but no thread function available.", irq, action->name);
+}
+
DEFINE_TRACE(irq_handler_entry);
DEFINE_TRACE(irq_handler_exit);
@@ -345,6 +363,8 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
irqreturn_t ret, retval = IRQ_NONE;
unsigned int status = 0;
+ WARN_ONCE(!in_irq(), "BUG: IRQ handler called from non-hardirq context!");
+
if (!(action->flags & IRQF_DISABLED))
local_irq_enable_in_hardirq();
@@ -352,8 +372,47 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
trace_irq_handler_entry(irq, action);
ret = action->handler(irq, action->dev_id);
trace_irq_handler_exit(irq, action, ret);
- if (ret == IRQ_HANDLED)
+
+ switch (ret) {
+ case IRQ_WAKE_THREAD:
+ /*
+ * Set result to handled so the spurious check
+ * does not trigger.
+ */
+ ret = IRQ_HANDLED;
+
+ /*
+ * Catch drivers which return WAKE_THREAD but
+ * did not set up a thread function
+ */
+ if (unlikely(!action->thread_fn)) {
+ warn_no_thread(irq, action);
+ break;
+ }
+
+ /*
+ * Wake up the handler thread for this
+ * action. In case the thread crashed and was
+ * killed we just pretend that we handled the
+ * interrupt. The hardirq handler above has
+ * disabled the device interrupt, so no irq
+ * storm is lurking.
+ */
+ if (likely(!test_bit(IRQTF_DIED,
+ &action->thread_flags))) {
+ set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
+ wake_up_process(action->thread);
+ }
+
+ /* Fall through to add to randomness */
+ case IRQ_HANDLED:
status |= action->flags;
+ break;
+
+ default:
+ break;
+ }
+
retval |= ret;
action = action->next;
} while (action);
@@ -366,6 +425,11 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
}
#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
+
+#ifdef CONFIG_ENABLE_WARN_DEPRECATED
+# warning __do_IRQ is deprecated. Please convert to proper flow handlers
+#endif
+
/**
* __do_IRQ - original all in one highlevel IRQ handler
* @irq: the interrupt number
@@ -486,12 +550,10 @@ void early_init_irq_lock_class(void)
}
}
-#ifdef CONFIG_SPARSE_IRQ
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
struct irq_desc *desc = irq_to_desc(irq);
return desc ? desc->kstat_irqs[cpu] : 0;
}
-#endif
EXPORT_SYMBOL(kstat_irqs_cpu);
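
Aside, a hedged sketch (not part of the patch): with kstat_irqs now populated for every descriptor (statically via kstat_irqs_all[] in the !SPARSE_IRQ case), kstat_irqs_cpu() is available unconditionally. A hypothetical helper summing an IRQ's count over all CPUs:

#include <linux/kernel_stat.h>
#include <linux/cpumask.h>

static unsigned int demo_irq_total(unsigned int irq)
{
	unsigned int cpu, sum = 0;

	for_each_possible_cpu(cpu)
		sum += kstat_irqs_cpu(irq, cpu);
	return sum;
}
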
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 40416a81a0f..01ce20eab38 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -12,9 +12,12 @@ extern void compat_irq_chip_set_default_handler(struct irq_desc *desc);
extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
unsigned long flags);
+extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
+extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
extern struct lock_class_key irq_desc_lock_class;
extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr);
+extern void clear_kstat_irqs(struct irq_desc *desc);
extern spinlock_t sparse_irq_lock;
#ifdef CONFIG_SPARSE_IRQ
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index a3a5dc9ef34..7e2e7dd4cd2 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -8,16 +8,15 @@
*/
#include <linux/irq.h>
+#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
+#include <linux/sched.h>
#include "internals.h"
-#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
-cpumask_var_t irq_default_affinity;
-
/**
* synchronize_irq - wait for pending IRQ handlers (on other CPUs)
* @irq: interrupt number to wait for
@@ -53,9 +52,18 @@ void synchronize_irq(unsigned int irq)
/* Oops, that failed? */
} while (status & IRQ_INPROGRESS);
+
+ /*
+ * We made sure that no hardirq handler is running. Now verify
+ * that no threaded handlers are active.
+ */
+ wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
}
EXPORT_SYMBOL(synchronize_irq);
+#ifdef CONFIG_SMP
+cpumask_var_t irq_default_affinity;
+
/**
* irq_can_set_affinity - Check if the affinity of a given irq can be set
* @irq: Interrupt to check
@@ -72,6 +80,18 @@ int irq_can_set_affinity(unsigned int irq)
return 1;
}
+static void
+irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask)
+{
+ struct irqaction *action = desc->action;
+
+ while (action) {
+ if (action->thread)
+ set_cpus_allowed_ptr(action->thread, cpumask);
+ action = action->next;
+ }
+}
+
/**
* irq_set_affinity - Set the irq affinity of a given irq
* @irq: Interrupt to set affinity
@@ -100,6 +120,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
cpumask_copy(desc->affinity, cpumask);
desc->chip->set_affinity(irq, cpumask);
#endif
+ irq_set_thread_affinity(desc, cpumask);
desc->status |= IRQ_AFFINITY_SET;
spin_unlock_irqrestore(&desc->lock, flags);
return 0;
@@ -109,7 +130,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
/*
* Generic version of the affinity autoselector.
*/
-int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
+static int setup_affinity(unsigned int irq, struct irq_desc *desc)
{
if (!irq_can_set_affinity(irq))
return 0;
@@ -133,7 +154,7 @@ set_affinity:
return 0;
}
#else
-static inline int do_irq_select_affinity(unsigned int irq, struct irq_desc *d)
+static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
{
return irq_select_affinity(irq);
}
@@ -149,19 +170,35 @@ int irq_select_affinity_usr(unsigned int irq)
int ret;
spin_lock_irqsave(&desc->lock, flags);
- ret = do_irq_select_affinity(irq, desc);
+ ret = setup_affinity(irq, desc);
+ if (!ret)
+ irq_set_thread_affinity(desc, desc->affinity);
spin_unlock_irqrestore(&desc->lock, flags);
return ret;
}
#else
-static inline int do_irq_select_affinity(int irq, struct irq_desc *desc)
+static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
{
return 0;
}
#endif
+void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
+{
+ if (suspend) {
+ if (!desc->action || (desc->action->flags & IRQF_TIMER))
+ return;
+ desc->status |= IRQ_SUSPENDED;
+ }
+
+ if (!desc->depth++) {
+ desc->status |= IRQ_DISABLED;
+ desc->chip->disable(irq);
+ }
+}
+
/**
* disable_irq_nosync - disable an irq without waiting
* @irq: Interrupt to disable
@@ -182,10 +219,7 @@ void disable_irq_nosync(unsigned int irq)
return;
spin_lock_irqsave(&desc->lock, flags);
- if (!desc->depth++) {
- desc->status |= IRQ_DISABLED;
- desc->chip->disable(irq);
- }
+ __disable_irq(desc, irq, false);
spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(disable_irq_nosync);
@@ -215,15 +249,21 @@ void disable_irq(unsigned int irq)
}
EXPORT_SYMBOL(disable_irq);
-static void __enable_irq(struct irq_desc *desc, unsigned int irq)
+void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
{
+ if (resume)
+ desc->status &= ~IRQ_SUSPENDED;
+
switch (desc->depth) {
case 0:
+ err_out:
WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
break;
case 1: {
unsigned int status = desc->status & ~IRQ_DISABLED;
+ if (desc->status & IRQ_SUSPENDED)
+ goto err_out;
/* Prevent probing on this irq: */
desc->status = status | IRQ_NOPROBE;
check_irq_resend(desc, irq);
@@ -253,7 +293,7 @@ void enable_irq(unsigned int irq)
return;
spin_lock_irqsave(&desc->lock, flags);
- __enable_irq(desc, irq);
+ __enable_irq(desc, irq, false);
spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(enable_irq);
@@ -384,14 +424,98 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
return ret;
}
+static int irq_wait_for_interrupt(struct irqaction *action)
+{
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (test_and_clear_bit(IRQTF_RUNTHREAD,
+ &action->thread_flags)) {
+ __set_current_state(TASK_RUNNING);
+ return 0;
+ }
+ schedule();
+ }
+ return -1;
+}
+
+/*
+ * Interrupt handler thread
+ */
+static int irq_thread(void *data)
+{
+ struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, };
+ struct irqaction *action = data;
+ struct irq_desc *desc = irq_to_desc(action->irq);
+ int wake;
+
+ sched_setscheduler(current, SCHED_FIFO, &param);
+ current->irqaction = action;
+
+ while (!irq_wait_for_interrupt(action)) {
+
+ atomic_inc(&desc->threads_active);
+
+ spin_lock_irq(&desc->lock);
+ if (unlikely(desc->status & IRQ_DISABLED)) {
+ /*
+ * CHECKME: We might need a dedicated
+ * IRQ_THREAD_PENDING flag here, which
+ * retriggers the thread in check_irq_resend()
+ * but AFAICT IRQ_PENDING should be fine as it
+ * retriggers the interrupt itself --- tglx
+ */
+ desc->status |= IRQ_PENDING;
+ spin_unlock_irq(&desc->lock);
+ } else {
+ spin_unlock_irq(&desc->lock);
+
+ action->thread_fn(action->irq, action->dev_id);
+ }
+
+ wake = atomic_dec_and_test(&desc->threads_active);
+
+ if (wake && waitqueue_active(&desc->wait_for_threads))
+ wake_up(&desc->wait_for_threads);
+ }
+
+ /*
+	 * Clear irqaction. Otherwise exit_irq_thread() would make a
+	 * fuss about an active irq thread going into nirvana.
+ */
+ current->irqaction = NULL;
+ return 0;
+}
+
+/*
+ * Called from do_exit()
+ */
+void exit_irq_thread(void)
+{
+ struct task_struct *tsk = current;
+
+ if (!tsk->irqaction)
+ return;
+
+ printk(KERN_ERR
+ "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
+ tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);
+
+ /*
+ * Set the THREAD DIED flag to prevent further wakeups of the
+ * soon to be gone threaded handler.
+ */
+	set_bit(IRQTF_DIED, &tsk->irqaction->thread_flags);
+}
+
/*
* Internal function to register an irqaction - typically used to
* allocate special interrupts that are part of the architecture.
*/
static int
-__setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
+__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
- struct irqaction *old, **p;
+ struct irqaction *old, **old_ptr;
const char *old_name = NULL;
unsigned long flags;
int shared = 0;
@@ -420,11 +544,31 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
}
/*
+ * Threaded handler ?
+ */
+ if (new->thread_fn) {
+ struct task_struct *t;
+
+ t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
+ new->name);
+ if (IS_ERR(t))
+ return PTR_ERR(t);
+ /*
+ * We keep the reference to the task struct even if
+ * the thread dies to avoid that the interrupt code
+ * references an already freed task_struct.
+ */
+ get_task_struct(t);
+ new->thread = t;
+ wake_up_process(t);
+ }
+
+ /*
* The following block of code has to be executed atomically
*/
spin_lock_irqsave(&desc->lock, flags);
- p = &desc->action;
- old = *p;
+ old_ptr = &desc->action;
+ old = *old_ptr;
if (old) {
/*
* Can't share interrupts unless both agree to and are
@@ -447,8 +591,8 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
/* add new interrupt at end of irq queue */
do {
- p = &old->next;
- old = *p;
+ old_ptr = &old->next;
+ old = *old_ptr;
} while (old);
shared = 1;
}
@@ -456,15 +600,15 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
if (!shared) {
irq_chip_set_defaults(desc->chip);
+ init_waitqueue_head(&desc->wait_for_threads);
+
/* Setup the type (level, edge polarity) if configured: */
if (new->flags & IRQF_TRIGGER_MASK) {
ret = __irq_set_trigger(desc, irq,
new->flags & IRQF_TRIGGER_MASK);
- if (ret) {
- spin_unlock_irqrestore(&desc->lock, flags);
- return ret;
- }
+ if (ret)
+ goto out_thread;
} else
compat_irq_chip_set_default_handler(desc);
#if defined(CONFIG_IRQ_PER_CPU)
@@ -488,7 +632,7 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
desc->status |= IRQ_NO_BALANCING;
/* Set default affinity mask once everything is setup */
- do_irq_select_affinity(irq, desc);
+ setup_affinity(irq, desc);
} else if ((new->flags & IRQF_TRIGGER_MASK)
&& (new->flags & IRQF_TRIGGER_MASK)
@@ -499,7 +643,7 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
(int)(new->flags & IRQF_TRIGGER_MASK));
}
- *p = new;
+ *old_ptr = new;
/* Reset broken irq detection when installing new handler */
desc->irq_count = 0;
@@ -511,7 +655,7 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
*/
if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
desc->status &= ~IRQ_SPURIOUS_DISABLED;
- __enable_irq(desc, irq);
+ __enable_irq(desc, irq, false);
}
spin_unlock_irqrestore(&desc->lock, flags);
@@ -532,8 +676,19 @@ mismatch:
dump_stack();
}
#endif
+ ret = -EBUSY;
+
+out_thread:
spin_unlock_irqrestore(&desc->lock, flags);
- return -EBUSY;
+ if (new->thread) {
+ struct task_struct *t = new->thread;
+
+ new->thread = NULL;
+ if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
+ kthread_stop(t);
+ put_task_struct(t);
+ }
+ return ret;
}
/**
@@ -549,97 +704,138 @@ int setup_irq(unsigned int irq, struct irqaction *act)
return __setup_irq(irq, desc, act);
}
+EXPORT_SYMBOL_GPL(setup_irq);
-/**
- * free_irq - free an interrupt
- * @irq: Interrupt line to free
- * @dev_id: Device identity to free
- *
- * Remove an interrupt handler. The handler is removed and if the
- * interrupt line is no longer in use by any driver it is disabled.
- * On a shared IRQ the caller must ensure the interrupt is disabled
- * on the card it drives before calling this function. The function
- * does not return until any executing interrupts for this IRQ
- * have completed.
- *
- * This function must not be called from interrupt context.
+ /*
+ * Internal function to unregister an irqaction - used to free
+ * regular and special interrupts that are part of the architecture.
*/
-void free_irq(unsigned int irq, void *dev_id)
+static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
struct irq_desc *desc = irq_to_desc(irq);
- struct irqaction **p;
+ struct irqaction *action, **action_ptr;
+ struct task_struct *irqthread;
unsigned long flags;
- WARN_ON(in_interrupt());
+ WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
if (!desc)
- return;
+ return NULL;
spin_lock_irqsave(&desc->lock, flags);
- p = &desc->action;
+
+ /*
+ * There can be multiple actions per IRQ descriptor, find the right
+ * one based on the dev_id:
+ */
+ action_ptr = &desc->action;
for (;;) {
- struct irqaction *action = *p;
+ action = *action_ptr;
+
+ if (!action) {
+ WARN(1, "Trying to free already-free IRQ %d\n", irq);
+ spin_unlock_irqrestore(&desc->lock, flags);
- if (action) {
- struct irqaction **pp = p;
+ return NULL;
+ }
- p = &action->next;
- if (action->dev_id != dev_id)
- continue;
+ if (action->dev_id == dev_id)
+ break;
+ action_ptr = &action->next;
+ }
- /* Found it - now remove it from the list of entries */
- *pp = action->next;
+ /* Found it - now remove it from the list of entries: */
+ *action_ptr = action->next;
- /* Currently used only by UML, might disappear one day.*/
+ /* Currently used only by UML, might disappear one day: */
#ifdef CONFIG_IRQ_RELEASE_METHOD
- if (desc->chip->release)
- desc->chip->release(irq, dev_id);
+ if (desc->chip->release)
+ desc->chip->release(irq, dev_id);
#endif
- if (!desc->action) {
- desc->status |= IRQ_DISABLED;
- if (desc->chip->shutdown)
- desc->chip->shutdown(irq);
- else
- desc->chip->disable(irq);
- }
- spin_unlock_irqrestore(&desc->lock, flags);
- unregister_handler_proc(irq, action);
+ /* If this was the last handler, shut down the IRQ line: */
+ if (!desc->action) {
+ desc->status |= IRQ_DISABLED;
+ if (desc->chip->shutdown)
+ desc->chip->shutdown(irq);
+ else
+ desc->chip->disable(irq);
+ }
+
+ irqthread = action->thread;
+ action->thread = NULL;
+
+ spin_unlock_irqrestore(&desc->lock, flags);
+
+ unregister_handler_proc(irq, action);
+
+ /* Make sure it's not being used on another CPU: */
+ synchronize_irq(irq);
+
+ if (irqthread) {
+ if (!test_bit(IRQTF_DIED, &action->thread_flags))
+ kthread_stop(irqthread);
+ put_task_struct(irqthread);
+ }
- /* Make sure it's not being used on another CPU */
- synchronize_irq(irq);
-#ifdef CONFIG_DEBUG_SHIRQ
- /*
- * It's a shared IRQ -- the driver ought to be
- * prepared for it to happen even now it's
- * being freed, so let's make sure.... We do
- * this after actually deregistering it, to
- * make sure that a 'real' IRQ doesn't run in
- * parallel with our fake
- */
- if (action->flags & IRQF_SHARED) {
- local_irq_save(flags);
- action->handler(irq, dev_id);
- local_irq_restore(flags);
- }
-#endif
- kfree(action);
- return;
- }
- printk(KERN_ERR "Trying to free already-free IRQ %d\n", irq);
#ifdef CONFIG_DEBUG_SHIRQ
- dump_stack();
-#endif
- spin_unlock_irqrestore(&desc->lock, flags);
- return;
+ /*
+ * It's a shared IRQ -- the driver ought to be prepared for an IRQ
+ * event to happen even now it's being freed, so let's make sure that
+ * is so by doing an extra call to the handler ....
+ *
+ * ( We do this after actually deregistering it, to make sure that a
+	 *   'real' IRQ doesn't run in parallel with our fake. )
+ */
+ if (action->flags & IRQF_SHARED) {
+ local_irq_save(flags);
+ action->handler(irq, dev_id);
+ local_irq_restore(flags);
}
+#endif
+ return action;
+}
+
+/**
+ * remove_irq - free an interrupt
+ * @irq: Interrupt line to free
+ * @act: irqaction for the interrupt
+ *
+ * Used to remove interrupts statically setup by the early boot process.
+ */
+void remove_irq(unsigned int irq, struct irqaction *act)
+{
+ __free_irq(irq, act->dev_id);
+}
+EXPORT_SYMBOL_GPL(remove_irq);
+
+/**
+ * free_irq - free an interrupt allocated with request_irq
+ * @irq: Interrupt line to free
+ * @dev_id: Device identity to free
+ *
+ * Remove an interrupt handler. The handler is removed and if the
+ * interrupt line is no longer in use by any driver it is disabled.
+ * On a shared IRQ the caller must ensure the interrupt is disabled
+ * on the card it drives before calling this function. The function
+ * does not return until any executing interrupts for this IRQ
+ * have completed.
+ *
+ * This function must not be called from interrupt context.
+ */
+void free_irq(unsigned int irq, void *dev_id)
+{
+ kfree(__free_irq(irq, dev_id));
}
EXPORT_SYMBOL(free_irq);
/**
- * request_irq - allocate an interrupt line
+ * request_threaded_irq - allocate an interrupt line
* @irq: Interrupt line to allocate
- * @handler: Function to be called when the IRQ occurs
+ * @handler: Function to be called when the IRQ occurs.
+ * Primary handler for threaded interrupts
+ * @thread_fn: Function called from the irq handler thread
+ * If NULL, no irq thread is created
* @irqflags: Interrupt type flags
* @devname: An ascii name for the claiming device
* @dev_id: A cookie passed back to the handler function
@@ -651,6 +847,15 @@ EXPORT_SYMBOL(free_irq);
* raises, you must take care both to initialise your hardware
* and to set up the interrupt handler in the right order.
*
+ * If you want to set up a threaded irq handler for your device
+ * then you need to supply @handler and @thread_fn. @handler is
+ * still called in hard interrupt context and has to check
+ * whether the interrupt originates from the device. If yes it
+ * needs to disable the interrupt on the device and return
+ * IRQ_WAKE_THREAD which will wake up the handler thread and run
+ * @thread_fn. This split handler design is necessary to support
+ * shared interrupts.
+ *
* Dev_id must be globally unique. Normally the address of the
* device data structure is used as the cookie. Since the handler
* receives this value it makes sense to use it.
@@ -666,8 +871,9 @@ EXPORT_SYMBOL(free_irq);
* IRQF_TRIGGER_* Specify active edge(s) or level
*
*/
-int request_irq(unsigned int irq, irq_handler_t handler,
- unsigned long irqflags, const char *devname, void *dev_id)
+int request_threaded_irq(unsigned int irq, irq_handler_t handler,
+ irq_handler_t thread_fn, unsigned long irqflags,
+ const char *devname, void *dev_id)
{
struct irqaction *action;
struct irq_desc *desc;
@@ -679,11 +885,12 @@ int request_irq(unsigned int irq, irq_handler_t handler,
* the behavior is classified as "will not fix" so we need to
* start nudging drivers away from using that idiom.
*/
- if ((irqflags & (IRQF_SHARED|IRQF_DISABLED))
- == (IRQF_SHARED|IRQF_DISABLED))
- pr_warning("IRQ %d/%s: IRQF_DISABLED is not "
- "guaranteed on shared IRQs\n",
- irq, devname);
+ if ((irqflags & (IRQF_SHARED|IRQF_DISABLED)) ==
+ (IRQF_SHARED|IRQF_DISABLED)) {
+ pr_warning(
+ "IRQ %d/%s: IRQF_DISABLED is not guaranteed on shared IRQs\n",
+ irq, devname);
+ }
#ifdef CONFIG_LOCKDEP
/*
@@ -709,15 +916,14 @@ int request_irq(unsigned int irq, irq_handler_t handler,
if (!handler)
return -EINVAL;
- action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
+ action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
if (!action)
return -ENOMEM;
action->handler = handler;
+ action->thread_fn = thread_fn;
action->flags = irqflags;
- cpus_clear(action->mask);
action->name = devname;
- action->next = NULL;
action->dev_id = dev_id;
retval = __setup_irq(irq, desc, action);
@@ -745,4 +951,4 @@ int request_irq(unsigned int irq, irq_handler_t handler,
#endif
return retval;
}
-EXPORT_SYMBOL(request_irq);
+EXPORT_SYMBOL(request_threaded_irq);
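
Driver-side sketch of the threaded model documented above (not part of the patch; the demo_* names and the demo_quiesce()/demo_handle_events() calls are placeholders): the primary handler runs in hard-irq context, silences the device and returns IRQ_WAKE_THREAD, and the sleepable work runs in @thread_fn inside the per-action irq/%d-%s kernel thread. devm_request_threaded_irq() from kernel/irq/devres.c above is the managed variant of the same call.

#include <linux/interrupt.h>

struct demo_dev;				/* placeholder device context */
extern void demo_quiesce(struct demo_dev *d);	/* placeholder: mask device irq */
extern void demo_handle_events(struct demo_dev *d); /* placeholder: may sleep */

static irqreturn_t demo_hardirq(int irq, void *dev_id)
{
	struct demo_dev *d = dev_id;

	/* Check/ack the device quickly, then defer to the thread. */
	demo_quiesce(d);
	return IRQ_WAKE_THREAD;
}

static irqreturn_t demo_thread_fn(int irq, void *dev_id)
{
	struct demo_dev *d = dev_id;

	demo_handle_events(d);		/* sleeping is fine here */
	return IRQ_HANDLED;
}

static int demo_setup_irq(unsigned int irq, struct demo_dev *d)
{
	return request_threaded_irq(irq, demo_hardirq, demo_thread_fn,
				    IRQF_SHARED, "demo", d);
}
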
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index 7f9b80434e3..243d6121e50 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -17,16 +17,11 @@ static void init_copy_kstat_irqs(struct irq_desc *old_desc,
struct irq_desc *desc,
int cpu, int nr)
{
- unsigned long bytes;
-
init_kstat_irqs(desc, cpu, nr);
- if (desc->kstat_irqs != old_desc->kstat_irqs) {
- /* Compute how many bytes we need per irq and allocate them */
- bytes = nr * sizeof(unsigned int);
-
- memcpy(desc->kstat_irqs, old_desc->kstat_irqs, bytes);
- }
+ if (desc->kstat_irqs != old_desc->kstat_irqs)
+ memcpy(desc->kstat_irqs, old_desc->kstat_irqs,
+ nr * sizeof(*desc->kstat_irqs));
}
static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
new file mode 100644
index 00000000000..638d8bedec1
--- /dev/null
+++ b/kernel/irq/pm.c
@@ -0,0 +1,79 @@
+/*
+ * linux/kernel/irq/pm.c
+ *
+ * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ *
+ * This file contains power management functions related to interrupts.
+ */
+
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+
+#include "internals.h"
+
+/**
+ * suspend_device_irqs - disable all currently enabled interrupt lines
+ *
+ * During system-wide suspend or hibernation device interrupts need to be
+ * disabled at the chip level and this function is provided for this purpose.
+ * It disables all interrupt lines that are enabled at the moment and sets the
+ * IRQ_SUSPENDED flag for them.
+ */
+void suspend_device_irqs(void)
+{
+ struct irq_desc *desc;
+ int irq;
+
+ for_each_irq_desc(irq, desc) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&desc->lock, flags);
+ __disable_irq(desc, irq, true);
+ spin_unlock_irqrestore(&desc->lock, flags);
+ }
+
+ for_each_irq_desc(irq, desc)
+ if (desc->status & IRQ_SUSPENDED)
+ synchronize_irq(irq);
+}
+EXPORT_SYMBOL_GPL(suspend_device_irqs);
+
+/**
+ * resume_device_irqs - enable interrupt lines disabled by suspend_device_irqs()
+ *
+ * Enable all interrupt lines previously disabled by suspend_device_irqs() that
+ * have the IRQ_SUSPENDED flag set.
+ */
+void resume_device_irqs(void)
+{
+ struct irq_desc *desc;
+ int irq;
+
+ for_each_irq_desc(irq, desc) {
+ unsigned long flags;
+
+ if (!(desc->status & IRQ_SUSPENDED))
+ continue;
+
+ spin_lock_irqsave(&desc->lock, flags);
+ __enable_irq(desc, irq, true);
+ spin_unlock_irqrestore(&desc->lock, flags);
+ }
+}
+EXPORT_SYMBOL_GPL(resume_device_irqs);
+
+/**
+ * check_wakeup_irqs - check if any wake-up interrupts are pending
+ */
+int check_wakeup_irqs(void)
+{
+ struct irq_desc *desc;
+ int irq;
+
+ for_each_irq_desc(irq, desc)
+ if ((desc->status & IRQ_WAKEUP) && (desc->status & IRQ_PENDING))
+ return -EBUSY;
+
+ return 0;
+}
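
Caller-side sketch (not part of the patch): the intended ordering is to quiesce interrupts after device drivers have suspended and to check for pending wake-up interrupts just before entering the low-power state. demo_platform_sleep() is a placeholder for the architecture's final suspend step.

#include <linux/interrupt.h>

extern int demo_platform_sleep(void);	/* placeholder low-level suspend */

static int demo_enter_sleep(void)
{
	int error;

	/* disable every enabled line and mark it IRQ_SUSPENDED */
	suspend_device_irqs();

	/* a pending wake-up interrupt aborts the transition */
	error = check_wakeup_irqs();
	if (!error)
		error = demo_platform_sleep();

	/* re-enable only the lines suspend_device_irqs() disabled */
	resume_device_irqs();
	return error;
}
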
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index dd364c11e56..4d568294de3 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -104,7 +104,7 @@ static int misrouted_irq(int irq)
return ok;
}
-static void poll_spurious_irqs(unsigned long dummy)
+static void poll_all_shared_irqs(void)
{
struct irq_desc *desc;
int i;
@@ -123,11 +123,23 @@ static void poll_spurious_irqs(unsigned long dummy)
try_one_irq(i, desc);
}
+}
+
+static void poll_spurious_irqs(unsigned long dummy)
+{
+ poll_all_shared_irqs();
mod_timer(&poll_spurious_irq_timer,
jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
}
+#ifdef CONFIG_DEBUG_SHIRQ
+void debug_poll_all_shared_irqs(void)
+{
+ poll_all_shared_irqs();
+}
+#endif
+
/*
* If 99,900 of the previous 100,000 interrupts have not been handled
* then assume that the IRQ is stuck in some manner. Drop a diagnostic
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 7b8b0f21a5b..374faf9bfdc 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -161,6 +161,25 @@ unsigned long kallsyms_lookup_name(const char *name)
return module_kallsyms_lookup_name(name);
}
+int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
+ unsigned long),
+ void *data)
+{
+ char namebuf[KSYM_NAME_LEN];
+ unsigned long i;
+ unsigned int off;
+ int ret;
+
+ for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
+ off = kallsyms_expand_symbol(off, namebuf);
+ ret = fn(data, namebuf, NULL, kallsyms_addresses[i]);
+ if (ret != 0)
+ return ret;
+ }
+ return module_kallsyms_on_each_symbol(fn, data);
+}
+EXPORT_SYMBOL_GPL(kallsyms_on_each_symbol);
+
static unsigned long get_symbol_pos(unsigned long addr,
unsigned long *symbolsize,
unsigned long *offset)
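
Usage sketch for the new iterator (not part of the patch; the demo_* callback is illustrative): the callback receives (data, name, owning module or NULL for built-in symbols, address), and a non-zero return stops the walk, matching the convention of module_kallsyms_on_each_symbol().

#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/string.h>

static int demo_count_cb(void *data, const char *name,
			 struct module *mod, unsigned long addr)
{
	if (strncmp(name, "demo_", 5) == 0)
		(*(unsigned long *)data)++;
	return 0;	/* keep iterating */
}

static unsigned long demo_count_symbols(void)
{
	unsigned long count = 0;

	kallsyms_on_each_symbol(demo_count_cb, &count);
	return count;
}
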
diff --git a/kernel/kexec.c b/kernel/kexec.c
index c7fd6692939..5a758c6e495 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -42,7 +42,7 @@
note_buf_t* crash_notes;
/* vmcoreinfo stuff */
-unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
+static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
size_t vmcoreinfo_size;
size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
@@ -1409,6 +1409,7 @@ static int __init crash_save_vmcoreinfo_init(void)
VMCOREINFO_OFFSET(list_head, prev);
VMCOREINFO_OFFSET(vm_struct, addr);
VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
+ log_buf_kexec_setup();
VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
VMCOREINFO_NUMBER(NR_FREE_PAGES);
VMCOREINFO_NUMBER(PG_lru);
@@ -1450,11 +1451,7 @@ int kernel_kexec(void)
error = device_suspend(PMSG_FREEZE);
if (error)
goto Resume_console;
- error = disable_nonboot_cpus();
- if (error)
- goto Resume_devices;
device_pm_lock();
- local_irq_disable();
/* At this point, device_suspend() has been called,
* but *not* device_power_down(). We *must*
* device_power_down() now. Otherwise, drivers for
@@ -1464,12 +1461,15 @@ int kernel_kexec(void)
*/
error = device_power_down(PMSG_FREEZE);
if (error)
- goto Enable_irqs;
-
+ goto Resume_devices;
+ error = disable_nonboot_cpus();
+ if (error)
+ goto Enable_cpus;
+ local_irq_disable();
/* Suspend system devices */
error = sysdev_suspend(PMSG_FREEZE);
if (error)
- goto Power_up_devices;
+ goto Enable_irqs;
} else
#endif
{
@@ -1483,13 +1483,13 @@ int kernel_kexec(void)
#ifdef CONFIG_KEXEC_JUMP
if (kexec_image->preserve_context) {
sysdev_resume();
- Power_up_devices:
- device_power_up(PMSG_RESTORE);
Enable_irqs:
local_irq_enable();
- device_pm_unlock();
+ Enable_cpus:
enable_nonboot_cpus();
+ device_power_up(PMSG_RESTORE);
Resume_devices:
+ device_pm_unlock();
device_resume(PMSG_RESTORE);
Resume_console:
resume_console();
diff --git a/kernel/kmod.c b/kernel/kmod.c
index a27a5f64443..b750675251e 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -50,7 +50,8 @@ static struct workqueue_struct *khelper_wq;
char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
/**
- * request_module - try to load a kernel module
+ * __request_module - try to load a kernel module
+ * @wait: wait (or not) for the operation to complete
* @fmt: printf style format string for the name of the module
* @...: arguments as specified in the format string
*
@@ -63,7 +64,7 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
* If module auto-loading support is disabled then this function
* becomes a no-operation.
*/
-int request_module(const char *fmt, ...)
+int __request_module(bool wait, const char *fmt, ...)
{
va_list args;
char module_name[MODULE_NAME_LEN];
@@ -108,11 +109,12 @@ int request_module(const char *fmt, ...)
return -ENOMEM;
}
- ret = call_usermodehelper(modprobe_path, argv, envp, 1);
+ ret = call_usermodehelper(modprobe_path, argv, envp,
+ wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
atomic_dec(&kmod_concurrent);
return ret;
}
-EXPORT_SYMBOL(request_module);
+EXPORT_SYMBOL(__request_module);
#endif /* CONFIG_MODULES */
struct subprocess_info {
@@ -167,7 +169,7 @@ static int ____call_usermodehelper(void *data)
}
/* We can run anywhere, unlike our parent keventd(). */
- set_cpus_allowed_ptr(current, CPU_MASK_ALL_PTR);
+ set_cpus_allowed_ptr(current, cpu_all_mask);
/*
* Our parent is keventd, which runs with elevated scheduling priority.
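
Caller-side sketch (not part of the patch): the new wait argument lets call sites choose between blocking until modprobe has finished and merely scheduling it. Presumably the series wraps these as request_module()/request_module_nowait() in the header; that is treated as an assumption here, so the raw call is shown.

#include <linux/kmod.h>

static void demo_load_support_module(const char *chip)
{
	/* synchronous: returns after the usermode helper has exited */
	__request_module(true, "demo-%s", chip);

	/* asynchronous: only waits for the helper to be exec'd */
	__request_module(false, "demo-fw-%s", chip);
}
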
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 5016bfb682b..a5e74ddee0e 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -68,7 +68,7 @@ static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
/* NOTE: change this value only with kprobe_mutex held */
-static bool kprobe_enabled;
+static bool kprobes_all_disarmed;
static DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
@@ -328,7 +328,7 @@ static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
struct kprobe *kp;
list_for_each_entry_rcu(kp, &p->list, list) {
- if (kp->pre_handler && !kprobe_gone(kp)) {
+ if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
set_kprobe_instance(kp);
if (kp->pre_handler(kp, regs))
return 1;
@@ -344,7 +344,7 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
struct kprobe *kp;
list_for_each_entry_rcu(kp, &p->list, list) {
- if (kp->post_handler && !kprobe_gone(kp)) {
+ if (kp->post_handler && likely(!kprobe_disabled(kp))) {
set_kprobe_instance(kp);
kp->post_handler(kp, regs, flags);
reset_kprobe_instance();
@@ -518,20 +518,28 @@ static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
}
/*
-* Add the new probe to old_p->list. Fail if this is the
+* Add the new probe to ap->list. Fail if this is the
* second jprobe at the address - two jprobes can't coexist
*/
-static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
+static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
+ BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
if (p->break_handler) {
- if (old_p->break_handler)
+ if (ap->break_handler)
return -EEXIST;
- list_add_tail_rcu(&p->list, &old_p->list);
- old_p->break_handler = aggr_break_handler;
+ list_add_tail_rcu(&p->list, &ap->list);
+ ap->break_handler = aggr_break_handler;
} else
- list_add_rcu(&p->list, &old_p->list);
- if (p->post_handler && !old_p->post_handler)
- old_p->post_handler = aggr_post_handler;
+ list_add_rcu(&p->list, &ap->list);
+ if (p->post_handler && !ap->post_handler)
+ ap->post_handler = aggr_post_handler;
+
+ if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
+ ap->flags &= ~KPROBE_FLAG_DISABLED;
+ if (!kprobes_all_disarmed)
+ /* Arm the breakpoint again. */
+ arch_arm_kprobe(ap);
+ }
return 0;
}
@@ -544,6 +552,7 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
copy_kprobe(p, ap);
flush_insn_slot(ap);
ap->addr = p->addr;
+ ap->flags = p->flags;
ap->pre_handler = aggr_pre_handler;
ap->fault_handler = aggr_fault_handler;
/* We don't care the kprobe which has gone. */
@@ -566,44 +575,59 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
struct kprobe *p)
{
int ret = 0;
- struct kprobe *ap;
+ struct kprobe *ap = old_p;
- if (kprobe_gone(old_p)) {
+ if (old_p->pre_handler != aggr_pre_handler) {
+ /* If old_p is not an aggr_probe, create new aggr_kprobe. */
+ ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
+ if (!ap)
+ return -ENOMEM;
+ add_aggr_kprobe(ap, old_p);
+ }
+
+ if (kprobe_gone(ap)) {
/*
* Attempting to insert new probe at the same location that
* had a probe in the module vaddr area which already
* freed. So, the instruction slot has already been
* released. We need a new slot for the new probe.
*/
- ret = arch_prepare_kprobe(old_p);
+ ret = arch_prepare_kprobe(ap);
if (ret)
+ /*
+ * Even if fail to allocate new slot, don't need to
+ * free aggr_probe. It will be used next time, or
+ * freed by unregister_kprobe.
+ */
return ret;
- }
- if (old_p->pre_handler == aggr_pre_handler) {
- copy_kprobe(old_p, p);
- ret = add_new_kprobe(old_p, p);
- ap = old_p;
- } else {
- ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
- if (!ap) {
- if (kprobe_gone(old_p))
- arch_remove_kprobe(old_p);
- return -ENOMEM;
- }
- add_aggr_kprobe(ap, old_p);
- copy_kprobe(ap, p);
- ret = add_new_kprobe(ap, p);
- }
- if (kprobe_gone(old_p)) {
+
/*
- * If the old_p has gone, its breakpoint has been disarmed.
- * We have to arm it again after preparing real kprobes.
+ * Clear gone flag to prevent allocating new slot again, and
+ * set disabled flag because it is not armed yet.
*/
- ap->flags &= ~KPROBE_FLAG_GONE;
- if (kprobe_enabled)
- arch_arm_kprobe(ap);
+ ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
+ | KPROBE_FLAG_DISABLED;
}
- return ret;
+
+ copy_kprobe(ap, p);
+ return add_new_kprobe(ap, p);
+}
+
+/* Try to disable aggr_kprobe, and return 1 if it succeeded. */
+static int __kprobes try_to_disable_aggr_kprobe(struct kprobe *p)
+{
+ struct kprobe *kp;
+
+ list_for_each_entry_rcu(kp, &p->list, list) {
+ if (!kprobe_disabled(kp))
+ /*
+ * There is an active probe on the list.
+ * We can't disable aggr_kprobe.
+ */
+ return 0;
+ }
+ p->flags |= KPROBE_FLAG_DISABLED;
+ return 1;
}
static int __kprobes in_kprobes_functions(unsigned long addr)
@@ -664,7 +688,9 @@ int __kprobes register_kprobe(struct kprobe *p)
return -EINVAL;
}
- p->flags = 0;
+ /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
+ p->flags &= KPROBE_FLAG_DISABLED;
+
/*
* Check if are we probing a module.
*/
@@ -709,7 +735,7 @@ int __kprobes register_kprobe(struct kprobe *p)
hlist_add_head_rcu(&p->hlist,
&kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
- if (kprobe_enabled)
+ if (!kprobes_all_disarmed && !kprobe_disabled(p))
arch_arm_kprobe(p);
out_unlock_text:
@@ -722,26 +748,39 @@ out:
return ret;
}
+EXPORT_SYMBOL_GPL(register_kprobe);
-/*
- * Unregister a kprobe without a scheduler synchronization.
- */
-static int __kprobes __unregister_kprobe_top(struct kprobe *p)
+/* Check passed kprobe is valid and return kprobe in kprobe_table. */
+static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
{
struct kprobe *old_p, *list_p;
old_p = get_kprobe(p->addr);
if (unlikely(!old_p))
- return -EINVAL;
+ return NULL;
if (p != old_p) {
list_for_each_entry_rcu(list_p, &old_p->list, list)
if (list_p == p)
/* kprobe p is a valid probe */
- goto valid_p;
- return -EINVAL;
+ goto valid;
+ return NULL;
}
-valid_p:
+valid:
+ return old_p;
+}
+
+/*
+ * Unregister a kprobe without a scheduler synchronization.
+ */
+static int __kprobes __unregister_kprobe_top(struct kprobe *p)
+{
+ struct kprobe *old_p, *list_p;
+
+ old_p = __get_valid_kprobe(p);
+ if (old_p == NULL)
+ return -EINVAL;
+
if (old_p == p ||
(old_p->pre_handler == aggr_pre_handler &&
list_is_singular(&old_p->list))) {
@@ -750,7 +789,7 @@ valid_p:
* enabled and not gone - otherwise, the breakpoint would
* already have been removed. We save on flushing icache.
*/
- if (kprobe_enabled && !kprobe_gone(old_p)) {
+ if (!kprobes_all_disarmed && !kprobe_disabled(old_p)) {
mutex_lock(&text_mutex);
arch_disarm_kprobe(p);
mutex_unlock(&text_mutex);
@@ -768,6 +807,11 @@ valid_p:
}
noclean:
list_del_rcu(&p->list);
+ if (!kprobe_disabled(old_p)) {
+ try_to_disable_aggr_kprobe(old_p);
+ if (!kprobes_all_disarmed && kprobe_disabled(old_p))
+ arch_disarm_kprobe(old_p);
+ }
}
return 0;
}
@@ -803,11 +847,13 @@ int __kprobes register_kprobes(struct kprobe **kps, int num)
}
return ret;
}
+EXPORT_SYMBOL_GPL(register_kprobes);
void __kprobes unregister_kprobe(struct kprobe *p)
{
unregister_kprobes(&p, 1);
}
+EXPORT_SYMBOL_GPL(unregister_kprobe);
void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
@@ -826,6 +872,7 @@ void __kprobes unregister_kprobes(struct kprobe **kps, int num)
if (kps[i]->addr)
__unregister_kprobe_bottom(kps[i]);
}
+EXPORT_SYMBOL_GPL(unregister_kprobes);
static struct notifier_block kprobe_exceptions_nb = {
.notifier_call = kprobe_exceptions_notify,
@@ -865,16 +912,19 @@ int __kprobes register_jprobes(struct jprobe **jps, int num)
}
return ret;
}
+EXPORT_SYMBOL_GPL(register_jprobes);
int __kprobes register_jprobe(struct jprobe *jp)
{
return register_jprobes(&jp, 1);
}
+EXPORT_SYMBOL_GPL(register_jprobe);
void __kprobes unregister_jprobe(struct jprobe *jp)
{
unregister_jprobes(&jp, 1);
}
+EXPORT_SYMBOL_GPL(unregister_jprobe);
void __kprobes unregister_jprobes(struct jprobe **jps, int num)
{
@@ -894,6 +944,7 @@ void __kprobes unregister_jprobes(struct jprobe **jps, int num)
__unregister_kprobe_bottom(&jps[i]->kp);
}
}
+EXPORT_SYMBOL_GPL(unregister_jprobes);
#ifdef CONFIG_KRETPROBES
/*
@@ -987,6 +1038,7 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
free_rp_inst(rp);
return ret;
}
+EXPORT_SYMBOL_GPL(register_kretprobe);
int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
@@ -1004,11 +1056,13 @@ int __kprobes register_kretprobes(struct kretprobe **rps, int num)
}
return ret;
}
+EXPORT_SYMBOL_GPL(register_kretprobes);
void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
unregister_kretprobes(&rp, 1);
}
+EXPORT_SYMBOL_GPL(unregister_kretprobe);
void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
@@ -1030,24 +1084,30 @@ void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
}
}
}
+EXPORT_SYMBOL_GPL(unregister_kretprobes);
#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
return -ENOSYS;
}
+EXPORT_SYMBOL_GPL(register_kretprobe);
int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
return -ENOSYS;
}
+EXPORT_SYMBOL_GPL(register_kretprobes);
+
void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}
+EXPORT_SYMBOL_GPL(unregister_kretprobe);
void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}
+EXPORT_SYMBOL_GPL(unregister_kretprobes);
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
struct pt_regs *regs)
@@ -1061,6 +1121,7 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
static void __kprobes kill_kprobe(struct kprobe *p)
{
struct kprobe *kp;
+
p->flags |= KPROBE_FLAG_GONE;
if (p->pre_handler == aggr_pre_handler) {
/*
@@ -1173,8 +1234,8 @@ static int __init init_kprobes(void)
}
}
- /* By default, kprobes are enabled */
- kprobe_enabled = true;
+ /* By default, kprobes are armed */
+ kprobes_all_disarmed = false;
err = arch_init_kprobes();
if (!err)
@@ -1202,12 +1263,18 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
else
kprobe_type = "k";
if (sym)
- seq_printf(pi, "%p %s %s+0x%x %s %s\n", p->addr, kprobe_type,
- sym, offset, (modname ? modname : " "),
- (kprobe_gone(p) ? "[GONE]" : ""));
+ seq_printf(pi, "%p %s %s+0x%x %s %s%s\n",
+ p->addr, kprobe_type, sym, offset,
+ (modname ? modname : " "),
+ (kprobe_gone(p) ? "[GONE]" : ""),
+ ((kprobe_disabled(p) && !kprobe_gone(p)) ?
+ "[DISABLED]" : ""));
else
- seq_printf(pi, "%p %s %p %s\n", p->addr, kprobe_type, p->addr,
- (kprobe_gone(p) ? "[GONE]" : ""));
+ seq_printf(pi, "%p %s %p %s%s\n",
+ p->addr, kprobe_type, p->addr,
+ (kprobe_gone(p) ? "[GONE]" : ""),
+ ((kprobe_disabled(p) && !kprobe_gone(p)) ?
+ "[DISABLED]" : ""));
}
static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
@@ -1272,7 +1339,72 @@ static struct file_operations debugfs_kprobes_operations = {
.release = seq_release,
};
-static void __kprobes enable_all_kprobes(void)
+/* Disable one kprobe */
+int __kprobes disable_kprobe(struct kprobe *kp)
+{
+ int ret = 0;
+ struct kprobe *p;
+
+ mutex_lock(&kprobe_mutex);
+
+ /* Check whether specified probe is valid. */
+ p = __get_valid_kprobe(kp);
+ if (unlikely(p == NULL)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* If the probe is already disabled (or gone), just return */
+ if (kprobe_disabled(kp))
+ goto out;
+
+ kp->flags |= KPROBE_FLAG_DISABLED;
+ if (p != kp)
+ /* When kp != p, p is always enabled. */
+ try_to_disable_aggr_kprobe(p);
+
+ if (!kprobes_all_disarmed && kprobe_disabled(p))
+ arch_disarm_kprobe(p);
+out:
+ mutex_unlock(&kprobe_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(disable_kprobe);
+
+/* Enable one kprobe */
+int __kprobes enable_kprobe(struct kprobe *kp)
+{
+ int ret = 0;
+ struct kprobe *p;
+
+ mutex_lock(&kprobe_mutex);
+
+ /* Check whether specified probe is valid. */
+ p = __get_valid_kprobe(kp);
+ if (unlikely(p == NULL)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (kprobe_gone(kp)) {
+ /* This kprobe has gone, we can't enable it. */
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!kprobes_all_disarmed && kprobe_disabled(p))
+ arch_arm_kprobe(p);
+
+ p->flags &= ~KPROBE_FLAG_DISABLED;
+ if (p != kp)
+ kp->flags &= ~KPROBE_FLAG_DISABLED;
+out:
+ mutex_unlock(&kprobe_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(enable_kprobe);
+
+static void __kprobes arm_all_kprobes(void)
{
struct hlist_head *head;
struct hlist_node *node;
@@ -1281,20 +1413,20 @@ static void __kprobes enable_all_kprobes(void)
mutex_lock(&kprobe_mutex);
- /* If kprobes are already enabled, just return */
- if (kprobe_enabled)
+ /* If kprobes are armed, just return */
+ if (!kprobes_all_disarmed)
goto already_enabled;
mutex_lock(&text_mutex);
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
hlist_for_each_entry_rcu(p, node, head, hlist)
- if (!kprobe_gone(p))
+ if (!kprobe_disabled(p))
arch_arm_kprobe(p);
}
mutex_unlock(&text_mutex);
- kprobe_enabled = true;
+ kprobes_all_disarmed = false;
printk(KERN_INFO "Kprobes globally enabled\n");
already_enabled:
@@ -1302,7 +1434,7 @@ already_enabled:
return;
}
-static void __kprobes disable_all_kprobes(void)
+static void __kprobes disarm_all_kprobes(void)
{
struct hlist_head *head;
struct hlist_node *node;
@@ -1311,17 +1443,17 @@ static void __kprobes disable_all_kprobes(void)
mutex_lock(&kprobe_mutex);
- /* If kprobes are already disabled, just return */
- if (!kprobe_enabled)
+ /* If kprobes are already disarmed, just return */
+ if (kprobes_all_disarmed)
goto already_disabled;
- kprobe_enabled = false;
+ kprobes_all_disarmed = true;
printk(KERN_INFO "Kprobes globally disabled\n");
mutex_lock(&text_mutex);
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
hlist_for_each_entry_rcu(p, node, head, hlist) {
- if (!arch_trampoline_kprobe(p) && !kprobe_gone(p))
+ if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
arch_disarm_kprobe(p);
}
}
@@ -1347,7 +1479,7 @@ static ssize_t read_enabled_file_bool(struct file *file,
{
char buf[3];
- if (kprobe_enabled)
+ if (!kprobes_all_disarmed)
buf[0] = '1';
else
buf[0] = '0';
@@ -1370,12 +1502,12 @@ static ssize_t write_enabled_file_bool(struct file *file,
case 'y':
case 'Y':
case '1':
- enable_all_kprobes();
+ arm_all_kprobes();
break;
case 'n':
case 'N':
case '0':
- disable_all_kprobes();
+ disarm_all_kprobes();
break;
}
@@ -1418,16 +1550,5 @@ late_initcall(debugfs_kprobe_init);
module_init(init_kprobes);
-EXPORT_SYMBOL_GPL(register_kprobe);
-EXPORT_SYMBOL_GPL(unregister_kprobe);
-EXPORT_SYMBOL_GPL(register_kprobes);
-EXPORT_SYMBOL_GPL(unregister_kprobes);
-EXPORT_SYMBOL_GPL(register_jprobe);
-EXPORT_SYMBOL_GPL(unregister_jprobe);
-EXPORT_SYMBOL_GPL(register_jprobes);
-EXPORT_SYMBOL_GPL(unregister_jprobes);
+/* defined in arch/.../kernel/kprobes.c */
EXPORT_SYMBOL_GPL(jprobe_return);
-EXPORT_SYMBOL_GPL(register_kretprobe);
-EXPORT_SYMBOL_GPL(unregister_kretprobe);
-EXPORT_SYMBOL_GPL(register_kretprobes);
-EXPORT_SYMBOL_GPL(unregister_kretprobes);
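
For illustration, a minimal sketch (not part of the patch) of how a module might use the per-probe arming interface added above; only register_kprobe(), enable_kprobe(), disable_kprobe(), unregister_kprobe() and KPROBE_FLAG_DISABLED come from this change, while the probed symbol and the handler are made-up assumptions:

	#include <linux/kprobes.h>
	#include <linux/module.h>

	/* Hypothetical handler and target symbol; only the flag and the
	 * enable/disable calls are the API introduced by the hunks above. */
	static int my_pre(struct kprobe *p, struct pt_regs *regs)
	{
		pr_info("hit %s\n", p->symbol_name);
		return 0;
	}

	static struct kprobe my_kp = {
		.symbol_name = "do_fork",            /* assumed target */
		.pre_handler = my_pre,
		.flags       = KPROBE_FLAG_DISABLED, /* registered, not armed */
	};

	static int __init my_init(void)
	{
		int ret = register_kprobe(&my_kp);   /* inserted, stays disarmed */
		if (ret)
			return ret;
		return enable_kprobe(&my_kp);        /* arm it when ready */
	}

	static void __exit my_exit(void)
	{
		disable_kprobe(&my_kp);              /* disarm again (optional) */
		unregister_kprobe(&my_kp);
	}

	module_init(my_init);
	module_exit(my_exit);
	MODULE_LICENSE("GPL");
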
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 4fbc456f393..84bbadd4d02 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -110,7 +110,7 @@ static void create_kthread(struct kthread_create_info *create)
*/
sched_setscheduler(create->result, SCHED_NORMAL, &param);
set_user_nice(create->result, KTHREAD_NICE_LEVEL);
- set_cpus_allowed_ptr(create->result, CPU_MASK_ALL_PTR);
+ set_cpus_allowed_ptr(create->result, cpu_all_mask);
}
complete(&create->done);
}
@@ -240,7 +240,7 @@ int kthreadd(void *unused)
set_task_comm(tsk, "kthreadd");
ignore_signals(tsk);
set_user_nice(tsk, KTHREAD_NICE_LEVEL);
- set_cpus_allowed_ptr(tsk, CPU_MASK_ALL_PTR);
+ set_cpus_allowed_ptr(tsk, cpu_all_mask);
current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;
diff --git a/kernel/latencytop.c b/kernel/latencytop.c
index 449db466bdb..ca07c5c0c91 100644
--- a/kernel/latencytop.c
+++ b/kernel/latencytop.c
@@ -9,6 +9,44 @@
* as published by the Free Software Foundation; version 2
* of the License.
*/
+
+/*
+ * CONFIG_LATENCYTOP enables a kernel latency tracking infrastructure that is
+ * used by the "latencytop" userspace tool. The latency that is tracked is not
+ * the 'traditional' interrupt latency (which is primarily caused by something
+ * else consuming CPU), but instead, it is the latency an application encounters
+ * because the kernel sleeps on its behalf for various reasons.
+ *
+ * This code tracks 2 levels of statistics:
+ * 1) System level latency
+ * 2) Per process latency
+ *
+ * The latency is stored in fixed-size data structures in an accumulated form;
+ * if the "same" latency cause is hit twice, this will be tracked as one entry
+ * in the data structure. The count, the total accumulated latency and the maximum
+ * latency are all tracked in this data structure. When the fixed-size structure is
+ * full, no new causes are tracked until the buffer is flushed by writing to
+ * the /proc file; the userspace tool does this on a regular basis.
+ *
+ * A latency cause is identified by a stringified backtrace at the point that
+ * the scheduler gets invoked. The userland tool will use this string to
+ * identify the cause of the latency in human readable form.
+ *
+ * The information is exported via /proc/latency_stats and /proc/<pid>/latency.
+ * These files look like this:
+ *
+ * Latency Top version : v0.1
+ * 70 59433 4897 i915_irq_wait drm_ioctl vfs_ioctl do_vfs_ioctl sys_ioctl
+ * | | | |
+ * | | | +----> the stringified backtrace
+ * | | +---------> The maximum latency for this entry in microseconds
+ * | +--------------> The accumulated latency for this entry (microseconds)
+ * +-------------------> The number of times this entry is hit
+ *
+ * (note: the average latency is the accumulated latency divided by the number
+ * of times)
+ */
+
#include <linux/latencytop.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
@@ -72,7 +110,7 @@ account_global_scheduler_latency(struct task_struct *tsk, struct latency_record
firstnonnull = i;
continue;
}
- for (q = 0 ; q < LT_BACKTRACEDEPTH ; q++) {
+ for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
unsigned long record = lat->backtrace[q];
if (latency_record[i].backtrace[q] != record) {
@@ -101,31 +139,52 @@ account_global_scheduler_latency(struct task_struct *tsk, struct latency_record
memcpy(&latency_record[i], lat, sizeof(struct latency_record));
}
-static inline void store_stacktrace(struct task_struct *tsk, struct latency_record *lat)
+/*
+ * Helper to store a backtrace into a latency record entry
+ */
+static inline void store_stacktrace(struct task_struct *tsk,
+ struct latency_record *lat)
{
struct stack_trace trace;
memset(&trace, 0, sizeof(trace));
trace.max_entries = LT_BACKTRACEDEPTH;
trace.entries = &lat->backtrace[0];
- trace.skip = 0;
save_stack_trace_tsk(tsk, &trace);
}
+/**
+ * __account_scheduler_latency - record a latency occurrence
+ * @tsk - the task struct of the task hitting the latency
+ * @usecs - the duration of the latency in microseconds
+ * @inter - 1 if the sleep was interruptible, 0 if uninterruptible
+ *
+ * This function is the main entry point for recording latency entries
+ * as called by the scheduler.
+ *
+ * This function has a few special cases to deal with normal 'non-latency'
+ * sleeps: specifically, interruptible sleep longer than 5 msec is skipped
+ * since this usually is caused by waiting for events via select() and co.
+ *
+ * Negative latencies (caused by time going backwards) are also explicitly
+ * skipped.
+ */
void __sched
-account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
+__account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
{
unsigned long flags;
int i, q;
struct latency_record lat;
- if (!latencytop_enabled)
- return;
-
/* Long interruptible waits are generally user requested... */
if (inter && usecs > 5000)
return;
+ /* Negative sleeps are time going backwards */
+ /* Zero-time sleeps are non-interesting */
+ if (usecs <= 0)
+ return;
+
memset(&lat, 0, sizeof(lat));
lat.count = 1;
lat.time = usecs;
@@ -143,12 +202,12 @@ account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
if (tsk->latency_record_count >= LT_SAVECOUNT)
goto out_unlock;
- for (i = 0; i < LT_SAVECOUNT ; i++) {
+ for (i = 0; i < LT_SAVECOUNT; i++) {
struct latency_record *mylat;
int same = 1;
mylat = &tsk->latency_record[i];
- for (q = 0 ; q < LT_BACKTRACEDEPTH ; q++) {
+ for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
unsigned long record = lat.backtrace[q];
if (mylat->backtrace[q] != record) {
@@ -186,7 +245,7 @@ static int lstats_show(struct seq_file *m, void *v)
for (i = 0; i < MAXLR; i++) {
if (latency_record[i].backtrace[0]) {
int q;
- seq_printf(m, "%i %li %li ",
+ seq_printf(m, "%i %lu %lu ",
latency_record[i].count,
latency_record[i].time,
latency_record[i].max);
@@ -223,7 +282,7 @@ static int lstats_open(struct inode *inode, struct file *filp)
return single_open(filp, lstats_show, NULL);
}
-static struct file_operations lstats_fops = {
+static const struct file_operations lstats_fops = {
.open = lstats_open,
.read = seq_read,
.write = lstats_write,
@@ -236,4 +295,4 @@ static int __init init_lstats_procfs(void)
proc_create("latency_stats", 0644, NULL, &lstats_fops);
return 0;
}
-__initcall(init_lstats_procfs);
+device_initcall(init_lstats_procfs);
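
The latencytop hunks above drop the latencytop_enabled test from the record path and rename the entry point; presumably the fast-path check moves into an inline wrapper in include/linux/latencytop.h, which is outside this kernel/ diff. A hedged sketch of what such a wrapper would look like:

	#include <linux/compiler.h>

	struct task_struct;

	extern int latencytop_enabled;
	extern void __account_scheduler_latency(struct task_struct *task,
						int usecs, int inter);

	/* Assumed header-side wrapper: keep the scheduler fast path to a
	 * single flag test and only call the renamed function when the
	 * feature is switched on. */
	static inline void
	account_scheduler_latency(struct task_struct *task, int usecs, int inter)
	{
		if (unlikely(latencytop_enabled))
			__account_scheduler_latency(task, usecs, inter);
	}
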
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 71b567f5281..b0f01186696 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -793,6 +793,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
printk("turning off the locking correctness validator.\n");
+ dump_stack();
return NULL;
}
class = lock_classes + nr_lock_classes++;
@@ -856,6 +857,7 @@ static struct lock_list *alloc_list_entry(void)
printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
printk("turning off the locking correctness validator.\n");
+ dump_stack();
return NULL;
}
return list_entries + nr_list_entries++;
@@ -1682,6 +1684,7 @@ cache_hit:
printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
printk("turning off the locking correctness validator.\n");
+ dump_stack();
return 0;
}
chain = lock_chains + nr_lock_chains++;
@@ -2255,7 +2258,7 @@ void trace_softirqs_off(unsigned long ip)
debug_atomic_inc(&redundant_softirqs_off);
}
-void lockdep_trace_alloc(gfp_t gfp_mask)
+static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags)
{
struct task_struct *curr = current;
@@ -2274,12 +2277,29 @@ void lockdep_trace_alloc(gfp_t gfp_mask)
if (!(gfp_mask & __GFP_FS))
return;
- if (DEBUG_LOCKS_WARN_ON(irqs_disabled()))
+ if (DEBUG_LOCKS_WARN_ON(irqs_disabled_flags(flags)))
return;
mark_held_locks(curr, RECLAIM_FS);
}
+static void check_flags(unsigned long flags);
+
+void lockdep_trace_alloc(gfp_t gfp_mask)
+{
+ unsigned long flags;
+
+ if (unlikely(current->lockdep_recursion))
+ return;
+
+ raw_local_irq_save(flags);
+ check_flags(flags);
+ current->lockdep_recursion = 1;
+ __lockdep_trace_alloc(gfp_mask, flags);
+ current->lockdep_recursion = 0;
+ raw_local_irq_restore(flags);
+}
+
static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
{
/*
@@ -2524,6 +2544,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
debug_locks_off();
printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");
printk("turning off the locking correctness validator.\n");
+ dump_stack();
return 0;
}
@@ -2620,6 +2641,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
debug_locks_off();
printk("BUG: MAX_LOCK_DEPTH too low!\n");
printk("turning off the locking correctness validator.\n");
+ dump_stack();
return 0;
}
diff --git a/kernel/module.c b/kernel/module.c
index 7fa134e0cc2..05f014efa32 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -68,7 +68,8 @@
/* List of modules, protected by module_mutex or preempt_disable
* (delete uses stop_machine/add uses RCU list operations). */
-static DEFINE_MUTEX(module_mutex);
+DEFINE_MUTEX(module_mutex);
+EXPORT_SYMBOL_GPL(module_mutex);
static LIST_HEAD(modules);
/* Waiting for a module to finish initializing? */
@@ -76,7 +77,7 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq);
static BLOCKING_NOTIFIER_HEAD(module_notify_list);
-/* Bounds of module allocation, for speeding __module_text_address */
+/* Bounds of module allocation, for speeding __module_address */
static unsigned long module_addr_min = -1UL, module_addr_max = 0;
int register_module_notifier(struct notifier_block * nb)
@@ -186,17 +187,6 @@ extern const unsigned long __start___kcrctab_unused_gpl[];
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif
-struct symsearch {
- const struct kernel_symbol *start, *stop;
- const unsigned long *crcs;
- enum {
- NOT_GPL_ONLY,
- GPL_ONLY,
- WILL_BE_GPL_ONLY,
- } licence;
- bool unused;
-};
-
static bool each_symbol_in_section(const struct symsearch *arr,
unsigned int arrsize,
struct module *owner,
@@ -217,10 +207,8 @@ static bool each_symbol_in_section(const struct symsearch *arr,
}
/* Returns true as soon as fn returns true, otherwise false. */
-static bool each_symbol(bool (*fn)(const struct symsearch *arr,
- struct module *owner,
- unsigned int symnum, void *data),
- void *data)
+bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
+ unsigned int symnum, void *data), void *data)
{
struct module *mod;
const struct symsearch arr[] = {
@@ -273,6 +261,7 @@ static bool each_symbol(bool (*fn)(const struct symsearch *arr,
}
return false;
}
+EXPORT_SYMBOL_GPL(each_symbol);
struct find_symbol_arg {
/* Input */
@@ -283,7 +272,7 @@ struct find_symbol_arg {
/* Output */
struct module *owner;
const unsigned long *crc;
- unsigned long value;
+ const struct kernel_symbol *sym;
};
static bool find_symbol_in_section(const struct symsearch *syms,
@@ -324,17 +313,17 @@ static bool find_symbol_in_section(const struct symsearch *syms,
fsa->owner = owner;
fsa->crc = symversion(syms->crcs, symnum);
- fsa->value = syms->start[symnum].value;
+ fsa->sym = &syms->start[symnum];
return true;
}
-/* Find a symbol, return value, (optional) crc and (optional) module
- * which owns it */
-static unsigned long find_symbol(const char *name,
- struct module **owner,
- const unsigned long **crc,
- bool gplok,
- bool warn)
+/* Find a symbol and return it, along with (optional) crc and
+ * (optional) module which owns it */
+const struct kernel_symbol *find_symbol(const char *name,
+ struct module **owner,
+ const unsigned long **crc,
+ bool gplok,
+ bool warn)
{
struct find_symbol_arg fsa;
@@ -347,15 +336,16 @@ static unsigned long find_symbol(const char *name,
*owner = fsa.owner;
if (crc)
*crc = fsa.crc;
- return fsa.value;
+ return fsa.sym;
}
DEBUGP("Failed to find symbol %s\n", name);
- return -ENOENT;
+ return NULL;
}
+EXPORT_SYMBOL_GPL(find_symbol);
/* Search for module by name: must hold module_mutex. */
-static struct module *find_module(const char *name)
+struct module *find_module(const char *name)
{
struct module *mod;
@@ -365,6 +355,7 @@ static struct module *find_module(const char *name)
}
return NULL;
}
+EXPORT_SYMBOL_GPL(find_module);
#ifdef CONFIG_SMP
@@ -641,7 +632,7 @@ static int already_uses(struct module *a, struct module *b)
}
/* Module a uses b */
-static int use_module(struct module *a, struct module *b)
+int use_module(struct module *a, struct module *b)
{
struct module_use *use;
int no_warn, err;
@@ -674,6 +665,7 @@ static int use_module(struct module *a, struct module *b)
no_warn = sysfs_create_link(b->holders_dir, &a->mkobj.kobj, a->name);
return 1;
}
+EXPORT_SYMBOL_GPL(use_module);
/* Clear the unload stuff of the module. */
static void module_unload_free(struct module *mod)
@@ -856,7 +848,7 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
mutex_lock(&module_mutex);
/* Store the name of the last unloaded module for diagnostic purposes */
strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));
- unregister_dynamic_debug_module(mod->name);
+ ddebug_remove_module(mod->name);
free_module(mod);
out:
@@ -894,7 +886,7 @@ void __symbol_put(const char *symbol)
struct module *owner;
preempt_disable();
- if (IS_ERR_VALUE(find_symbol(symbol, &owner, NULL, true, false)))
+ if (!find_symbol(symbol, &owner, NULL, true, false))
BUG();
module_put(owner);
preempt_enable();
@@ -908,8 +900,10 @@ void symbol_put_addr(void *addr)
if (core_kernel_text((unsigned long)addr))
return;
- if (!(modaddr = module_text_address((unsigned long)addr)))
- BUG();
+ /* module_text_address is safe here: we're supposed to have a reference
+ * to the module from symbol_get, so it can't go away. */
+ modaddr = __module_text_address((unsigned long)addr);
+ BUG_ON(!modaddr);
module_put(modaddr);
}
EXPORT_SYMBOL_GPL(symbol_put_addr);
@@ -949,10 +943,11 @@ static inline void module_unload_free(struct module *mod)
{
}
-static inline int use_module(struct module *a, struct module *b)
+int use_module(struct module *a, struct module *b)
{
return strong_try_module_get(b) == 0;
}
+EXPORT_SYMBOL_GPL(use_module);
static inline void module_unload_init(struct module *mod)
{
@@ -995,12 +990,12 @@ static struct module_attribute *modinfo_attrs[] = {
static const char vermagic[] = VERMAGIC_STRING;
-static int try_to_force_load(struct module *mod, const char *symname)
+static int try_to_force_load(struct module *mod, const char *reason)
{
#ifdef CONFIG_MODULE_FORCE_LOAD
if (!test_taint(TAINT_FORCED_MODULE))
- printk("%s: no version for \"%s\" found: kernel tainted.\n",
- mod->name, symname);
+ printk(KERN_WARNING "%s: %s: kernel tainted.\n",
+ mod->name, reason);
add_taint_module(mod, TAINT_FORCED_MODULE);
return 0;
#else
@@ -1057,9 +1052,9 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
{
const unsigned long *crc;
- if (IS_ERR_VALUE(find_symbol("struct_module", NULL, &crc, true, false)))
+ if (!find_symbol("module_layout", NULL, &crc, true, false))
BUG();
- return check_version(sechdrs, versindex, "struct_module", mod, crc);
+ return check_version(sechdrs, versindex, "module_layout", mod, crc);
}
/* First part is kernel version, which we ignore if module has crcs. */
@@ -1098,25 +1093,25 @@ static inline int same_magic(const char *amagic, const char *bmagic,
/* Resolve a symbol for this module. I.e. if we find one, record usage.
Must be holding module_mutex. */
-static unsigned long resolve_symbol(Elf_Shdr *sechdrs,
- unsigned int versindex,
- const char *name,
- struct module *mod)
+static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
+ unsigned int versindex,
+ const char *name,
+ struct module *mod)
{
struct module *owner;
- unsigned long ret;
+ const struct kernel_symbol *sym;
const unsigned long *crc;
- ret = find_symbol(name, &owner, &crc,
+ sym = find_symbol(name, &owner, &crc,
!(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
- if (!IS_ERR_VALUE(ret)) {
- /* use_module can fail due to OOM,
- or module initialization or unloading */
+ /* use_module can fail due to OOM,
+ or module initialization or unloading */
+ if (sym) {
if (!check_version(sechdrs, versindex, name, mod, crc) ||
!use_module(mod, owner))
- ret = -EINVAL;
+ sym = NULL;
}
- return ret;
+ return sym;
}
/*
@@ -1491,6 +1486,9 @@ static void free_module(struct module *mod)
/* Module unload stuff */
module_unload_free(mod);
+ /* Free any allocated parameters. */
+ destroy_params(mod->kp, mod->num_kp);
+
/* release any pointers to mcount in this module */
ftrace_release(mod->module_core, mod->core_size);
@@ -1513,17 +1511,15 @@ static void free_module(struct module *mod)
void *__symbol_get(const char *symbol)
{
struct module *owner;
- unsigned long value;
+ const struct kernel_symbol *sym;
preempt_disable();
- value = find_symbol(symbol, &owner, NULL, true, true);
- if (IS_ERR_VALUE(value))
- value = 0;
- else if (strong_try_module_get(owner))
- value = 0;
+ sym = find_symbol(symbol, &owner, NULL, true, true);
+ if (sym && strong_try_module_get(owner))
+ sym = NULL;
preempt_enable();
- return (void *)value;
+ return sym ? (void *)sym->value : NULL;
}
EXPORT_SYMBOL_GPL(__symbol_get);
@@ -1551,8 +1547,7 @@ static int verify_export_symbols(struct module *mod)
for (i = 0; i < ARRAY_SIZE(arr); i++) {
for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
- if (!IS_ERR_VALUE(find_symbol(s->name, &owner,
- NULL, true, false))) {
+ if (find_symbol(s->name, &owner, NULL, true, false)) {
printk(KERN_ERR
"%s: exports duplicate symbol %s"
" (owned by %s)\n",
@@ -1576,6 +1571,7 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
unsigned long secbase;
unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
int ret = 0;
+ const struct kernel_symbol *ksym;
for (i = 1; i < n; i++) {
switch (sym[i].st_shndx) {
@@ -1595,13 +1591,14 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
break;
case SHN_UNDEF:
- sym[i].st_value
- = resolve_symbol(sechdrs, versindex,
- strtab + sym[i].st_name, mod);
-
+ ksym = resolve_symbol(sechdrs, versindex,
+ strtab + sym[i].st_name, mod);
/* Ok if resolved. */
- if (!IS_ERR_VALUE(sym[i].st_value))
+ if (ksym) {
+ sym[i].st_value = ksym->value;
break;
+ }
+
/* Ok if weak. */
if (ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
break;
@@ -1676,8 +1673,7 @@ static void layout_sections(struct module *mod,
if ((s->sh_flags & masks[m][0]) != masks[m][0]
|| (s->sh_flags & masks[m][1])
|| s->sh_entsize != ~0UL
- || strncmp(secstrings + s->sh_name,
- ".init", 5) == 0)
+ || strstarts(secstrings + s->sh_name, ".init"))
continue;
s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
DEBUGP("\t%s\n", secstrings + s->sh_name);
@@ -1694,8 +1690,7 @@ static void layout_sections(struct module *mod,
if ((s->sh_flags & masks[m][0]) != masks[m][0]
|| (s->sh_flags & masks[m][1])
|| s->sh_entsize != ~0UL
- || strncmp(secstrings + s->sh_name,
- ".init", 5) != 0)
+ || !strstarts(secstrings + s->sh_name, ".init"))
continue;
s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
| INIT_OFFSET_MASK);
@@ -1828,8 +1823,7 @@ static char elf_type(const Elf_Sym *sym,
else
return 'b';
}
- if (strncmp(secstrings + sechdrs[sym->st_shndx].sh_name,
- ".debug", strlen(".debug")) == 0)
+ if (strstarts(secstrings + sechdrs[sym->st_shndx].sh_name, ".debug"))
return 'n';
return '?';
}
@@ -1861,19 +1855,13 @@ static inline void add_kallsyms(struct module *mod,
}
#endif /* CONFIG_KALLSYMS */
-static void dynamic_printk_setup(struct mod_debug *debug, unsigned int num)
+static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
{
-#ifdef CONFIG_DYNAMIC_PRINTK_DEBUG
- unsigned int i;
-
- for (i = 0; i < num; i++) {
- register_dynamic_debug_module(debug[i].modname,
- debug[i].type,
- debug[i].logical_modname,
- debug[i].flag_names,
- debug[i].hash, debug[i].hash2);
- }
-#endif /* CONFIG_DYNAMIC_PRINTK_DEBUG */
+#ifdef CONFIG_DYNAMIC_DEBUG
+ if (ddebug_add_module(debug, num, debug->modname))
+ printk(KERN_ERR "dynamic debug error adding module: %s\n",
+ debug->modname);
+#endif
}
static void *module_alloc_update_bounds(unsigned long size)
@@ -1904,8 +1892,7 @@ static noinline struct module *load_module(void __user *umod,
unsigned int symindex = 0;
unsigned int strindex = 0;
unsigned int modindex, versindex, infoindex, pcpuindex;
- unsigned int num_kp, num_mcount;
- struct kernel_param *kp;
+ unsigned int num_mcount;
struct module *mod;
long err = 0;
void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */
@@ -1922,12 +1909,6 @@ static noinline struct module *load_module(void __user *umod,
if (len > 64 * 1024 * 1024 || (hdr = vmalloc(len)) == NULL)
return ERR_PTR(-ENOMEM);
- /* Create stop_machine threads since the error path relies on
- * a non-failing stop_machine call. */
- err = stop_machine_create();
- if (err)
- goto free_hdr;
-
if (copy_from_user(hdr, umod, len) != 0) {
err = -EFAULT;
goto free_hdr;
@@ -1968,7 +1949,7 @@ static noinline struct module *load_module(void __user *umod,
}
#ifndef CONFIG_MODULE_UNLOAD
/* Don't load .exit sections */
- if (strncmp(secstrings+sechdrs[i].sh_name, ".exit", 5) == 0)
+ if (strstarts(secstrings+sechdrs[i].sh_name, ".exit"))
sechdrs[i].sh_flags &= ~(unsigned long)SHF_ALLOC;
#endif
}
@@ -2012,7 +1993,7 @@ static noinline struct module *load_module(void __user *umod,
modmagic = get_modinfo(sechdrs, infoindex, "vermagic");
/* This is allowed: modprobe --force will invalidate it. */
if (!modmagic) {
- err = try_to_force_load(mod, "magic");
+ err = try_to_force_load(mod, "bad vermagic");
if (err)
goto free_hdr;
} else if (!same_magic(modmagic, vermagic, versindex)) {
@@ -2150,8 +2131,8 @@ static noinline struct module *load_module(void __user *umod,
/* Now we've got everything in the final locations, we can
* find optional sections. */
- kp = section_objs(hdr, sechdrs, secstrings, "__param", sizeof(*kp),
- &num_kp);
+ mod->kp = section_objs(hdr, sechdrs, secstrings, "__param",
+ sizeof(*mod->kp), &mod->num_kp);
mod->syms = section_objs(hdr, sechdrs, secstrings, "__ksymtab",
sizeof(*mod->syms), &mod->num_syms);
mod->crcs = section_addr(hdr, sechdrs, secstrings, "__kcrctab");
@@ -2201,8 +2182,8 @@ static noinline struct module *load_module(void __user *umod,
|| (mod->num_unused_gpl_syms && !mod->unused_gpl_crcs)
#endif
) {
- printk(KERN_WARNING "%s: No versions for exported symbols.\n", mod->name);
- err = try_to_force_load(mod, "nocrc");
+ err = try_to_force_load(mod,
+ "no versions for exported symbols");
if (err)
goto cleanup;
}
@@ -2247,12 +2228,13 @@ static noinline struct module *load_module(void __user *umod,
add_kallsyms(mod, sechdrs, symindex, strindex, secstrings);
if (!mod->taints) {
- struct mod_debug *debug;
+ struct _ddebug *debug;
unsigned int num_debug;
debug = section_objs(hdr, sechdrs, secstrings, "__verbose",
sizeof(*debug), &num_debug);
- dynamic_printk_setup(debug, num_debug);
+ if (debug)
+ dynamic_debug_setup(debug, num_debug);
}
/* sechdrs[0].sh_size is always zero */
@@ -2296,11 +2278,11 @@ static noinline struct module *load_module(void __user *umod,
*/
list_add_rcu(&mod->list, &modules);
- err = parse_args(mod->name, mod->args, kp, num_kp, NULL);
+ err = parse_args(mod->name, mod->args, mod->kp, mod->num_kp, NULL);
if (err < 0)
goto unlink;
- err = mod_sysfs_setup(mod, kp, num_kp);
+ err = mod_sysfs_setup(mod, mod->kp, mod->num_kp);
if (err < 0)
goto unlink;
add_sect_attrs(mod, hdr->e_shnum, secstrings, sechdrs);
@@ -2309,12 +2291,13 @@ static noinline struct module *load_module(void __user *umod,
/* Get rid of temporary copy */
vfree(hdr);
- stop_machine_destroy();
/* Done! */
return mod;
unlink:
- stop_machine(__unlink_module, mod, NULL);
+ /* Unlink carefully: kallsyms could be walking list. */
+ list_del_rcu(&mod->list);
+ synchronize_sched();
module_arch_cleanup(mod);
cleanup:
kobject_del(&mod->mkobj.kobj);
@@ -2322,8 +2305,8 @@ static noinline struct module *load_module(void __user *umod,
ftrace_release(mod->module_core, mod->core_size);
free_unload:
module_unload_free(mod);
- free_init:
#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
+ free_init:
percpu_modfree(mod->refptr);
#endif
module_free(mod, mod->module_init);
@@ -2337,7 +2320,6 @@ static noinline struct module *load_module(void __user *umod,
kfree(args);
free_hdr:
vfree(hdr);
- stop_machine_destroy();
return ERR_PTR(err);
truncated:
@@ -2614,6 +2596,25 @@ unsigned long module_kallsyms_lookup_name(const char *name)
preempt_enable();
return ret;
}
+
+int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
+ struct module *, unsigned long),
+ void *data)
+{
+ struct module *mod;
+ unsigned int i;
+ int ret;
+
+ list_for_each_entry(mod, &modules, list) {
+ for (i = 0; i < mod->num_symtab; i++) {
+ ret = fn(data, mod->strtab + mod->symtab[i].st_name,
+ mod, mod->symtab[i].st_value);
+ if (ret != 0)
+ return ret;
+ }
+ }
+ return 0;
+}
#endif /* CONFIG_KALLSYMS */
static char *module_flags(struct module *mod, char *buf)
@@ -2749,29 +2750,31 @@ const struct exception_table_entry *search_module_extables(unsigned long addr)
}
/*
- * Is this a valid module address?
+ * is_module_address - is this address inside a module?
+ * @addr: the address to check.
+ *
+ * See is_module_text_address() if you simply want to see if the address
+ * is code (not data).
*/
-int is_module_address(unsigned long addr)
+bool is_module_address(unsigned long addr)
{
- struct module *mod;
+ bool ret;
preempt_disable();
-
- list_for_each_entry_rcu(mod, &modules, list) {
- if (within_module_core(addr, mod)) {
- preempt_enable();
- return 1;
- }
- }
-
+ ret = __module_address(addr) != NULL;
preempt_enable();
- return 0;
+ return ret;
}
-
-/* Is this a valid kernel address? */
-struct module *__module_text_address(unsigned long addr)
+/*
+ * __module_address - get the module which contains an address.
+ * @addr: the address.
+ *
+ * Must be called with preempt disabled or module mutex held so that
+ * module doesn't get freed during this.
+ */
+struct module *__module_address(unsigned long addr)
{
struct module *mod;
@@ -2779,22 +2782,51 @@ struct module *__module_text_address(unsigned long addr)
return NULL;
list_for_each_entry_rcu(mod, &modules, list)
- if (within(addr, mod->module_init, mod->init_text_size)
- || within(addr, mod->module_core, mod->core_text_size))
+ if (within_module_core(addr, mod)
+ || within_module_init(addr, mod))
return mod;
return NULL;
}
+EXPORT_SYMBOL_GPL(__module_address);
-struct module *module_text_address(unsigned long addr)
+/*
+ * is_module_text_address - is this address inside module code?
+ * @addr: the address to check.
+ *
+ * See is_module_address() if you simply want to see if the address is
+ * anywhere in a module. See kernel_text_address() for testing if an
+ * address corresponds to kernel or module code.
+ */
+bool is_module_text_address(unsigned long addr)
{
- struct module *mod;
+ bool ret;
preempt_disable();
- mod = __module_text_address(addr);
+ ret = __module_text_address(addr) != NULL;
preempt_enable();
+ return ret;
+}
+
+/*
+ * __module_text_address - get the module whose code contains an address.
+ * @addr: the address.
+ *
+ * Must be called with preempt disabled or module mutex held so that
+ * module doesn't get freed during this.
+ */
+struct module *__module_text_address(unsigned long addr)
+{
+ struct module *mod = __module_address(addr);
+ if (mod) {
+ /* Make sure it's within the text section. */
+ if (!within(addr, mod->module_init, mod->init_text_size)
+ && !within(addr, mod->module_core, mod->core_text_size))
+ mod = NULL;
+ }
return mod;
}
+EXPORT_SYMBOL_GPL(__module_text_address);
/* Don't grab lock, we're oopsing. */
void print_modules(void)
@@ -2814,9 +2846,17 @@ void print_modules(void)
}
#ifdef CONFIG_MODVERSIONS
-/* Generate the signature for struct module here, too, for modversions. */
-void struct_module(struct module *mod) { return; }
-EXPORT_SYMBOL(struct_module);
+/* Generate the signature for all relevant module structures here.
+ * If these change, we don't want to try to parse the module. */
+void module_layout(struct module *mod,
+ struct modversion_info *ver,
+ struct kernel_param *kp,
+ struct kernel_symbol *ks,
+ struct marker *marker,
+ struct tracepoint *tp)
+{
+}
+EXPORT_SYMBOL(module_layout);
#endif
#ifdef CONFIG_MARKERS
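
As a usage sketch for the module_kallsyms_on_each_symbol() iterator added above (the symbol name and the printk are illustrative assumptions; the iterator is not exported here, so a caller of this kind would be built-in code such as kprobes):

	#include <linux/kernel.h>
	#include <linux/module.h>
	#include <linux/mutex.h>
	#include <linux/string.h>

	/* Callback signature matches the iterator added above; a non-zero
	 * return value stops the walk and is propagated to the caller. */
	static int print_if_match(void *data, const char *name,
				  struct module *mod, unsigned long addr)
	{
		if (strcmp(name, (const char *)data) == 0) {
			printk(KERN_INFO "%s: found in %s at 0x%lx\n",
			       name, mod->name, addr);
			return 1;
		}
		return 0;
	}

	static void find_module_symbol(const char *sym)
	{
		/* The iterator walks the modules list unlocked, so take
		 * module_mutex (exported above) around the call. */
		mutex_lock(&module_mutex);
		module_kallsyms_on_each_symbol(print_if_match, (void *)sym);
		mutex_unlock(&module_mutex);
	}
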
diff --git a/kernel/ns_cgroup.c b/kernel/ns_cgroup.c
index 78bc3fdac0d..5aa854f9e5a 100644
--- a/kernel/ns_cgroup.c
+++ b/kernel/ns_cgroup.c
@@ -34,7 +34,7 @@ int ns_cgroup_clone(struct task_struct *task, struct pid *pid)
/*
* Rules:
- * 1. you can only enter a cgroup which is a child of your current
+ * 1. you can only enter a cgroup which is a descendant of your current
* cgroup
* 2. you can only place another process into a cgroup if
* a. you have CAP_SYS_ADMIN
@@ -45,21 +45,15 @@ int ns_cgroup_clone(struct task_struct *task, struct pid *pid)
static int ns_can_attach(struct cgroup_subsys *ss,
struct cgroup *new_cgroup, struct task_struct *task)
{
- struct cgroup *orig;
-
if (current != task) {
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (!cgroup_is_descendant(new_cgroup))
+ if (!cgroup_is_descendant(new_cgroup, current))
return -EPERM;
}
- if (atomic_read(&new_cgroup->count) != 0)
- return -EPERM;
-
- orig = task_cgroup(task, ns_subsys_id);
- if (orig && orig != new_cgroup->parent)
+ if (!cgroup_is_descendant(new_cgroup, task))
return -EPERM;
return 0;
@@ -77,7 +71,7 @@ static struct cgroup_subsys_state *ns_create(struct cgroup_subsys *ss,
if (!capable(CAP_SYS_ADMIN))
return ERR_PTR(-EPERM);
- if (!cgroup_is_descendant(cgroup))
+ if (!cgroup_is_descendant(cgroup, current))
return ERR_PTR(-EPERM);
ns_cgroup = kzalloc(sizeof(*ns_cgroup), GFP_KERNEL);
diff --git a/kernel/panic.c b/kernel/panic.c
index 32fe4eff1b8..3fd8c5bf8b3 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -8,19 +8,19 @@
* This function is used through-out the kernel (including mm and fs)
* to indicate a major problem.
*/
+#include <linux/debug_locks.h>
+#include <linux/interrupt.h>
+#include <linux/kallsyms.h>
+#include <linux/notifier.h>
#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
+#include <linux/random.h>
#include <linux/reboot.h>
-#include <linux/notifier.h>
-#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/kexec.h>
+#include <linux/sched.h>
#include <linux/sysrq.h>
-#include <linux/interrupt.h>
+#include <linux/init.h>
#include <linux/nmi.h>
-#include <linux/kexec.h>
-#include <linux/debug_locks.h>
-#include <linux/random.h>
-#include <linux/kallsyms.h>
#include <linux/dmi.h>
int panic_on_oops;
@@ -52,19 +52,15 @@ EXPORT_SYMBOL(panic_blink);
*
* This function never returns.
*/
-
NORET_TYPE void panic(const char * fmt, ...)
{
- long i;
static char buf[1024];
va_list args;
-#if defined(CONFIG_S390)
- unsigned long caller = (unsigned long) __builtin_return_address(0);
-#endif
+ long i;
/*
- * It's possible to come here directly from a panic-assertion and not
- * have preempt disabled. Some functions called from here want
+ * It's possible to come here directly from a panic-assertion and
+ * not have preempt disabled. Some functions called from here want
* preempt to be disabled. No point enabling it later though...
*/
preempt_disable();
@@ -77,7 +73,6 @@ NORET_TYPE void panic(const char * fmt, ...)
#ifdef CONFIG_DEBUG_BUGVERBOSE
dump_stack();
#endif
- bust_spinlocks(0);
/*
* If we have crashed and we have a crash kernel loaded let it handle
@@ -86,14 +81,12 @@ NORET_TYPE void panic(const char * fmt, ...)
*/
crash_kexec(NULL);
-#ifdef CONFIG_SMP
/*
* Note smp_send_stop is the usual smp shutdown function, which
* unfortunately means it may not be hardened to work in a panic
* situation.
*/
smp_send_stop();
-#endif
atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
@@ -102,19 +95,21 @@ NORET_TYPE void panic(const char * fmt, ...)
if (panic_timeout > 0) {
/*
- * Delay timeout seconds before rebooting the machine.
- * We can't use the "normal" timers since we just panicked..
- */
- printk(KERN_EMERG "Rebooting in %d seconds..",panic_timeout);
+ * Delay timeout seconds before rebooting the machine.
+ * We can't use the "normal" timers since we just panicked.
+ */
+ printk(KERN_EMERG "Rebooting in %d seconds..", panic_timeout);
+
for (i = 0; i < panic_timeout*1000; ) {
touch_nmi_watchdog();
i += panic_blink(i);
mdelay(1);
i++;
}
- /* This will not be a clean reboot, with everything
- * shutting down. But if there is a chance of
- * rebooting the system it will be rebooted.
+ /*
+ * This will not be a clean reboot, with everything
+ * shutting down. But if there is a chance of
+ * rebooting the system it will be rebooted.
*/
emergency_restart();
}
@@ -127,38 +122,44 @@ NORET_TYPE void panic(const char * fmt, ...)
}
#endif
#if defined(CONFIG_S390)
- disabled_wait(caller);
+ {
+ unsigned long caller;
+
+ caller = (unsigned long)__builtin_return_address(0);
+ disabled_wait(caller);
+ }
#endif
local_irq_enable();
- for (i = 0;;) {
+ for (i = 0; ; ) {
touch_softlockup_watchdog();
i += panic_blink(i);
mdelay(1);
i++;
}
+ bust_spinlocks(0);
}
EXPORT_SYMBOL(panic);
struct tnt {
- u8 bit;
- char true;
- char false;
+ u8 bit;
+ char true;
+ char false;
};
static const struct tnt tnts[] = {
- { TAINT_PROPRIETARY_MODULE, 'P', 'G' },
- { TAINT_FORCED_MODULE, 'F', ' ' },
- { TAINT_UNSAFE_SMP, 'S', ' ' },
- { TAINT_FORCED_RMMOD, 'R', ' ' },
- { TAINT_MACHINE_CHECK, 'M', ' ' },
- { TAINT_BAD_PAGE, 'B', ' ' },
- { TAINT_USER, 'U', ' ' },
- { TAINT_DIE, 'D', ' ' },
- { TAINT_OVERRIDDEN_ACPI_TABLE, 'A', ' ' },
- { TAINT_WARN, 'W', ' ' },
- { TAINT_CRAP, 'C', ' ' },
+ { TAINT_PROPRIETARY_MODULE, 'P', 'G' },
+ { TAINT_FORCED_MODULE, 'F', ' ' },
+ { TAINT_UNSAFE_SMP, 'S', ' ' },
+ { TAINT_FORCED_RMMOD, 'R', ' ' },
+ { TAINT_MACHINE_CHECK, 'M', ' ' },
+ { TAINT_BAD_PAGE, 'B', ' ' },
+ { TAINT_USER, 'U', ' ' },
+ { TAINT_DIE, 'D', ' ' },
+ { TAINT_OVERRIDDEN_ACPI_TABLE, 'A', ' ' },
+ { TAINT_WARN, 'W', ' ' },
+ { TAINT_CRAP, 'C', ' ' },
};
/**
@@ -195,7 +196,8 @@ const char *print_tainted(void)
*s = 0;
} else
snprintf(buf, sizeof(buf), "Not tainted");
- return(buf);
+
+ return buf;
}
int test_taint(unsigned flag)
@@ -211,7 +213,8 @@ unsigned long get_taint(void)
void add_taint(unsigned flag)
{
- debug_locks = 0; /* can't trust the integrity of the kernel anymore */
+ /* can't trust the integrity of the kernel anymore: */
+ debug_locks = 0;
set_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(add_taint);
@@ -266,8 +269,8 @@ static void do_oops_enter_exit(void)
}
/*
- * Return true if the calling CPU is allowed to print oops-related info. This
- * is a bit racy..
+ * Return true if the calling CPU is allowed to print oops-related info.
+ * This is a bit racy..
*/
int oops_may_print(void)
{
@@ -276,20 +279,22 @@ int oops_may_print(void)
/*
* Called when the architecture enters its oops handler, before it prints
- * anything. If this is the first CPU to oops, and it's oopsing the first time
- * then let it proceed.
+ * anything. If this is the first CPU to oops, and it's oopsing the first
+ * time then let it proceed.
*
- * This is all enabled by the pause_on_oops kernel boot option. We do all this
- * to ensure that oopses don't scroll off the screen. It has the side-effect
- * of preventing later-oopsing CPUs from mucking up the display, too.
+ * This is all enabled by the pause_on_oops kernel boot option. We do all
+ * this to ensure that oopses don't scroll off the screen. It has the
+ * side-effect of preventing later-oopsing CPUs from mucking up the display,
+ * too.
*
- * It turns out that the CPU which is allowed to print ends up pausing for the
- * right duration, whereas all the other CPUs pause for twice as long: once in
- * oops_enter(), once in oops_exit().
+ * It turns out that the CPU which is allowed to print ends up pausing for
+ * the right duration, whereas all the other CPUs pause for twice as long:
+ * once in oops_enter(), once in oops_exit().
*/
void oops_enter(void)
{
- debug_locks_off(); /* can't trust the integrity of the kernel anymore */
+ /* can't trust the integrity of the kernel anymore: */
+ debug_locks_off();
do_oops_enter_exit();
}
diff --git a/kernel/params.c b/kernel/params.c
index a1e3025b19a..de273ec85bd 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -24,6 +24,9 @@
#include <linux/err.h>
#include <linux/slab.h>
+/* We abuse the high bits of "perm" to record whether we kmalloc'ed. */
+#define KPARAM_KMALLOCED 0x80000000
+
#if 0
#define DEBUGP printk
#else
@@ -217,7 +220,19 @@ int param_set_charp(const char *val, struct kernel_param *kp)
return -ENOSPC;
}
- *(char **)kp->arg = (char *)val;
+ if (kp->perm & KPARAM_KMALLOCED)
+ kfree(*(char **)kp->arg);
+
+ /* This is a hack. We can't strdup in early boot, and we
+ * don't need to; this mangled commandline is preserved. */
+ if (slab_is_available()) {
+ kp->perm |= KPARAM_KMALLOCED;
+ *(char **)kp->arg = kstrdup(val, GFP_KERNEL);
+ if (!*(char **)kp->arg)
+ return -ENOMEM;
+ } else
+ *(const char **)kp->arg = val;
+
return 0;
}
@@ -571,6 +586,15 @@ void module_param_sysfs_remove(struct module *mod)
}
#endif
+void destroy_params(const struct kernel_param *params, unsigned num)
+{
+ unsigned int i;
+
+ for (i = 0; i < num; i++)
+ if (params[i].perm & KPARAM_KMALLOCED)
+ kfree(*(char **)params[i].arg);
+}
+
static void __init kernel_add_sysfs_param(const char *name,
struct kernel_param *kparam,
unsigned int name_skip)
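
For context, a hypothetical charp parameter as affected by the change above: the value passed at load time or written through sysfs is now kstrdup()ed once slab is available, flagged KPARAM_KMALLOCED, and freed by destroy_params() when the module goes away.

	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/moduleparam.h>

	/* Hypothetical parameter; the name and default are made up. */
	static char *watch_dev = "eth0";
	module_param(watch_dev, charp, 0644);
	MODULE_PARM_DESC(watch_dev, "device name to watch (illustrative)");
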
diff --git a/kernel/pid.c b/kernel/pid.c
index 1b3586fe753..b2e5f78fd28 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -403,6 +403,8 @@ struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
struct pid *pid;
rcu_read_lock();
+ if (type != PIDTYPE_PID)
+ task = task->group_leader;
pid = get_pid(task->pids[type].pid);
rcu_read_unlock();
return pid;
@@ -450,11 +452,24 @@ pid_t pid_vnr(struct pid *pid)
}
EXPORT_SYMBOL_GPL(pid_vnr);
-pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
+ struct pid_namespace *ns)
{
- return pid_nr_ns(task_pid(tsk), ns);
+ pid_t nr = 0;
+
+ rcu_read_lock();
+ if (!ns)
+ ns = current->nsproxy->pid_ns;
+ if (likely(pid_alive(task))) {
+ if (type != PIDTYPE_PID)
+ task = task->group_leader;
+ nr = pid_nr_ns(task->pids[type].pid, ns);
+ }
+ rcu_read_unlock();
+
+ return nr;
}
-EXPORT_SYMBOL(task_pid_nr_ns);
+EXPORT_SYMBOL(__task_pid_nr_ns);
pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
@@ -462,18 +477,6 @@ pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
}
EXPORT_SYMBOL(task_tgid_nr_ns);
-pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
-{
- return pid_nr_ns(task_pgrp(tsk), ns);
-}
-EXPORT_SYMBOL(task_pgrp_nr_ns);
-
-pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
-{
- return pid_nr_ns(task_session(tsk), ns);
-}
-EXPORT_SYMBOL(task_session_nr_ns);
-
struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
return ns_of_pid(task_pid(tsk));
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index fab8ea86fac..2d1001b4858 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -152,6 +152,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
{
int nr;
int rc;
+ struct task_struct *task;
/*
* The last thread in the cgroup-init thread group is terminating.
@@ -169,7 +170,19 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
read_lock(&tasklist_lock);
nr = next_pidmap(pid_ns, 1);
while (nr > 0) {
- kill_proc_info(SIGKILL, SEND_SIG_PRIV, nr);
+ rcu_read_lock();
+
+ /*
+ * Use force_sig() since it clears SIGNAL_UNKILLABLE, ensuring
+ * any nested-container's init processes don't ignore the
+ * signal.
+ */
+ task = pid_task(find_vpid(nr), PIDTYPE_PID);
+ if (task)
+ force_sig(SIGKILL, task);
+
+ rcu_read_unlock();
+
nr = next_pidmap(pid_ns, nr);
}
read_unlock(&tasklist_lock);
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index e976e505648..8e5d9a68b02 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -1370,7 +1370,8 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
if (task_cputime_expired(&group_sample, &sig->cputime_expires))
return 1;
}
- return 0;
+
+ return sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY;
}
/*
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 4a4a206b197..5f21ab2bbcd 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -22,6 +22,7 @@
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
+#include <asm/suspend.h>
#include "power.h"
@@ -214,7 +215,7 @@ static int create_image(int platform_mode)
return error;
device_pm_lock();
- local_irq_disable();
+
/* At this point, device_suspend() has been called, but *not*
* device_power_down(). We *must* call device_power_down() now.
* Otherwise, drivers for some devices (e.g. interrupt controllers)
@@ -225,13 +226,25 @@ static int create_image(int platform_mode)
if (error) {
printk(KERN_ERR "PM: Some devices failed to power down, "
"aborting hibernation\n");
- goto Enable_irqs;
+ goto Unlock;
}
+
+ error = platform_pre_snapshot(platform_mode);
+ if (error || hibernation_test(TEST_PLATFORM))
+ goto Platform_finish;
+
+ error = disable_nonboot_cpus();
+ if (error || hibernation_test(TEST_CPUS)
+ || hibernation_testmode(HIBERNATION_TEST))
+ goto Enable_cpus;
+
+ local_irq_disable();
+
sysdev_suspend(PMSG_FREEZE);
if (error) {
printk(KERN_ERR "PM: Some devices failed to power down, "
"aborting hibernation\n");
- goto Power_up_devices;
+ goto Enable_irqs;
}
if (hibernation_test(TEST_CORE))
@@ -247,17 +260,28 @@ static int create_image(int platform_mode)
restore_processor_state();
if (!in_suspend)
platform_leave(platform_mode);
+
Power_up:
sysdev_resume();
/* NOTE: device_power_up() is just a resume() for devices
* that suspended with irqs off ... no overall powerup.
*/
- Power_up_devices:
- device_power_up(in_suspend ?
- (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
+
Enable_irqs:
local_irq_enable();
+
+ Enable_cpus:
+ enable_nonboot_cpus();
+
+ Platform_finish:
+ platform_finish(platform_mode);
+
+ device_power_up(in_suspend ?
+ (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
+
+ Unlock:
device_pm_unlock();
+
return error;
}
@@ -265,7 +289,7 @@ static int create_image(int platform_mode)
* hibernation_snapshot - quiesce devices and create the hibernation
* snapshot image.
* @platform_mode - if set, use the platform driver, if available, to
- * prepare the platform frimware for the power transition.
+ * prepare the platform firmware for the power transition.
*
* Must be called with pm_mutex held
*/
@@ -291,25 +315,9 @@ int hibernation_snapshot(int platform_mode)
if (hibernation_test(TEST_DEVICES))
goto Recover_platform;
- error = platform_pre_snapshot(platform_mode);
- if (error || hibernation_test(TEST_PLATFORM))
- goto Finish;
-
- error = disable_nonboot_cpus();
- if (!error) {
- if (hibernation_test(TEST_CPUS))
- goto Enable_cpus;
-
- if (hibernation_testmode(HIBERNATION_TEST))
- goto Enable_cpus;
+ error = create_image(platform_mode);
+ /* Control returns here after successful restore */
- error = create_image(platform_mode);
- /* Control returns here after successful restore */
- }
- Enable_cpus:
- enable_nonboot_cpus();
- Finish:
- platform_finish(platform_mode);
Resume_devices:
device_resume(in_suspend ?
(error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
@@ -331,19 +339,33 @@ int hibernation_snapshot(int platform_mode)
* kernel.
*/
-static int resume_target_kernel(void)
+static int resume_target_kernel(bool platform_mode)
{
int error;
device_pm_lock();
- local_irq_disable();
+
error = device_power_down(PMSG_QUIESCE);
if (error) {
printk(KERN_ERR "PM: Some devices failed to power down, "
"aborting resume\n");
- goto Enable_irqs;
+ goto Unlock;
}
- sysdev_suspend(PMSG_QUIESCE);
+
+ error = platform_pre_restore(platform_mode);
+ if (error)
+ goto Cleanup;
+
+ error = disable_nonboot_cpus();
+ if (error)
+ goto Enable_cpus;
+
+ local_irq_disable();
+
+ error = sysdev_suspend(PMSG_QUIESCE);
+ if (error)
+ goto Enable_irqs;
+
/* We'll ignore saved state, but this gets preempt count (etc) right */
save_processor_state();
error = restore_highmem();
@@ -366,11 +388,23 @@ static int resume_target_kernel(void)
swsusp_free();
restore_processor_state();
touch_softlockup_watchdog();
+
sysdev_resume();
- device_power_up(PMSG_RECOVER);
+
Enable_irqs:
local_irq_enable();
+
+ Enable_cpus:
+ enable_nonboot_cpus();
+
+ Cleanup:
+ platform_restore_cleanup(platform_mode);
+
+ device_power_up(PMSG_RECOVER);
+
+ Unlock:
device_pm_unlock();
+
return error;
}
@@ -378,7 +412,7 @@ static int resume_target_kernel(void)
* hibernation_restore - quiesce devices and restore the hibernation
* snapshot image. If successful, control returns in hibernation_snaphot()
* @platform_mode - if set, use the platform driver, if available, to
- * prepare the platform frimware for the transition.
+ * prepare the platform firmware for the transition.
*
* Must be called with pm_mutex held
*/
@@ -390,19 +424,10 @@ int hibernation_restore(int platform_mode)
pm_prepare_console();
suspend_console();
error = device_suspend(PMSG_QUIESCE);
- if (error)
- goto Finish;
-
- error = platform_pre_restore(platform_mode);
if (!error) {
- error = disable_nonboot_cpus();
- if (!error)
- error = resume_target_kernel();
- enable_nonboot_cpus();
+ error = resume_target_kernel(platform_mode);
+ device_resume(PMSG_RECOVER);
}
- platform_restore_cleanup(platform_mode);
- device_resume(PMSG_RECOVER);
- Finish:
resume_console();
pm_restore_console();
return error;
@@ -438,38 +463,46 @@ int hibernation_platform_enter(void)
goto Resume_devices;
}
+ device_pm_lock();
+
+ error = device_power_down(PMSG_HIBERNATE);
+ if (error)
+ goto Unlock;
+
error = hibernation_ops->prepare();
if (error)
- goto Resume_devices;
+ goto Platform_finish;
error = disable_nonboot_cpus();
if (error)
- goto Finish;
+ goto Platform_finish;
- device_pm_lock();
local_irq_disable();
- error = device_power_down(PMSG_HIBERNATE);
- if (!error) {
- sysdev_suspend(PMSG_HIBERNATE);
- hibernation_ops->enter();
- /* We should never get here */
- while (1);
- }
- local_irq_enable();
- device_pm_unlock();
+ sysdev_suspend(PMSG_HIBERNATE);
+ hibernation_ops->enter();
+ /* We should never get here */
+ while (1);
/*
* We don't need to reenable the nonboot CPUs or resume consoles, since
* the system is going to be halted anyway.
*/
- Finish:
+ Platform_finish:
hibernation_ops->finish();
+
+ device_power_up(PMSG_RESTORE);
+
+ Unlock:
+ device_pm_unlock();
+
Resume_devices:
entering_platform_hibernation = false;
device_resume(PMSG_RESTORE);
resume_console();
+
Close:
hibernation_ops->end();
+
return error;
}
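
The reworked hibernate paths above all follow the same setup-in-order, tear-down-in-reverse goto ladder. A minimal userspace sketch of that error-handling shape (step(), the label names and the injected failure are illustrative, not kernel APIs):

    #include <stdio.h>

    static int step(const char *name, int fail)
    {
            printf("%s\n", name);
            return fail ? -1 : 0;
    }

    static int do_transition(void)
    {
            int error;

            error = step("power_down", 0);
            if (error)
                    goto Unlock;

            error = step("platform_prepare", 0);
            if (error)
                    goto Cleanup;

            error = step("disable_cpus", 1);        /* inject a failure here */
            if (error)
                    goto Enable_cpus;               /* partial disable is undone below */

            /* the actual state change would happen here */

     Enable_cpus:
            step("enable_cpus", 0);
     Cleanup:
            step("platform_cleanup", 0);
            step("power_up", 0);
     Unlock:
            step("unlock", 0);
            return error;
    }

    int main(void)
    {
            return do_transition() ? 1 : 0;
    }

Each label undoes exactly the steps that succeeded before the failure, which is why the new error paths can share one exit sequence.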
diff --git a/kernel/power/main.c b/kernel/power/main.c
index c9632f841f6..f172f41858b 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -287,17 +287,32 @@ void __attribute__ ((weak)) arch_suspend_enable_irqs(void)
*/
static int suspend_enter(suspend_state_t state)
{
- int error = 0;
+ int error;
device_pm_lock();
- arch_suspend_disable_irqs();
- BUG_ON(!irqs_disabled());
- if ((error = device_power_down(PMSG_SUSPEND))) {
+ error = device_power_down(PMSG_SUSPEND);
+ if (error) {
printk(KERN_ERR "PM: Some devices failed to power down\n");
goto Done;
}
+ if (suspend_ops->prepare) {
+ error = suspend_ops->prepare();
+ if (error)
+ goto Power_up_devices;
+ }
+
+ if (suspend_test(TEST_PLATFORM))
+		goto Platform_finish;
+
+ error = disable_nonboot_cpus();
+ if (error || suspend_test(TEST_CPUS))
+ goto Enable_cpus;
+
+ arch_suspend_disable_irqs();
+ BUG_ON(!irqs_disabled());
+
error = sysdev_suspend(PMSG_SUSPEND);
if (!error) {
if (!suspend_test(TEST_CORE))
@@ -305,11 +320,22 @@ static int suspend_enter(suspend_state_t state)
sysdev_resume();
}
- device_power_up(PMSG_RESUME);
- Done:
arch_suspend_enable_irqs();
BUG_ON(irqs_disabled());
+
+ Enable_cpus:
+ enable_nonboot_cpus();
+
+ Platform_finish:
+ if (suspend_ops->finish)
+ suspend_ops->finish();
+
+ Power_up_devices:
+ device_power_up(PMSG_RESUME);
+
+ Done:
device_pm_unlock();
+
return error;
}
@@ -341,23 +367,8 @@ int suspend_devices_and_enter(suspend_state_t state)
if (suspend_test(TEST_DEVICES))
goto Recover_platform;
- if (suspend_ops->prepare) {
- error = suspend_ops->prepare();
- if (error)
- goto Resume_devices;
- }
-
- if (suspend_test(TEST_PLATFORM))
- goto Finish;
-
- error = disable_nonboot_cpus();
- if (!error && !suspend_test(TEST_CPUS))
- suspend_enter(state);
+ suspend_enter(state);
- enable_nonboot_cpus();
- Finish:
- if (suspend_ops->finish)
- suspend_ops->finish();
Resume_devices:
suspend_test_start();
device_resume(PMSG_RESUME);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index f5fc2d7680f..33e2e4a819f 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -321,13 +321,10 @@ static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
INIT_LIST_HEAD(list);
- for_each_zone(zone) {
+ for_each_populated_zone(zone) {
unsigned long zone_start, zone_end;
struct mem_extent *ext, *cur, *aux;
- if (!populated_zone(zone))
- continue;
-
zone_start = zone->zone_start_pfn;
zone_end = zone->zone_start_pfn + zone->spanned_pages;
@@ -804,8 +801,8 @@ static unsigned int count_free_highmem_pages(void)
struct zone *zone;
unsigned int cnt = 0;
- for_each_zone(zone)
- if (populated_zone(zone) && is_highmem(zone))
+ for_each_populated_zone(zone)
+ if (is_highmem(zone))
cnt += zone_page_state(zone, NR_FREE_PAGES);
return cnt;
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index a92c9145155..78c35047586 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -51,6 +51,7 @@
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/rbtree.h>
+#include <linux/io.h>
#include "power.h"
@@ -229,17 +230,16 @@ int swsusp_shrink_memory(void)
size = count_data_pages() + PAGES_FOR_IO + SPARE_PAGES;
tmp = size;
size += highmem_size;
- for_each_zone (zone)
- if (populated_zone(zone)) {
- tmp += snapshot_additional_pages(zone);
- if (is_highmem(zone)) {
- highmem_size -=
+ for_each_populated_zone(zone) {
+ tmp += snapshot_additional_pages(zone);
+ if (is_highmem(zone)) {
+ highmem_size -=
zone_page_state(zone, NR_FREE_PAGES);
- } else {
- tmp -= zone_page_state(zone, NR_FREE_PAGES);
- tmp += zone->lowmem_reserve[ZONE_NORMAL];
- }
+ } else {
+ tmp -= zone_page_state(zone, NR_FREE_PAGES);
+ tmp += zone->lowmem_reserve[ZONE_NORMAL];
}
+ }
if (highmem_size < 0)
highmem_size = 0;
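
Both the snapshot.c and swsusp.c hunks above replace the open-coded for_each_zone() plus populated_zone() test with for_each_populated_zone(). A sketch of how such a filtered iterator can be layered on the plain one (the in-tree macro may be spelled slightly differently):

    /* Sketch only: skip unpopulated zones inside the normal zone walk. */
    #define for_each_populated_zone(zone)                   \
            for_each_zone(zone)                             \
                    if (!populated_zone(zone))              \
                            ;                               \
                    else

Callers can then drop their explicit "if (!populated_zone(zone)) continue;" lines, which is exactly what the patch does.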
diff --git a/kernel/printk.c b/kernel/printk.c
index e3602d0755b..5052b5497c6 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -32,6 +32,7 @@
#include <linux/security.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
+#include <linux/kexec.h>
#include <asm/uaccess.h>
@@ -135,6 +136,24 @@ static char *log_buf = __log_buf;
static int log_buf_len = __LOG_BUF_LEN;
static unsigned logged_chars; /* Number of chars produced since last read+clear operation */
+#ifdef CONFIG_KEXEC
+/*
+ * This appends the listed symbols to /proc/vmcoreinfo
+ *
+ * /proc/vmcoreinfo is used by various utilities, like crash and makedumpfile, to
+ * obtain access to symbols that are otherwise very difficult to locate. These
+ * symbols are specifically used so that utilities can access and extract the
+ * dmesg log from a vmcore file after a crash.
+ */
+void log_buf_kexec_setup(void)
+{
+ VMCOREINFO_SYMBOL(log_buf);
+ VMCOREINFO_SYMBOL(log_end);
+ VMCOREINFO_SYMBOL(log_buf_len);
+ VMCOREINFO_SYMBOL(logged_chars);
+}
+#endif
+
static int __init log_buf_len_setup(char *str)
{
unsigned size = memparse(str, &str);
@@ -1292,8 +1311,11 @@ EXPORT_SYMBOL(printk_ratelimit);
bool printk_timed_ratelimit(unsigned long *caller_jiffies,
unsigned int interval_msecs)
{
- if (*caller_jiffies == 0 || time_after(jiffies, *caller_jiffies)) {
- *caller_jiffies = jiffies + msecs_to_jiffies(interval_msecs);
+ if (*caller_jiffies == 0
+ || !time_in_range(jiffies, *caller_jiffies,
+ *caller_jiffies
+ + msecs_to_jiffies(interval_msecs))) {
+ *caller_jiffies = jiffies;
return true;
}
return false;
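
With the change above, printk_timed_ratelimit() re-arms from the time of the last message it allowed and also fires when jiffies has fallen outside the expected window (for example after a long sleep). A typical call site, unchanged by the patch (the message text is illustrative):

    static unsigned long last_complaint;    /* zero-initialized */

    if (printk_timed_ratelimit(&last_complaint, 5 * 1000)) /* at most every 5s */
            printk(KERN_WARNING "device is misbehaving\n");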
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index c9cf48b21f0..aaad0ec3419 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -60,11 +60,15 @@ static void ptrace_untrace(struct task_struct *child)
{
spin_lock(&child->sighand->siglock);
if (task_is_traced(child)) {
- if (child->signal->flags & SIGNAL_STOP_STOPPED) {
+ /*
+ * If the group stop is completed or in progress,
+ * this thread was already counted as stopped.
+ */
+ if (child->signal->flags & SIGNAL_STOP_STOPPED ||
+ child->signal->group_stop_count)
__set_task_state(child, TASK_STOPPED);
- } else {
+ else
signal_wake_up(child, 1);
- }
}
spin_unlock(&child->sighand->siglock);
}
@@ -235,18 +239,58 @@ out:
return retval;
}
-static inline void __ptrace_detach(struct task_struct *child, unsigned int data)
+/*
+ * Called with irqs disabled, returns true if children should reap themselves.
+ */
+static int ignoring_children(struct sighand_struct *sigh)
{
- child->exit_code = data;
- /* .. re-parent .. */
- __ptrace_unlink(child);
- /* .. and wake it up. */
- if (child->exit_state != EXIT_ZOMBIE)
- wake_up_process(child);
+ int ret;
+ spin_lock(&sigh->siglock);
+ ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
+ (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
+ spin_unlock(&sigh->siglock);
+ return ret;
+}
+
+/*
+ * Called with tasklist_lock held for writing.
+ * Unlink a traced task, and clean it up if it was a traced zombie.
+ * Return true if it needs to be reaped with release_task().
+ * (We can't call release_task() here because we already hold tasklist_lock.)
+ *
+ * If it's a zombie, our attachedness prevented normal parent notification
+ * or self-reaping. Do notification now if it would have happened earlier.
+ * If it should reap itself, return true.
+ *
+ * If it's our own child, there is no notification to do.
+ * But if our normal children self-reap, then this child
+ * was prevented by ptrace and we must reap it now.
+ */
+static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
+{
+ __ptrace_unlink(p);
+
+ if (p->exit_state == EXIT_ZOMBIE) {
+ if (!task_detached(p) && thread_group_empty(p)) {
+ if (!same_thread_group(p->real_parent, tracer))
+ do_notify_parent(p, p->exit_signal);
+ else if (ignoring_children(tracer->sighand))
+ p->exit_signal = -1;
+ }
+ if (task_detached(p)) {
+ /* Mark it as in the process of being reaped. */
+ p->exit_state = EXIT_DEAD;
+ return true;
+ }
+ }
+
+ return false;
}
int ptrace_detach(struct task_struct *child, unsigned int data)
{
+ bool dead = false;
+
if (!valid_signal(data))
return -EIO;
@@ -255,14 +299,45 @@ int ptrace_detach(struct task_struct *child, unsigned int data)
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
write_lock_irq(&tasklist_lock);
- /* protect against de_thread()->release_task() */
- if (child->ptrace)
- __ptrace_detach(child, data);
+ /*
+ * This child may already have been killed. Make sure de_thread() or
+ * our sub-thread doing do_wait() didn't do release_task() yet.
+ */
+ if (child->ptrace) {
+ child->exit_code = data;
+ dead = __ptrace_detach(current, child);
+ }
write_unlock_irq(&tasklist_lock);
+ if (unlikely(dead))
+ release_task(child);
+
return 0;
}
+/*
+ * Detach all tasks we were using ptrace on.
+ */
+void exit_ptrace(struct task_struct *tracer)
+{
+ struct task_struct *p, *n;
+ LIST_HEAD(ptrace_dead);
+
+ write_lock_irq(&tasklist_lock);
+ list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
+ if (__ptrace_detach(tracer, p))
+ list_add(&p->ptrace_entry, &ptrace_dead);
+ }
+ write_unlock_irq(&tasklist_lock);
+
+ BUG_ON(!list_empty(&tracer->ptraced));
+
+ list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
+ list_del_init(&p->ptrace_entry);
+ release_task(p);
+ }
+}
+
int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
int copied = 0;
@@ -612,8 +687,6 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
goto out_put_task_struct;
ret = arch_ptrace(child, request, addr, data);
- if (ret < 0)
- goto out_put_task_struct;
out_put_task_struct:
put_task_struct(child);
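
__ptrace_detach() and exit_ptrace() above share one rule: decide under tasklist_lock which tasks must be reaped, but call release_task() only after the lock has been dropped. A small userspace analogue of that collect-then-release pattern (list_lock, release_node() and the node type are illustrative, not kernel code):

    #include <pthread.h>
    #include <stdlib.h>

    struct node { struct node *next; };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *tracked;

    /* Stand-in for release_task(): must not run with list_lock held. */
    static void release_node(struct node *n)
    {
            free(n);
    }

    static void detach_all(void)
    {
            struct node *n, *dead = NULL;

            pthread_mutex_lock(&list_lock);
            while ((n = tracked) != NULL) {         /* phase 1: unlink under the lock */
                    tracked = n->next;
                    n->next = dead;
                    dead = n;
            }
            pthread_mutex_unlock(&list_lock);

            while ((n = dead) != NULL) {            /* phase 2: release after unlocking */
                    dead = n->next;
                    release_node(n);
            }
    }

    int main(void)
    {
            for (int i = 0; i < 3; i++) {
                    struct node *n = calloc(1, sizeof(*n));
                    n->next = tracked;
                    tracked = n;
            }
            detach_all();
            return 0;
    }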
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index 654c640a6b9..0f2b0b31130 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -65,6 +65,7 @@ static struct rcu_ctrlblk rcu_ctrlblk = {
.lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
.cpumask = CPU_BITS_NONE,
};
+
static struct rcu_ctrlblk rcu_bh_ctrlblk = {
.cur = -300,
.completed = -300,
@@ -73,8 +74,26 @@ static struct rcu_ctrlblk rcu_bh_ctrlblk = {
.cpumask = CPU_BITS_NONE,
};
-DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
-DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };
+static DEFINE_PER_CPU(struct rcu_data, rcu_data);
+static DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
+
+/*
+ * Increment the quiescent state counter.
+ * The counter is a bit degenerated: We do not need to know
+ * how many quiescent states passed, just if there was at least
+ * one since the start of the grace period. Thus just a flag.
+ */
+void rcu_qsctr_inc(int cpu)
+{
+ struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+ rdp->passed_quiesc = 1;
+}
+
+void rcu_bh_qsctr_inc(int cpu)
+{
+ struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
+ rdp->passed_quiesc = 1;
+}
static int blimit = 10;
static int qhimark = 10000;
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index cae8a059cf4..2c7b8457d0d 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -122,6 +122,8 @@ static void rcu_barrier_func(void *type)
}
}
+static inline void wait_migrated_callbacks(void);
+
/*
* Orchestrate the specified type of RCU barrier, waiting for all
* RCU callbacks of the specified type to complete.
@@ -147,6 +149,7 @@ static void _rcu_barrier(enum rcu_barrier type)
complete(&rcu_barrier_completion);
wait_for_completion(&rcu_barrier_completion);
mutex_unlock(&rcu_barrier_mutex);
+ wait_migrated_callbacks();
}
/**
@@ -176,9 +179,50 @@ void rcu_barrier_sched(void)
}
EXPORT_SYMBOL_GPL(rcu_barrier_sched);
+static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0);
+static struct rcu_head rcu_migrate_head[3];
+static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq);
+
+static void rcu_migrate_callback(struct rcu_head *notused)
+{
+ if (atomic_dec_and_test(&rcu_migrate_type_count))
+ wake_up(&rcu_migrate_wq);
+}
+
+static inline void wait_migrated_callbacks(void)
+{
+ wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
+}
+
+static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ if (action == CPU_DYING) {
+ /*
+ * preempt_disable() in on_each_cpu() prevents stop_machine(),
+ * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);"
+ * returns, all online cpus have queued rcu_barrier_func(),
+ * and the dead cpu (if it exists) queues rcu_migrate_callback()s.
+ *
+ * These callbacks ensure _rcu_barrier() waits for all
+ * RCU callbacks of the specified type to complete.
+ */
+ atomic_set(&rcu_migrate_type_count, 3);
+ call_rcu_bh(rcu_migrate_head, rcu_migrate_callback);
+ call_rcu_sched(rcu_migrate_head + 1, rcu_migrate_callback);
+ call_rcu(rcu_migrate_head + 2, rcu_migrate_callback);
+ } else if (action == CPU_POST_DEAD) {
+ /* rcu_migrate_head is protected by cpu_add_remove_lock */
+ wait_migrated_callbacks();
+ }
+
+ return NOTIFY_OK;
+}
+
void __init rcu_init(void)
{
__rcu_init();
+ hotcpu_notifier(rcu_barrier_cpu_hotplug, 0);
}
void rcu_scheduler_starting(void)
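
The migrate-callback machinery above is a counted-completion pattern: arm an atomic count before any callback can fire, let each callback decrement it, and wake the waiter when it reaches zero. A userspace analogue with the same lost-wakeup-free structure (names are illustrative):

    #include <pthread.h>
    #include <stdatomic.h>

    /* Analogue of rcu_migrate_type_count/rcu_migrate_wq: pending is set to the
     * number of callbacks *before* any of them can fire. */
    static atomic_int pending;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t done = PTHREAD_COND_INITIALIZER;

    static void callback_fired(void)
    {
            if (atomic_fetch_sub(&pending, 1) == 1) {       /* last callback */
                    pthread_mutex_lock(&lock);
                    pthread_cond_broadcast(&done);
                    pthread_mutex_unlock(&lock);
            }
    }

    static void wait_for_callbacks(void)
    {
            pthread_mutex_lock(&lock);
            while (atomic_load(&pending) != 0)
                    pthread_cond_wait(&done, &lock);
            pthread_mutex_unlock(&lock);
    }

Because the broadcast is issued while holding the same mutex the waiter checks under, the waiter either sees the count already at zero or is guaranteed to receive the wakeup.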
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 5d59e850fb7..ce97a4df64d 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -147,7 +147,51 @@ struct rcu_ctrlblk {
wait_queue_head_t sched_wq; /* Place for rcu_sched to sleep. */
};
+struct rcu_dyntick_sched {
+ int dynticks;
+ int dynticks_snap;
+ int sched_qs;
+ int sched_qs_snap;
+ int sched_dynticks_snap;
+};
+
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_dyntick_sched, rcu_dyntick_sched) = {
+ .dynticks = 1,
+};
+
+void rcu_qsctr_inc(int cpu)
+{
+ struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
+
+ rdssp->sched_qs++;
+}
+
+#ifdef CONFIG_NO_HZ
+
+void rcu_enter_nohz(void)
+{
+ static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
+
+ smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
+ __get_cpu_var(rcu_dyntick_sched).dynticks++;
+ WARN_ON_RATELIMIT(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1, &rs);
+}
+
+void rcu_exit_nohz(void)
+{
+ static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
+
+ __get_cpu_var(rcu_dyntick_sched).dynticks++;
+ smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
+ WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1),
+ &rs);
+}
+
+#endif /* CONFIG_NO_HZ */
+
+
static DEFINE_PER_CPU(struct rcu_data, rcu_data);
+
static struct rcu_ctrlblk rcu_ctrlblk = {
.fliplock = __SPIN_LOCK_UNLOCKED(rcu_ctrlblk.fliplock),
.completed = 0,
@@ -427,10 +471,6 @@ static void __rcu_advance_callbacks(struct rcu_data *rdp)
}
}
-DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_dyntick_sched, rcu_dyntick_sched) = {
- .dynticks = 1,
-};
-
#ifdef CONFIG_NO_HZ
static DEFINE_PER_CPU(int, rcu_update_flag);
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 7c4142a79f0..9b4a975a4b4 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -126,6 +126,7 @@ static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_timers = 0;
static struct list_head rcu_torture_removed;
+static cpumask_var_t shuffle_tmp_mask;
static int stutter_pause_test = 0;
@@ -889,10 +890,9 @@ static int rcu_idle_cpu; /* Force all torture tasks off this CPU */
*/
static void rcu_torture_shuffle_tasks(void)
{
- cpumask_t tmp_mask;
int i;
- cpus_setall(tmp_mask);
+ cpumask_setall(shuffle_tmp_mask);
get_online_cpus();
/* No point in shuffling if there is only one online CPU (ex: UP) */
@@ -902,29 +902,29 @@ static void rcu_torture_shuffle_tasks(void)
}
if (rcu_idle_cpu != -1)
- cpu_clear(rcu_idle_cpu, tmp_mask);
+ cpumask_clear_cpu(rcu_idle_cpu, shuffle_tmp_mask);
- set_cpus_allowed_ptr(current, &tmp_mask);
+ set_cpus_allowed_ptr(current, shuffle_tmp_mask);
if (reader_tasks) {
for (i = 0; i < nrealreaders; i++)
if (reader_tasks[i])
set_cpus_allowed_ptr(reader_tasks[i],
- &tmp_mask);
+ shuffle_tmp_mask);
}
if (fakewriter_tasks) {
for (i = 0; i < nfakewriters; i++)
if (fakewriter_tasks[i])
set_cpus_allowed_ptr(fakewriter_tasks[i],
- &tmp_mask);
+ shuffle_tmp_mask);
}
if (writer_task)
- set_cpus_allowed_ptr(writer_task, &tmp_mask);
+ set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask);
if (stats_task)
- set_cpus_allowed_ptr(stats_task, &tmp_mask);
+ set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask);
if (rcu_idle_cpu == -1)
rcu_idle_cpu = num_online_cpus() - 1;
@@ -1012,6 +1012,7 @@ rcu_torture_cleanup(void)
if (shuffler_task) {
VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
kthread_stop(shuffler_task);
+ free_cpumask_var(shuffle_tmp_mask);
}
shuffler_task = NULL;
@@ -1190,10 +1191,18 @@ rcu_torture_init(void)
}
if (test_no_idle_hz) {
rcu_idle_cpu = num_online_cpus() - 1;
+
+ if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
+ firsterr = -ENOMEM;
+ VERBOSE_PRINTK_ERRSTRING("Failed to alloc mask");
+ goto unwind;
+ }
+
/* Create the shuffler thread */
shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
"rcu_torture_shuffle");
if (IS_ERR(shuffler_task)) {
+ free_cpumask_var(shuffle_tmp_mask);
firsterr = PTR_ERR(shuffler_task);
VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
shuffler_task = NULL;
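
The rcutorture change above switches from an on-stack cpumask_t, which grows with NR_CPUS, to a cpumask_var_t allocated once at init time and freed on cleanup. The general lifetime looks roughly like this (error handling trimmed; a sketch, not the exact rcutorture code):

    static cpumask_var_t mask;

    static int init_path(void)
    {
            if (!alloc_cpumask_var(&mask, GFP_KERNEL))  /* real allocation with CONFIG_CPUMASK_OFFSTACK */
                    return -ENOMEM;
            return 0;
    }

    static void use_path(void)
    {
            cpumask_setall(mask);
            cpumask_clear_cpu(0, mask);                 /* illustrative */
            set_cpus_allowed_ptr(current, mask);
    }

    static void cleanup_path(void)
    {
            free_cpumask_var(mask);
    }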
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 97ce31579ec..7f326692257 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -78,6 +78,26 @@ DEFINE_PER_CPU(struct rcu_data, rcu_data);
struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
+/*
+ * Increment the quiescent state counter.
+ * The counter is a bit degenerated: We do not need to know
+ * how many quiescent states passed, just if there was at least
+ * one since the start of the grace period. Thus just a flag.
+ */
+void rcu_qsctr_inc(int cpu)
+{
+ struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+ rdp->passed_quiesc = 1;
+ rdp->passed_quiesc_completed = rdp->completed;
+}
+
+void rcu_bh_qsctr_inc(int cpu)
+{
+ struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
+ rdp->passed_quiesc = 1;
+ rdp->passed_quiesc_completed = rdp->completed;
+}
+
#ifdef CONFIG_NO_HZ
DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
.dynticks_nesting = 1,
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
new file mode 100644
index 00000000000..5e872bbf07f
--- /dev/null
+++ b/kernel/rcutree.h
@@ -0,0 +1,10 @@
+
+/*
+ * RCU implementation internal declarations:
+ */
+extern struct rcu_state rcu_state;
+DECLARE_PER_CPU(struct rcu_data, rcu_data);
+
+extern struct rcu_state rcu_bh_state;
+DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
+
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index d6db3e83782..4ee954f6a8d 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -43,6 +43,8 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include "rcutree.h"
+
static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
{
if (!rdp->beenonline)
diff --git a/kernel/relay.c b/kernel/relay.c
index edc0ba6d816..bc188549788 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -748,7 +748,7 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
* from the scheduler (trying to re-grab
* rq->lock), so defer it.
*/
- __mod_timer(&buf->timer, jiffies + 1);
+ mod_timer(&buf->timer, jiffies + 1);
}
old = buf->data;
@@ -795,13 +795,15 @@ void relay_subbufs_consumed(struct rchan *chan,
if (!chan)
return;
- if (cpu >= NR_CPUS || !chan->buf[cpu])
+ if (cpu >= NR_CPUS || !chan->buf[cpu] ||
+ subbufs_consumed > chan->n_subbufs)
return;
buf = chan->buf[cpu];
- buf->subbufs_consumed += subbufs_consumed;
- if (buf->subbufs_consumed > buf->subbufs_produced)
+ if (subbufs_consumed > buf->subbufs_produced - buf->subbufs_consumed)
buf->subbufs_consumed = buf->subbufs_produced;
+ else
+ buf->subbufs_consumed += subbufs_consumed;
}
EXPORT_SYMBOL_GPL(relay_subbufs_consumed);
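
The relay fix above bounds the caller-supplied increment against the still-unconsumed space (produced minus consumed) before applying it, instead of adding first and clamping afterwards. A tiny standalone check of that arithmetic (function name is illustrative):

    #include <assert.h>

    static unsigned long clamp_consume(unsigned long produced,
                                       unsigned long consumed,
                                       unsigned long n)
    {
            /* Mirror of the bound used above: never move consumed past produced. */
            if (n > produced - consumed)
                    return produced;
            return consumed + n;
    }

    int main(void)
    {
            assert(clamp_consume(10, 7, 2) == 9);   /* normal case */
            assert(clamp_consume(10, 7, 5) == 10);  /* over-consume is clamped */
            return 0;
    }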
diff --git a/kernel/sched.c b/kernel/sched.c
index 7299083e69e..6cc1fd5d507 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -231,13 +231,20 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
spin_lock(&rt_b->rt_runtime_lock);
for (;;) {
+ unsigned long delta;
+ ktime_t soft, hard;
+
if (hrtimer_active(&rt_b->rt_period_timer))
break;
now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);
- hrtimer_start_expires(&rt_b->rt_period_timer,
- HRTIMER_MODE_ABS);
+
+ soft = hrtimer_get_softexpires(&rt_b->rt_period_timer);
+ hard = hrtimer_get_expires(&rt_b->rt_period_timer);
+ delta = ktime_to_ns(ktime_sub(hard, soft));
+ __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
+ HRTIMER_MODE_ABS, 0);
}
spin_unlock(&rt_b->rt_runtime_lock);
}
@@ -331,6 +338,13 @@ static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
*/
static DEFINE_SPINLOCK(task_group_lock);
+#ifdef CONFIG_SMP
+static int root_task_group_empty(void)
+{
+ return list_empty(&root_task_group.children);
+}
+#endif
+
#ifdef CONFIG_FAIR_GROUP_SCHED
#ifdef CONFIG_USER_SCHED
# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
@@ -391,6 +405,13 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
#else
+#ifdef CONFIG_SMP
+static int root_task_group_empty(void)
+{
+ return 1;
+}
+#endif
+
static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
@@ -467,11 +488,17 @@ struct rt_rq {
struct rt_prio_array active;
unsigned long rt_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
- int highest_prio; /* highest queued rt task prio */
+ struct {
+ int curr; /* highest queued rt task prio */
+#ifdef CONFIG_SMP
+ int next; /* next highest */
+#endif
+ } highest_prio;
#endif
#ifdef CONFIG_SMP
unsigned long rt_nr_migratory;
int overloaded;
+ struct plist_head pushable_tasks;
#endif
int rt_throttled;
u64 rt_time;
@@ -549,7 +576,6 @@ struct rq {
unsigned long nr_running;
#define CPU_LOAD_IDX_MAX 5
unsigned long cpu_load[CPU_LOAD_IDX_MAX];
- unsigned char idle_at_tick;
#ifdef CONFIG_NO_HZ
unsigned long last_tick_seen;
unsigned char in_nohz_recently;
@@ -590,6 +616,7 @@ struct rq {
struct root_domain *rd;
struct sched_domain *sd;
+ unsigned char idle_at_tick;
/* For active balancing */
int active_balance;
int push_cpu;
@@ -618,9 +645,6 @@ struct rq {
/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
/* sys_sched_yield() stats */
- unsigned int yld_exp_empty;
- unsigned int yld_act_empty;
- unsigned int yld_both_empty;
unsigned int yld_count;
/* schedule() stats */
@@ -1093,7 +1117,7 @@ static void hrtick_start(struct rq *rq, u64 delay)
if (rq == this_rq()) {
hrtimer_restart(timer);
} else if (!rq->hrtick_csd_pending) {
- __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd);
+ __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
rq->hrtick_csd_pending = 1;
}
}
@@ -1129,7 +1153,8 @@ static __init void init_hrtick(void)
*/
static void hrtick_start(struct rq *rq, u64 delay)
{
- hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), HRTIMER_MODE_REL);
+ __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
+ HRTIMER_MODE_REL, 0);
}
static inline void init_hrtick(void)
@@ -1183,10 +1208,10 @@ static void resched_task(struct task_struct *p)
assert_spin_locked(&task_rq(p)->lock);
- if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
+ if (test_tsk_need_resched(p))
return;
- set_tsk_thread_flag(p, TIF_NEED_RESCHED);
+ set_tsk_need_resched(p);
cpu = task_cpu(p);
if (cpu == smp_processor_id())
@@ -1242,7 +1267,7 @@ void wake_up_idle_cpu(int cpu)
* lockless. The worst case is that the other CPU runs the
* idle task through an additional NOOP schedule()
*/
- set_tsk_thread_flag(rq->idle, TIF_NEED_RESCHED);
+ set_tsk_need_resched(rq->idle);
/* NEED_RESCHED must be visible before we test polling */
smp_mb();
@@ -1610,21 +1635,42 @@ static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
#endif
+#ifdef CONFIG_PREEMPT
+
/*
- * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
+ * fair double_lock_balance: Safely acquires both rq->locks in a fair
+ * way at the expense of forcing extra atomic operations in all
+ * invocations. This assures that the double_lock is acquired using the
+ * same underlying policy as the spinlock_t on this architecture, which
+ * reduces latency compared to the unfair variant below. However, it
+ * also adds more overhead and therefore may reduce throughput.
*/
-static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
+static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
+ __releases(this_rq->lock)
+ __acquires(busiest->lock)
+ __acquires(this_rq->lock)
+{
+ spin_unlock(&this_rq->lock);
+ double_rq_lock(this_rq, busiest);
+
+ return 1;
+}
+
+#else
+/*
+ * Unfair double_lock_balance: Optimizes throughput at the expense of
+ * latency by eliminating extra atomic operations when the locks are
+ * already in proper order on entry. This favors lower cpu-ids and will
+ * grant the double lock to lower cpus over higher ids under contention,
+ * regardless of entry order into the function.
+ */
+static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
__releases(this_rq->lock)
__acquires(busiest->lock)
__acquires(this_rq->lock)
{
int ret = 0;
- if (unlikely(!irqs_disabled())) {
- /* printk() doesn't work good under rq->lock */
- spin_unlock(&this_rq->lock);
- BUG_ON(1);
- }
if (unlikely(!spin_trylock(&busiest->lock))) {
if (busiest < this_rq) {
spin_unlock(&this_rq->lock);
@@ -1637,6 +1683,22 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
return ret;
}
+#endif /* CONFIG_PREEMPT */
+
+/*
+ * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
+ */
+static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
+{
+ if (unlikely(!irqs_disabled())) {
+		/* printk() doesn't work well under rq->lock */
+ spin_unlock(&this_rq->lock);
+ BUG_ON(1);
+ }
+
+ return _double_lock_balance(this_rq, busiest);
+}
+
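
The unfair _double_lock_balance() above keeps the long-standing trick of ordering the two runqueue locks by address when the trylock fails, so two CPUs balancing against each other cannot deadlock. A generic illustration of that rule with ordinary mutexes (lock_pair() is not a kernel interface):

    #include <pthread.h>
    #include <stdint.h>

    /* Take the lower-addressed lock first; every caller then agrees on the
     * acquisition order, which rules out ABBA deadlock. */
    static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
    {
            if (a == b) {
                    pthread_mutex_lock(a);
                    return;
            }
            if ((uintptr_t)a < (uintptr_t)b) {
                    pthread_mutex_lock(a);
                    pthread_mutex_lock(b);
            } else {
                    pthread_mutex_lock(b);
                    pthread_mutex_lock(a);
            }
    }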
static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
__releases(busiest->lock)
{
@@ -1705,6 +1767,9 @@ static void update_avg(u64 *avg, u64 sample)
static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
{
+ if (wakeup)
+ p->se.start_runtime = p->se.sum_exec_runtime;
+
sched_info_queued(p);
p->sched_class->enqueue_task(rq, p, wakeup);
p->se.on_rq = 1;
@@ -1712,10 +1777,15 @@ static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
{
- if (sleep && p->se.last_wakeup) {
- update_avg(&p->se.avg_overlap,
- p->se.sum_exec_runtime - p->se.last_wakeup);
- p->se.last_wakeup = 0;
+ if (sleep) {
+ if (p->se.last_wakeup) {
+ update_avg(&p->se.avg_overlap,
+ p->se.sum_exec_runtime - p->se.last_wakeup);
+ p->se.last_wakeup = 0;
+ } else {
+ update_avg(&p->se.avg_wakeup,
+ sysctl_sched_wakeup_granularity);
+ }
}
sched_info_dequeued(p);
@@ -2017,7 +2087,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
* it must be off the runqueue _entirely_, and not
* preempted!
*
- * So if it wa still runnable (but just not actively
+ * So if it was still runnable (but just not actively
* running right now), it's preempted, and we should
* yield - it could be a while.
*/
@@ -2267,7 +2337,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
sync = 0;
#ifdef CONFIG_SMP
- if (sched_feat(LB_WAKEUP_UPDATE)) {
+ if (sched_feat(LB_WAKEUP_UPDATE) && !root_task_group_empty()) {
struct sched_domain *sd;
this_cpu = raw_smp_processor_id();
@@ -2345,6 +2415,22 @@ out_activate:
activate_task(rq, p, 1);
success = 1;
+ /*
+ * Only attribute actual wakeups done by this task.
+ */
+ if (!in_interrupt()) {
+ struct sched_entity *se = &current->se;
+ u64 sample = se->sum_exec_runtime;
+
+ if (se->last_wakeup)
+ sample -= se->last_wakeup;
+ else
+ sample -= se->start_runtime;
+ update_avg(&se->avg_wakeup, sample);
+
+ se->last_wakeup = se->sum_exec_runtime;
+ }
+
out_running:
trace_sched_wakeup(rq, p, success);
check_preempt_curr(rq, p, sync);
@@ -2355,8 +2441,6 @@ out_running:
p->sched_class->task_wake_up(rq, p);
#endif
out:
- current->se.last_wakeup = current->se.sum_exec_runtime;
-
task_rq_unlock(rq, &flags);
return success;
@@ -2386,6 +2470,8 @@ static void __sched_fork(struct task_struct *p)
p->se.prev_sum_exec_runtime = 0;
p->se.last_wakeup = 0;
p->se.avg_overlap = 0;
+ p->se.start_runtime = 0;
+ p->se.avg_wakeup = sysctl_sched_wakeup_granularity;
#ifdef CONFIG_SCHEDSTATS
p->se.wait_start = 0;
@@ -2448,6 +2534,8 @@ void sched_fork(struct task_struct *p, int clone_flags)
/* Want to start with kernel preemption disabled. */
task_thread_info(p)->preempt_count = 1;
#endif
+ plist_node_init(&p->pushable_tasks, MAX_PRIO);
+
put_cpu();
}
@@ -2491,7 +2579,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
#ifdef CONFIG_PREEMPT_NOTIFIERS
/**
- * preempt_notifier_register - tell me when current is being being preempted & rescheduled
+ * preempt_notifier_register - tell me when current is being preempted & rescheduled
* @notifier: notifier struct to register
*/
void preempt_notifier_register(struct preempt_notifier *notifier)
@@ -2588,6 +2676,12 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
{
struct mm_struct *mm = rq->prev_mm;
long prev_state;
+#ifdef CONFIG_SMP
+ int post_schedule = 0;
+
+ if (current->sched_class->needs_post_schedule)
+ post_schedule = current->sched_class->needs_post_schedule(rq);
+#endif
rq->prev_mm = NULL;
@@ -2606,7 +2700,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
finish_arch_switch(prev);
finish_lock_switch(rq, prev);
#ifdef CONFIG_SMP
- if (current->sched_class->post_schedule)
+ if (post_schedule)
current->sched_class->post_schedule(rq);
#endif
@@ -2913,6 +3007,7 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
struct sched_domain *sd, enum cpu_idle_type idle,
int *all_pinned)
{
+ int tsk_cache_hot = 0;
/*
* We do not migrate tasks that are:
* 1) running (obviously), or
@@ -2936,10 +3031,11 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
* 2) too many balance attempts have failed.
*/
- if (!task_hot(p, rq->clock, sd) ||
- sd->nr_balance_failed > sd->cache_nice_tries) {
+ tsk_cache_hot = task_hot(p, rq->clock, sd);
+ if (!tsk_cache_hot ||
+ sd->nr_balance_failed > sd->cache_nice_tries) {
#ifdef CONFIG_SCHEDSTATS
- if (task_hot(p, rq->clock, sd)) {
+ if (tsk_cache_hot) {
schedstat_inc(sd, lb_hot_gained[idle]);
schedstat_inc(p, se.nr_forced_migrations);
}
@@ -2947,7 +3043,7 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
return 1;
}
- if (task_hot(p, rq->clock, sd)) {
+ if (tsk_cache_hot) {
schedstat_inc(p, se.nr_failed_migrations_hot);
return 0;
}
@@ -2987,6 +3083,16 @@ next:
pulled++;
rem_load_move -= p->se.load.weight;
+#ifdef CONFIG_PREEMPT
+ /*
+ * NEWIDLE balancing is a source of latency, so preemptible kernels
+ * will stop after the first task is pulled to minimize the critical
+ * section.
+ */
+ if (idle == CPU_NEWLY_IDLE)
+ goto out;
+#endif
+
/*
* We only want to steal up to the prescribed amount of weighted load.
*/
@@ -3033,9 +3139,15 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
sd, idle, all_pinned, &this_best_prio);
class = class->next;
+#ifdef CONFIG_PREEMPT
+ /*
+ * NEWIDLE balancing is a source of latency, so preemptible
+ * kernels will stop after the first task is pulled to minimize
+ * the critical section.
+ */
if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
break;
-
+#endif
} while (class && max_load_move > total_load_moved);
return total_load_moved > 0;
@@ -3085,246 +3197,480 @@ static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
return 0;
}
-
+/********** Helpers for find_busiest_group ************************/
/*
- * find_busiest_group finds and returns the busiest CPU group within the
- * domain. It calculates and returns the amount of weighted load which
- * should be moved to restore balance via the imbalance parameter.
+ * sd_lb_stats - Structure to store the statistics of a sched_domain
+ * during load balancing.
*/
-static struct sched_group *
-find_busiest_group(struct sched_domain *sd, int this_cpu,
- unsigned long *imbalance, enum cpu_idle_type idle,
- int *sd_idle, const struct cpumask *cpus, int *balance)
-{
- struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
- unsigned long max_load, avg_load, total_load, this_load, total_pwr;
- unsigned long max_pull;
- unsigned long busiest_load_per_task, busiest_nr_running;
- unsigned long this_load_per_task, this_nr_running;
- int load_idx, group_imb = 0;
+struct sd_lb_stats {
+ struct sched_group *busiest; /* Busiest group in this sd */
+ struct sched_group *this; /* Local group in this sd */
+ unsigned long total_load; /* Total load of all groups in sd */
+ unsigned long total_pwr; /* Total power of all groups in sd */
+ unsigned long avg_load; /* Average load across all groups in sd */
+
+ /** Statistics of this group */
+ unsigned long this_load;
+ unsigned long this_load_per_task;
+ unsigned long this_nr_running;
+
+ /* Statistics of the busiest group */
+ unsigned long max_load;
+ unsigned long busiest_load_per_task;
+ unsigned long busiest_nr_running;
+
+ int group_imb; /* Is there imbalance in this sd */
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
- int power_savings_balance = 1;
- unsigned long leader_nr_running = 0, min_load_per_task = 0;
- unsigned long min_nr_running = ULONG_MAX;
- struct sched_group *group_min = NULL, *group_leader = NULL;
+ int power_savings_balance; /* Is powersave balance needed for this sd */
+ struct sched_group *group_min; /* Least loaded group in sd */
+ struct sched_group *group_leader; /* Group which relieves group_min */
+ unsigned long min_load_per_task; /* load_per_task in group_min */
+ unsigned long leader_nr_running; /* Nr running of group_leader */
+ unsigned long min_nr_running; /* Nr running of group_min */
#endif
+};
+
+/*
+ * sg_lb_stats - stats of a sched_group required for load_balancing
+ */
+struct sg_lb_stats {
+	unsigned long avg_load; /* Avg load across the CPUs of the group */
+ unsigned long group_load; /* Total load over the CPUs of the group */
+ unsigned long sum_nr_running; /* Nr tasks running in the group */
+ unsigned long sum_weighted_load; /* Weighted load of group's tasks */
+ unsigned long group_capacity;
+ int group_imb; /* Is there an imbalance in the group ? */
+};
+
+/**
+ * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
+ * @group: The group whose first cpu is to be returned.
+ */
+static inline unsigned int group_first_cpu(struct sched_group *group)
+{
+ return cpumask_first(sched_group_cpus(group));
+}
- max_load = this_load = total_load = total_pwr = 0;
- busiest_load_per_task = busiest_nr_running = 0;
- this_load_per_task = this_nr_running = 0;
+/**
+ * get_sd_load_idx - Obtain the load index for a given sched domain.
+ * @sd: The sched_domain whose load_idx is to be obtained.
+ * @idle: The idle status of the CPU for whose sd load_idx is obtained.
+ */
+static inline int get_sd_load_idx(struct sched_domain *sd,
+ enum cpu_idle_type idle)
+{
+ int load_idx;
- if (idle == CPU_NOT_IDLE)
+ switch (idle) {
+ case CPU_NOT_IDLE:
load_idx = sd->busy_idx;
- else if (idle == CPU_NEWLY_IDLE)
+ break;
+
+ case CPU_NEWLY_IDLE:
load_idx = sd->newidle_idx;
- else
+ break;
+ default:
load_idx = sd->idle_idx;
+ break;
+ }
- do {
- unsigned long load, group_capacity, max_cpu_load, min_cpu_load;
- int local_group;
- int i;
- int __group_imb = 0;
- unsigned int balance_cpu = -1, first_idle_cpu = 0;
- unsigned long sum_nr_running, sum_weighted_load;
- unsigned long sum_avg_load_per_task;
- unsigned long avg_load_per_task;
+ return load_idx;
+}
- local_group = cpumask_test_cpu(this_cpu,
- sched_group_cpus(group));
- if (local_group)
- balance_cpu = cpumask_first(sched_group_cpus(group));
+#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+/**
+ * init_sd_power_savings_stats - Initialize power savings statistics for
+ * the given sched_domain, during load balancing.
+ *
+ * @sd: Sched domain whose power-savings statistics are to be initialized.
+ * @sds: Variable containing the statistics for sd.
+ * @idle: Idle status of the CPU at which we're performing load-balancing.
+ */
+static inline void init_sd_power_savings_stats(struct sched_domain *sd,
+ struct sd_lb_stats *sds, enum cpu_idle_type idle)
+{
+ /*
+ * Busy processors will not participate in power savings
+ * balance.
+ */
+ if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
+ sds->power_savings_balance = 0;
+ else {
+ sds->power_savings_balance = 1;
+ sds->min_nr_running = ULONG_MAX;
+ sds->leader_nr_running = 0;
+ }
+}
+
+/**
+ * update_sd_power_savings_stats - Update the power saving stats for a
+ * sched_domain while performing load balancing.
+ *
+ * @group: sched_group belonging to the sched_domain under consideration.
+ * @sds: Variable containing the statistics of the sched_domain
+ * @local_group: Does group contain the CPU for which we're performing
+ * load balancing ?
+ * @sgs: Variable containing the statistics of the group.
+ */
+static inline void update_sd_power_savings_stats(struct sched_group *group,
+ struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
+{
- /* Tally up the load of all CPUs in the group */
- sum_weighted_load = sum_nr_running = avg_load = 0;
- sum_avg_load_per_task = avg_load_per_task = 0;
+ if (!sds->power_savings_balance)
+ return;
- max_cpu_load = 0;
- min_cpu_load = ~0UL;
+ /*
+ * If the local group is idle or completely loaded
+ * no need to do power savings balance at this domain
+ */
+ if (local_group && (sds->this_nr_running >= sgs->group_capacity ||
+ !sds->this_nr_running))
+ sds->power_savings_balance = 0;
- for_each_cpu_and(i, sched_group_cpus(group), cpus) {
- struct rq *rq = cpu_rq(i);
+ /*
+ * If a group is already running at full capacity or idle,
+ * don't include that group in power savings calculations
+ */
+ if (!sds->power_savings_balance ||
+ sgs->sum_nr_running >= sgs->group_capacity ||
+ !sgs->sum_nr_running)
+ return;
- if (*sd_idle && rq->nr_running)
- *sd_idle = 0;
+ /*
+ * Calculate the group which has the least non-idle load.
+ * This is the group from where we need to pick up the load
+ * for saving power
+ */
+ if ((sgs->sum_nr_running < sds->min_nr_running) ||
+ (sgs->sum_nr_running == sds->min_nr_running &&
+ group_first_cpu(group) > group_first_cpu(sds->group_min))) {
+ sds->group_min = group;
+ sds->min_nr_running = sgs->sum_nr_running;
+ sds->min_load_per_task = sgs->sum_weighted_load /
+ sgs->sum_nr_running;
+ }
- /* Bias balancing toward cpus of our domain */
- if (local_group) {
- if (idle_cpu(i) && !first_idle_cpu) {
- first_idle_cpu = 1;
- balance_cpu = i;
- }
+ /*
+ * Calculate the group which is almost near its
+ * capacity but still has some space to pick up some load
+ * from other group and save more power
+ */
+ if (sgs->sum_nr_running > sgs->group_capacity - 1)
+ return;
- load = target_load(i, load_idx);
- } else {
- load = source_load(i, load_idx);
- if (load > max_cpu_load)
- max_cpu_load = load;
- if (min_cpu_load > load)
- min_cpu_load = load;
- }
+ if (sgs->sum_nr_running > sds->leader_nr_running ||
+ (sgs->sum_nr_running == sds->leader_nr_running &&
+ group_first_cpu(group) < group_first_cpu(sds->group_leader))) {
+ sds->group_leader = group;
+ sds->leader_nr_running = sgs->sum_nr_running;
+ }
+}
- avg_load += load;
- sum_nr_running += rq->nr_running;
- sum_weighted_load += weighted_cpuload(i);
+/**
+ * check_power_save_busiest_group - see if there is potential for some power-savings balance
+ * @sds: Variable containing the statistics of the sched_domain
+ * under consideration.
+ * @this_cpu: Cpu at which we're currently performing load-balancing.
+ * @imbalance: Variable to store the imbalance.
+ *
+ * Description:
+ * Check if we have potential to perform some power-savings balance.
+ * If yes, set the busiest group to be the least loaded group in the
+ * sched_domain, so that its CPUs can be put to idle.
+ *
+ * Returns 1 if there is potential to perform power-savings balance.
+ * Else returns 0.
+ */
+static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
+ int this_cpu, unsigned long *imbalance)
+{
+ if (!sds->power_savings_balance)
+ return 0;
- sum_avg_load_per_task += cpu_avg_load_per_task(i);
- }
+ if (sds->this != sds->group_leader ||
+ sds->group_leader == sds->group_min)
+ return 0;
- /*
- * First idle cpu or the first cpu(busiest) in this sched group
- * is eligible for doing load balancing at this and above
- * domains. In the newly idle case, we will allow all the cpu's
- * to do the newly idle load balance.
- */
- if (idle != CPU_NEWLY_IDLE && local_group &&
- balance_cpu != this_cpu && balance) {
- *balance = 0;
- goto ret;
- }
+ *imbalance = sds->min_load_per_task;
+ sds->busiest = sds->group_min;
- total_load += avg_load;
- total_pwr += group->__cpu_power;
+ if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
+ cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
+ group_first_cpu(sds->group_leader);
+ }
- /* Adjust by relative CPU power of the group */
- avg_load = sg_div_cpu_power(group,
- avg_load * SCHED_LOAD_SCALE);
+ return 1;
+}
+#else /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
+static inline void init_sd_power_savings_stats(struct sched_domain *sd,
+ struct sd_lb_stats *sds, enum cpu_idle_type idle)
+{
+ return;
+}
- /*
- * Consider the group unbalanced when the imbalance is larger
- * than the average weight of two tasks.
- *
- * APZ: with cgroup the avg task weight can vary wildly and
- * might not be a suitable number - should we keep a
- * normalized nr_running number somewhere that negates
- * the hierarchy?
- */
- avg_load_per_task = sg_div_cpu_power(group,
- sum_avg_load_per_task * SCHED_LOAD_SCALE);
+static inline void update_sd_power_savings_stats(struct sched_group *group,
+ struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
+{
+ return;
+}
+
+static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
+ int this_cpu, unsigned long *imbalance)
+{
+ return 0;
+}
+#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
+
+
+/**
+ * update_sg_lb_stats - Update sched_group's statistics for load balancing.
+ * @group: sched_group whose statistics are to be updated.
+ * @this_cpu: Cpu for which load balance is currently performed.
+ * @idle: Idle status of this_cpu
+ * @load_idx: Load index of sched_domain of this_cpu for load calc.
+ * @sd_idle: Idle status of the sched_domain containing group.
+ * @local_group: Does group contain this_cpu.
+ * @cpus: Set of cpus considered for load balancing.
+ * @balance: Should we balance.
+ * @sgs: variable to hold the statistics for this group.
+ */
+static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
+ enum cpu_idle_type idle, int load_idx, int *sd_idle,
+ int local_group, const struct cpumask *cpus,
+ int *balance, struct sg_lb_stats *sgs)
+{
+ unsigned long load, max_cpu_load, min_cpu_load;
+ int i;
+ unsigned int balance_cpu = -1, first_idle_cpu = 0;
+ unsigned long sum_avg_load_per_task;
+ unsigned long avg_load_per_task;
+
+ if (local_group)
+ balance_cpu = group_first_cpu(group);
+
+ /* Tally up the load of all CPUs in the group */
+ sum_avg_load_per_task = avg_load_per_task = 0;
+ max_cpu_load = 0;
+ min_cpu_load = ~0UL;
- if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
- __group_imb = 1;
+ for_each_cpu_and(i, sched_group_cpus(group), cpus) {
+ struct rq *rq = cpu_rq(i);
- group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
+ if (*sd_idle && rq->nr_running)
+ *sd_idle = 0;
+ /* Bias balancing toward cpus of our domain */
if (local_group) {
- this_load = avg_load;
- this = group;
- this_nr_running = sum_nr_running;
- this_load_per_task = sum_weighted_load;
- } else if (avg_load > max_load &&
- (sum_nr_running > group_capacity || __group_imb)) {
- max_load = avg_load;
- busiest = group;
- busiest_nr_running = sum_nr_running;
- busiest_load_per_task = sum_weighted_load;
- group_imb = __group_imb;
+ if (idle_cpu(i) && !first_idle_cpu) {
+ first_idle_cpu = 1;
+ balance_cpu = i;
+ }
+
+ load = target_load(i, load_idx);
+ } else {
+ load = source_load(i, load_idx);
+ if (load > max_cpu_load)
+ max_cpu_load = load;
+ if (min_cpu_load > load)
+ min_cpu_load = load;
}
-#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
- /*
- * Busy processors will not participate in power savings
- * balance.
- */
- if (idle == CPU_NOT_IDLE ||
- !(sd->flags & SD_POWERSAVINGS_BALANCE))
- goto group_next;
+ sgs->group_load += load;
+ sgs->sum_nr_running += rq->nr_running;
+ sgs->sum_weighted_load += weighted_cpuload(i);
- /*
- * If the local group is idle or completely loaded
- * no need to do power savings balance at this domain
- */
- if (local_group && (this_nr_running >= group_capacity ||
- !this_nr_running))
- power_savings_balance = 0;
+ sum_avg_load_per_task += cpu_avg_load_per_task(i);
+ }
- /*
- * If a group is already running at full capacity or idle,
- * don't include that group in power savings calculations
- */
- if (!power_savings_balance || sum_nr_running >= group_capacity
- || !sum_nr_running)
- goto group_next;
+ /*
+ * First idle cpu or the first cpu(busiest) in this sched group
+ * is eligible for doing load balancing at this and above
+ * domains. In the newly idle case, we will allow all the cpu's
+ * to do the newly idle load balance.
+ */
+ if (idle != CPU_NEWLY_IDLE && local_group &&
+ balance_cpu != this_cpu && balance) {
+ *balance = 0;
+ return;
+ }
- /*
- * Calculate the group which has the least non-idle load.
- * This is the group from where we need to pick up the load
- * for saving power
- */
- if ((sum_nr_running < min_nr_running) ||
- (sum_nr_running == min_nr_running &&
- cpumask_first(sched_group_cpus(group)) >
- cpumask_first(sched_group_cpus(group_min)))) {
- group_min = group;
- min_nr_running = sum_nr_running;
- min_load_per_task = sum_weighted_load /
- sum_nr_running;
- }
+ /* Adjust by relative CPU power of the group */
+ sgs->avg_load = sg_div_cpu_power(group,
+ sgs->group_load * SCHED_LOAD_SCALE);
- /*
- * Calculate the group which is almost near its
- * capacity but still has some space to pick up some load
- * from other group and save more power
- */
- if (sum_nr_running <= group_capacity - 1) {
- if (sum_nr_running > leader_nr_running ||
- (sum_nr_running == leader_nr_running &&
- cpumask_first(sched_group_cpus(group)) <
- cpumask_first(sched_group_cpus(group_leader)))) {
- group_leader = group;
- leader_nr_running = sum_nr_running;
- }
+
+ /*
+ * Consider the group unbalanced when the imbalance is larger
+ * than the average weight of two tasks.
+ *
+ * APZ: with cgroup the avg task weight can vary wildly and
+ * might not be a suitable number - should we keep a
+ * normalized nr_running number somewhere that negates
+ * the hierarchy?
+ */
+ avg_load_per_task = sg_div_cpu_power(group,
+ sum_avg_load_per_task * SCHED_LOAD_SCALE);
+
+ if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
+ sgs->group_imb = 1;
+
+ sgs->group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
+
+}
+
+/**
+ * update_sd_lb_stats - Update sched_group's statistics for load balancing.
+ * @sd: sched_domain whose statistics are to be updated.
+ * @this_cpu: Cpu for which load balance is currently performed.
+ * @idle: Idle status of this_cpu
+ * @sd_idle: Idle status of the sched_domain containing group.
+ * @cpus: Set of cpus considered for load balancing.
+ * @balance: Should we balance.
+ * @sds: variable to hold the statistics for this sched_domain.
+ */
+static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
+ enum cpu_idle_type idle, int *sd_idle,
+ const struct cpumask *cpus, int *balance,
+ struct sd_lb_stats *sds)
+{
+ struct sched_group *group = sd->groups;
+ struct sg_lb_stats sgs;
+ int load_idx;
+
+ init_sd_power_savings_stats(sd, sds, idle);
+ load_idx = get_sd_load_idx(sd, idle);
+
+ do {
+ int local_group;
+
+ local_group = cpumask_test_cpu(this_cpu,
+ sched_group_cpus(group));
+ memset(&sgs, 0, sizeof(sgs));
+ update_sg_lb_stats(group, this_cpu, idle, load_idx, sd_idle,
+ local_group, cpus, balance, &sgs);
+
+ if (local_group && balance && !(*balance))
+ return;
+
+ sds->total_load += sgs.group_load;
+ sds->total_pwr += group->__cpu_power;
+
+ if (local_group) {
+ sds->this_load = sgs.avg_load;
+ sds->this = group;
+ sds->this_nr_running = sgs.sum_nr_running;
+ sds->this_load_per_task = sgs.sum_weighted_load;
+ } else if (sgs.avg_load > sds->max_load &&
+ (sgs.sum_nr_running > sgs.group_capacity ||
+ sgs.group_imb)) {
+ sds->max_load = sgs.avg_load;
+ sds->busiest = group;
+ sds->busiest_nr_running = sgs.sum_nr_running;
+ sds->busiest_load_per_task = sgs.sum_weighted_load;
+ sds->group_imb = sgs.group_imb;
}
-group_next:
-#endif
+
+ update_sd_power_savings_stats(group, sds, local_group, &sgs);
group = group->next;
} while (group != sd->groups);
- if (!busiest || this_load >= max_load || busiest_nr_running == 0)
- goto out_balanced;
-
- avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
+}
- if (this_load >= avg_load ||
- 100*max_load <= sd->imbalance_pct*this_load)
- goto out_balanced;
+/**
+ * fix_small_imbalance - Calculate the minor imbalance that exists
+ * amongst the groups of a sched_domain, during
+ * load balancing.
+ * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
+ * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
+ * @imbalance: Variable to store the imbalance.
+ */
+static inline void fix_small_imbalance(struct sd_lb_stats *sds,
+ int this_cpu, unsigned long *imbalance)
+{
+ unsigned long tmp, pwr_now = 0, pwr_move = 0;
+ unsigned int imbn = 2;
+
+ if (sds->this_nr_running) {
+ sds->this_load_per_task /= sds->this_nr_running;
+ if (sds->busiest_load_per_task >
+ sds->this_load_per_task)
+ imbn = 1;
+ } else
+ sds->this_load_per_task =
+ cpu_avg_load_per_task(this_cpu);
- busiest_load_per_task /= busiest_nr_running;
- if (group_imb)
- busiest_load_per_task = min(busiest_load_per_task, avg_load);
+ if (sds->max_load - sds->this_load + sds->busiest_load_per_task >=
+ sds->busiest_load_per_task * imbn) {
+ *imbalance = sds->busiest_load_per_task;
+ return;
+ }
/*
- * We're trying to get all the cpus to the average_load, so we don't
- * want to push ourselves above the average load, nor do we wish to
- * reduce the max loaded cpu below the average load, as either of these
- * actions would just result in more rebalancing later, and ping-pong
- * tasks around. Thus we look for the minimum possible imbalance.
- * Negative imbalances (*we* are more loaded than anyone else) will
- * be counted as no imbalance for these purposes -- we can't fix that
- * by pulling tasks to us. Be careful of negative numbers as they'll
- * appear as very large values with unsigned longs.
+ * OK, we don't have enough imbalance to justify moving tasks,
+ * however we may be able to increase total CPU power used by
+ * moving them.
*/
- if (max_load <= busiest_load_per_task)
- goto out_balanced;
+ pwr_now += sds->busiest->__cpu_power *
+ min(sds->busiest_load_per_task, sds->max_load);
+ pwr_now += sds->this->__cpu_power *
+ min(sds->this_load_per_task, sds->this_load);
+ pwr_now /= SCHED_LOAD_SCALE;
+
+ /* Amount of load we'd subtract */
+ tmp = sg_div_cpu_power(sds->busiest,
+ sds->busiest_load_per_task * SCHED_LOAD_SCALE);
+ if (sds->max_load > tmp)
+ pwr_move += sds->busiest->__cpu_power *
+ min(sds->busiest_load_per_task, sds->max_load - tmp);
+
+ /* Amount of load we'd add */
+ if (sds->max_load * sds->busiest->__cpu_power <
+ sds->busiest_load_per_task * SCHED_LOAD_SCALE)
+ tmp = sg_div_cpu_power(sds->this,
+ sds->max_load * sds->busiest->__cpu_power);
+ else
+ tmp = sg_div_cpu_power(sds->this,
+ sds->busiest_load_per_task * SCHED_LOAD_SCALE);
+ pwr_move += sds->this->__cpu_power *
+ min(sds->this_load_per_task, sds->this_load + tmp);
+ pwr_move /= SCHED_LOAD_SCALE;
+
+ /* Move if we gain throughput */
+ if (pwr_move > pwr_now)
+ *imbalance = sds->busiest_load_per_task;
+}
+
+/**
+ * calculate_imbalance - Calculate the amount of imbalance present within the
+ * groups of a given sched_domain during load balance.
+ * @sds: statistics of the sched_domain whose imbalance is to be calculated.
+ * @this_cpu: Cpu for which currently load balance is being performed.
+ * @imbalance: The variable to store the imbalance.
+ */
+static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
+ unsigned long *imbalance)
+{
+ unsigned long max_pull;
/*
* In the presence of smp nice balancing, certain scenarios can have
* max load less than avg load(as we skip the groups at or below
* its cpu_power, while calculating max_load..)
*/
- if (max_load < avg_load) {
+ if (sds->max_load < sds->avg_load) {
*imbalance = 0;
- goto small_imbalance;
+ return fix_small_imbalance(sds, this_cpu, imbalance);
}
/* Don't want to pull so many tasks that a group would go idle */
- max_pull = min(max_load - avg_load, max_load - busiest_load_per_task);
+ max_pull = min(sds->max_load - sds->avg_load,
+ sds->max_load - sds->busiest_load_per_task);
/* How much load to actually move to equalise the imbalance */
- *imbalance = min(max_pull * busiest->__cpu_power,
- (avg_load - this_load) * this->__cpu_power)
+ *imbalance = min(max_pull * sds->busiest->__cpu_power,
+ (sds->avg_load - sds->this_load) * sds->this->__cpu_power)
/ SCHED_LOAD_SCALE;
/*
@@ -3333,78 +3679,110 @@ group_next:
* a think about bumping its value to force at least one task to be
* moved
*/
- if (*imbalance < busiest_load_per_task) {
- unsigned long tmp, pwr_now, pwr_move;
- unsigned int imbn;
-
-small_imbalance:
- pwr_move = pwr_now = 0;
- imbn = 2;
- if (this_nr_running) {
- this_load_per_task /= this_nr_running;
- if (busiest_load_per_task > this_load_per_task)
- imbn = 1;
- } else
- this_load_per_task = cpu_avg_load_per_task(this_cpu);
+ if (*imbalance < sds->busiest_load_per_task)
+ return fix_small_imbalance(sds, this_cpu, imbalance);
- if (max_load - this_load + busiest_load_per_task >=
- busiest_load_per_task * imbn) {
- *imbalance = busiest_load_per_task;
- return busiest;
- }
+}
+/******* find_busiest_group() helpers end here *********************/
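
As a quick sanity check of the arithmetic in calculate_imbalance() (numbers purely illustrative, with both groups at __cpu_power == SCHED_LOAD_SCALE): for max_load = 3000, avg_load = 2000, this_load = 1500 and busiest_load_per_task = 400, max_pull = min(3000 - 2000, 3000 - 400) = 1000, so *imbalance = min(1000, 2000 - 1500) = 500. The amount moved never lifts this_cpu above the average load and never drags the busiest group below it, matching the "minimum possible imbalance" reasoning preserved in find_busiest_group() below.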
- /*
- * OK, we don't have enough imbalance to justify moving tasks,
- * however we may be able to increase total CPU power used by
- * moving them.
- */
+/**
+ * find_busiest_group - Returns the busiest group within the sched_domain
+ * if there is an imbalance. If there isn't an imbalance, and
+ * the user has opted for power-savings, it returns a group whose
+ * CPUs can be put to idle by rebalancing those tasks elsewhere, if
+ * such a group exists.
+ *
+ * Also calculates the amount of weighted load which should be moved
+ * to restore balance.
+ *
+ * @sd: The sched_domain whose busiest group is to be returned.
+ * @this_cpu: The cpu for which load balancing is currently being performed.
+ * @imbalance: Variable which stores amount of weighted load which should
+ * be moved to restore balance/put a group to idle.
+ * @idle: The idle status of this_cpu.
+ * @sd_idle: The idleness of sd
+ * @cpus: The set of CPUs under consideration for load-balancing.
+ * @balance: Pointer to a variable indicating if this_cpu
+ * is the appropriate cpu to perform load balancing at this_level.
+ *
+ * Returns: - the busiest group if imbalance exists.
+ * - If no imbalance and user has opted for power-savings balance,
+ * return the least loaded group whose CPUs can be
+ * put to idle by rebalancing its tasks onto our group.
+ */
+static struct sched_group *
+find_busiest_group(struct sched_domain *sd, int this_cpu,
+ unsigned long *imbalance, enum cpu_idle_type idle,
+ int *sd_idle, const struct cpumask *cpus, int *balance)
+{
+ struct sd_lb_stats sds;
- pwr_now += busiest->__cpu_power *
- min(busiest_load_per_task, max_load);
- pwr_now += this->__cpu_power *
- min(this_load_per_task, this_load);
- pwr_now /= SCHED_LOAD_SCALE;
-
- /* Amount of load we'd subtract */
- tmp = sg_div_cpu_power(busiest,
- busiest_load_per_task * SCHED_LOAD_SCALE);
- if (max_load > tmp)
- pwr_move += busiest->__cpu_power *
- min(busiest_load_per_task, max_load - tmp);
-
- /* Amount of load we'd add */
- if (max_load * busiest->__cpu_power <
- busiest_load_per_task * SCHED_LOAD_SCALE)
- tmp = sg_div_cpu_power(this,
- max_load * busiest->__cpu_power);
- else
- tmp = sg_div_cpu_power(this,
- busiest_load_per_task * SCHED_LOAD_SCALE);
- pwr_move += this->__cpu_power *
- min(this_load_per_task, this_load + tmp);
- pwr_move /= SCHED_LOAD_SCALE;
+ memset(&sds, 0, sizeof(sds));
- /* Move if we gain throughput */
- if (pwr_move > pwr_now)
- *imbalance = busiest_load_per_task;
- }
+ /*
+ * Compute the various statistics relevant for load balancing at
+ * this level.
+ */
+ update_sd_lb_stats(sd, this_cpu, idle, sd_idle, cpus,
+ balance, &sds);
+
+ /* Cases where imbalance does not exist from POV of this_cpu */
+ /* 1) this_cpu is not the appropriate cpu to perform load balancing
+ * at this level.
+ * 2) There is no busy sibling group to pull from.
+ * 3) This group is the busiest group.
+ * 4) This group is busier than the average busyness at this
+ * sched_domain.
+ * 5) The imbalance is within the specified limit.
+ * 6) Any rebalance would lead to ping-pong
+ */
+ if (balance && !(*balance))
+ goto ret;
- return busiest;
+ if (!sds.busiest || sds.busiest_nr_running == 0)
+ goto out_balanced;
-out_balanced:
-#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
- if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
- goto ret;
+ if (sds.this_load >= sds.max_load)
+ goto out_balanced;
- if (this == group_leader && group_leader != group_min) {
- *imbalance = min_load_per_task;
- if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
- cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
- cpumask_first(sched_group_cpus(group_leader));
- }
- return group_min;
- }
-#endif
+ sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
+
+ if (sds.this_load >= sds.avg_load)
+ goto out_balanced;
+
+ if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
+ goto out_balanced;
+
+ sds.busiest_load_per_task /= sds.busiest_nr_running;
+ if (sds.group_imb)
+ sds.busiest_load_per_task =
+ min(sds.busiest_load_per_task, sds.avg_load);
+
+ /*
+ * We're trying to get all the cpus to the average_load, so we don't
+ * want to push ourselves above the average load, nor do we wish to
+ * reduce the max loaded cpu below the average load, as either of these
+ * actions would just result in more rebalancing later, and ping-pong
+ * tasks around. Thus we look for the minimum possible imbalance.
+ * Negative imbalances (*we* are more loaded than anyone else) will
+ * be counted as no imbalance for these purposes -- we can't fix that
+ * by pulling tasks to us. Be careful of negative numbers as they'll
+ * appear as very large values with unsigned longs.
+ */
+ if (sds.max_load <= sds.busiest_load_per_task)
+ goto out_balanced;
+
+ /* Looks like there is an imbalance. Compute it */
+ calculate_imbalance(&sds, this_cpu, imbalance);
+ return sds.busiest;
+
+out_balanced:
+ /*
+ * There is no obvious imbalance. But check if we can do some balancing
+ * to save power.
+ */
+ if (check_power_save_busiest_group(&sds, this_cpu, imbalance))
+ return sds.busiest;
ret:
*imbalance = 0;
return NULL;
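For orientation, the way load_balance() consumes this helper is unchanged by the refactoring; a simplified sketch of the call sequence (locking and retry logic omitted) looks roughly like this:

	group = find_busiest_group(sd, this_cpu, &imbalance, idle,
				   &sd_idle, cpus, &balance);
	if (group) {
		busiest = find_busiest_queue(group, idle, imbalance, cpus);
		if (busiest)
			/* move up to 'imbalance' worth of weighted load */
			ld_moved = move_tasks(this_rq, this_cpu, busiest,
					      imbalance, sd, idle,
					      &all_pinned);
	}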
@@ -3448,19 +3826,23 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
*/
#define MAX_PINNED_INTERVAL 512
+/* Working cpumask for load_balance and load_balance_newidle. */
+static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
+
/*
* Check this_cpu to ensure it is balanced within domain. Attempt to move
* tasks if there is an imbalance.
*/
static int load_balance(int this_cpu, struct rq *this_rq,
struct sched_domain *sd, enum cpu_idle_type idle,
- int *balance, struct cpumask *cpus)
+ int *balance)
{
int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
struct sched_group *group;
unsigned long imbalance;
struct rq *busiest;
unsigned long flags;
+ struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
cpumask_setall(cpus);
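The point of the new per-CPU scratch mask is that the balancing paths no longer have to attempt (and possibly fail) a GFP_ATOMIC cpumask allocation; the backing storage is carved out once in sched_init() under CONFIG_CPUMASK_OFFSTACK, as a later hunk shows. The general shape of the pattern, with hypothetical names, is:

static DEFINE_PER_CPU(cpumask_var_t, my_tmpmask);	/* hypothetical */

static void my_balance_step(void)
{
	/* safe: the balancing paths run on a fixed CPU */
	struct cpumask *mask = __get_cpu_var(my_tmpmask);

	cpumask_setall(mask);
	/* ... use mask as scratch space ... */
}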
@@ -3615,8 +3997,7 @@ out:
* this_rq is locked.
*/
static int
-load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
- struct cpumask *cpus)
+load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
{
struct sched_group *group;
struct rq *busiest = NULL;
@@ -3624,6 +4005,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
int ld_moved = 0;
int sd_idle = 0;
int all_pinned = 0;
+ struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
cpumask_setall(cpus);
@@ -3764,10 +4146,6 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
struct sched_domain *sd;
int pulled_task = 0;
unsigned long next_balance = jiffies + HZ;
- cpumask_var_t tmpmask;
-
- if (!alloc_cpumask_var(&tmpmask, GFP_ATOMIC))
- return;
for_each_domain(this_cpu, sd) {
unsigned long interval;
@@ -3778,7 +4156,7 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
if (sd->flags & SD_BALANCE_NEWIDLE)
/* If we've pulled tasks over stop searching: */
pulled_task = load_balance_newidle(this_cpu, this_rq,
- sd, tmpmask);
+ sd);
interval = msecs_to_jiffies(sd->balance_interval);
if (time_after(next_balance, sd->last_balance + interval))
@@ -3793,7 +4171,6 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
*/
this_rq->next_balance = next_balance;
}
- free_cpumask_var(tmpmask);
}
/*
@@ -3943,11 +4320,6 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
unsigned long next_balance = jiffies + 60*HZ;
int update_next_balance = 0;
int need_serialize;
- cpumask_var_t tmp;
-
- /* Fails alloc? Rebalancing probably not a priority right now. */
- if (!alloc_cpumask_var(&tmp, GFP_ATOMIC))
- return;
for_each_domain(cpu, sd) {
if (!(sd->flags & SD_LOAD_BALANCE))
@@ -3972,7 +4344,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
}
if (time_after_eq(jiffies, sd->last_balance + interval)) {
- if (load_balance(cpu, rq, sd, idle, &balance, tmp)) {
+ if (load_balance(cpu, rq, sd, idle, &balance)) {
/*
* We've pulled tasks over so either we're no
* longer idle, or one of our SMT siblings is
@@ -4006,8 +4378,6 @@ out:
*/
if (likely(update_next_balance))
rq->next_balance = next_balance;
-
- free_cpumask_var(tmp);
}
/*
@@ -4057,6 +4427,11 @@ static void run_rebalance_domains(struct softirq_action *h)
#endif
}
+static inline int on_null_domain(int cpu)
+{
+ return !rcu_dereference(cpu_rq(cpu)->sd);
+}
+
/*
* Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
*
@@ -4114,7 +4489,9 @@ static inline void trigger_load_balance(struct rq *rq, int cpu)
cpumask_test_cpu(cpu, nohz.cpu_mask))
return;
#endif
- if (time_after_eq(jiffies, rq->next_balance))
+ /* Don't need to rebalance while attached to NULL domain */
+ if (time_after_eq(jiffies, rq->next_balance) &&
+ likely(!on_null_domain(cpu)))
raise_softirq(SCHED_SOFTIRQ);
}
@@ -4508,11 +4885,33 @@ static inline void schedule_debug(struct task_struct *prev)
#endif
}
+static void put_prev_task(struct rq *rq, struct task_struct *prev)
+{
+ if (prev->state == TASK_RUNNING) {
+ u64 runtime = prev->se.sum_exec_runtime;
+
+ runtime -= prev->se.prev_sum_exec_runtime;
+ runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
+
+ /*
+ * In order to avoid avg_overlap growing stale when we are
+ * indeed overlapping and hence not getting put to sleep, grow
+ * the avg_overlap on preemption.
+ *
+ * We use the average preemption runtime because that
+ * correlates to the amount of cache footprint a task can
+ * build up.
+ */
+ update_avg(&prev->se.avg_overlap, runtime);
+ }
+ prev->sched_class->put_prev_task(rq, prev);
+}
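update_avg(), defined earlier in sched.c, keeps a simple running average; it is roughly a 1/8-weight exponential decay, along these lines:

static void update_avg(u64 *avg, u64 sample)
{
	s64 diff = sample - *avg;

	*avg += diff >> 3;	/* new = old + (sample - old) / 8 */
}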
+
/*
* Pick up the highest-prio task:
*/
static inline struct task_struct *
-pick_next_task(struct rq *rq, struct task_struct *prev)
+pick_next_task(struct rq *rq)
{
const struct sched_class *class;
struct task_struct *p;
@@ -4584,8 +4983,8 @@ need_resched_nonpreemptible:
if (unlikely(!rq->nr_running))
idle_balance(cpu, rq);
- prev->sched_class->put_prev_task(rq, prev);
- next = pick_next_task(rq, prev);
+ put_prev_task(rq, prev);
+ next = pick_next_task(rq);
if (likely(prev != next)) {
sched_info_switch(prev, next);
@@ -4707,7 +5106,7 @@ asmlinkage void __sched preempt_schedule(void)
* between schedule and now.
*/
barrier();
- } while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
+ } while (need_resched());
}
EXPORT_SYMBOL(preempt_schedule);
@@ -4736,7 +5135,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
* between schedule and now.
*/
barrier();
- } while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
+ } while (need_resched());
}
#endif /* CONFIG_PREEMPT */
@@ -4797,11 +5196,17 @@ void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
__wake_up_common(q, mode, 1, 0, NULL);
}
+void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
+{
+ __wake_up_common(q, mode, 1, 0, key);
+}
+
/**
- * __wake_up_sync - wake up threads blocked on a waitqueue.
+ * __wake_up_sync_key - wake up threads blocked on a waitqueue.
* @q: the waitqueue
* @mode: which threads
* @nr_exclusive: how many wake-one or wake-many threads to wake up
+ * @key: opaque value to be passed to wakeup targets
*
* The sync wakeup differs that the waker knows that it will schedule
* away soon, so while the target thread will be woken up, it will not
@@ -4810,8 +5215,8 @@ void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
*
* On UP it can prevent extra preemption.
*/
-void
-__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
+ int nr_exclusive, void *key)
{
unsigned long flags;
int sync = 1;
@@ -4823,9 +5228,18 @@ __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
sync = 0;
spin_lock_irqsave(&q->lock, flags);
- __wake_up_common(q, mode, nr_exclusive, sync, NULL);
+ __wake_up_common(q, mode, nr_exclusive, sync, key);
spin_unlock_irqrestore(&q->lock, flags);
}
+EXPORT_SYMBOL_GPL(__wake_up_sync_key);
+
+/*
+ * __wake_up_sync - see __wake_up_sync_key()
+ */
+void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+{
+ __wake_up_sync_key(q, mode, nr_exclusive, NULL);
+}
EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
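A caller that wants to hand the wakeup targets an opaque cookie now has a sync variant to use as well; a minimal, hypothetical usage sketch:

static DECLARE_WAIT_QUEUE_HEAD(my_waitq);	/* hypothetical queue */

static void my_notify(unsigned long events)
{
	/* wake one exclusive waiter, passing 'events' through as the key */
	__wake_up_sync_key(&my_waitq, TASK_NORMAL, 1, (void *)events);
}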
/**
@@ -5210,7 +5624,7 @@ SYSCALL_DEFINE1(nice, int, increment)
if (increment > 40)
increment = 40;
- nice = PRIO_TO_NICE(current->static_prio) + increment;
+ nice = TASK_NICE(current) + increment;
if (nice < -20)
nice = -20;
if (nice > 19)
@@ -6483,7 +6897,7 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
if (!rq->nr_running)
break;
update_rq_clock(rq);
- next = pick_next_task(rq, rq->curr);
+ next = pick_next_task(rq);
if (!next)
break;
next->sched_class->put_prev_task(rq, next);
@@ -7314,7 +7728,7 @@ cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
{
int group;
- cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
+ cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
group = cpumask_first(mask);
if (sg)
*sg = &per_cpu(sched_group_core, group).sg;
@@ -7343,7 +7757,7 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
group = cpumask_first(mask);
#elif defined(CONFIG_SCHED_SMT)
- cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
+ cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
group = cpumask_first(mask);
#else
group = cpu;
@@ -7686,7 +8100,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
SD_INIT(sd, SIBLING);
set_domain_attribute(sd, attr);
cpumask_and(sched_domain_span(sd),
- &per_cpu(cpu_sibling_map, i), cpu_map);
+ topology_thread_cpumask(i), cpu_map);
sd->parent = p;
p->child = sd;
cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask);
@@ -7697,7 +8111,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
/* Set up CPU (sibling) groups */
for_each_cpu(i, cpu_map) {
cpumask_and(this_sibling_map,
- &per_cpu(cpu_sibling_map, i), cpu_map);
+ topology_thread_cpumask(i), cpu_map);
if (i != cpumask_first(this_sibling_map))
continue;
@@ -8278,11 +8692,15 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
__set_bit(MAX_RT_PRIO, array->bitmap);
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
- rt_rq->highest_prio = MAX_RT_PRIO;
+ rt_rq->highest_prio.curr = MAX_RT_PRIO;
+#ifdef CONFIG_SMP
+ rt_rq->highest_prio.next = MAX_RT_PRIO;
+#endif
#endif
#ifdef CONFIG_SMP
rt_rq->rt_nr_migratory = 0;
rt_rq->overloaded = 0;
+ plist_head_init(&rq->rt.pushable_tasks, &rq->lock);
#endif
rt_rq->rt_time = 0;
@@ -8369,6 +8787,9 @@ void __init sched_init(void)
#ifdef CONFIG_USER_SCHED
alloc_size *= 2;
#endif
+#ifdef CONFIG_CPUMASK_OFFSTACK
+ alloc_size += num_possible_cpus() * cpumask_size();
+#endif
/*
* As sched_init() is called before page_alloc is setup,
* we use alloc_bootmem().
@@ -8406,6 +8827,12 @@ void __init sched_init(void)
ptr += nr_cpu_ids * sizeof(void **);
#endif /* CONFIG_USER_SCHED */
#endif /* CONFIG_RT_GROUP_SCHED */
+#ifdef CONFIG_CPUMASK_OFFSTACK
+ for_each_possible_cpu(i) {
+ per_cpu(load_balance_tmpmask, i) = (void *)ptr;
+ ptr += cpumask_size();
+ }
+#endif /* CONFIG_CPUMASK_OFFSTACK */
}
#ifdef CONFIG_SMP
@@ -9658,7 +10085,7 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
struct cpuacct *ca;
int cpu;
- if (!cpuacct_subsys.active)
+ if (unlikely(!cpuacct_subsys.active))
return;
cpu = task_cpu(tsk);
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index 7ec82c1c61c..819f17ac796 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -45,9 +45,6 @@ static __read_mostly int sched_clock_running;
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__read_mostly int sched_clock_stable;
-#else
-static const int sched_clock_stable = 1;
-#endif
struct sched_clock_data {
/*
@@ -116,14 +113,9 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
s64 delta = now - scd->tick_raw;
u64 clock, min_clock, max_clock;
- WARN_ON_ONCE(!irqs_disabled());
-
if (unlikely(delta < 0))
delta = 0;
- if (unlikely(!sched_clock_running))
- return 0ull;
-
/*
* scd->clock = clamp(scd->tick_gtod + delta,
* max(scd->tick_gtod, scd->clock),
@@ -213,18 +205,20 @@ u64 sched_clock_cpu(int cpu)
return clock;
}
-#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-
void sched_clock_tick(void)
{
- struct sched_clock_data *scd = this_scd();
+ struct sched_clock_data *scd;
u64 now, now_gtod;
+ if (sched_clock_stable)
+ return;
+
if (unlikely(!sched_clock_running))
return;
WARN_ON_ONCE(!irqs_disabled());
+ scd = this_scd();
now_gtod = ktime_to_ns(ktime_get());
now = sched_clock();
@@ -257,6 +251,21 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
+#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
+
+void sched_clock_init(void)
+{
+ sched_clock_running = 1;
+}
+
+u64 sched_clock_cpu(int cpu)
+{
+ if (unlikely(!sched_clock_running))
+ return 0;
+
+ return sched_clock();
+}
+
#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
unsigned long long cpu_clock(int cpu)
diff --git a/kernel/sched_cpupri.h b/kernel/sched_cpupri.h
index 642a94ef8a0..9a7e859b8fb 100644
--- a/kernel/sched_cpupri.h
+++ b/kernel/sched_cpupri.h
@@ -25,7 +25,7 @@ struct cpupri {
#ifdef CONFIG_SMP
int cpupri_find(struct cpupri *cp,
- struct task_struct *p, cpumask_t *lowest_mask);
+ struct task_struct *p, struct cpumask *lowest_mask);
void cpupri_set(struct cpupri *cp, int cpu, int pri);
int cpupri_init(struct cpupri *cp, bool bootmem);
void cpupri_cleanup(struct cpupri *cp);
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 16eeba4e416..467ca72f165 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -272,7 +272,6 @@ static void print_cpu(struct seq_file *m, int cpu)
P(nr_switches);
P(nr_load_updates);
P(nr_uninterruptible);
- SEQ_printf(m, " .%-30s: %lu\n", "jiffies", jiffies);
PN(next_balance);
P(curr->pid);
PN(clock);
@@ -287,9 +286,6 @@ static void print_cpu(struct seq_file *m, int cpu)
#ifdef CONFIG_SCHEDSTATS
#define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, rq->n);
- P(yld_exp_empty);
- P(yld_act_empty);
- P(yld_both_empty);
P(yld_count);
P(sched_switch);
@@ -314,7 +310,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
u64 now = ktime_to_ns(ktime_get());
int cpu;
- SEQ_printf(m, "Sched Debug Version: v0.08, %s %.*s\n",
+ SEQ_printf(m, "Sched Debug Version: v0.09, %s %.*s\n",
init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
init_utsname()->version);
@@ -325,6 +321,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
SEQ_printf(m, " .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
SEQ_printf(m, " .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
+ P(jiffies);
PN(sysctl_sched_latency);
PN(sysctl_sched_min_granularity);
PN(sysctl_sched_wakeup_granularity);
@@ -397,6 +394,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
PN(se.vruntime);
PN(se.sum_exec_runtime);
PN(se.avg_overlap);
+ PN(se.avg_wakeup);
nr_switches = p->nvcsw + p->nivcsw;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 0566f2a03c4..3816f217f11 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1314,16 +1314,63 @@ out:
}
#endif /* CONFIG_SMP */
-static unsigned long wakeup_gran(struct sched_entity *se)
+/*
+ * Adaptive granularity
+ *
+ * se->avg_wakeup gives the average time a task runs until it does a wakeup,
+ * with the limit of wakeup_gran -- when it never does a wakeup.
+ *
+ * So the smaller avg_wakeup is, the faster we want this task to preempt,
+ * but we don't want to treat the preemptee unfairly and therefore allow it
+ * to run for at least the amount of time we'd like to run.
+ *
+ * NOTE: we use 2*avg_wakeup to increase the probability of actually doing one
+ *
+ * NOTE: we use *nr_running to scale with load, this nicely matches the
+ * degrading latency on load.
+ */
+static unsigned long
+adaptive_gran(struct sched_entity *curr, struct sched_entity *se)
+{
+ u64 this_run = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
+ u64 expected_wakeup = 2*se->avg_wakeup * cfs_rq_of(se)->nr_running;
+ u64 gran = 0;
+
+ if (this_run < expected_wakeup)
+ gran = expected_wakeup - this_run;
+
+ return min_t(s64, gran, sysctl_sched_wakeup_granularity);
+}
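A quick walk-through of the adaptive granularity with made-up numbers may help; assume a wakeup granularity of 5ms and nothing else from the patch:

/*
 * Illustrative only: wakeup granularity = 5ms, the waking task's
 * avg_wakeup = 1ms, 2 tasks runnable on the cfs_rq:
 *
 *	expected_wakeup = 2 * 1ms * 2 = 4ms
 *	this_run        = 3ms (how long curr has run since last preemption)
 *	gran            = 4ms - 3ms   = 1ms
 *
 * so a task that wakes up frequently can preempt after roughly 1ms,
 * while min_t() still caps the result at the 5ms granularity.
 */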
+
+static unsigned long
+wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
{
unsigned long gran = sysctl_sched_wakeup_granularity;
+ if (cfs_rq_of(curr)->curr && sched_feat(ADAPTIVE_GRAN))
+ gran = adaptive_gran(curr, se);
+
/*
- * More easily preempt - nice tasks, while not making it harder for
- * + nice tasks.
+ * Since curr is the one running now, convert the gran from
+ * real-time to virtual-time in curr's units.
*/
- if (!sched_feat(ASYM_GRAN) || se->load.weight > NICE_0_LOAD)
- gran = calc_delta_fair(sysctl_sched_wakeup_granularity, se);
+ if (sched_feat(ASYM_GRAN)) {
+ /*
+ * By using 'se' instead of 'curr' we penalize light tasks, so
+ * they get preempted more easily. That is, if 'se' < 'curr' then
+ * the resulting gran will be larger, therefore penalizing the
+ * lighter task; if otoh 'se' > 'curr' then the resulting gran will
+ * be smaller, again penalizing the lighter task.
+ *
+ * This is especially important for buddies when the leftmost
+ * task is higher priority than the buddy.
+ */
+ if (unlikely(se->load.weight != NICE_0_LOAD))
+ gran = calc_delta_fair(gran, se);
+ } else {
+ if (unlikely(curr->load.weight != NICE_0_LOAD))
+ gran = calc_delta_fair(gran, curr);
+ }
return gran;
}
@@ -1350,7 +1397,7 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
if (vdiff <= 0)
return -1;
- gran = wakeup_gran(curr);
+ gran = wakeup_gran(curr, se);
if (vdiff > gran)
return 1;
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index 07bc02e99ab..4569bfa7df9 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -1,5 +1,6 @@
SCHED_FEAT(NEW_FAIR_SLEEPERS, 1)
-SCHED_FEAT(NORMALIZED_SLEEPER, 1)
+SCHED_FEAT(NORMALIZED_SLEEPER, 0)
+SCHED_FEAT(ADAPTIVE_GRAN, 1)
SCHED_FEAT(WAKEUP_PREEMPT, 1)
SCHED_FEAT(START_DEBIT, 1)
SCHED_FEAT(AFFINE_WAKEUPS, 1)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index da932f4c852..299d012b439 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -3,6 +3,40 @@
* policies)
*/
+static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
+{
+ return container_of(rt_se, struct task_struct, rt);
+}
+
+#ifdef CONFIG_RT_GROUP_SCHED
+
+static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
+{
+ return rt_rq->rq;
+}
+
+static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
+{
+ return rt_se->rt_rq;
+}
+
+#else /* CONFIG_RT_GROUP_SCHED */
+
+static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
+{
+ return container_of(rt_rq, struct rq, rt);
+}
+
+static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
+{
+ struct task_struct *p = rt_task_of(rt_se);
+ struct rq *rq = task_rq(p);
+
+ return &rq->rt;
+}
+
+#endif /* CONFIG_RT_GROUP_SCHED */
+
#ifdef CONFIG_SMP
static inline int rt_overloaded(struct rq *rq)
@@ -37,25 +71,69 @@ static inline void rt_clear_overload(struct rq *rq)
cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}
-static void update_rt_migration(struct rq *rq)
+static void update_rt_migration(struct rt_rq *rt_rq)
{
- if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
- if (!rq->rt.overloaded) {
- rt_set_overload(rq);
- rq->rt.overloaded = 1;
+ if (rt_rq->rt_nr_migratory && (rt_rq->rt_nr_running > 1)) {
+ if (!rt_rq->overloaded) {
+ rt_set_overload(rq_of_rt_rq(rt_rq));
+ rt_rq->overloaded = 1;
}
- } else if (rq->rt.overloaded) {
- rt_clear_overload(rq);
- rq->rt.overloaded = 0;
+ } else if (rt_rq->overloaded) {
+ rt_clear_overload(rq_of_rt_rq(rt_rq));
+ rt_rq->overloaded = 0;
}
}
-#endif /* CONFIG_SMP */
-static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
+static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+ if (rt_se->nr_cpus_allowed > 1)
+ rt_rq->rt_nr_migratory++;
+
+ update_rt_migration(rt_rq);
+}
+
+static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+ if (rt_se->nr_cpus_allowed > 1)
+ rt_rq->rt_nr_migratory--;
+
+ update_rt_migration(rt_rq);
+}
+
+static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
+{
+ plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
+ plist_node_init(&p->pushable_tasks, p->prio);
+ plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
+}
+
+static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
+{
+ plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
+}
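The pushable list is a plist keyed on p->prio, so it stays sorted with the highest-priority (lowest prio value) task at the head; enqueue_pushable_task() deletes before re-initialising the node so that a task whose priority has changed is re-sorted rather than added twice. A minimal illustration of the plist calls used here:

	/* sketch only: pull the best pushable candidate off the plist */
	if (!plist_head_empty(&rq->rt.pushable_tasks))
		p = plist_first_entry(&rq->rt.pushable_tasks,
				      struct task_struct, pushable_tasks);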
+
+#else
+
+static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
- return container_of(rt_se, struct task_struct, rt);
}
+static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
+{
+}
+
+static inline
+void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+}
+
+static inline
+void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+}
+
+#endif /* CONFIG_SMP */
+
static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
return !list_empty(&rt_se->run_list);
@@ -79,16 +157,6 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq)
#define for_each_leaf_rt_rq(rt_rq, rq) \
list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
-static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
-{
- return rt_rq->rq;
-}
-
-static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
-{
- return rt_se->rt_rq;
-}
-
#define for_each_sched_rt_entity(rt_se) \
for (; rt_se; rt_se = rt_se->parent)
@@ -108,7 +176,7 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
if (rt_rq->rt_nr_running) {
if (rt_se && !on_rt_rq(rt_se))
enqueue_rt_entity(rt_se);
- if (rt_rq->highest_prio < curr->prio)
+ if (rt_rq->highest_prio.curr < curr->prio)
resched_task(curr);
}
}
@@ -176,19 +244,6 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq)
#define for_each_leaf_rt_rq(rt_rq, rq) \
for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
-static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
-{
- return container_of(rt_rq, struct rq, rt);
-}
-
-static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
-{
- struct task_struct *p = rt_task_of(rt_se);
- struct rq *rq = task_rq(p);
-
- return &rq->rt;
-}
-
#define for_each_sched_rt_entity(rt_se) \
for (; rt_se; rt_se = NULL)
@@ -473,7 +528,7 @@ static inline int rt_se_prio(struct sched_rt_entity *rt_se)
struct rt_rq *rt_rq = group_rt_rq(rt_se);
if (rt_rq)
- return rt_rq->highest_prio;
+ return rt_rq->highest_prio.curr;
#endif
return rt_task_of(rt_se)->prio;
@@ -547,91 +602,174 @@ static void update_curr_rt(struct rq *rq)
}
}
-static inline
-void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+#if defined CONFIG_SMP
+
+static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);
+
+static inline int next_prio(struct rq *rq)
{
- WARN_ON(!rt_prio(rt_se_prio(rt_se)));
- rt_rq->rt_nr_running++;
-#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
- if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
-#ifdef CONFIG_SMP
- struct rq *rq = rq_of_rt_rq(rt_rq);
-#endif
+ struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu);
+
+ if (next && rt_prio(next->prio))
+ return next->prio;
+ else
+ return MAX_RT_PRIO;
+}
+
+static void
+inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
+{
+ struct rq *rq = rq_of_rt_rq(rt_rq);
+
+ if (prio < prev_prio) {
+
+ /*
+ * If the new task is higher in priority than anything on the
+ * run-queue, we know that the previous high becomes our
+ * next-highest.
+ */
+ rt_rq->highest_prio.next = prev_prio;
- rt_rq->highest_prio = rt_se_prio(rt_se);
-#ifdef CONFIG_SMP
if (rq->online)
- cpupri_set(&rq->rd->cpupri, rq->cpu,
- rt_se_prio(rt_se));
-#endif
- }
-#endif
-#ifdef CONFIG_SMP
- if (rt_se->nr_cpus_allowed > 1) {
- struct rq *rq = rq_of_rt_rq(rt_rq);
+ cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
- rq->rt.rt_nr_migratory++;
- }
+ } else if (prio == rt_rq->highest_prio.curr)
+ /*
+ * If the next task is equal in priority to the highest on
+ * the run-queue, then we implicitly know that the next highest
+ * task cannot be any lower than current
+ */
+ rt_rq->highest_prio.next = prio;
+ else if (prio < rt_rq->highest_prio.next)
+ /*
+ * Otherwise, we need to recompute next-highest
+ */
+ rt_rq->highest_prio.next = next_prio(rq);
+}
- update_rt_migration(rq_of_rt_rq(rt_rq));
-#endif
-#ifdef CONFIG_RT_GROUP_SCHED
- if (rt_se_boosted(rt_se))
- rt_rq->rt_nr_boosted++;
+static void
+dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
+{
+ struct rq *rq = rq_of_rt_rq(rt_rq);
- if (rt_rq->tg)
- start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
-#else
- start_rt_bandwidth(&def_rt_bandwidth);
-#endif
+ if (rt_rq->rt_nr_running && (prio <= rt_rq->highest_prio.next))
+ rt_rq->highest_prio.next = next_prio(rq);
+
+ if (rq->online && rt_rq->highest_prio.curr != prev_prio)
+ cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}
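The behaviour of the two cached priorities can be summarised with a small example (lower numeric value means higher RT priority); the numbers below are illustrative only:

/*
 * Runnable RT prios on this rq: { 10, 20, 30 }
 *	highest_prio.curr = 10, highest_prio.next = 20
 *
 * enqueue prio 5:  5 < curr, so .next becomes the old .curr (10)
 *                  and inc_rt_prio() sets .curr = 5
 * dequeue prio 5:  5 <= .next, so .next is recomputed via next_prio()
 *
 * pull_rt_task() can then compare src_rq->rt.highest_prio.next against
 * this_rq->rt.highest_prio.curr without taking the remote rq lock.
 */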
+#else /* CONFIG_SMP */
+
static inline
-void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
-{
-#ifdef CONFIG_SMP
- int highest_prio = rt_rq->highest_prio;
-#endif
+void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
+static inline
+void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
+
+#endif /* CONFIG_SMP */
- WARN_ON(!rt_prio(rt_se_prio(rt_se)));
- WARN_ON(!rt_rq->rt_nr_running);
- rt_rq->rt_nr_running--;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
+static void
+inc_rt_prio(struct rt_rq *rt_rq, int prio)
+{
+ int prev_prio = rt_rq->highest_prio.curr;
+
+ if (prio < prev_prio)
+ rt_rq->highest_prio.curr = prio;
+
+ inc_rt_prio_smp(rt_rq, prio, prev_prio);
+}
+
+static void
+dec_rt_prio(struct rt_rq *rt_rq, int prio)
+{
+ int prev_prio = rt_rq->highest_prio.curr;
+
if (rt_rq->rt_nr_running) {
- struct rt_prio_array *array;
- WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
- if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
- /* recalculate */
- array = &rt_rq->active;
- rt_rq->highest_prio =
+ WARN_ON(prio < prev_prio);
+
+ /*
+ * This may have been our highest task, and therefore
+ * we may have some recomputation to do
+ */
+ if (prio == prev_prio) {
+ struct rt_prio_array *array = &rt_rq->active;
+
+ rt_rq->highest_prio.curr =
sched_find_first_bit(array->bitmap);
- } /* otherwise leave rq->highest prio alone */
+ }
+
} else
- rt_rq->highest_prio = MAX_RT_PRIO;
-#endif
-#ifdef CONFIG_SMP
- if (rt_se->nr_cpus_allowed > 1) {
- struct rq *rq = rq_of_rt_rq(rt_rq);
- rq->rt.rt_nr_migratory--;
- }
+ rt_rq->highest_prio.curr = MAX_RT_PRIO;
- if (rt_rq->highest_prio != highest_prio) {
- struct rq *rq = rq_of_rt_rq(rt_rq);
+ dec_rt_prio_smp(rt_rq, prio, prev_prio);
+}
- if (rq->online)
- cpupri_set(&rq->rd->cpupri, rq->cpu,
- rt_rq->highest_prio);
- }
+#else
+
+static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
+static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
+
+#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
- update_rt_migration(rq_of_rt_rq(rt_rq));
-#endif /* CONFIG_SMP */
#ifdef CONFIG_RT_GROUP_SCHED
+
+static void
+inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+ if (rt_se_boosted(rt_se))
+ rt_rq->rt_nr_boosted++;
+
+ if (rt_rq->tg)
+ start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
+}
+
+static void
+dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
if (rt_se_boosted(rt_se))
rt_rq->rt_nr_boosted--;
WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
-#endif
+}
+
+#else /* CONFIG_RT_GROUP_SCHED */
+
+static void
+inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+ start_rt_bandwidth(&def_rt_bandwidth);
+}
+
+static inline
+void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
+
+#endif /* CONFIG_RT_GROUP_SCHED */
+
+static inline
+void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+ int prio = rt_se_prio(rt_se);
+
+ WARN_ON(!rt_prio(prio));
+ rt_rq->rt_nr_running++;
+
+ inc_rt_prio(rt_rq, prio);
+ inc_rt_migration(rt_se, rt_rq);
+ inc_rt_group(rt_se, rt_rq);
+}
+
+static inline
+void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+ WARN_ON(!rt_prio(rt_se_prio(rt_se)));
+ WARN_ON(!rt_rq->rt_nr_running);
+ rt_rq->rt_nr_running--;
+
+ dec_rt_prio(rt_rq, rt_se_prio(rt_se));
+ dec_rt_migration(rt_se, rt_rq);
+ dec_rt_group(rt_se, rt_rq);
}
static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
@@ -718,6 +856,9 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
enqueue_rt_entity(rt_se);
+ if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
+ enqueue_pushable_task(rq, p);
+
inc_cpu_load(rq, p->se.load.weight);
}
@@ -728,6 +869,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
update_curr_rt(rq);
dequeue_rt_entity(rt_se);
+ dequeue_pushable_task(rq, p);
+
dec_cpu_load(rq, p->se.load.weight);
}
@@ -878,7 +1021,7 @@ static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
return next;
}
-static struct task_struct *pick_next_task_rt(struct rq *rq)
+static struct task_struct *_pick_next_task_rt(struct rq *rq)
{
struct sched_rt_entity *rt_se;
struct task_struct *p;
@@ -900,6 +1043,18 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
p = rt_task_of(rt_se);
p->se.exec_start = rq->clock;
+
+ return p;
+}
+
+static struct task_struct *pick_next_task_rt(struct rq *rq)
+{
+ struct task_struct *p = _pick_next_task_rt(rq);
+
+ /* The running task is never eligible for pushing */
+ if (p)
+ dequeue_pushable_task(rq, p);
+
return p;
}
@@ -907,6 +1062,13 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
update_curr_rt(rq);
p->se.exec_start = 0;
+
+ /*
+ * The previous task needs to be made eligible for pushing
+ * if it is still active
+ */
+ if (p->se.on_rq && p->rt.nr_cpus_allowed > 1)
+ enqueue_pushable_task(rq, p);
}
#ifdef CONFIG_SMP
@@ -1080,7 +1242,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
}
/* If this rq is still suitable use it. */
- if (lowest_rq->rt.highest_prio > task->prio)
+ if (lowest_rq->rt.highest_prio.curr > task->prio)
break;
/* try again */
@@ -1091,6 +1253,31 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
return lowest_rq;
}
+static inline int has_pushable_tasks(struct rq *rq)
+{
+ return !plist_head_empty(&rq->rt.pushable_tasks);
+}
+
+static struct task_struct *pick_next_pushable_task(struct rq *rq)
+{
+ struct task_struct *p;
+
+ if (!has_pushable_tasks(rq))
+ return NULL;
+
+ p = plist_first_entry(&rq->rt.pushable_tasks,
+ struct task_struct, pushable_tasks);
+
+ BUG_ON(rq->cpu != task_cpu(p));
+ BUG_ON(task_current(rq, p));
+ BUG_ON(p->rt.nr_cpus_allowed <= 1);
+
+ BUG_ON(!p->se.on_rq);
+ BUG_ON(!rt_task(p));
+
+ return p;
+}
+
/*
* If the current CPU has more than one RT task, see if the non
* running task can migrate over to a CPU that is running a task
@@ -1100,13 +1287,11 @@ static int push_rt_task(struct rq *rq)
{
struct task_struct *next_task;
struct rq *lowest_rq;
- int ret = 0;
- int paranoid = RT_MAX_TRIES;
if (!rq->rt.overloaded)
return 0;
- next_task = pick_next_highest_task_rt(rq, -1);
+ next_task = pick_next_pushable_task(rq);
if (!next_task)
return 0;
@@ -1135,16 +1320,34 @@ static int push_rt_task(struct rq *rq)
struct task_struct *task;
/*
* find lock_lowest_rq releases rq->lock
- * so it is possible that next_task has changed.
- * If it has, then try again.
+ * so it is possible that next_task has migrated.
+ *
+ * We need to make sure that the task is still on the same
+ * run-queue and is also still the next task eligible for
+ * pushing.
*/
- task = pick_next_highest_task_rt(rq, -1);
- if (unlikely(task != next_task) && task && paranoid--) {
- put_task_struct(next_task);
- next_task = task;
- goto retry;
+ task = pick_next_pushable_task(rq);
+ if (task_cpu(next_task) == rq->cpu && task == next_task) {
+ /*
+ * If we get here, the task hasn't moved at all, but
+ * it has failed to push. We will not try again,
+ * since the other cpus will pull from us when they
+ * are ready.
+ */
+ dequeue_pushable_task(rq, next_task);
+ goto out;
}
- goto out;
+
+ if (!task)
+ /* No more tasks, just exit */
+ goto out;
+
+ /*
+ * Something has shifted, try again.
+ */
+ put_task_struct(next_task);
+ next_task = task;
+ goto retry;
}
deactivate_task(rq, next_task, 0);
@@ -1155,23 +1358,12 @@ static int push_rt_task(struct rq *rq)
double_unlock_balance(rq, lowest_rq);
- ret = 1;
out:
put_task_struct(next_task);
- return ret;
+ return 1;
}
-/*
- * TODO: Currently we just use the second highest prio task on
- * the queue, and stop when it can't migrate (or there's
- * no more RT tasks). There may be a case where a lower
- * priority RT task has a different affinity than the
- * higher RT task. In this case the lower RT task could
- * possibly be able to migrate where as the higher priority
- * RT task could not. We currently ignore this issue.
- * Enhancements are welcome!
- */
static void push_rt_tasks(struct rq *rq)
{
/* push_rt_task will return true if it moved an RT */
@@ -1182,33 +1374,35 @@ static void push_rt_tasks(struct rq *rq)
static int pull_rt_task(struct rq *this_rq)
{
int this_cpu = this_rq->cpu, ret = 0, cpu;
- struct task_struct *p, *next;
+ struct task_struct *p;
struct rq *src_rq;
if (likely(!rt_overloaded(this_rq)))
return 0;
- next = pick_next_task_rt(this_rq);
-
for_each_cpu(cpu, this_rq->rd->rto_mask) {
if (this_cpu == cpu)
continue;
src_rq = cpu_rq(cpu);
+
+ /*
+ * Don't bother taking the src_rq->lock if the next highest
+ * task is known to be lower-priority than our current task.
+ * This may look racy, but if this value is about to go
+ * logically higher, the src_rq will push this task away.
+ * And if it's going logically lower, we do not care
+ */
+ if (src_rq->rt.highest_prio.next >=
+ this_rq->rt.highest_prio.curr)
+ continue;
+
/*
* We can potentially drop this_rq's lock in
* double_lock_balance, and another CPU could
- * steal our next task - hence we must cause
- * the caller to recalculate the next task
- * in that case:
+ * alter this_rq
*/
- if (double_lock_balance(this_rq, src_rq)) {
- struct task_struct *old_next = next;
-
- next = pick_next_task_rt(this_rq);
- if (next != old_next)
- ret = 1;
- }
+ double_lock_balance(this_rq, src_rq);
/*
* Are there still pullable RT tasks?
@@ -1222,7 +1416,7 @@ static int pull_rt_task(struct rq *this_rq)
* Do we have an RT task that preempts
* the to-be-scheduled task?
*/
- if (p && (!next || (p->prio < next->prio))) {
+ if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
WARN_ON(p == src_rq->curr);
WARN_ON(!p->se.on_rq);
@@ -1232,12 +1426,9 @@ static int pull_rt_task(struct rq *this_rq)
* This is just that p is waking up and hasn't
* had a chance to schedule. We only pull
* p if it is lower in priority than the
- * current task on the run queue or
- * this_rq next task is lower in prio than
- * the current task on that rq.
+ * current task on the run queue
*/
- if (p->prio < src_rq->curr->prio ||
- (next && next->prio < src_rq->curr->prio))
+ if (p->prio < src_rq->curr->prio)
goto skip;
ret = 1;
@@ -1250,13 +1441,7 @@ static int pull_rt_task(struct rq *this_rq)
* case there's an even higher prio task
* in another runqueue. (low likelihood
* but possible)
- *
- * Update next so that we won't pick a task
- * on another cpu with a priority lower (or equal)
- * than the one we just picked.
*/
- next = p;
-
}
skip:
double_unlock_balance(this_rq, src_rq);
@@ -1268,24 +1453,27 @@ static int pull_rt_task(struct rq *this_rq)
static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
{
/* Try to pull RT tasks here if we lower this rq's prio */
- if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
+ if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
pull_rt_task(rq);
}
+/*
+ * assumes rq->lock is held
+ */
+static int needs_post_schedule_rt(struct rq *rq)
+{
+ return has_pushable_tasks(rq);
+}
+
static void post_schedule_rt(struct rq *rq)
{
/*
- * If we have more than one rt_task queued, then
- * see if we can push the other rt_tasks off to other CPUS.
- * Note we may release the rq lock, and since
- * the lock was owned by prev, we need to release it
- * first via finish_lock_switch and then reaquire it here.
+ * This is only called if needs_post_schedule_rt() indicates that
+ * we need to push tasks away
*/
- if (unlikely(rq->rt.overloaded)) {
- spin_lock_irq(&rq->lock);
- push_rt_tasks(rq);
- spin_unlock_irq(&rq->lock);
- }
+ spin_lock_irq(&rq->lock);
+ push_rt_tasks(rq);
+ spin_unlock_irq(&rq->lock);
}
/*
@@ -1296,7 +1484,8 @@ static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
{
if (!task_running(rq, p) &&
!test_tsk_need_resched(rq->curr) &&
- rq->rt.overloaded)
+ has_pushable_tasks(rq) &&
+ p->rt.nr_cpus_allowed > 1)
push_rt_tasks(rq);
}
@@ -1332,6 +1521,24 @@ static void set_cpus_allowed_rt(struct task_struct *p,
if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
struct rq *rq = task_rq(p);
+ if (!task_current(rq, p)) {
+ /*
+ * Make sure we dequeue this task from the pushable list
+ * before going further. It will either remain off of
+ * the list because we are no longer pushable, or it
+ * will be requeued.
+ */
+ if (p->rt.nr_cpus_allowed > 1)
+ dequeue_pushable_task(rq, p);
+
+ /*
+ * Requeue if our weight is changing and still > 1
+ */
+ if (weight > 1)
+ enqueue_pushable_task(rq, p);
+
+ }
+
if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
rq->rt.rt_nr_migratory++;
} else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
@@ -1339,7 +1546,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
rq->rt.rt_nr_migratory--;
}
- update_rt_migration(rq);
+ update_rt_migration(&rq->rt);
}
cpumask_copy(&p->cpus_allowed, new_mask);
@@ -1354,7 +1561,7 @@ static void rq_online_rt(struct rq *rq)
__enable_runtime(rq);
- cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio);
+ cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
}
/* Assumes rq->lock is held */
@@ -1446,7 +1653,7 @@ static void prio_changed_rt(struct rq *rq, struct task_struct *p,
* can release the rq lock and p could migrate.
* Only reschedule if p is still on the same runqueue.
*/
- if (p->prio > rq->rt.highest_prio && rq->curr == p)
+ if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
resched_task(p);
#else
/* For UP simply resched on drop of prio */
@@ -1517,6 +1724,9 @@ static void set_curr_task_rt(struct rq *rq)
struct task_struct *p = rq->curr;
p->se.exec_start = rq->clock;
+
+ /* The running task is never eligible for pushing */
+ dequeue_pushable_task(rq, p);
}
static const struct sched_class rt_sched_class = {
@@ -1539,6 +1749,7 @@ static const struct sched_class rt_sched_class = {
.rq_online = rq_online_rt,
.rq_offline = rq_offline_rt,
.pre_schedule = pre_schedule_rt,
+ .needs_post_schedule = needs_post_schedule_rt,
.post_schedule = post_schedule_rt,
.task_wake_up = task_wake_up_rt,
.switched_from = switched_from_rt,
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index a8f93dd374e..32d2bd4061b 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -4,7 +4,7 @@
* bump this up when changing the output format or the meaning of an existing
* format, so that tools can adapt (or abort)
*/
-#define SCHEDSTAT_VERSION 14
+#define SCHEDSTAT_VERSION 15
static int show_schedstat(struct seq_file *seq, void *v)
{
@@ -26,9 +26,8 @@ static int show_schedstat(struct seq_file *seq, void *v)
/* runqueue-specific stats */
seq_printf(seq,
- "cpu%d %u %u %u %u %u %u %u %u %u %llu %llu %lu",
- cpu, rq->yld_both_empty,
- rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count,
+ "cpu%d %u %u %u %u %u %u %llu %llu %lu",
+ cpu, rq->yld_count,
rq->sched_switch, rq->sched_count, rq->sched_goidle,
rq->ttwu_count, rq->ttwu_local,
rq->rq_cpu_time,
diff --git a/kernel/signal.c b/kernel/signal.c
index 2a74fe87c0d..d8034737db4 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -55,10 +55,22 @@ static int sig_handler_ignored(void __user *handler, int sig)
(handler == SIG_DFL && sig_kernel_ignore(sig));
}
-static int sig_ignored(struct task_struct *t, int sig)
+static int sig_task_ignored(struct task_struct *t, int sig,
+ int from_ancestor_ns)
{
void __user *handler;
+ handler = sig_handler(t, sig);
+
+ if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
+ handler == SIG_DFL && !from_ancestor_ns)
+ return 1;
+
+ return sig_handler_ignored(handler, sig);
+}
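In other words, the new helper changes the rule only for SIGNAL_UNKILLABLE (init and container-init) tasks that still have the default handler installed; a summary of the outcomes, as implied by the code above:

/*
 * For an unkillable task with handler == SIG_DFL:
 *
 *	sender in the same pid namespace      -> signal is ignored
 *	sender in an ancestor pid namespace   -> falls through to
 *	                                         sig_handler_ignored()
 *
 * All other tasks behave exactly as before.
 */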
+
+static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
+{
/*
* Blocked signals are never ignored, since the
* signal handler may change by the time it is
@@ -67,14 +79,13 @@ static int sig_ignored(struct task_struct *t, int sig)
if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
return 0;
- handler = sig_handler(t, sig);
- if (!sig_handler_ignored(handler, sig))
+ if (!sig_task_ignored(t, sig, from_ancestor_ns))
return 0;
/*
* Tracers may want to know about even ignored signals.
*/
- return !tracehook_consider_ignored_signal(t, sig, handler);
+ return !tracehook_consider_ignored_signal(t, sig);
}
/*
@@ -318,7 +329,7 @@ int unhandled_signal(struct task_struct *tsk, int sig)
return 1;
if (handler != SIG_IGN && handler != SIG_DFL)
return 0;
- return !tracehook_consider_fatal_signal(tsk, sig, handler);
+ return !tracehook_consider_fatal_signal(tsk, sig);
}
@@ -624,7 +635,7 @@ static int check_kill_permission(int sig, struct siginfo *info,
* Returns true if the signal should be actually delivered, otherwise
* it should be dropped.
*/
-static int prepare_signal(int sig, struct task_struct *p)
+static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
{
struct signal_struct *signal = p->signal;
struct task_struct *t;
@@ -708,7 +719,7 @@ static int prepare_signal(int sig, struct task_struct *p)
}
}
- return !sig_ignored(p, sig);
+ return !sig_ignored(p, sig, from_ancestor_ns);
}
/*
@@ -777,7 +788,7 @@ static void complete_signal(int sig, struct task_struct *p, int group)
!(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
!sigismember(&t->real_blocked, sig) &&
(sig == SIGKILL ||
- !tracehook_consider_fatal_signal(t, sig, SIG_DFL))) {
+ !tracehook_consider_fatal_signal(t, sig))) {
/*
* This signal will be fatal to the whole group.
*/
@@ -813,8 +824,8 @@ static inline int legacy_queue(struct sigpending *signals, int sig)
return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
-static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
- int group)
+static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
+ int group, int from_ancestor_ns)
{
struct sigpending *pending;
struct sigqueue *q;
@@ -822,7 +833,8 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
trace_sched_signal_send(sig, t);
assert_spin_locked(&t->sighand->siglock);
- if (!prepare_signal(sig, t))
+
+ if (!prepare_signal(sig, t, from_ancestor_ns))
return 0;
pending = group ? &t->signal->shared_pending : &t->pending;
@@ -871,6 +883,8 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
break;
default:
copy_siginfo(&q->info, info);
+ if (from_ancestor_ns)
+ q->info.si_pid = 0;
break;
}
} else if (!is_si_special(info)) {
@@ -889,6 +903,20 @@ out_set:
return 0;
}
+static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
+ int group)
+{
+ int from_ancestor_ns = 0;
+
+#ifdef CONFIG_PID_NS
+ if (!is_si_special(info) && SI_FROMUSER(info) &&
+ task_pid_nr_ns(current, task_active_pid_ns(t)) <= 0)
+ from_ancestor_ns = 1;
+#endif
+
+ return __send_signal(sig, info, t, group, from_ancestor_ns);
+}
+
int print_fatal_signals;
static void print_fatal_signal(struct pt_regs *regs, int signr)
@@ -1133,7 +1161,7 @@ int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
if (sig && p->sighand) {
unsigned long flags;
spin_lock_irqsave(&p->sighand->siglock, flags);
- ret = __group_send_sig_info(sig, info, p);
+ ret = __send_signal(sig, info, p, 1, 0);
spin_unlock_irqrestore(&p->sighand->siglock, flags);
}
out_unlock:
@@ -1320,7 +1348,7 @@ int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
goto ret;
ret = 1; /* the signal is ignored */
- if (!prepare_signal(sig, t))
+ if (!prepare_signal(sig, t, 0))
goto out;
ret = 0;
@@ -1575,7 +1603,15 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
read_lock(&tasklist_lock);
if (may_ptrace_stop()) {
do_notify_parent_cldstop(current, CLD_TRAPPED);
+ /*
+ * Don't want to allow preemption here, because
+ * sys_ptrace() needs this task to be inactive.
+ *
+ * XXX: implement read_unlock_no_resched().
+ */
+ preempt_disable();
read_unlock(&tasklist_lock);
+ preempt_enable_no_resched();
schedule();
} else {
/*
@@ -1836,9 +1872,16 @@ relock:
/*
* Global init gets no signals it doesn't want.
+ * Container-init gets no signals it doesn't want from same
+ * container.
+ *
+ * Note that if global/container-init sees a sig_kernel_only()
+ * signal here, the signal must have been generated internally
+ * or must have come from an ancestor namespace. In either
+ * case, the signal cannot be dropped.
*/
if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
- !signal_group_exit(signal))
+ !sig_kernel_only(signr))
continue;
if (sig_kernel_stop(signr)) {
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
new file mode 100644
index 00000000000..cf2bc01186e
--- /dev/null
+++ b/kernel/slow-work.c
@@ -0,0 +1,640 @@
+/* Worker thread pool for slow items, such as filesystem lookups or mkdirs
+ *
+ * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ *
+ * See Documentation/slow-work.txt
+ */
+
+#include <linux/module.h>
+#include <linux/slow-work.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/wait.h>
+
+#define SLOW_WORK_CULL_TIMEOUT (5 * HZ) /* cull threads 5s after running out of
+ * things to do */
+#define SLOW_WORK_OOM_TIMEOUT (5 * HZ) /* can't start new threads for 5s after
+ * OOM */
+
+static void slow_work_cull_timeout(unsigned long);
+static void slow_work_oom_timeout(unsigned long);
+
+#ifdef CONFIG_SYSCTL
+static int slow_work_min_threads_sysctl(struct ctl_table *, int, struct file *,
+ void __user *, size_t *, loff_t *);
+
+static int slow_work_max_threads_sysctl(struct ctl_table *, int , struct file *,
+ void __user *, size_t *, loff_t *);
+#endif
+
+/*
+ * The pool of threads has at least min threads in it as long as someone is
+ * using the facility, and may have as many as max.
+ *
+ * A portion of the pool may be processing very slow operations.
+ */
+static unsigned slow_work_min_threads = 2;
+static unsigned slow_work_max_threads = 4;
+static unsigned vslow_work_proportion = 50; /* % of threads that may process
+ * very slow work */
+
+#ifdef CONFIG_SYSCTL
+static const int slow_work_min_min_threads = 2;
+static int slow_work_max_max_threads = 255;
+static const int slow_work_min_vslow = 1;
+static const int slow_work_max_vslow = 99;
+
+ctl_table slow_work_sysctls[] = {
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "min-threads",
+ .data = &slow_work_min_threads,
+ .maxlen = sizeof(unsigned),
+ .mode = 0644,
+ .proc_handler = slow_work_min_threads_sysctl,
+ .extra1 = (void *) &slow_work_min_min_threads,
+ .extra2 = &slow_work_max_threads,
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "max-threads",
+ .data = &slow_work_max_threads,
+ .maxlen = sizeof(unsigned),
+ .mode = 0644,
+ .proc_handler = slow_work_max_threads_sysctl,
+ .extra1 = &slow_work_min_threads,
+ .extra2 = (void *) &slow_work_max_max_threads,
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "vslow-percentage",
+ .data = &vslow_work_proportion,
+ .maxlen = sizeof(unsigned),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .extra1 = (void *) &slow_work_min_vslow,
+ .extra2 = (void *) &slow_work_max_vslow,
+ },
+ { .ctl_name = 0 }
+};
+#endif
+
+/*
+ * The active state of the thread pool
+ */
+static atomic_t slow_work_thread_count;
+static atomic_t vslow_work_executing_count;
+
+static bool slow_work_may_not_start_new_thread;
+static bool slow_work_cull; /* cull a thread due to lack of activity */
+static DEFINE_TIMER(slow_work_cull_timer, slow_work_cull_timeout, 0, 0);
+static DEFINE_TIMER(slow_work_oom_timer, slow_work_oom_timeout, 0, 0);
+static struct slow_work slow_work_new_thread; /* new thread starter */
+
+/*
+ * The queues of work items and the lock governing access to them. These are
+ * shared between all the CPUs. It doesn't make sense to have per-CPU queues
+ * as the number of threads bears no relation to the number of CPUs.
+ *
+ * There are two queues of work items: one for slow work items, and one for
+ * very slow work items.
+ */
+static LIST_HEAD(slow_work_queue);
+static LIST_HEAD(vslow_work_queue);
+static DEFINE_SPINLOCK(slow_work_queue_lock);
+
+/*
+ * The thread controls. A variable used to signal to the threads that they
+ * should exit when the queue is empty, a waitqueue used by the threads to wait
+ * for signals, and a completion set by the last thread to exit.
+ */
+static bool slow_work_threads_should_exit;
+static DECLARE_WAIT_QUEUE_HEAD(slow_work_thread_wq);
+static DECLARE_COMPLETION(slow_work_last_thread_exited);
+
+/*
+ * The number of users of the thread pool and its lock. Whilst this is zero we
+ * have no threads hanging around, and when this reaches zero, we wait for all
+ * active or queued work items to complete and kill all the threads we do have.
+ */
+static int slow_work_user_count;
+static DEFINE_MUTEX(slow_work_user_lock);
+
+/*
+ * Calculate the maximum number of active threads in the pool that are
+ * permitted to process very slow work items.
+ *
+ * The answer is rounded up to at least 1, but may not equal or exceed the
+ * maximum number of threads in the pool. This means we always have at
+ * least one thread that can process slow work items, and we always have at
+ * least one thread that won't get tied up doing so.
+ */
+static unsigned slow_work_calc_vsmax(void)
+{
+ unsigned vsmax;
+
+ vsmax = atomic_read(&slow_work_thread_count) * vslow_work_proportion;
+ vsmax /= 100;
+ vsmax = max(vsmax, 1U);
+ return min(vsmax, slow_work_max_threads - 1);
+}
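With the default tunables above (min 2, max 4 threads, 50% very-slow proportion) the cap works out as follows; the arithmetic is shown for illustration only:

/*
 *	4 threads running:  4 * 50 / 100 = 2; max(2, 1) = 2;
 *	                    min(2, slow_work_max_threads - 1) = min(2, 3) = 2
 *	2 threads running:  2 * 50 / 100 = 1; max(1, 1) = 1; min(1, 3) = 1
 *
 * so at least one thread always remains available for ordinary
 * (non very-slow) work items.
 */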
+
+/*
+ * Attempt to execute stuff queued on a slow thread. Return true if we managed
+ * it, false if there was nothing to do.
+ */
+static bool slow_work_execute(void)
+{
+ struct slow_work *work = NULL;
+ unsigned vsmax;
+ bool very_slow;
+
+ vsmax = slow_work_calc_vsmax();
+
+ /* see if we can schedule a new thread to be started if we're not
+ * keeping up with the work */
+ if (!waitqueue_active(&slow_work_thread_wq) &&
+ (!list_empty(&slow_work_queue) || !list_empty(&vslow_work_queue)) &&
+ atomic_read(&slow_work_thread_count) < slow_work_max_threads &&
+ !slow_work_may_not_start_new_thread)
+ slow_work_enqueue(&slow_work_new_thread);
+
+ /* find something to execute */
+ spin_lock_irq(&slow_work_queue_lock);
+ if (!list_empty(&vslow_work_queue) &&
+ atomic_read(&vslow_work_executing_count) < vsmax) {
+ work = list_entry(vslow_work_queue.next,
+ struct slow_work, link);
+ if (test_and_set_bit_lock(SLOW_WORK_EXECUTING, &work->flags))
+ BUG();
+ list_del_init(&work->link);
+ atomic_inc(&vslow_work_executing_count);
+ very_slow = true;
+ } else if (!list_empty(&slow_work_queue)) {
+ work = list_entry(slow_work_queue.next,
+ struct slow_work, link);
+ if (test_and_set_bit_lock(SLOW_WORK_EXECUTING, &work->flags))
+ BUG();
+ list_del_init(&work->link);
+ very_slow = false;
+ } else {
+ very_slow = false; /* avoid the compiler warning */
+ }
+ spin_unlock_irq(&slow_work_queue_lock);
+
+ if (!work)
+ return false;
+
+ if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags))
+ BUG();
+
+ work->ops->execute(work);
+
+ if (very_slow)
+ atomic_dec(&vslow_work_executing_count);
+ clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags);
+
+ /* if someone tried to enqueue the item whilst we were executing it,
+ * then it'll be left unenqueued to avoid multiple threads trying to
+ * execute it simultaneously
+ *
+ * there is, however, a race between us testing the pending flag and
+ * getting the spinlock, and between the enqueuer setting the pending
+ * flag and getting the spinlock, so we use a deferral bit to tell us
+ * if the enqueuer got there first
+ */
+ if (test_bit(SLOW_WORK_PENDING, &work->flags)) {
+ spin_lock_irq(&slow_work_queue_lock);
+
+ if (!test_bit(SLOW_WORK_EXECUTING, &work->flags) &&
+ test_and_clear_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags))
+ goto auto_requeue;
+
+ spin_unlock_irq(&slow_work_queue_lock);
+ }
+
+ work->ops->put_ref(work);
+ return true;
+
+auto_requeue:
+ /* we must complete the enqueue operation
+ * - we transfer our ref on the item back to the appropriate queue
+ * - don't wake another thread up as we're awake already
+ */
+ if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
+ list_add_tail(&work->link, &vslow_work_queue);
+ else
+ list_add_tail(&work->link, &slow_work_queue);
+ spin_unlock_irq(&slow_work_queue_lock);
+ return true;
+}
+
+/**
+ * slow_work_enqueue - Schedule a slow work item for processing
+ * @work: The work item to queue
+ *
+ * Schedule a slow work item for processing. If the item is already undergoing
+ * execution, this guarantees not to re-enter the execution routine until the
+ * first execution finishes.
+ *
+ * The item is pinned by this function as it retains a reference to it, managed
+ * through the item operations. The item is unpinned once it has been
+ * executed.
+ *
+ * An item may hog the thread that is running it for a relatively large amount
+ * of time, sufficient, for example, to perform several lookup, mkdir, create
+ * and setxattr operations. It may sleep on I/O and may sleep to obtain locks.
+ *
+ * Conversely, if a number of items are awaiting processing, it may take some
+ * time before any given item is given attention. The number of threads in the
+ * pool may be increased to deal with demand, but only up to a limit.
+ *
+ * If SLOW_WORK_VERY_SLOW is set on the work item, then it will be placed in
+ * the very slow queue, from which only a portion of the threads will be
+ * allowed to pick items to execute. This ensures that very slow items won't
+ * overly block ones that are just ordinarily slow.
+ *
+ * Returns 0 if successful, -EAGAIN if not.
+ */
+int slow_work_enqueue(struct slow_work *work)
+{
+ unsigned long flags;
+
+ BUG_ON(slow_work_user_count <= 0);
+ BUG_ON(!work);
+ BUG_ON(!work->ops);
+ BUG_ON(!work->ops->get_ref);
+
+ /* when honouring an enqueue request, we only promise that we will run
+ * the work function in the future; we do not promise to run it once
+ * per enqueue request
+ *
+ * we use the PENDING bit to merge together repeat requests without
+ * having to disable IRQs and take the spinlock, whilst still
+ * maintaining our promise
+ */
+ if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
+ spin_lock_irqsave(&slow_work_queue_lock, flags);
+
+ /* we promise that we will not attempt to execute the work
+ * function in more than one thread simultaneously
+ *
+ * this, however, leaves us with a problem if we're asked to
+ * enqueue the work whilst someone is executing the work
+ * function as simply queueing the work immediately means that
+ * another thread may try executing it whilst it is already
+ * under execution
+ *
+ * to deal with this, we set the ENQ_DEFERRED bit instead of
+ * enqueueing, and the thread currently executing the work
+ * function will enqueue the work item when the work function
+ * returns and it has cleared the EXECUTING bit
+ */
+ if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
+ set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
+ } else {
+ if (work->ops->get_ref(work) < 0)
+ goto cant_get_ref;
+ if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
+ list_add_tail(&work->link, &vslow_work_queue);
+ else
+ list_add_tail(&work->link, &slow_work_queue);
+ wake_up(&slow_work_thread_wq);
+ }
+
+ spin_unlock_irqrestore(&slow_work_queue_lock, flags);
+ }
+ return 0;
+
+cant_get_ref:
+ spin_unlock_irqrestore(&slow_work_queue_lock, flags);
+ return -EAGAIN;
+}
+EXPORT_SYMBOL(slow_work_enqueue);
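
A usage sketch, not part of this patch: a caller embeds a struct slow_work in its own refcounted object, supplies the three ops, and enqueues it. The object, release function and names below are illustrative only.

#include <linux/slow-work.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct my_object {
	struct kref		ref;
	struct slow_work	work;
};

static void my_release(struct kref *ref)
{
	kfree(container_of(ref, struct my_object, ref));
}

static int my_get_ref(struct slow_work *work)
{
	kref_get(&container_of(work, struct my_object, work)->ref);
	return 0;
}

static void my_put_ref(struct slow_work *work)
{
	kref_put(&container_of(work, struct my_object, work)->ref, my_release);
}

static void my_execute(struct slow_work *work)
{
	/* may sleep, take mutexes and do I/O; the PENDING/EXECUTING/
	 * ENQ_DEFERRED bits above guarantee it is never re-entered */
}

static const struct slow_work_ops my_slow_work_ops = {
	.get_ref	= my_get_ref,
	.put_ref	= my_put_ref,
	.execute	= my_execute,
};

static int my_kick(struct my_object *obj)
{
	slow_work_init(&obj->work, &my_slow_work_ops);
	return slow_work_enqueue(&obj->work);	/* 0, or -EAGAIN if the
						 * reference couldn't be taken */
}
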
+
+/*
+ * Worker thread culling algorithm
+ */
+static bool slow_work_cull_thread(void)
+{
+ unsigned long flags;
+ bool do_cull = false;
+
+ spin_lock_irqsave(&slow_work_queue_lock, flags);
+
+ if (slow_work_cull) {
+ slow_work_cull = false;
+
+ if (list_empty(&slow_work_queue) &&
+ list_empty(&vslow_work_queue) &&
+ atomic_read(&slow_work_thread_count) >
+ slow_work_min_threads) {
+ mod_timer(&slow_work_cull_timer,
+ jiffies + SLOW_WORK_CULL_TIMEOUT);
+ do_cull = true;
+ }
+ }
+
+ spin_unlock_irqrestore(&slow_work_queue_lock, flags);
+ return do_cull;
+}
+
+/*
+ * Determine if there is slow work available for dispatch
+ */
+static inline bool slow_work_available(int vsmax)
+{
+ return !list_empty(&slow_work_queue) ||
+ (!list_empty(&vslow_work_queue) &&
+ atomic_read(&vslow_work_executing_count) < vsmax);
+}
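
The vsmax cap recomputed in the dispatcher below reserves part of the pool for ordinarily slow work. A worked example with assumed numbers (the real proportion is the tunable vslow_work_proportion):

/* Illustrative values only: */
static int example_vsmax(void)
{
	int threads    = 8;	/* atomic_read(&slow_work_thread_count)     */
	int proportion = 50;	/* vslow_work_proportion, in percent        */

	return threads * proportion / 100;	/* == 4: at most 4 of the 8
						 * threads may be inside
						 * very-slow items at once  */
}
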
+
+/*
+ * Worker thread dispatcher
+ */
+static int slow_work_thread(void *_data)
+{
+ int vsmax;
+
+ DEFINE_WAIT(wait);
+
+ set_freezable();
+ set_user_nice(current, -5);
+
+ for (;;) {
+ vsmax = vslow_work_proportion;
+ vsmax *= atomic_read(&slow_work_thread_count);
+ vsmax /= 100;
+
+ prepare_to_wait(&slow_work_thread_wq, &wait,
+ TASK_INTERRUPTIBLE);
+ if (!freezing(current) &&
+ !slow_work_threads_should_exit &&
+ !slow_work_available(vsmax) &&
+ !slow_work_cull)
+ schedule();
+ finish_wait(&slow_work_thread_wq, &wait);
+
+ try_to_freeze();
+
+ vsmax = vslow_work_proportion;
+ vsmax *= atomic_read(&slow_work_thread_count);
+ vsmax /= 100;
+
+ if (slow_work_available(vsmax) && slow_work_execute()) {
+ cond_resched();
+ if (list_empty(&slow_work_queue) &&
+ list_empty(&vslow_work_queue) &&
+ atomic_read(&slow_work_thread_count) >
+ slow_work_min_threads)
+ mod_timer(&slow_work_cull_timer,
+ jiffies + SLOW_WORK_CULL_TIMEOUT);
+ continue;
+ }
+
+ if (slow_work_threads_should_exit)
+ break;
+
+ if (slow_work_cull && slow_work_cull_thread())
+ break;
+ }
+
+ if (atomic_dec_and_test(&slow_work_thread_count))
+ complete_and_exit(&slow_work_last_thread_exited, 0);
+ return 0;
+}
+
+/*
+ * Handle thread cull timer expiration
+ */
+static void slow_work_cull_timeout(unsigned long data)
+{
+ slow_work_cull = true;
+ wake_up(&slow_work_thread_wq);
+}
+
+/*
+ * Get a reference on slow work thread starter
+ */
+static int slow_work_new_thread_get_ref(struct slow_work *work)
+{
+ return 0;
+}
+
+/*
+ * Drop a reference on slow work thread starter
+ */
+static void slow_work_new_thread_put_ref(struct slow_work *work)
+{
+}
+
+/*
+ * Start a new slow work thread
+ */
+static void slow_work_new_thread_execute(struct slow_work *work)
+{
+ struct task_struct *p;
+
+ if (slow_work_threads_should_exit)
+ return;
+
+ if (atomic_read(&slow_work_thread_count) >= slow_work_max_threads)
+ return;
+
+ if (!mutex_trylock(&slow_work_user_lock))
+ return;
+
+ slow_work_may_not_start_new_thread = true;
+ atomic_inc(&slow_work_thread_count);
+ p = kthread_run(slow_work_thread, NULL, "kslowd");
+ if (IS_ERR(p)) {
+ printk(KERN_DEBUG "Slow work thread pool: OOM\n");
+ if (atomic_dec_and_test(&slow_work_thread_count))
+ BUG(); /* we're running on a slow work thread... */
+ mod_timer(&slow_work_oom_timer,
+ jiffies + SLOW_WORK_OOM_TIMEOUT);
+ } else {
+ /* ratelimit the starting of new threads */
+ mod_timer(&slow_work_oom_timer, jiffies + 1);
+ }
+
+ mutex_unlock(&slow_work_user_lock);
+}
+
+static const struct slow_work_ops slow_work_new_thread_ops = {
+ .get_ref = slow_work_new_thread_get_ref,
+ .put_ref = slow_work_new_thread_put_ref,
+ .execute = slow_work_new_thread_execute,
+};
+
+/*
+ * post-OOM new thread start suppression expiration
+ */
+static void slow_work_oom_timeout(unsigned long data)
+{
+ slow_work_may_not_start_new_thread = false;
+}
+
+#ifdef CONFIG_SYSCTL
+/*
+ * Handle adjustment of the minimum number of threads
+ */
+static int slow_work_min_threads_sysctl(struct ctl_table *table, int write,
+ struct file *filp, void __user *buffer,
+ size_t *lenp, loff_t *ppos)
+{
+ int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
+ int n;
+
+ if (ret == 0) {
+ mutex_lock(&slow_work_user_lock);
+ if (slow_work_user_count > 0) {
+ /* see if we need to start or stop threads */
+ n = atomic_read(&slow_work_thread_count) -
+ slow_work_min_threads;
+
+ if (n < 0 && !slow_work_may_not_start_new_thread)
+ slow_work_enqueue(&slow_work_new_thread);
+ else if (n > 0)
+ mod_timer(&slow_work_cull_timer,
+ jiffies + SLOW_WORK_CULL_TIMEOUT);
+ }
+ mutex_unlock(&slow_work_user_lock);
+ }
+
+ return ret;
+}
+
+/*
+ * Handle adjustment of the maximum number of threads
+ */
+static int slow_work_max_threads_sysctl(struct ctl_table *table, int write,
+ struct file *filp, void __user *buffer,
+ size_t *lenp, loff_t *ppos)
+{
+ int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
+ int n;
+
+ if (ret == 0) {
+ mutex_lock(&slow_work_user_lock);
+ if (slow_work_user_count > 0) {
+ /* see if we need to stop threads */
+ n = slow_work_max_threads -
+ atomic_read(&slow_work_thread_count);
+
+ if (n < 0)
+ mod_timer(&slow_work_cull_timer,
+ jiffies + SLOW_WORK_CULL_TIMEOUT);
+ }
+ mutex_unlock(&slow_work_user_lock);
+ }
+
+ return ret;
+}
+#endif /* CONFIG_SYSCTL */
+
+/**
+ * slow_work_register_user - Register a user of the facility
+ *
+ * Register a user of the facility, starting up the initial threads if there
+ * aren't any other users at this point. This will return 0 if successful, or
+ * an error if not.
+ */
+int slow_work_register_user(void)
+{
+ struct task_struct *p;
+ int loop;
+
+ mutex_lock(&slow_work_user_lock);
+
+ if (slow_work_user_count == 0) {
+ printk(KERN_NOTICE "Slow work thread pool: Starting up\n");
+ init_completion(&slow_work_last_thread_exited);
+
+ slow_work_threads_should_exit = false;
+ slow_work_init(&slow_work_new_thread,
+ &slow_work_new_thread_ops);
+ slow_work_may_not_start_new_thread = false;
+ slow_work_cull = false;
+
+ /* start the minimum number of threads */
+ for (loop = 0; loop < slow_work_min_threads; loop++) {
+ atomic_inc(&slow_work_thread_count);
+ p = kthread_run(slow_work_thread, NULL, "kslowd");
+ if (IS_ERR(p))
+ goto error;
+ }
+ printk(KERN_NOTICE "Slow work thread pool: Ready\n");
+ }
+
+ slow_work_user_count++;
+ mutex_unlock(&slow_work_user_lock);
+ return 0;
+
+error:
+ if (atomic_dec_and_test(&slow_work_thread_count))
+ complete(&slow_work_last_thread_exited);
+ if (loop > 0) {
+ printk(KERN_ERR "Slow work thread pool:"
+ " Aborting startup on ENOMEM\n");
+ slow_work_threads_should_exit = true;
+ wake_up_all(&slow_work_thread_wq);
+ wait_for_completion(&slow_work_last_thread_exited);
+ printk(KERN_ERR "Slow work thread pool: Aborted\n");
+ }
+ mutex_unlock(&slow_work_user_lock);
+ return PTR_ERR(p);
+}
+EXPORT_SYMBOL(slow_work_register_user);
+
+/**
+ * slow_work_unregister_user - Unregister a user of the facility
+ *
+ * Unregister a user of the facility, killing all the threads if this was the
+ * last one.
+ */
+void slow_work_unregister_user(void)
+{
+ mutex_lock(&slow_work_user_lock);
+
+ BUG_ON(slow_work_user_count <= 0);
+
+ slow_work_user_count--;
+ if (slow_work_user_count == 0) {
+ printk(KERN_NOTICE "Slow work thread pool: Shutting down\n");
+ slow_work_threads_should_exit = true;
+ wake_up_all(&slow_work_thread_wq);
+ wait_for_completion(&slow_work_last_thread_exited);
+ printk(KERN_NOTICE "Slow work thread pool:"
+ " Shut down complete\n");
+ }
+
+ del_timer_sync(&slow_work_cull_timer);
+
+ mutex_unlock(&slow_work_user_lock);
+}
+EXPORT_SYMBOL(slow_work_unregister_user);
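
A sketch of the expected registration pattern for a hypothetical module using the pool (names are illustrative):

#include <linux/module.h>
#include <linux/slow-work.h>

static int __init my_fs_init(void)
{
	int ret;

	ret = slow_work_register_user();	/* starts kslowd if needed  */
	if (ret < 0)
		return ret;

	/* ... slow_work_init() + slow_work_enqueue() as required ... */
	return 0;
}

static void __exit my_fs_exit(void)
{
	/* all of this module's items must have completed by now */
	slow_work_unregister_user();		/* last user stops the pool */
}

module_init(my_fs_init);
module_exit(my_fs_exit);
MODULE_LICENSE("GPL");
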
+
+/*
+ * Initialise the slow work facility
+ */
+static int __init init_slow_work(void)
+{
+ unsigned nr_cpus = num_possible_cpus();
+
+ if (slow_work_max_threads < nr_cpus)
+ slow_work_max_threads = nr_cpus;
+#ifdef CONFIG_SYSCTL
+ if (slow_work_max_max_threads < nr_cpus * 2)
+ slow_work_max_max_threads = nr_cpus * 2;
+#endif
+ return 0;
+}
+
+subsys_initcall(init_slow_work);
diff --git a/kernel/smp.c b/kernel/smp.c
index bbedbb7efe3..858baac568e 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -2,40 +2,82 @@
* Generic helpers for smp ipi calls
*
* (C) Jens Axboe <jens.axboe@oracle.com> 2008
- *
*/
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/init.h>
#include <linux/smp.h>
+#include <linux/cpu.h>
static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
-static LIST_HEAD(call_function_queue);
-__cacheline_aligned_in_smp DEFINE_SPINLOCK(call_function_lock);
+
+static struct {
+ struct list_head queue;
+ spinlock_t lock;
+} call_function __cacheline_aligned_in_smp =
+ {
+ .queue = LIST_HEAD_INIT(call_function.queue),
+ .lock = __SPIN_LOCK_UNLOCKED(call_function.lock),
+ };
enum {
- CSD_FLAG_WAIT = 0x01,
- CSD_FLAG_ALLOC = 0x02,
- CSD_FLAG_LOCK = 0x04,
+ CSD_FLAG_LOCK = 0x01,
};
struct call_function_data {
- struct call_single_data csd;
- spinlock_t lock;
- unsigned int refs;
- struct rcu_head rcu_head;
- unsigned long cpumask_bits[];
+ struct call_single_data csd;
+ spinlock_t lock;
+ unsigned int refs;
+ cpumask_var_t cpumask;
};
struct call_single_queue {
- struct list_head list;
- spinlock_t lock;
+ struct list_head list;
+ spinlock_t lock;
+};
+
+static DEFINE_PER_CPU(struct call_function_data, cfd_data) = {
+ .lock = __SPIN_LOCK_UNLOCKED(cfd_data.lock),
+};
+
+static int
+hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+ long cpu = (long)hcpu;
+ struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+ if (!alloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
+ cpu_to_node(cpu)))
+ return NOTIFY_BAD;
+ break;
+
+#ifdef CONFIG_HOTPLUG_CPU
+ case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
+
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ free_cpumask_var(cfd->cpumask);
+ break;
+#endif
+ };
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
+ .notifier_call = hotplug_cfd,
};
static int __cpuinit init_call_single_data(void)
{
+ void *cpu = (void *)(long)smp_processor_id();
int i;
for_each_possible_cpu(i) {
@@ -44,29 +86,63 @@ static int __cpuinit init_call_single_data(void)
spin_lock_init(&q->lock);
INIT_LIST_HEAD(&q->list);
}
+
+ hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
+ register_cpu_notifier(&hotplug_cfd_notifier);
+
return 0;
}
early_initcall(init_call_single_data);
-static void csd_flag_wait(struct call_single_data *data)
+/*
+ * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
+ *
+ * For non-synchronous ipi calls the csd can still be in use by the
+ * previous function call. For multi-cpu calls it's even more interesting
+ * as we'll have to ensure no other cpu is observing our csd.
+ */
+static void csd_lock_wait(struct call_single_data *data)
{
- /* Wait for response */
- do {
- if (!(data->flags & CSD_FLAG_WAIT))
- break;
+ while (data->flags & CSD_FLAG_LOCK)
cpu_relax();
- } while (1);
+}
+
+static void csd_lock(struct call_single_data *data)
+{
+ csd_lock_wait(data);
+ data->flags = CSD_FLAG_LOCK;
+
+ /*
+ * prevent CPU from reordering the above assignment
+ * to ->flags with any subsequent assignments to other
+ * fields of the specified call_single_data structure:
+ */
+ smp_mb();
+}
+
+static void csd_unlock(struct call_single_data *data)
+{
+ WARN_ON(!(data->flags & CSD_FLAG_LOCK));
+
+ /*
+ * ensure we're all done before releasing data:
+ */
+ smp_mb();
+
+ data->flags &= ~CSD_FLAG_LOCK;
}
/*
- * Insert a previously allocated call_single_data element for execution
- * on the given CPU. data must already have ->func, ->info, and ->flags set.
+ * Insert a previously allocated call_single_data element
+ * for execution on the given CPU. data must already have
+ * ->func, ->info, and ->flags set.
*/
-static void generic_exec_single(int cpu, struct call_single_data *data)
+static
+void generic_exec_single(int cpu, struct call_single_data *data, int wait)
{
struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
- int wait = data->flags & CSD_FLAG_WAIT, ipi;
unsigned long flags;
+ int ipi;
spin_lock_irqsave(&dst->lock, flags);
ipi = list_empty(&dst->list);
@@ -74,24 +150,21 @@ static void generic_exec_single(int cpu, struct call_single_data *data)
spin_unlock_irqrestore(&dst->lock, flags);
/*
- * Make the list addition visible before sending the ipi.
+ * The list addition should be visible before sending the IPI
+ * handler locks the list to pull the entry off it because of
+ * normal cache coherency rules implied by spinlocks.
+ *
+ * If IPIs can go out of order to the cache coherency protocol
+ * in an architecture, sufficient synchronisation should be added
+ * to arch code to make it appear to obey cache coherency WRT
+ * locking and barrier primitives. Generic code isn't really
+ * equipped to do the right thing...
*/
- smp_mb();
-
if (ipi)
arch_send_call_function_single_ipi(cpu);
if (wait)
- csd_flag_wait(data);
-}
-
-static void rcu_free_call_data(struct rcu_head *head)
-{
- struct call_function_data *data;
-
- data = container_of(head, struct call_function_data, rcu_head);
-
- kfree(data);
+ csd_lock_wait(data);
}
/*
@@ -104,99 +177,83 @@ void generic_smp_call_function_interrupt(void)
int cpu = get_cpu();
/*
- * It's ok to use list_for_each_rcu() here even though we may delete
- * 'pos', since list_del_rcu() doesn't clear ->next
+ * Ensure entry is visible on call_function_queue after we have
+ * entered the IPI. See comment in smp_call_function_many.
+ * If we don't have this, then we may miss an entry on the list
+ * and never get another IPI to process it.
+ */
+ smp_mb();
+
+ /*
+ * It's ok to use list_for_each_rcu() here even though we may
+ * delete 'pos', since list_del_rcu() doesn't clear ->next
*/
- rcu_read_lock();
- list_for_each_entry_rcu(data, &call_function_queue, csd.list) {
+ list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
int refs;
- if (!cpumask_test_cpu(cpu, to_cpumask(data->cpumask_bits)))
+ spin_lock(&data->lock);
+ if (!cpumask_test_cpu(cpu, data->cpumask)) {
+ spin_unlock(&data->lock);
continue;
+ }
+ cpumask_clear_cpu(cpu, data->cpumask);
+ spin_unlock(&data->lock);
data->csd.func(data->csd.info);
spin_lock(&data->lock);
- cpumask_clear_cpu(cpu, to_cpumask(data->cpumask_bits));
WARN_ON(data->refs == 0);
- data->refs--;
- refs = data->refs;
+ refs = --data->refs;
+ if (!refs) {
+ spin_lock(&call_function.lock);
+ list_del_rcu(&data->csd.list);
+ spin_unlock(&call_function.lock);
+ }
spin_unlock(&data->lock);
if (refs)
continue;
- spin_lock(&call_function_lock);
- list_del_rcu(&data->csd.list);
- spin_unlock(&call_function_lock);
-
- if (data->csd.flags & CSD_FLAG_WAIT) {
- /*
- * serialize stores to data with the flag clear
- * and wakeup
- */
- smp_wmb();
- data->csd.flags &= ~CSD_FLAG_WAIT;
- }
- if (data->csd.flags & CSD_FLAG_ALLOC)
- call_rcu(&data->rcu_head, rcu_free_call_data);
+ csd_unlock(&data->csd);
}
- rcu_read_unlock();
put_cpu();
}
/*
- * Invoked by arch to handle an IPI for call function single. Must be called
- * from the arch with interrupts disabled.
+ * Invoked by arch to handle an IPI for call function single. Must be
+ * called from the arch with interrupts disabled.
*/
void generic_smp_call_function_single_interrupt(void)
{
struct call_single_queue *q = &__get_cpu_var(call_single_queue);
+ unsigned int data_flags;
LIST_HEAD(list);
- /*
- * Need to see other stores to list head for checking whether
- * list is empty without holding q->lock
- */
- smp_read_barrier_depends();
- while (!list_empty(&q->list)) {
- unsigned int data_flags;
-
- spin_lock(&q->lock);
- list_replace_init(&q->list, &list);
- spin_unlock(&q->lock);
-
- while (!list_empty(&list)) {
- struct call_single_data *data;
-
- data = list_entry(list.next, struct call_single_data,
- list);
- list_del(&data->list);
-
- /*
- * 'data' can be invalid after this call if
- * flags == 0 (when called through
- * generic_exec_single(), so save them away before
- * making the call.
- */
- data_flags = data->flags;
-
- data->func(data->info);
-
- if (data_flags & CSD_FLAG_WAIT) {
- smp_wmb();
- data->flags &= ~CSD_FLAG_WAIT;
- } else if (data_flags & CSD_FLAG_LOCK) {
- smp_wmb();
- data->flags &= ~CSD_FLAG_LOCK;
- } else if (data_flags & CSD_FLAG_ALLOC)
- kfree(data);
- }
+ spin_lock(&q->lock);
+ list_replace_init(&q->list, &list);
+ spin_unlock(&q->lock);
+
+ while (!list_empty(&list)) {
+ struct call_single_data *data;
+
+ data = list_entry(list.next, struct call_single_data, list);
+ list_del(&data->list);
+
+ /*
+ * 'data' can be invalid after this call if flags == 0
+ * (when called through generic_exec_single()),
+ * so save them away before making the call:
+ */
+ data_flags = data->flags;
+
+ data->func(data->info);
+
/*
- * See comment on outer loop
+ * Unlocked CSDs are valid through generic_exec_single():
*/
- smp_read_barrier_depends();
+ if (data_flags & CSD_FLAG_LOCK)
+ csd_unlock(data);
}
}
@@ -215,65 +272,45 @@ static DEFINE_PER_CPU(struct call_single_data, csd_data);
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
int wait)
{
- struct call_single_data d;
+ struct call_single_data d = {
+ .flags = 0,
+ };
unsigned long flags;
- /* prevent preemption and reschedule on another processor,
- as well as CPU removal */
- int me = get_cpu();
+ int this_cpu;
int err = 0;
+ /*
+ * prevent preemption and reschedule on another processor,
+ * as well as CPU removal
+ */
+ this_cpu = get_cpu();
+
/* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
+ WARN_ON_ONCE(irqs_disabled() && !oops_in_progress);
- if (cpu == me) {
+ if (cpu == this_cpu) {
local_irq_save(flags);
func(info);
local_irq_restore(flags);
- } else if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
- struct call_single_data *data;
+ } else {
+ if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
+ struct call_single_data *data = &d;
+
+ if (!wait)
+ data = &__get_cpu_var(csd_data);
- if (!wait) {
- /*
- * We are calling a function on a single CPU
- * and we are not going to wait for it to finish.
- * We first try to allocate the data, but if we
- * fail, we fall back to use a per cpu data to pass
- * the information to that CPU. Since all callers
- * of this code will use the same data, we must
- * synchronize the callers to prevent a new caller
- * from corrupting the data before the callee
- * can access it.
- *
- * The CSD_FLAG_LOCK is used to let us know when
- * the IPI handler is done with the data.
- * The first caller will set it, and the callee
- * will clear it. The next caller must wait for
- * it to clear before we set it again. This
- * will make sure the callee is done with the
- * data before a new caller will use it.
- */
- data = kmalloc(sizeof(*data), GFP_ATOMIC);
- if (data)
- data->flags = CSD_FLAG_ALLOC;
- else {
- data = &per_cpu(csd_data, me);
- while (data->flags & CSD_FLAG_LOCK)
- cpu_relax();
- data->flags = CSD_FLAG_LOCK;
- }
+ csd_lock(data);
+
+ data->func = func;
+ data->info = info;
+ generic_exec_single(cpu, data, wait);
} else {
- data = &d;
- data->flags = CSD_FLAG_WAIT;
+ err = -ENXIO; /* CPU not online */
}
-
- data->func = func;
- data->info = info;
- generic_exec_single(cpu, data);
- } else {
- err = -ENXIO; /* CPU not online */
}
put_cpu();
+
return err;
}
EXPORT_SYMBOL(smp_call_function_single);
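
A minimal caller sketch (function name and CPU number are illustrative): run a non-blocking function on one other CPU and wait for it to finish.

#include <linux/smp.h>

static void flush_local_state(void *info)
{
	/* runs on the target CPU from the IPI handler: must not sleep */
}

static int flush_cpu(int cpu)
{
	/* wait == 1: returns once flush_local_state() has run on @cpu,
	 * or -ENXIO if that CPU is not online */
	return smp_call_function_single(cpu, flush_local_state, NULL, 1);
}
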
@@ -283,23 +320,26 @@ EXPORT_SYMBOL(smp_call_function_single);
* @cpu: The CPU to run on.
* @data: Pre-allocated and setup data structure
*
- * Like smp_call_function_single(), but allow caller to pass in a pre-allocated
- * data structure. Useful for embedding @data inside other structures, for
- * instance.
- *
+ * Like smp_call_function_single(), but allow caller to pass in a
+ * pre-allocated data structure. Useful for embedding @data inside
+ * other structures, for instance.
*/
-void __smp_call_function_single(int cpu, struct call_single_data *data)
+void __smp_call_function_single(int cpu, struct call_single_data *data,
+ int wait)
{
+ csd_lock(data);
+
/* Can deadlock when called with interrupts disabled */
- WARN_ON((data->flags & CSD_FLAG_WAIT) && irqs_disabled());
+ WARN_ON_ONCE(wait && irqs_disabled() && !oops_in_progress);
- generic_exec_single(cpu, data);
+ generic_exec_single(cpu, data, wait);
}
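
A sketch of the embedded-csd pattern this interface is meant for, mirroring the softirq caller updated later in this patch; the surrounding structure and names are illustrative.

#include <linux/kernel.h>
#include <linux/smp.h>

struct my_remote_kick {
	struct call_single_data	csd;
	int			payload;
};

static void my_remote_func(void *info)
{
	struct my_remote_kick *k = info;

	/* interrupt context: act on k->payload without sleeping */
	pr_debug("remote kick: %d\n", k->payload);
}

static void my_kick_cpu(int cpu, struct my_remote_kick *k)
{
	k->csd.flags = 0;		/* unlocked, safe for csd_lock()   */
	k->csd.func  = my_remote_func;
	k->csd.info  = k;
	__smp_call_function_single(cpu, &k->csd, 0);	/* fire and forget */
}
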
-/* FIXME: Shim for archs using old arch_send_call_function_ipi API. */
+/* Deprecated: shim for archs using old arch_send_call_function_ipi API. */
+
#ifndef arch_send_call_function_ipi_mask
-#define arch_send_call_function_ipi_mask(maskp) \
- arch_send_call_function_ipi(*(maskp))
+# define arch_send_call_function_ipi_mask(maskp) \
+ arch_send_call_function_ipi(*(maskp))
#endif
/**
@@ -307,7 +347,8 @@ void __smp_call_function_single(int cpu, struct call_single_data *data)
* @mask: The set of cpus to run on (only runs on online subset).
* @func: The function to run. This must be fast and non-blocking.
* @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ * @wait: If true, wait (atomically) until function has completed
+ * on other CPUs.
*
* If @wait is true, then returns once @func has returned. Note that @wait
* will be implicitly turned on in case of allocation failures, since
@@ -318,27 +359,27 @@ void __smp_call_function_single(int cpu, struct call_single_data *data)
* must be disabled when calling this function.
*/
void smp_call_function_many(const struct cpumask *mask,
- void (*func)(void *), void *info,
- bool wait)
+ void (*func)(void *), void *info, bool wait)
{
struct call_function_data *data;
unsigned long flags;
- int cpu, next_cpu;
+ int cpu, next_cpu, this_cpu = smp_processor_id();
/* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
+ WARN_ON_ONCE(irqs_disabled() && !oops_in_progress);
- /* So, what's a CPU they want? Ignoring this one. */
+ /* So, what's a CPU they want? Ignoring this one. */
cpu = cpumask_first_and(mask, cpu_online_mask);
- if (cpu == smp_processor_id())
+ if (cpu == this_cpu)
cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
+
/* No online cpus? We're done. */
if (cpu >= nr_cpu_ids)
return;
/* Do we have another CPU which isn't us? */
next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
- if (next_cpu == smp_processor_id())
+ if (next_cpu == this_cpu)
next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);
/* Fastpath: do that cpu by itself. */
@@ -347,43 +388,40 @@ void smp_call_function_many(const struct cpumask *mask,
return;
}
- data = kmalloc(sizeof(*data) + cpumask_size(), GFP_ATOMIC);
- if (unlikely(!data)) {
- /* Slow path. */
- for_each_online_cpu(cpu) {
- if (cpu == smp_processor_id())
- continue;
- if (cpumask_test_cpu(cpu, mask))
- smp_call_function_single(cpu, func, info, wait);
- }
- return;
- }
+ data = &__get_cpu_var(cfd_data);
+ csd_lock(&data->csd);
- spin_lock_init(&data->lock);
- data->csd.flags = CSD_FLAG_ALLOC;
- if (wait)
- data->csd.flags |= CSD_FLAG_WAIT;
+ spin_lock_irqsave(&data->lock, flags);
data->csd.func = func;
data->csd.info = info;
- cpumask_and(to_cpumask(data->cpumask_bits), mask, cpu_online_mask);
- cpumask_clear_cpu(smp_processor_id(), to_cpumask(data->cpumask_bits));
- data->refs = cpumask_weight(to_cpumask(data->cpumask_bits));
+ cpumask_and(data->cpumask, mask, cpu_online_mask);
+ cpumask_clear_cpu(this_cpu, data->cpumask);
+ data->refs = cpumask_weight(data->cpumask);
- spin_lock_irqsave(&call_function_lock, flags);
- list_add_tail_rcu(&data->csd.list, &call_function_queue);
- spin_unlock_irqrestore(&call_function_lock, flags);
+ spin_lock(&call_function.lock);
+ /*
+ * Place entry at the _HEAD_ of the list, so that any cpu still
+ * observing the entry in generic_smp_call_function_interrupt()
+ * will not miss any other list entries:
+ */
+ list_add_rcu(&data->csd.list, &call_function.queue);
+ spin_unlock(&call_function.lock);
+
+ spin_unlock_irqrestore(&data->lock, flags);
/*
* Make the list addition visible before sending the ipi.
+ * (IPIs must obey or appear to obey normal Linux cache
+ * coherency rules -- see comment in generic_exec_single).
*/
smp_mb();
/* Send a message to all CPUs in the map */
- arch_send_call_function_ipi_mask(to_cpumask(data->cpumask_bits));
+ arch_send_call_function_ipi_mask(data->cpumask);
- /* optionally wait for the CPUs to complete */
+ /* Optionally wait for the CPUs to complete */
if (wait)
- csd_flag_wait(&data->csd);
+ csd_lock_wait(&data->csd);
}
EXPORT_SYMBOL(smp_call_function_many);
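
Caller sketch (illustrative): preemption must be disabled around the call, as the kerneldoc above requires.

#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/smp.h>

static void drain_local_queue(void *info)
{
	/* fast and non-blocking; runs on every selected online CPU */
}

static void drain_cpus(const struct cpumask *mask)
{
	preempt_disable();
	smp_call_function_many(mask, drain_local_queue, NULL, true);
	preempt_enable();
}
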
@@ -391,7 +429,8 @@ EXPORT_SYMBOL(smp_call_function_many);
* smp_call_function(): Run a function on all other CPUs.
* @func: The function to run. This must be fast and non-blocking.
* @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ * @wait: If true, wait (atomically) until function has completed
+ * on other CPUs.
*
* Returns 0.
*
@@ -407,26 +446,27 @@ int smp_call_function(void (*func)(void *), void *info, int wait)
preempt_disable();
smp_call_function_many(cpu_online_mask, func, info, wait);
preempt_enable();
+
return 0;
}
EXPORT_SYMBOL(smp_call_function);
void ipi_call_lock(void)
{
- spin_lock(&call_function_lock);
+ spin_lock(&call_function.lock);
}
void ipi_call_unlock(void)
{
- spin_unlock(&call_function_lock);
+ spin_unlock(&call_function.lock);
}
void ipi_call_lock_irq(void)
{
- spin_lock_irq(&call_function_lock);
+ spin_lock_irq(&call_function.lock);
}
void ipi_call_unlock_irq(void)
{
- spin_unlock_irq(&call_function_lock);
+ spin_unlock_irq(&call_function.lock);
}
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 65ff3e3961b..2fecefacdc5 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -65,7 +65,7 @@ char *softirq_to_name[NR_SOFTIRQS] = {
* to the pending events, so lets the scheduler to balance
* the softirq load for us.
*/
-static inline void wakeup_softirqd(void)
+void wakeup_softirqd(void)
{
/* Interrupts are disabled: no need to stop preemption */
struct task_struct *tsk = __get_cpu_var(ksoftirqd);
@@ -518,7 +518,7 @@ static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softir
cp->flags = 0;
cp->priv = softirq;
- __smp_call_function_single(cpu, cp);
+ __smp_call_function_single(cpu, cp, 0);
return 0;
}
return 1;
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 29ab20749dd..7932653c4eb 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -121,7 +121,8 @@ unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
local_irq_save(flags);
preempt_disable();
rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+ LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
+ _raw_read_lock_flags, &flags);
return flags;
}
EXPORT_SYMBOL(_read_lock_irqsave);
@@ -151,7 +152,8 @@ unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
local_irq_save(flags);
preempt_disable();
rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+ LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
+ _raw_write_lock_flags, &flags);
return flags;
}
EXPORT_SYMBOL(_write_lock_irqsave);
@@ -299,16 +301,8 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclas
local_irq_save(flags);
preempt_disable();
spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
- /*
- * On lockdep we dont want the hand-coded irq-enable of
- * _raw_spin_lock_flags() code, because lockdep assumes
- * that interrupts are not re-enabled during lock-acquire:
- */
-#ifdef CONFIG_LOCKDEP
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
-#else
- _raw_spin_lock_flags(lock, &flags);
-#endif
+ LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock,
+ _raw_spin_lock_flags, &flags);
return flags;
}
EXPORT_SYMBOL(_spin_lock_irqsave_nested);
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 74541ca4953..912823e2a11 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -44,7 +44,7 @@ static DEFINE_MUTEX(setup_lock);
static int refcount;
static struct workqueue_struct *stop_machine_wq;
static struct stop_machine_data active, idle;
-static const cpumask_t *active_cpus;
+static const struct cpumask *active_cpus;
static void *stop_machine_work;
static void set_state(enum stopmachine_state newstate)
diff --git a/kernel/sys.c b/kernel/sys.c
index 37f458e6882..51dbb55604e 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -34,6 +34,7 @@
#include <linux/seccomp.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
+#include <linux/fs_struct.h>
#include <linux/compat.h>
#include <linux/syscalls.h>
@@ -1013,10 +1014,8 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
if (err)
goto out;
- if (task_pgrp(p) != pgrp) {
+ if (task_pgrp(p) != pgrp)
change_pid(p, PIDTYPE_PGID, pgrp);
- set_task_pgrp(p, pid_nr(pgrp));
- }
err = 0;
out:
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index c5ef44ff850..72eb1a41dca 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -48,6 +48,7 @@
#include <linux/acpi.h>
#include <linux/reboot.h>
#include <linux/ftrace.h>
+#include <linux/slow-work.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
@@ -95,14 +96,12 @@ static int sixty = 60;
static int neg_one = -1;
#endif
-#if defined(CONFIG_MMU) && defined(CONFIG_FILE_LOCKING)
-static int two = 2;
-#endif
-
static int zero;
-static int one = 1;
+static int __maybe_unused one = 1;
+static int __maybe_unused two = 2;
static unsigned long one_ul = 1;
static int one_hundred = 100;
+static int one_thousand = 1000;
/* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
static int maxolduid = 65535;
@@ -900,6 +899,14 @@ static struct ctl_table kern_table[] = {
.proc_handler = &scan_unevictable_handler,
},
#endif
+#ifdef CONFIG_SLOW_WORK
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "slow-work",
+ .mode = 0555,
+ .child = slow_work_sysctls,
+ },
+#endif
/*
* NOTE: do not add new entries to this table unless you have read
* Documentation/sysctl/ctl_unnumbered.txt
@@ -1010,7 +1017,7 @@ static struct ctl_table vm_table[] = {
.data = &dirty_expire_interval,
.maxlen = sizeof(dirty_expire_interval),
.mode = 0644,
- .proc_handler = &proc_dointvec_userhz_jiffies,
+ .proc_handler = &proc_dointvec,
},
{
.ctl_name = VM_NR_PDFLUSH_THREADS,
@@ -1021,6 +1028,28 @@ static struct ctl_table vm_table[] = {
.proc_handler = &proc_dointvec,
},
{
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "nr_pdflush_threads_min",
+ .data = &nr_pdflush_threads_min,
+ .maxlen = sizeof nr_pdflush_threads_min,
+ .mode = 0644 /* read-write */,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &one,
+ .extra2 = &nr_pdflush_threads_max,
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "nr_pdflush_threads_max",
+ .data = &nr_pdflush_threads_max,
+ .maxlen = sizeof nr_pdflush_threads_max,
+ .mode = 0644 /* read-write */,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &nr_pdflush_threads_min,
+ .extra2 = &one_thousand,
+ },
+ {
.ctl_name = VM_SWAPPINESS,
.procname = "swappiness",
.data = &vm_swappiness,
@@ -1373,10 +1402,7 @@ static struct ctl_table fs_table[] = {
.data = &lease_break_time,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
- .extra1 = &zero,
- .extra2 = &two,
+ .proc_handler = &proc_dointvec,
},
#endif
#ifdef CONFIG_AIO
@@ -1417,7 +1443,10 @@ static struct ctl_table fs_table[] = {
.data = &suid_dumpable,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &zero,
+ .extra2 = &two,
},
#if defined(CONFIG_BINFMT_MISC) || defined(CONFIG_BINFMT_MISC_MODULE)
{
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index fafeb48f27c..b38423ca711 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -219,6 +219,7 @@ static const struct trans_ctl_table trans_net_ipv4_conf_vars_table[] = {
{ NET_IPV4_CONF_ARP_IGNORE, "arp_ignore" },
{ NET_IPV4_CONF_PROMOTE_SECONDARIES, "promote_secondaries" },
{ NET_IPV4_CONF_ARP_ACCEPT, "arp_accept" },
+ { NET_IPV4_CONF_ARP_NOTIFY, "arp_notify" },
{}
};
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index 905b0b50792..0b0a6366c9d 100644
--- a/kernel/time/Makefile
+++ b/kernel/time/Makefile
@@ -1,4 +1,4 @@
-obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o
+obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o timecompare.o
obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o
obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index ea2f48af83c..d13be216a79 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -68,6 +68,17 @@ void clockevents_set_mode(struct clock_event_device *dev,
if (dev->mode != mode) {
dev->set_mode(mode, dev);
dev->mode = mode;
+
+ /*
+ * A nsec2cyc multiplicator of 0 is invalid and we'd crash
+ * on it, so fix it up and emit a warning:
+ */
+ if (mode == CLOCK_EVT_MODE_ONESHOT) {
+ if (unlikely(!dev->mult)) {
+ dev->mult = 1;
+ WARN_ON(1);
+ }
+ }
}
}
@@ -168,15 +179,6 @@ void clockevents_register_device(struct clock_event_device *dev)
BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
BUG_ON(!dev->cpumask);
- /*
- * A nsec2cyc multiplicator of 0 is invalid and we'd crash
- * on it, so fix it up and emit a warning:
- */
- if (unlikely(!dev->mult)) {
- dev->mult = 1;
- WARN_ON(1);
- }
-
spin_lock(&clockevents_lock);
list_add(&dev->list, &clockevent_devices);
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index ca89e1593f0..c46c931a7fe 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -31,6 +31,82 @@
#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
#include <linux/tick.h>
+void timecounter_init(struct timecounter *tc,
+ const struct cyclecounter *cc,
+ u64 start_tstamp)
+{
+ tc->cc = cc;
+ tc->cycle_last = cc->read(cc);
+ tc->nsec = start_tstamp;
+}
+EXPORT_SYMBOL(timecounter_init);
+
+/**
+ * timecounter_read_delta - get nanoseconds since last call of this function
+ * @tc: Pointer to time counter
+ *
+ * When the underlying cycle counter runs over, this will be handled
+ * correctly as long as it does not run over more than once between
+ * calls.
+ *
+ * The first call to this function for a new time counter initializes
+ * the time tracking and returns an undefined result.
+ */
+static u64 timecounter_read_delta(struct timecounter *tc)
+{
+ cycle_t cycle_now, cycle_delta;
+ u64 ns_offset;
+
+ /* read cycle counter: */
+ cycle_now = tc->cc->read(tc->cc);
+
+ /* calculate the delta since the last timecounter_read_delta(): */
+ cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask;
+
+ /* convert to nanoseconds: */
+ ns_offset = cyclecounter_cyc2ns(tc->cc, cycle_delta);
+
+ /* update time stamp of timecounter_read_delta() call: */
+ tc->cycle_last = cycle_now;
+
+ return ns_offset;
+}
+
+u64 timecounter_read(struct timecounter *tc)
+{
+ u64 nsec;
+
+ /* increment time by nanoseconds since last call */
+ nsec = timecounter_read_delta(tc);
+ nsec += tc->nsec;
+ tc->nsec = nsec;
+
+ return nsec;
+}
+EXPORT_SYMBOL(timecounter_read);
+
+u64 timecounter_cyc2time(struct timecounter *tc,
+ cycle_t cycle_tstamp)
+{
+ u64 cycle_delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
+ u64 nsec;
+
+ /*
+ * Instead of always treating cycle_tstamp as more recent
+ * than tc->cycle_last, detect when it is too far in the
+ * future and treat it as old time stamp instead.
+ */
+ if (cycle_delta > tc->cc->mask / 2) {
+ cycle_delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;
+ nsec = tc->nsec - cyclecounter_cyc2ns(tc->cc, cycle_delta);
+ } else {
+ nsec = cyclecounter_cyc2ns(tc->cc, cycle_delta) + tc->nsec;
+ }
+
+ return nsec;
+}
+EXPORT_SYMBOL(timecounter_cyc2time);
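
A driver-side sketch of the new timecounter helpers. The hardware read callback and the mult/shift scaling are assumptions for illustration, not defined by this patch.

#include <linux/clocksource.h>
#include <linux/ktime.h>

extern cycle_t my_read_device_counter(void);	/* hypothetical MMIO read */

static cycle_t my_hw_read(const struct cyclecounter *cc)
{
	return my_read_device_counter();
}

static struct cyclecounter my_cc = {
	.read	= my_hw_read,
	.mask	= CLOCKSOURCE_MASK(64),
	.mult	= 1 << 20,		/* device-specific scaling, assumed */
	.shift	= 20,
};

static struct timecounter my_tc;

static void my_start_clock(void)
{
	/* start counting nanoseconds from the current wall-clock time */
	timecounter_init(&my_tc, &my_cc, ktime_to_ns(ktime_get_real()));
}

static u64 my_stamp_to_ns(cycle_t hw_stamp)
{
	timecounter_read(&my_tc);		/* keep wrap tracking fresh */
	return timecounter_cyc2time(&my_tc, hw_stamp);
}
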
+
/* XXX - Would like a better way for initializing curr_clocksource */
extern struct clocksource clocksource_jiffies;
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index f5f793d9241..7fc64375ff4 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -1,71 +1,129 @@
/*
- * linux/kernel/time/ntp.c
- *
* NTP state machine interfaces and logic.
*
* This code was mainly moved from kernel/timer.c and kernel/time.c
* Please see those files for relevant copyright info and historical
* changelogs.
*/
-
-#include <linux/mm.h>
-#include <linux/time.h>
-#include <linux/timex.h>
-#include <linux/jiffies.h>
-#include <linux/hrtimer.h>
#include <linux/capability.h>
-#include <linux/math64.h>
#include <linux/clocksource.h>
#include <linux/workqueue.h>
-#include <asm/timex.h>
+#include <linux/hrtimer.h>
+#include <linux/jiffies.h>
+#include <linux/math64.h>
+#include <linux/timex.h>
+#include <linux/time.h>
+#include <linux/mm.h>
/*
- * Timekeeping variables
+ * NTP timekeeping variables:
*/
-unsigned long tick_usec = TICK_USEC; /* USER_HZ period (usec) */
-unsigned long tick_nsec; /* ACTHZ period (nsec) */
-u64 tick_length;
-static u64 tick_length_base;
-static struct hrtimer leap_timer;
+/* USER_HZ period (usecs): */
+unsigned long tick_usec = TICK_USEC;
-#define MAX_TICKADJ 500 /* microsecs */
-#define MAX_TICKADJ_SCALED (((u64)(MAX_TICKADJ * NSEC_PER_USEC) << \
- NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
+/* ACTHZ period (nsecs): */
+unsigned long tick_nsec;
+
+u64 tick_length;
+static u64 tick_length_base;
+
+static struct hrtimer leap_timer;
+
+#define MAX_TICKADJ 500LL /* usecs */
+#define MAX_TICKADJ_SCALED \
+ (((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
/*
* phase-lock loop variables
*/
-/* TIME_ERROR prevents overwriting the CMOS clock */
-static int time_state = TIME_OK; /* clock synchronization status */
-int time_status = STA_UNSYNC; /* clock status bits */
-static long time_tai; /* TAI offset (s) */
-static s64 time_offset; /* time adjustment (ns) */
-static long time_constant = 2; /* pll time constant */
-long time_maxerror = NTP_PHASE_LIMIT; /* maximum error (us) */
-long time_esterror = NTP_PHASE_LIMIT; /* estimated error (us) */
-static s64 time_freq; /* frequency offset (scaled ns/s)*/
-static long time_reftime; /* time at last adjustment (s) */
-long time_adjust;
-static long ntp_tick_adj;
+/*
+ * clock synchronization status
+ *
+ * (TIME_ERROR prevents overwriting the CMOS clock)
+ */
+static int time_state = TIME_OK;
+
+/* clock status bits: */
+int time_status = STA_UNSYNC;
+
+/* TAI offset (secs): */
+static long time_tai;
+
+/* time adjustment (nsecs): */
+static s64 time_offset;
+
+/* pll time constant: */
+static long time_constant = 2;
+
+/* maximum error (usecs): */
+long time_maxerror = NTP_PHASE_LIMIT;
+
+/* estimated error (usecs): */
+long time_esterror = NTP_PHASE_LIMIT;
+
+/* frequency offset (scaled nsecs/secs): */
+static s64 time_freq;
+
+/* time at last adjustment (secs): */
+static long time_reftime;
+
+long time_adjust;
+
+/* constant (boot-param configurable) NTP tick adjustment (upscaled) */
+static s64 ntp_tick_adj;
+
+/*
+ * NTP methods:
+ */
+
+/*
+ * Update (tick_length, tick_length_base, tick_nsec), based
+ * on (tick_usec, ntp_tick_adj, time_freq):
+ */
static void ntp_update_frequency(void)
{
- u64 second_length = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ)
- << NTP_SCALE_SHIFT;
- second_length += (s64)ntp_tick_adj << NTP_SCALE_SHIFT;
- second_length += time_freq;
+ u64 second_length;
+ u64 new_base;
+
+ second_length = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ)
+ << NTP_SCALE_SHIFT;
+
+ second_length += ntp_tick_adj;
+ second_length += time_freq;
- tick_length_base = second_length;
+ tick_nsec = div_u64(second_length, HZ) >> NTP_SCALE_SHIFT;
+ new_base = div_u64(second_length, NTP_INTERVAL_FREQ);
- tick_nsec = div_u64(second_length, HZ) >> NTP_SCALE_SHIFT;
- tick_length_base = div_u64(tick_length_base, NTP_INTERVAL_FREQ);
+ /*
+ * Don't wait for the next second_overflow, apply
+ * the change to the tick length immediately:
+ */
+ tick_length += new_base - tick_length_base;
+ tick_length_base = new_base;
+}
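
All of the above is kept in NTP_SCALE_SHIFT fixed point. Stripped of the scaling, and with values assumed for illustration (USER_HZ = 100, HZ = NTP_INTERVAL_FREQ = 1000, zero ntp_tick_adj and time_freq), the update reduces to:

/* Unscaled sketch only; the real code keeps everything << NTP_SCALE_SHIFT. */
static u64 example_tick_nsec(void)
{
	u64 second_ns = 10000ULL * 1000 * 100;	/* tick_usec * NSEC_PER_USEC
						 * * USER_HZ = 1e9 ns/second */

	return second_ns / 1000;		/* / HZ -> 1,000,000 ns per
						 * jiffy; dividing by
						 * NTP_INTERVAL_FREQ likewise
						 * gives tick_length_base    */
}
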
+
+static inline s64 ntp_update_offset_fll(s64 offset64, long secs)
+{
+ time_status &= ~STA_MODE;
+
+ if (secs < MINSEC)
+ return 0;
+
+ if (!(time_status & STA_FLL) && (secs <= MAXSEC))
+ return 0;
+
+ time_status |= STA_MODE;
+
+ return div_s64(offset64 << (NTP_SCALE_SHIFT - SHIFT_FLL), secs);
}
static void ntp_update_offset(long offset)
{
- long mtemp;
s64 freq_adj;
+ s64 offset64;
+ long secs;
if (!(time_status & STA_PLL))
return;
@@ -84,24 +142,23 @@ static void ntp_update_offset(long offset)
* Select how the frequency is to be controlled
* and in which mode (PLL or FLL).
*/
- if (time_status & STA_FREQHOLD || time_reftime == 0)
- time_reftime = xtime.tv_sec;
- mtemp = xtime.tv_sec - time_reftime;
+ secs = xtime.tv_sec - time_reftime;
+ if (unlikely(time_status & STA_FREQHOLD))
+ secs = 0;
+
time_reftime = xtime.tv_sec;
- freq_adj = (s64)offset * mtemp;
- freq_adj <<= NTP_SCALE_SHIFT - 2 * (SHIFT_PLL + 2 + time_constant);
- time_status &= ~STA_MODE;
- if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp > MAXSEC)) {
- freq_adj += div_s64((s64)offset << (NTP_SCALE_SHIFT - SHIFT_FLL),
- mtemp);
- time_status |= STA_MODE;
- }
- freq_adj += time_freq;
- freq_adj = min(freq_adj, MAXFREQ_SCALED);
- time_freq = max(freq_adj, -MAXFREQ_SCALED);
+ offset64 = offset;
+ freq_adj = (offset64 * secs) <<
+ (NTP_SCALE_SHIFT - 2 * (SHIFT_PLL + 2 + time_constant));
- time_offset = div_s64((s64)offset << NTP_SCALE_SHIFT, NTP_INTERVAL_FREQ);
+ freq_adj += ntp_update_offset_fll(offset64, secs);
+
+ freq_adj = min(freq_adj + time_freq, MAXFREQ_SCALED);
+
+ time_freq = max(freq_adj, -MAXFREQ_SCALED);
+
+ time_offset = div_s64(offset64 << NTP_SCALE_SHIFT, NTP_INTERVAL_FREQ);
}
/**
@@ -111,15 +168,15 @@ static void ntp_update_offset(long offset)
*/
void ntp_clear(void)
{
- time_adjust = 0; /* stop active adjtime() */
- time_status |= STA_UNSYNC;
- time_maxerror = NTP_PHASE_LIMIT;
- time_esterror = NTP_PHASE_LIMIT;
+ time_adjust = 0; /* stop active adjtime() */
+ time_status |= STA_UNSYNC;
+ time_maxerror = NTP_PHASE_LIMIT;
+ time_esterror = NTP_PHASE_LIMIT;
ntp_update_frequency();
- tick_length = tick_length_base;
- time_offset = 0;
+ tick_length = tick_length_base;
+ time_offset = 0;
}
/*
@@ -140,8 +197,8 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
xtime.tv_sec--;
wall_to_monotonic.tv_sec++;
time_state = TIME_OOP;
- printk(KERN_NOTICE "Clock: "
- "inserting leap second 23:59:60 UTC\n");
+ printk(KERN_NOTICE
+ "Clock: inserting leap second 23:59:60 UTC\n");
hrtimer_add_expires_ns(&leap_timer, NSEC_PER_SEC);
res = HRTIMER_RESTART;
break;
@@ -150,8 +207,8 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
time_tai--;
wall_to_monotonic.tv_sec--;
time_state = TIME_WAIT;
- printk(KERN_NOTICE "Clock: "
- "deleting leap second 23:59:59 UTC\n");
+ printk(KERN_NOTICE
+ "Clock: deleting leap second 23:59:59 UTC\n");
break;
case TIME_OOP:
time_tai++;
@@ -179,7 +236,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
*/
void second_overflow(void)
{
- s64 time_adj;
+ s64 delta;
/* Bump the maxerror field */
time_maxerror += MAXFREQ / NSEC_PER_USEC;
@@ -192,24 +249,30 @@ void second_overflow(void)
* Compute the phase adjustment for the next second. The offset is
* reduced by a fixed factor times the time constant.
*/
- tick_length = tick_length_base;
- time_adj = shift_right(time_offset, SHIFT_PLL + time_constant);
- time_offset -= time_adj;
- tick_length += time_adj;
-
- if (unlikely(time_adjust)) {
- if (time_adjust > MAX_TICKADJ) {
- time_adjust -= MAX_TICKADJ;
- tick_length += MAX_TICKADJ_SCALED;
- } else if (time_adjust < -MAX_TICKADJ) {
- time_adjust += MAX_TICKADJ;
- tick_length -= MAX_TICKADJ_SCALED;
- } else {
- tick_length += (s64)(time_adjust * NSEC_PER_USEC /
- NTP_INTERVAL_FREQ) << NTP_SCALE_SHIFT;
- time_adjust = 0;
- }
+ tick_length = tick_length_base;
+
+ delta = shift_right(time_offset, SHIFT_PLL + time_constant);
+ time_offset -= delta;
+ tick_length += delta;
+
+ if (!time_adjust)
+ return;
+
+ if (time_adjust > MAX_TICKADJ) {
+ time_adjust -= MAX_TICKADJ;
+ tick_length += MAX_TICKADJ_SCALED;
+ return;
}
+
+ if (time_adjust < -MAX_TICKADJ) {
+ time_adjust += MAX_TICKADJ;
+ tick_length -= MAX_TICKADJ_SCALED;
+ return;
+ }
+
+ tick_length += (s64)(time_adjust * NSEC_PER_USEC / NTP_INTERVAL_FREQ)
+ << NTP_SCALE_SHIFT;
+ time_adjust = 0;
}
#ifdef CONFIG_GENERIC_CMOS_UPDATE
@@ -233,12 +296,13 @@ static void sync_cmos_clock(struct work_struct *work)
* This code is run on a timer. If the clock is set, that timer
* may not expire at the correct time. Thus, we adjust...
*/
- if (!ntp_synced())
+ if (!ntp_synced()) {
/*
* Not synced, exit, do not restart a timer (if one is
* running, let it run out).
*/
return;
+ }
getnstimeofday(&now);
if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2)
@@ -270,7 +334,116 @@ static void notify_cmos_timer(void)
static inline void notify_cmos_timer(void) { }
#endif
-/* adjtimex mainly allows reading (and writing, if superuser) of
+/*
+ * Start the leap seconds timer:
+ */
+static inline void ntp_start_leap_timer(struct timespec *ts)
+{
+ long now = ts->tv_sec;
+
+ if (time_status & STA_INS) {
+ time_state = TIME_INS;
+ now += 86400 - now % 86400;
+ hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
+
+ return;
+ }
+
+ if (time_status & STA_DEL) {
+ time_state = TIME_DEL;
+ now += 86400 - (now + 1) % 86400;
+ hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
+ }
+}
+
+/*
+ * Propagate a new txc->status value into the NTP state:
+ */
+static inline void process_adj_status(struct timex *txc, struct timespec *ts)
+{
+ if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) {
+ time_state = TIME_OK;
+ time_status = STA_UNSYNC;
+ }
+
+ /*
+ * If we turn on PLL adjustments then reset the
+ * reference time to current time.
+ */
+ if (!(time_status & STA_PLL) && (txc->status & STA_PLL))
+ time_reftime = xtime.tv_sec;
+
+ /* only set allowed bits */
+ time_status &= STA_RONLY;
+ time_status |= txc->status & ~STA_RONLY;
+
+ switch (time_state) {
+ case TIME_OK:
+ ntp_start_leap_timer(ts);
+ break;
+ case TIME_INS:
+ case TIME_DEL:
+ time_state = TIME_OK;
+ ntp_start_leap_timer(ts);
+ case TIME_WAIT:
+ if (!(time_status & (STA_INS | STA_DEL)))
+ time_state = TIME_OK;
+ break;
+ case TIME_OOP:
+ hrtimer_restart(&leap_timer);
+ break;
+ }
+}
+/*
+ * Called with the xtime lock held, so we can access and modify
+ * all the global NTP state:
+ */
+static inline void process_adjtimex_modes(struct timex *txc, struct timespec *ts)
+{
+ if (txc->modes & ADJ_STATUS)
+ process_adj_status(txc, ts);
+
+ if (txc->modes & ADJ_NANO)
+ time_status |= STA_NANO;
+
+ if (txc->modes & ADJ_MICRO)
+ time_status &= ~STA_NANO;
+
+ if (txc->modes & ADJ_FREQUENCY) {
+ time_freq = txc->freq * PPM_SCALE;
+ time_freq = min(time_freq, MAXFREQ_SCALED);
+ time_freq = max(time_freq, -MAXFREQ_SCALED);
+ }
+
+ if (txc->modes & ADJ_MAXERROR)
+ time_maxerror = txc->maxerror;
+
+ if (txc->modes & ADJ_ESTERROR)
+ time_esterror = txc->esterror;
+
+ if (txc->modes & ADJ_TIMECONST) {
+ time_constant = txc->constant;
+ if (!(time_status & STA_NANO))
+ time_constant += 4;
+ time_constant = min(time_constant, (long)MAXTC);
+ time_constant = max(time_constant, 0l);
+ }
+
+ if (txc->modes & ADJ_TAI && txc->constant > 0)
+ time_tai = txc->constant;
+
+ if (txc->modes & ADJ_OFFSET)
+ ntp_update_offset(txc->offset);
+
+ if (txc->modes & ADJ_TICK)
+ tick_usec = txc->tick;
+
+ if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET))
+ ntp_update_frequency();
+}
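
For reference, the status path above is what a (hypothetical) userspace time daemon exercises to arm a leap second; CAP_SYS_TIME is required, per the check in do_adjtimex().

#include <sys/timex.h>

static int request_leap_insert(void)
{
	struct timex txc = {
		.modes  = ADJ_STATUS,
		.status = STA_INS,	/* process_adj_status() -> TIME_INS,
					 * leap_timer armed for UTC midnight */
	};

	return adjtimex(&txc);		/* returns the clock state, or -1
					 * with EPERM without CAP_SYS_TIME  */
}
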
+
+/*
+ * adjtimex mainly allows reading (and writing, if superuser) of
* kernel time-keeping variables. used by xntpd.
*/
int do_adjtimex(struct timex *txc)
@@ -291,11 +464,14 @@ int do_adjtimex(struct timex *txc)
if (txc->modes && !capable(CAP_SYS_TIME))
return -EPERM;
- /* if the quartz is off by more than 10% something is VERY wrong! */
+ /*
+ * if the quartz is off by more than 10% then
+ * something is VERY wrong!
+ */
if (txc->modes & ADJ_TICK &&
(txc->tick < 900000/USER_HZ ||
txc->tick > 1100000/USER_HZ))
- return -EINVAL;
+ return -EINVAL;
if (txc->modes & ADJ_STATUS && time_state != TIME_OK)
hrtimer_cancel(&leap_timer);
@@ -305,7 +481,6 @@ int do_adjtimex(struct timex *txc)
write_seqlock_irq(&xtime_lock);
- /* If there are input parameters, then process them */
if (txc->modes & ADJ_ADJTIME) {
long save_adjust = time_adjust;
@@ -315,98 +490,24 @@ int do_adjtimex(struct timex *txc)
ntp_update_frequency();
}
txc->offset = save_adjust;
- goto adj_done;
- }
- if (txc->modes) {
- long sec;
-
- if (txc->modes & ADJ_STATUS) {
- if ((time_status & STA_PLL) &&
- !(txc->status & STA_PLL)) {
- time_state = TIME_OK;
- time_status = STA_UNSYNC;
- }
- /* only set allowed bits */
- time_status &= STA_RONLY;
- time_status |= txc->status & ~STA_RONLY;
-
- switch (time_state) {
- case TIME_OK:
- start_timer:
- sec = ts.tv_sec;
- if (time_status & STA_INS) {
- time_state = TIME_INS;
- sec += 86400 - sec % 86400;
- hrtimer_start(&leap_timer, ktime_set(sec, 0), HRTIMER_MODE_ABS);
- } else if (time_status & STA_DEL) {
- time_state = TIME_DEL;
- sec += 86400 - (sec + 1) % 86400;
- hrtimer_start(&leap_timer, ktime_set(sec, 0), HRTIMER_MODE_ABS);
- }
- break;
- case TIME_INS:
- case TIME_DEL:
- time_state = TIME_OK;
- goto start_timer;
- break;
- case TIME_WAIT:
- if (!(time_status & (STA_INS | STA_DEL)))
- time_state = TIME_OK;
- break;
- case TIME_OOP:
- hrtimer_restart(&leap_timer);
- break;
- }
- }
-
- if (txc->modes & ADJ_NANO)
- time_status |= STA_NANO;
- if (txc->modes & ADJ_MICRO)
- time_status &= ~STA_NANO;
-
- if (txc->modes & ADJ_FREQUENCY) {
- time_freq = (s64)txc->freq * PPM_SCALE;
- time_freq = min(time_freq, MAXFREQ_SCALED);
- time_freq = max(time_freq, -MAXFREQ_SCALED);
- }
-
- if (txc->modes & ADJ_MAXERROR)
- time_maxerror = txc->maxerror;
- if (txc->modes & ADJ_ESTERROR)
- time_esterror = txc->esterror;
-
- if (txc->modes & ADJ_TIMECONST) {
- time_constant = txc->constant;
- if (!(time_status & STA_NANO))
- time_constant += 4;
- time_constant = min(time_constant, (long)MAXTC);
- time_constant = max(time_constant, 0l);
- }
-
- if (txc->modes & ADJ_TAI && txc->constant > 0)
- time_tai = txc->constant;
-
- if (txc->modes & ADJ_OFFSET)
- ntp_update_offset(txc->offset);
- if (txc->modes & ADJ_TICK)
- tick_usec = txc->tick;
+ } else {
- if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET))
- ntp_update_frequency();
- }
+ /* If there are input parameters, then process them: */
+ if (txc->modes)
+ process_adjtimex_modes(txc, &ts);
- txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
+ txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
NTP_SCALE_SHIFT);
- if (!(time_status & STA_NANO))
- txc->offset /= NSEC_PER_USEC;
+ if (!(time_status & STA_NANO))
+ txc->offset /= NSEC_PER_USEC;
+ }
-adj_done:
result = time_state; /* mostly `TIME_OK' */
if (time_status & (STA_UNSYNC|STA_CLOCKERR))
result = TIME_ERROR;
txc->freq = shift_right((time_freq >> PPM_SCALE_INV_SHIFT) *
- (s64)PPM_SCALE_INV, NTP_SCALE_SHIFT);
+ PPM_SCALE_INV, NTP_SCALE_SHIFT);
txc->maxerror = time_maxerror;
txc->esterror = time_esterror;
txc->status = time_status;
@@ -425,6 +526,7 @@ adj_done:
txc->calcnt = 0;
txc->errcnt = 0;
txc->stbcnt = 0;
+
write_sequnlock_irq(&xtime_lock);
txc->time.tv_sec = ts.tv_sec;
@@ -440,6 +542,8 @@ adj_done:
static int __init ntp_tick_adj_setup(char *str)
{
ntp_tick_adj = simple_strtol(str, NULL, 0);
+ ntp_tick_adj <<= NTP_SCALE_SHIFT;
+
return 1;
}
diff --git a/kernel/time/timecompare.c b/kernel/time/timecompare.c
new file mode 100644
index 00000000000..71e7f1a1915
--- /dev/null
+++ b/kernel/time/timecompare.c
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 2009 Intel Corporation.
+ * Author: Patrick Ohly <patrick.ohly@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/timecompare.h>
+#include <linux/module.h>
+#include <linux/math64.h>
+
+/*
+ * fixed point arithmetic scale factor for skew
+ *
+ * Usually one would measure skew in ppb (parts per billion, 1e9), but
+ * using a factor of 2 simplifies the math.
+ */
+#define TIMECOMPARE_SKEW_RESOLUTION (((s64)1)<<30)
+
+ktime_t timecompare_transform(struct timecompare *sync,
+ u64 source_tstamp)
+{
+ u64 nsec;
+
+ nsec = source_tstamp + sync->offset;
+ nsec += (s64)(source_tstamp - sync->last_update) * sync->skew /
+ TIMECOMPARE_SKEW_RESOLUTION;
+
+ return ns_to_ktime(nsec);
+}
+EXPORT_SYMBOL(timecompare_transform);
+
+int timecompare_offset(struct timecompare *sync,
+ s64 *offset,
+ u64 *source_tstamp)
+{
+ u64 start_source = 0, end_source = 0;
+ struct {
+ s64 offset;
+ s64 duration_target;
+ } buffer[10], sample, *samples;
+ int counter = 0, i;
+ int used;
+ int index;
+ int num_samples = sync->num_samples;
+
+ if (num_samples > sizeof(buffer)/sizeof(buffer[0])) {
+ samples = kmalloc(sizeof(*samples) * num_samples, GFP_ATOMIC);
+ if (!samples) {
+ samples = buffer;
+ num_samples = sizeof(buffer)/sizeof(buffer[0]);
+ }
+ } else {
+ samples = buffer;
+ }
+
+ /* run until we have enough valid samples, but do not try forever */
+ i = 0;
+ counter = 0;
+ while (1) {
+ u64 ts;
+ ktime_t start, end;
+
+ start = sync->target();
+ ts = timecounter_read(sync->source);
+ end = sync->target();
+
+ if (!i)
+ start_source = ts;
+
+ /* ignore negative durations */
+ sample.duration_target = ktime_to_ns(ktime_sub(end, start));
+ if (sample.duration_target >= 0) {
+ /*
+ * assume symmetric delay to and from source:
+ * average target time corresponds to measured
+ * source time
+ */
+ sample.offset =
+ ktime_to_ns(ktime_add(end, start)) / 2 -
+ ts;
+
+ /* simple insertion sort based on duration */
+ index = counter - 1;
+ while (index >= 0) {
+ if (samples[index].duration_target <
+ sample.duration_target)
+ break;
+ samples[index + 1] = samples[index];
+ index--;
+ }
+ samples[index + 1] = sample;
+ counter++;
+ }
+
+ i++;
+ if (counter >= num_samples || i >= 100000) {
+ end_source = ts;
+ break;
+ }
+ }
+
+ *source_tstamp = (end_source + start_source) / 2;
+
+ /* remove outliers by only using 75% of the samples */
+ used = counter * 3 / 4;
+ if (!used)
+ used = counter;
+ if (used) {
+ /* calculate average */
+ s64 off = 0;
+ for (index = 0; index < used; index++)
+ off += samples[index].offset;
+ *offset = div_s64(off, used);
+ }
+
+ if (samples && samples != buffer)
+ kfree(samples);
+
+ return used;
+}
+EXPORT_SYMBOL(timecompare_offset);
+
+void __timecompare_update(struct timecompare *sync,
+ u64 source_tstamp)
+{
+ s64 offset;
+ u64 average_time;
+
+ if (!timecompare_offset(sync, &offset, &average_time))
+ return;
+
+ if (!sync->last_update) {
+ sync->last_update = average_time;
+ sync->offset = offset;
+ sync->skew = 0;
+ } else {
+ s64 delta_nsec = average_time - sync->last_update;
+
+ /* avoid division by negative or small deltas */
+ if (delta_nsec >= 10000) {
+ s64 delta_offset_nsec = offset - sync->offset;
+ s64 skew; /* delta_offset_nsec *
+ TIMECOMPARE_SKEW_RESOLUTION /
+ delta_nsec */
+ u64 divisor;
+
+ /* div_s64() is limited to 32 bit divisor */
+ skew = delta_offset_nsec * TIMECOMPARE_SKEW_RESOLUTION;
+ divisor = delta_nsec;
+ while (unlikely(divisor >= ((s64)1) << 32)) {
+ /* divide both by 2; beware, right shift
+ of negative value has undefined
+ behavior and can only be used for
+ the positive divisor */
+ skew = div_s64(skew, 2);
+ divisor >>= 1;
+ }
+ skew = div_s64(skew, divisor);
+
+ /*
+ * Calculate new overall skew as 4/16 the
+ * old value and 12/16 the new one. This is
+ * a rather arbitrary tradeoff between
+ * only using the latest measurement (0/16 and
+ * 16/16) and even more weight on past measurements.
+ */
+#define TIMECOMPARE_NEW_SKEW_PER_16 12
+ sync->skew =
+ div_s64((16 - TIMECOMPARE_NEW_SKEW_PER_16) *
+ sync->skew +
+ TIMECOMPARE_NEW_SKEW_PER_16 * skew,
+ 16);
+ sync->last_update = average_time;
+ sync->offset = offset;
+ }
+ }
+}
+EXPORT_SYMBOL(__timecompare_update);
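A minimal usage sketch of the API added above, correlating a NIC's hardware timestamp counter with system time. struct example_dev, the example_* names and the header path are assumptions; only the struct timecompare fields and the exported functions come from the code above, and a real driver would rate-limit the resynchronization.

#include <linux/timecompare.h>		/* assumed location of the declarations */
#include <linux/clocksource.h>		/* struct timecounter */
#include <linux/ktime.h>

struct example_dev {
	struct timecounter	clock;		/* wraps the hardware counter */
	struct timecompare	compare;
};

static ktime_t example_target_time(void)
{
	return ktime_get_real();		/* the "target" (system) clock */
}

static void example_timecompare_init(struct example_dev *dev)
{
	dev->compare.source      = &dev->clock;
	dev->compare.target      = example_target_time;
	dev->compare.num_samples = 10;
}

static ktime_t example_hwtstamp_to_ktime(struct example_dev *dev, u64 hw_ns)
{
	/* refresh offset/skew, then map the hardware timestamp */
	__timecompare_update(&dev->compare, hw_ns);
	return timecompare_transform(&dev->compare, hw_ns);
}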
diff --git a/kernel/timer.c b/kernel/timer.c
index ef1c385bc57..b4555568b4e 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -600,11 +600,14 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer,
}
}
-int __mod_timer(struct timer_list *timer, unsigned long expires)
+static inline int
+__mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
{
struct tvec_base *base, *new_base;
unsigned long flags;
- int ret = 0;
+ int ret;
+
+ ret = 0;
timer_stats_timer_set_start_info(timer);
BUG_ON(!timer->function);
@@ -614,6 +617,9 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
if (timer_pending(timer)) {
detach_timer(timer, 0);
ret = 1;
+ } else {
+ if (pending_only)
+ goto out_unlock;
}
debug_timer_activate(timer);
@@ -640,42 +646,28 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
timer->expires = expires;
internal_add_timer(base, timer);
+
+out_unlock:
spin_unlock_irqrestore(&base->lock, flags);
return ret;
}
-EXPORT_SYMBOL(__mod_timer);
-
/**
- * add_timer_on - start a timer on a particular CPU
- * @timer: the timer to be added
- * @cpu: the CPU to start it on
+ * mod_timer_pending - modify a pending timer's timeout
+ * @timer: the pending timer to be modified
+ * @expires: new timeout in jiffies
*
- * This is not very scalable on SMP. Double adds are not possible.
+ * mod_timer_pending() is the same as mod_timer() for pending timers,
+ * but it will not re-activate or modify an already deleted timer.
+ *
+ * It is useful for unserialized use of timers.
*/
-void add_timer_on(struct timer_list *timer, int cpu)
+int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
- struct tvec_base *base = per_cpu(tvec_bases, cpu);
- unsigned long flags;
-
- timer_stats_timer_set_start_info(timer);
- BUG_ON(timer_pending(timer) || !timer->function);
- spin_lock_irqsave(&base->lock, flags);
- timer_set_base(timer, base);
- debug_timer_activate(timer);
- internal_add_timer(base, timer);
- /*
- * Check whether the other CPU is idle and needs to be
- * triggered to reevaluate the timer wheel when nohz is
- * active. We are protected against the other CPU fiddling
- * with the timer by holding the timer base lock. This also
- * makes sure that a CPU on the way to idle can not evaluate
- * the timer wheel.
- */
- wake_up_idle_cpu(cpu);
- spin_unlock_irqrestore(&base->lock, flags);
+ return __mod_timer(timer, expires, true);
}
+EXPORT_SYMBOL(mod_timer_pending);
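A sketch of the "unserialized use" the comment refers to; struct example_conn and the surrounding functions are hypothetical:

/*
 * Illustrative only: a hot path extends an inactivity timeout while a
 * teardown path may delete the timer concurrently.  Because
 * mod_timer_pending() refuses to re-activate a timer that is no longer
 * pending, the receive path cannot resurrect the timer after teardown.
 */
static void example_rx(struct example_conn *conn)
{
	mod_timer_pending(&conn->idle_timer, jiffies + 10 * HZ);
}

static void example_teardown(struct example_conn *conn)
{
	del_timer_sync(&conn->idle_timer);
	/* a racing example_rx() now sees !timer_pending() and does nothing */
}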
/**
* mod_timer - modify a timer's timeout
@@ -699,9 +691,6 @@ void add_timer_on(struct timer_list *timer, int cpu)
*/
int mod_timer(struct timer_list *timer, unsigned long expires)
{
- BUG_ON(!timer->function);
-
- timer_stats_timer_set_start_info(timer);
/*
* This is a common optimization triggered by the
* networking code - if the timer is re-modified
@@ -710,12 +699,62 @@ int mod_timer(struct timer_list *timer, unsigned long expires)
if (timer->expires == expires && timer_pending(timer))
return 1;
- return __mod_timer(timer, expires);
+ return __mod_timer(timer, expires, false);
}
-
EXPORT_SYMBOL(mod_timer);
/**
+ * add_timer - start a timer
+ * @timer: the timer to be added
+ *
+ * The kernel will do a ->function(->data) callback from the
+ * timer interrupt at the ->expires point in the future. The
+ * current time is 'jiffies'.
+ *
+ * The timer's ->expires, ->function (and if the handler uses it, ->data)
+ * fields must be set prior to calling this function.
+ *
+ * Timers with an ->expires field in the past will be executed in the next
+ * timer tick.
+ */
+void add_timer(struct timer_list *timer)
+{
+ BUG_ON(timer_pending(timer));
+ mod_timer(timer, timer->expires);
+}
+EXPORT_SYMBOL(add_timer);
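A sketch of the contract described in the kerneldoc above, with ->function/->data/->expires set before the timer is armed; example_dev, its watchdog field and example_check_hardware() are hypothetical:

/* Illustrative only; the names are hypothetical. */
static void example_watchdog_fn(unsigned long data)
{
	struct example_dev *dev = (struct example_dev *)data;

	/* runs from the timer softirq at (or shortly after) ->expires */
	example_check_hardware(dev);
}

static void example_watchdog_start(struct example_dev *dev)
{
	setup_timer(&dev->watchdog, example_watchdog_fn, (unsigned long)dev);
	dev->watchdog.expires = jiffies + HZ;	/* one second from now */
	add_timer(&dev->watchdog);
}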
+
+/**
+ * add_timer_on - start a timer on a particular CPU
+ * @timer: the timer to be added
+ * @cpu: the CPU to start it on
+ *
+ * This is not very scalable on SMP. Double adds are not possible.
+ */
+void add_timer_on(struct timer_list *timer, int cpu)
+{
+ struct tvec_base *base = per_cpu(tvec_bases, cpu);
+ unsigned long flags;
+
+ timer_stats_timer_set_start_info(timer);
+ BUG_ON(timer_pending(timer) || !timer->function);
+ spin_lock_irqsave(&base->lock, flags);
+ timer_set_base(timer, base);
+ debug_timer_activate(timer);
+ internal_add_timer(base, timer);
+ /*
+ * Check whether the other CPU is idle and needs to be
+ * triggered to reevaluate the timer wheel when nohz is
+ * active. We are protected against the other CPU fiddling
+ * with the timer by holding the timer base lock. This also
+ * makes sure that a CPU on the way to idle can not evaluate
+ * the timer wheel.
+ */
+ wake_up_idle_cpu(cpu);
+ spin_unlock_irqrestore(&base->lock, flags);
+}
+
+/**
* del_timer - deactive a timer.
* @timer: the timer to be deactivated
*
@@ -744,7 +783,6 @@ int del_timer(struct timer_list *timer)
return ret;
}
-
EXPORT_SYMBOL(del_timer);
#ifdef CONFIG_SMP
@@ -778,7 +816,6 @@ out:
return ret;
}
-
EXPORT_SYMBOL(try_to_del_timer_sync);
/**
@@ -816,7 +853,6 @@ int del_timer_sync(struct timer_list *timer)
cpu_relax();
}
}
-
EXPORT_SYMBOL(del_timer_sync);
#endif
@@ -1314,7 +1350,7 @@ signed long __sched schedule_timeout(signed long timeout)
expire = timeout + jiffies;
setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
- __mod_timer(&timer, expire);
+ __mod_timer(&timer, expire, false);
schedule();
del_singleshot_timer_sync(&timer);
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 8a4d7293104..2246141bda4 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -99,11 +99,10 @@ config FUNCTION_GRAPH_TRACER
help
Enable the kernel to trace a function at both its return
and its entry.
- It's first purpose is to trace the duration of functions and
- draw a call graph for each thread with some informations like
- the return value.
- This is done by setting the current return address on the current
- task structure into a stack of calls.
+ Its first purpose is to trace the duration of functions and
+ draw a call graph for each thread with some information like
+ the return value. This is done by setting the current return
+ address on the current task structure into a stack of calls.
config IRQSOFF_TRACER
bool "Interrupts-off Latency Tracer"
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 1752a63f37c..f1ed080406c 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2719,6 +2719,9 @@ void unregister_ftrace_graph(void)
{
mutex_lock(&ftrace_lock);
+ if (!unlikely(atomic_read(&ftrace_graph_active)))
+ goto out;
+
atomic_dec(&ftrace_graph_active);
unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
@@ -2726,6 +2729,7 @@ void unregister_ftrace_graph(void)
ftrace_shutdown(FTRACE_STOP_FUNC_RET);
unregister_pm_notifier(&ftrace_suspend_notifier);
+ out:
mutex_unlock(&ftrace_lock);
}
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
index ae201b3eda8..5011f4d91e3 100644
--- a/kernel/trace/kmemtrace.c
+++ b/kernel/trace/kmemtrace.c
@@ -6,14 +6,16 @@
* Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
*/
-#include <linux/dcache.h>
+#include <linux/tracepoint.h>
+#include <linux/seq_file.h>
#include <linux/debugfs.h>
+#include <linux/dcache.h>
#include <linux/fs.h>
-#include <linux/seq_file.h>
+
#include <trace/kmemtrace.h>
-#include "trace.h"
#include "trace_output.h"
+#include "trace.h"
/* Select an alternative, minimalistic output than the original one */
#define TRACE_KMEM_OPT_MINIMAL 0x1
@@ -25,14 +27,156 @@ static struct tracer_opt kmem_opts[] = {
};
static struct tracer_flags kmem_tracer_flags = {
- .val = 0,
- .opts = kmem_opts
+ .val = 0,
+ .opts = kmem_opts
};
-
-static bool kmem_tracing_enabled __read_mostly;
static struct trace_array *kmemtrace_array;
+/* Trace allocations */
+static inline void kmemtrace_alloc(enum kmemtrace_type_id type_id,
+ unsigned long call_site,
+ const void *ptr,
+ size_t bytes_req,
+ size_t bytes_alloc,
+ gfp_t gfp_flags,
+ int node)
+{
+ struct trace_array *tr = kmemtrace_array;
+ struct kmemtrace_alloc_entry *entry;
+ struct ring_buffer_event *event;
+
+ event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+ if (!event)
+ return;
+
+ entry = ring_buffer_event_data(event);
+ tracing_generic_entry_update(&entry->ent, 0, 0);
+
+ entry->ent.type = TRACE_KMEM_ALLOC;
+ entry->type_id = type_id;
+ entry->call_site = call_site;
+ entry->ptr = ptr;
+ entry->bytes_req = bytes_req;
+ entry->bytes_alloc = bytes_alloc;
+ entry->gfp_flags = gfp_flags;
+ entry->node = node;
+
+ ring_buffer_unlock_commit(tr->buffer, event);
+
+ trace_wake_up();
+}
+
+static inline void kmemtrace_free(enum kmemtrace_type_id type_id,
+ unsigned long call_site,
+ const void *ptr)
+{
+ struct trace_array *tr = kmemtrace_array;
+ struct kmemtrace_free_entry *entry;
+ struct ring_buffer_event *event;
+
+ event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+ if (!event)
+ return;
+ entry = ring_buffer_event_data(event);
+ tracing_generic_entry_update(&entry->ent, 0, 0);
+
+ entry->ent.type = TRACE_KMEM_FREE;
+ entry->type_id = type_id;
+ entry->call_site = call_site;
+ entry->ptr = ptr;
+
+ ring_buffer_unlock_commit(tr->buffer, event);
+
+ trace_wake_up();
+}
+
+static void kmemtrace_kmalloc(unsigned long call_site,
+ const void *ptr,
+ size_t bytes_req,
+ size_t bytes_alloc,
+ gfp_t gfp_flags)
+{
+ kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
+ bytes_req, bytes_alloc, gfp_flags, -1);
+}
+
+static void kmemtrace_kmem_cache_alloc(unsigned long call_site,
+ const void *ptr,
+ size_t bytes_req,
+ size_t bytes_alloc,
+ gfp_t gfp_flags)
+{
+ kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
+ bytes_req, bytes_alloc, gfp_flags, -1);
+}
+
+static void kmemtrace_kmalloc_node(unsigned long call_site,
+ const void *ptr,
+ size_t bytes_req,
+ size_t bytes_alloc,
+ gfp_t gfp_flags,
+ int node)
+{
+ kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
+ bytes_req, bytes_alloc, gfp_flags, node);
+}
+
+static void kmemtrace_kmem_cache_alloc_node(unsigned long call_site,
+ const void *ptr,
+ size_t bytes_req,
+ size_t bytes_alloc,
+ gfp_t gfp_flags,
+ int node)
+{
+ kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
+ bytes_req, bytes_alloc, gfp_flags, node);
+}
+
+static void kmemtrace_kfree(unsigned long call_site, const void *ptr)
+{
+ kmemtrace_free(KMEMTRACE_TYPE_KMALLOC, call_site, ptr);
+}
+
+static void kmemtrace_kmem_cache_free(unsigned long call_site, const void *ptr)
+{
+ kmemtrace_free(KMEMTRACE_TYPE_CACHE, call_site, ptr);
+}
+
+static int kmemtrace_start_probes(void)
+{
+ int err;
+
+ err = register_trace_kmalloc(kmemtrace_kmalloc);
+ if (err)
+ return err;
+ err = register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
+ if (err)
+ return err;
+ err = register_trace_kmalloc_node(kmemtrace_kmalloc_node);
+ if (err)
+ return err;
+ err = register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
+ if (err)
+ return err;
+ err = register_trace_kfree(kmemtrace_kfree);
+ if (err)
+ return err;
+ err = register_trace_kmem_cache_free(kmemtrace_kmem_cache_free);
+
+ return err;
+}
+
+static void kmemtrace_stop_probes(void)
+{
+ unregister_trace_kmalloc(kmemtrace_kmalloc);
+ unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
+ unregister_trace_kmalloc_node(kmemtrace_kmalloc_node);
+ unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
+ unregister_trace_kfree(kmemtrace_kfree);
+ unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free);
+}
+
static int kmem_trace_init(struct trace_array *tr)
{
int cpu;
@@ -41,14 +185,14 @@ static int kmem_trace_init(struct trace_array *tr)
for_each_cpu_mask(cpu, cpu_possible_map)
tracing_reset(tr, cpu);
- kmem_tracing_enabled = true;
+ kmemtrace_start_probes();
return 0;
}
static void kmem_trace_reset(struct trace_array *tr)
{
- kmem_tracing_enabled = false;
+ kmemtrace_stop_probes();
}
static void kmemtrace_headers(struct seq_file *s)
@@ -66,47 +210,84 @@ static void kmemtrace_headers(struct seq_file *s)
}
/*
- * The two following functions give the original output from kmemtrace,
- * or something close to....perhaps they need some missing things
+ * The following functions give the original output from kmemtrace,
+ * plus the origin CPU, since reordering occurs in-kernel now.
*/
+
+#define KMEMTRACE_USER_ALLOC 0
+#define KMEMTRACE_USER_FREE 1
+
+struct kmemtrace_user_event {
+ u8 event_id;
+ u8 type_id;
+ u16 event_size;
+ u32 cpu;
+ u64 timestamp;
+ unsigned long call_site;
+ unsigned long ptr;
+};
+
+struct kmemtrace_user_event_alloc {
+ size_t bytes_req;
+ size_t bytes_alloc;
+ unsigned gfp_flags;
+ int node;
+};
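Since these packed records are what a userspace reader of the trace now receives, a minimal decoding sketch may help. The buffer-reading side and the example_* names are assumptions; a real decoder must also be built with the kernel's word size, because call_site and ptr are unsigned long:

/*
 * Hypothetical decoder sketch for the records defined above.  It assumes
 * the two struct definitions are visible to the reader (e.g. copied into
 * a userspace header) and that the raw trace bytes are already in buf.
 */
static void example_decode(const char *buf, size_t len)
{
	size_t pos = 0;

	while (pos + sizeof(struct kmemtrace_user_event) <= len) {
		const struct kmemtrace_user_event *ev =
			(const void *)(buf + pos);

		if (ev->event_id == KMEMTRACE_USER_ALLOC) {
			const struct kmemtrace_user_event_alloc *alloc =
				(const void *)(ev + 1);
			/* use ev->call_site, alloc->bytes_req, alloc->node, ... */
		} else {
			/* KMEMTRACE_USER_FREE: only the common header */
		}
		pos += ev->event_size;	/* event_size covers the whole record */
	}
}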
+
static enum print_line_t
-kmemtrace_print_alloc_original(struct trace_iterator *iter,
- struct kmemtrace_alloc_entry *entry)
+kmemtrace_print_alloc_user(struct trace_iterator *iter,
+ struct kmemtrace_alloc_entry *entry)
{
+ struct kmemtrace_user_event_alloc *ev_alloc;
struct trace_seq *s = &iter->seq;
- int ret;
+ struct kmemtrace_user_event *ev;
+
+ ev = trace_seq_reserve(s, sizeof(*ev));
+ if (!ev)
+ return TRACE_TYPE_PARTIAL_LINE;
- /* Taken from the old linux/kmemtrace.h */
- ret = trace_seq_printf(s, "type_id %d call_site %lu ptr %lu "
- "bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d\n",
- entry->type_id, entry->call_site, (unsigned long) entry->ptr,
- (unsigned long) entry->bytes_req, (unsigned long) entry->bytes_alloc,
- (unsigned long) entry->gfp_flags, entry->node);
+ ev->event_id = KMEMTRACE_USER_ALLOC;
+ ev->type_id = entry->type_id;
+ ev->event_size = sizeof(*ev) + sizeof(*ev_alloc);
+ ev->cpu = iter->cpu;
+ ev->timestamp = iter->ts;
+ ev->call_site = entry->call_site;
+ ev->ptr = (unsigned long)entry->ptr;
- if (!ret)
+ ev_alloc = trace_seq_reserve(s, sizeof(*ev_alloc));
+ if (!ev_alloc)
return TRACE_TYPE_PARTIAL_LINE;
+ ev_alloc->bytes_req = entry->bytes_req;
+ ev_alloc->bytes_alloc = entry->bytes_alloc;
+ ev_alloc->gfp_flags = entry->gfp_flags;
+ ev_alloc->node = entry->node;
+
return TRACE_TYPE_HANDLED;
}
static enum print_line_t
-kmemtrace_print_free_original(struct trace_iterator *iter,
- struct kmemtrace_free_entry *entry)
+kmemtrace_print_free_user(struct trace_iterator *iter,
+ struct kmemtrace_free_entry *entry)
{
struct trace_seq *s = &iter->seq;
- int ret;
+ struct kmemtrace_user_event *ev;
- /* Taken from the old linux/kmemtrace.h */
- ret = trace_seq_printf(s, "type_id %d call_site %lu ptr %lu\n",
- entry->type_id, entry->call_site, (unsigned long) entry->ptr);
-
- if (!ret)
+ ev = trace_seq_reserve(s, sizeof(*ev));
+ if (!ev)
return TRACE_TYPE_PARTIAL_LINE;
+ ev->event_id = KMEMTRACE_USER_FREE;
+ ev->type_id = entry->type_id;
+ ev->event_size = sizeof(*ev);
+ ev->cpu = iter->cpu;
+ ev->timestamp = iter->ts;
+ ev->call_site = entry->call_site;
+ ev->ptr = (unsigned long)entry->ptr;
+
return TRACE_TYPE_HANDLED;
}
-
/* The two other following provide a more minimalistic output */
static enum print_line_t
kmemtrace_print_alloc_compress(struct trace_iterator *iter,
@@ -178,7 +359,7 @@ kmemtrace_print_alloc_compress(struct trace_iterator *iter,
static enum print_line_t
kmemtrace_print_free_compress(struct trace_iterator *iter,
- struct kmemtrace_free_entry *entry)
+ struct kmemtrace_free_entry *entry)
{
struct trace_seq *s = &iter->seq;
int ret;
@@ -239,20 +420,22 @@ static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
switch (entry->type) {
case TRACE_KMEM_ALLOC: {
struct kmemtrace_alloc_entry *field;
+
trace_assign_type(field, entry);
if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
return kmemtrace_print_alloc_compress(iter, field);
else
- return kmemtrace_print_alloc_original(iter, field);
+ return kmemtrace_print_alloc_user(iter, field);
}
case TRACE_KMEM_FREE: {
struct kmemtrace_free_entry *field;
+
trace_assign_type(field, entry);
if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
return kmemtrace_print_free_compress(iter, field);
else
- return kmemtrace_print_free_original(iter, field);
+ return kmemtrace_print_free_user(iter, field);
}
default:
@@ -260,70 +443,13 @@ static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
}
}
-/* Trace allocations */
-void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
- unsigned long call_site,
- const void *ptr,
- size_t bytes_req,
- size_t bytes_alloc,
- gfp_t gfp_flags,
- int node)
-{
- struct ring_buffer_event *event;
- struct kmemtrace_alloc_entry *entry;
- struct trace_array *tr = kmemtrace_array;
-
- if (!kmem_tracing_enabled)
- return;
-
- event = trace_buffer_lock_reserve(tr, TRACE_KMEM_ALLOC,
- sizeof(*entry), 0, 0);
- if (!event)
- return;
- entry = ring_buffer_event_data(event);
-
- entry->call_site = call_site;
- entry->ptr = ptr;
- entry->bytes_req = bytes_req;
- entry->bytes_alloc = bytes_alloc;
- entry->gfp_flags = gfp_flags;
- entry->node = node;
-
- trace_buffer_unlock_commit(tr, event, 0, 0);
-}
-EXPORT_SYMBOL(kmemtrace_mark_alloc_node);
-
-void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
- unsigned long call_site,
- const void *ptr)
-{
- struct ring_buffer_event *event;
- struct kmemtrace_free_entry *entry;
- struct trace_array *tr = kmemtrace_array;
-
- if (!kmem_tracing_enabled)
- return;
-
- event = trace_buffer_lock_reserve(tr, TRACE_KMEM_FREE,
- sizeof(*entry), 0, 0);
- if (!event)
- return;
- entry = ring_buffer_event_data(event);
- entry->type_id = type_id;
- entry->call_site = call_site;
- entry->ptr = ptr;
-
- trace_buffer_unlock_commit(tr, event, 0, 0);
-}
-EXPORT_SYMBOL(kmemtrace_mark_free);
-
static struct tracer kmem_tracer __read_mostly = {
- .name = "kmemtrace",
- .init = kmem_trace_init,
- .reset = kmem_trace_reset,
- .print_line = kmemtrace_print_line,
- .print_header = kmemtrace_headers,
- .flags = &kmem_tracer_flags
+ .name = "kmemtrace",
+ .init = kmem_trace_init,
+ .reset = kmem_trace_reset,
+ .print_line = kmemtrace_print_line,
+ .print_header = kmemtrace_headers,
+ .flags = &kmem_tracer_flags
};
void kmemtrace_init(void)
@@ -335,5 +461,4 @@ static int __init init_kmem_tracer(void)
{
return register_tracer(&kmem_tracer);
}
-
device_initcall(init_kmem_tracer);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index edce2ff3894..960cbf44c84 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -563,7 +563,6 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
struct list_head *head = &cpu_buffer->pages;
struct buffer_page *bpage, *tmp;
- list_del_init(&cpu_buffer->reader_page->list);
free_buffer_page(cpu_buffer->reader_page);
list_for_each_entry_safe(bpage, tmp, head, list) {
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 0d81a4a2a4a..e685ac2b2ba 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -182,6 +182,12 @@ struct trace_power {
struct power_trace state_data;
};
+enum kmemtrace_type_id {
+ KMEMTRACE_TYPE_KMALLOC = 0, /* kmalloc() or kfree(). */
+ KMEMTRACE_TYPE_CACHE, /* kmem_cache_*(). */
+ KMEMTRACE_TYPE_PAGES, /* __get_free_pages() and friends. */
+};
+
struct kmemtrace_alloc_entry {
struct trace_entry ent;
enum kmemtrace_type_id type_id;
diff --git a/kernel/user.c b/kernel/user.c
index fbb300e6191..850e0ba41c1 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -20,7 +20,7 @@
struct user_namespace init_user_ns = {
.kref = {
- .refcount = ATOMIC_INIT(1),
+ .refcount = ATOMIC_INIT(2),
},
.creator = &root_user,
};
diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
index 3b34b354593..92359cc747a 100644
--- a/kernel/utsname_sysctl.c
+++ b/kernel/utsname_sysctl.c
@@ -37,7 +37,7 @@ static void put_uts(ctl_table *table, int write, void *which)
up_write(&uts_sem);
}
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_PROC_SYSCTL
/*
* Special case of dostring for the UTS structure. This has locks
* to observe. Should this be in kernel/sys.c ????
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index e53ee18ef43..b6b966ce145 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -49,8 +49,6 @@ struct cpu_workqueue_struct {
struct workqueue_struct *wq;
struct task_struct *thread;
-
- int run_depth; /* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;
/*
@@ -269,13 +267,6 @@ DEFINE_TRACE(workqueue_execution);
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
spin_lock_irq(&cwq->lock);
- cwq->run_depth++;
- if (cwq->run_depth > 3) {
- /* morton gets to eat his hat */
- printk("%s: recursion depth exceeded: %d\n",
- __func__, cwq->run_depth);
- dump_stack();
- }
while (!list_empty(&cwq->worklist)) {
struct work_struct *work = list_entry(cwq->worklist.next,
struct work_struct, entry);
@@ -318,7 +309,6 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
spin_lock_irq(&cwq->lock);
cwq->current_work = NULL;
}
- cwq->run_depth--;
spin_unlock_irq(&cwq->lock);
}
@@ -375,29 +365,20 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
- int active;
+ int active = 0;
+ struct wq_barrier barr;
- if (cwq->thread == current) {
- /*
- * Probably keventd trying to flush its own queue. So simply run
- * it by hand rather than deadlocking.
- */
- run_workqueue(cwq);
- active = 1;
- } else {
- struct wq_barrier barr;
+ WARN_ON(cwq->thread == current);
- active = 0;
- spin_lock_irq(&cwq->lock);
- if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
- insert_wq_barrier(cwq, &barr, &cwq->worklist);
- active = 1;
- }
- spin_unlock_irq(&cwq->lock);
-
- if (active)
- wait_for_completion(&barr.done);
+ spin_lock_irq(&cwq->lock);
+ if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
+ insert_wq_barrier(cwq, &barr, &cwq->worklist);
+ active = 1;
}
+ spin_unlock_irq(&cwq->lock);
+
+ if (active)
+ wait_for_completion(&barr.done);
return active;
}
@@ -423,7 +404,7 @@ void flush_workqueue(struct workqueue_struct *wq)
might_sleep();
lock_map_acquire(&wq->lockdep_map);
lock_map_release(&wq->lockdep_map);
- for_each_cpu_mask_nr(cpu, *cpu_map)
+ for_each_cpu(cpu, cpu_map)
flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
@@ -554,7 +535,7 @@ static void wait_on_work(struct work_struct *work)
wq = cwq->wq;
cpu_map = wq_cpu_map(wq);
- for_each_cpu_mask_nr(cpu, *cpu_map)
+ for_each_cpu(cpu, cpu_map)
wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}
@@ -925,7 +906,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
list_del(&wq->list);
spin_unlock(&workqueue_lock);
- for_each_cpu_mask_nr(cpu, *cpu_map)
+ for_each_cpu(cpu, cpu_map)
cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
cpu_maps_update_done();