diff options
Diffstat (limited to 'kernel/sched/debug.c')
-rw-r--r--  kernel/sched/debug.c | 82
1 file changed, 73 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index e076bddd4c6..627b3c34b82 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -15,6 +15,7 @@  #include <linux/seq_file.h>  #include <linux/kallsyms.h>  #include <linux/utsname.h> +#include <linux/mempolicy.h>  #include "sched.h" @@ -110,8 +111,7 @@ static char *task_group_path(struct task_group *tg)  	if (autogroup_path(tg, group_path, PATH_MAX))  		return group_path; -	cgroup_path(tg->css.cgroup, group_path, PATH_MAX); -	return group_path; +	return cgroup_path(tg->css.cgroup, group_path, PATH_MAX);  }  #endif @@ -124,7 +124,7 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)  		SEQ_printf(m, " ");  	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ", -		p->comm, p->pid, +		p->comm, task_pid_nr(p),  		SPLIT_NS(p->se.vruntime),  		(long long)(p->nvcsw + p->nivcsw),  		p->prio); @@ -137,6 +137,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)  	SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",  		0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);  #endif +#ifdef CONFIG_NUMA_BALANCING +	SEQ_printf(m, " %d", task_node(p)); +#endif  #ifdef CONFIG_CGROUP_SCHED  	SEQ_printf(m, " %s", task_group_path(task_group(p)));  #endif @@ -159,7 +162,7 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)  	read_lock_irqsave(&tasklist_lock, flags);  	do_each_thread(g, p) { -		if (!p->on_rq || task_cpu(p) != rq_cpu) +		if (task_cpu(p) != rq_cpu)  			continue;  		print_task(m, rq, p); @@ -225,6 +228,14 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)  			atomic_read(&cfs_rq->tg->runnable_avg));  #endif  #endif +#ifdef CONFIG_CFS_BANDWIDTH +	SEQ_printf(m, "  .%-30s: %d\n", "tg->cfs_bandwidth.timer_active", +			cfs_rq->tg->cfs_bandwidth.timer_active); +	SEQ_printf(m, "  .%-30s: %d\n", "throttled", +			cfs_rq->throttled); +	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count", +			cfs_rq->throttle_count); +#endif  #ifdef 
CONFIG_FAIR_GROUP_SCHED  	print_cfs_group_stats(m, cpu, cfs_rq->tg); @@ -289,7 +300,7 @@ do {									\  	P(nr_load_updates);  	P(nr_uninterruptible);  	PN(next_balance); -	P(curr->pid); +	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));  	PN(clock);  	P(cpu_load[0]);  	P(cpu_load[1]); @@ -309,6 +320,7 @@ do {									\  	P(sched_goidle);  #ifdef CONFIG_SMP  	P64(avg_idle); +	P64(max_idle_balance_cost);  #endif  	P(ttwu_count); @@ -345,7 +357,7 @@ static void sched_debug_header(struct seq_file *m)  	cpu_clk = local_clock();  	local_irq_restore(flags); -	SEQ_printf(m, "Sched Debug Version: v0.10, %s %.*s\n", +	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",  		init_utsname()->release,  		(int)strcspn(init_utsname()->version, " "),  		init_utsname()->version); @@ -359,7 +371,7 @@ static void sched_debug_header(struct seq_file *m)  	PN(cpu_clk);  	P(jiffies);  #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK -	P(sched_clock_stable); +	P(sched_clock_stable());  #endif  #undef PN  #undef P @@ -488,11 +500,61 @@ static int __init init_sched_debug_procfs(void)  __initcall(init_sched_debug_procfs); +#define __P(F) \ +	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F) +#define P(F) \ +	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F) +#define __PN(F) \ +	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F)) +#define PN(F) \ +	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F)) + + +static void sched_show_numa(struct task_struct *p, struct seq_file *m) +{ +#ifdef CONFIG_NUMA_BALANCING +	struct mempolicy *pol; +	int node, i; + +	if (p->mm) +		P(mm->numa_scan_seq); + +	task_lock(p); +	pol = p->mempolicy; +	if (pol && !(pol->flags & MPOL_F_MORON)) +		pol = NULL; +	mpol_get(pol); +	task_unlock(p); + +	SEQ_printf(m, "numa_migrations, %ld\n", xchg(&p->numa_pages_migrated, 0)); + +	for_each_online_node(node) { +		for (i = 0; i < 2; i++) { +			unsigned long nr_faults = -1; +			int cpu_current, home_node; + +			if 
(p->numa_faults_memory) +				nr_faults = p->numa_faults_memory[2*node + i]; + +			cpu_current = !i ? (task_node(p) == node) : +				(pol && node_isset(node, pol->v.nodes)); + +			home_node = (p->numa_preferred_nid == node); + +			SEQ_printf(m, "numa_faults_memory, %d, %d, %d, %d, %ld\n", +				i, node, cpu_current, home_node, nr_faults); +		} +	} + +	mpol_put(pol); +#endif +} +  void proc_sched_show_task(struct task_struct *p, struct seq_file *m)  {  	unsigned long nr_switches; -	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid, +	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p),  						get_nr_threads(p));  	SEQ_printf(m,  		"---------------------------------------------------------" @@ -546,7 +608,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)  		avg_atom = p->se.sum_exec_runtime;  		if (nr_switches) -			do_div(avg_atom, nr_switches); +			avg_atom = div64_ul(avg_atom, nr_switches);  		else  			avg_atom = -1LL; @@ -591,6 +653,8 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)  		SEQ_printf(m, "%-45s:%21Ld\n",  			   "clock-delta", (long long)(t1-t0));  	} + +	sched_show_numa(p, m);  }  void proc_sched_set_task(struct task_struct *p)  | 
