Diffstat (limited to 'kernel/sched/stats.h')
-rw-r--r-- | kernel/sched/stats.h | 47
1 file changed, 40 insertions(+), 7 deletions(-)
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index 2ef90a51ec5..5aef494fc8b 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -61,7 +61,7 @@ static inline void sched_info_reset_dequeued(struct task_struct *t)
  */
 static inline void sched_info_dequeued(struct task_struct *t)
 {
-	unsigned long long now = task_rq(t)->clock, delta = 0;
+	unsigned long long now = rq_clock(task_rq(t)), delta = 0;
 
 	if (unlikely(sched_info_on()))
 		if (t->sched_info.last_queued)
@@ -79,7 +79,7 @@ static inline void sched_info_dequeued(struct task_struct *t)
  */
 static void sched_info_arrive(struct task_struct *t)
 {
-	unsigned long long now = task_rq(t)->clock, delta = 0;
+	unsigned long long now = rq_clock(task_rq(t)), delta = 0;
 
 	if (t->sched_info.last_queued)
 		delta = now - t->sched_info.last_queued;
@@ -100,7 +100,7 @@ static inline void sched_info_queued(struct task_struct *t)
 {
 	if (unlikely(sched_info_on()))
 		if (!t->sched_info.last_queued)
-			t->sched_info.last_queued = task_rq(t)->clock;
+			t->sched_info.last_queued = rq_clock(task_rq(t));
 }
 
 /*
@@ -112,7 +112,7 @@ static inline void sched_info_queued(struct task_struct *t)
  */
 static inline void sched_info_depart(struct task_struct *t)
 {
-	unsigned long long delta = task_rq(t)->clock -
+	unsigned long long delta = rq_clock(task_rq(t)) -
 					t->sched_info.last_arrival;
 
 	rq_sched_info_depart(task_rq(t), delta);
@@ -162,6 +162,39 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next)
  */
 
 /**
+ * cputimer_running - return true if cputimer is running
+ *
+ * @tsk:	Pointer to target task.
+ */
+static inline bool cputimer_running(struct task_struct *tsk)
+
+{
+	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
+
+	if (!cputimer->running)
+		return false;
+
+	/*
+	 * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime
+	 * in __exit_signal(), we won't account to the signal struct further
+	 * cputime consumed by that task, even though the task can still be
+	 * ticking after __exit_signal().
+	 *
+	 * In order to keep a consistent behaviour between thread group cputime
+	 * and thread group cputimer accounting, lets also ignore the cputime
+	 * elapsing after __exit_signal() in any thread group timer running.
+	 *
+	 * This makes sure that POSIX CPU clocks and timers are synchronized, so
+	 * that a POSIX CPU timer won't expire while the corresponding POSIX CPU
+	 * clock delta is behind the expiring timer value.
+	 */
+	if (unlikely(!tsk->sighand))
+		return false;
+
+	return true;
+}
+
+/**
  * account_group_user_time - Maintain utime for a thread group.
  *
  * @tsk:	Pointer to task structure.
@@ -176,7 +209,7 @@ static inline void account_group_user_time(struct task_struct *tsk,
 {
 	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
 
-	if (!cputimer->running)
+	if (!cputimer_running(tsk))
 		return;
 
 	raw_spin_lock(&cputimer->lock);
@@ -199,7 +232,7 @@ static inline void account_group_system_time(struct task_struct *tsk,
 {
 	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
 
-	if (!cputimer->running)
+	if (!cputimer_running(tsk))
 		return;
 
 	raw_spin_lock(&cputimer->lock);
@@ -222,7 +255,7 @@ static inline void account_group_exec_runtime(struct task_struct *tsk,
 {
 	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
 
-	if (!cputimer->running)
+	if (!cputimer_running(tsk))
 		return;
 
 	raw_spin_lock(&cputimer->lock);
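
For reference, the rq_clock() helper that the first four hunks switch to is a thin accessor over the same per-runqueue clock field, defined in kernel/sched/sched.h. A minimal sketch, assuming the accessor of this kernel era simply returns rq->clock (later kernels add lockdep/staleness checks):

static inline u64 rq_clock(struct rq *rq)
{
	/* Read the runqueue clock through one central accessor. */
	return rq->clock;
}

The conversion is functionally a no-op at these call sites; funnelling every read through one accessor is what lets later patches add validation or instrumentation of the clock in a single place instead of touching each caller again.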

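The new cputimer_running() helper keys off two things: the running flag in the per-signal struct thread_group_cputimer, and the fact that __exit_signal() clears tsk->sighand only after the exiting task's sum_exec_runtime has been folded into sig->sum_sched_runtime. For orientation, a sketch of the struct this code touches, as laid out in include/linux/sched.h around this kernel version (the field comments are ours, not the kernel's):

struct thread_group_cputimer {
	struct task_cputime cputime;	/* accumulated utime/stime/sum_exec_runtime */
	int running;			/* nonzero while a group-wide POSIX CPU timer is armed */
	raw_spinlock_t lock;		/* serializes updates to cputime */
};

With the helper in place, a task whose sighand has already been detached stops contributing to the group cputimer, keeping armed POSIX CPU timers in step with the thread-group clocks visible via clock_gettime(), which likewise stop accumulating at __exit_signal().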