Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--	kernel/rcutree.c	176
1 file changed, 48 insertions, 128 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 35380019f0f..068de3a9360 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -218,8 +218,8 @@ module_param(blimit, long, 0444);
 module_param(qhimark, long, 0444);
 module_param(qlowmark, long, 0444);
 
-static ulong jiffies_till_first_fqs = RCU_JIFFIES_TILL_FORCE_QS;
-static ulong jiffies_till_next_fqs = RCU_JIFFIES_TILL_FORCE_QS;
+static ulong jiffies_till_first_fqs = ULONG_MAX;
+static ulong jiffies_till_next_fqs = ULONG_MAX;
 
 module_param(jiffies_till_first_fqs, ulong, 0644);
 module_param(jiffies_till_next_fqs, ulong, 0644);
@@ -866,7 +866,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 	 * See Documentation/RCU/stallwarn.txt for info on how to debug
 	 * RCU CPU stall warnings.
 	 */
-	printk(KERN_ERR "INFO: %s detected stalls on CPUs/tasks:",
+	pr_err("INFO: %s detected stalls on CPUs/tasks:",
 	       rsp->name);
 	print_cpu_stall_info_begin();
 	rcu_for_each_leaf_node(rsp, rnp) {
@@ -899,7 +899,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 	       smp_processor_id(), (long)(jiffies - rsp->gp_start),
 	       rsp->gpnum, rsp->completed, totqlen);
 	if (ndetected == 0)
-		printk(KERN_ERR "INFO: Stall ended before state dump start\n");
+		pr_err("INFO: Stall ended before state dump start\n");
 	else if (!trigger_all_cpu_backtrace())
 		rcu_dump_cpu_stacks(rsp);
 
@@ -922,7 +922,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
 	 * See Documentation/RCU/stallwarn.txt for info on how to debug
 	 * RCU CPU stall warnings.
 	 */
-	printk(KERN_ERR "INFO: %s self-detected stall on CPU", rsp->name);
+	pr_err("INFO: %s self-detected stall on CPU", rsp->name);
 	print_cpu_stall_info_begin();
 	print_cpu_stall_info(rsp, smp_processor_id());
 	print_cpu_stall_info_end();
@@ -985,65 +985,6 @@ void rcu_cpu_stall_reset(void)
 }
 
 /*
- * Update CPU-local rcu_data state to record the newly noticed grace period.
- * This is used both when we started the grace period and when we notice
- * that someone else started the grace period.  The caller must hold the
- * ->lock of the leaf rcu_node structure corresponding to the current CPU,
- *  and must have irqs disabled.
- */
-static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
-{
-	if (rdp->gpnum != rnp->gpnum) {
-		/*
-		 * If the current grace period is waiting for this CPU,
-		 * set up to detect a quiescent state, otherwise don't
-		 * go looking for one.
-		 */
-		rdp->gpnum = rnp->gpnum;
-		trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpustart");
-		rdp->passed_quiesce = 0;
-		rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
-		zero_cpu_stall_ticks(rdp);
-	}
-}
-
-static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
-{
-	unsigned long flags;
-	struct rcu_node *rnp;
-
-	local_irq_save(flags);
-	rnp = rdp->mynode;
-	if (rdp->gpnum == ACCESS_ONCE(rnp->gpnum) || /* outside lock. */
-	    !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
-		local_irq_restore(flags);
-		return;
-	}
-	__note_new_gpnum(rsp, rnp, rdp);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
-}
-
-/*
- * Did someone else start a new RCU grace period start since we last
- * checked?  Update local state appropriately if so.  Must be called
- * on the CPU corresponding to rdp.
- */
-static int
-check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp)
-{
-	unsigned long flags;
-	int ret = 0;
-
-	local_irq_save(flags);
-	if (rdp->gpnum != rsp->gpnum) {
-		note_new_gpnum(rsp, rdp);
-		ret = 1;
-	}
-	local_irq_restore(flags);
-	return ret;
-}
-
-/*
  * Initialize the specified rcu_data structure's callback list to empty.
  */
 static void init_callback_list(struct rcu_data *rdp)
@@ -1313,18 +1254,16 @@ static void rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
 }
 
 /*
- * Advance this CPU's callbacks, but only if the current grace period
- * has ended.  This may be called only from the CPU to whom the rdp
- * belongs.  In addition, the corresponding leaf rcu_node structure's
- * ->lock must be held by the caller, with irqs disabled.
+ * Update CPU-local rcu_data state to record the beginnings and ends of
+ * grace periods.  The caller must hold the ->lock of the leaf rcu_node
+ * structure corresponding to the current CPU, and must have irqs disabled.
  */
-static void
-__rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
+static void __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
 {
-	/* Did another grace period end? */
+	/* Handle the ends of any preceding grace periods first. */
 	if (rdp->completed == rnp->completed) {
 
-		/* No, so just accelerate recent callbacks. */
+		/* No grace period end, so just accelerate recent callbacks. */
 		rcu_accelerate_cbs(rsp, rnp, rdp);
 
 	} else {
@@ -1335,68 +1274,40 @@ __rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_dat
 		/* Remember that we saw this grace-period completion. */
 		rdp->completed = rnp->completed;
 		trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpuend");
+	}
 
+	if (rdp->gpnum != rnp->gpnum) {
 		/*
-		 * If we were in an extended quiescent state, we may have
-		 * missed some grace periods that others CPUs handled on
-		 * our behalf. Catch up with this state to avoid noting
-		 * spurious new grace periods.  If another grace period
-		 * has started, then rnp->gpnum will have advanced, so
-		 * we will detect this later on.  Of course, any quiescent
-		 * states we found for the old GP are now invalid.
-		 */
-		if (ULONG_CMP_LT(rdp->gpnum, rdp->completed)) {
-			rdp->gpnum = rdp->completed;
-			rdp->passed_quiesce = 0;
-		}
-
-		/*
-		 * If RCU does not need a quiescent state from this CPU,
-		 * then make sure that this CPU doesn't go looking for one.
+		 * If the current grace period is waiting for this CPU,
+		 * set up to detect a quiescent state, otherwise don't
+		 * go looking for one.
 		 */
-		if ((rnp->qsmask & rdp->grpmask) == 0)
-			rdp->qs_pending = 0;
+		rdp->gpnum = rnp->gpnum;
+		trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpustart");
+		rdp->passed_quiesce = 0;
+		rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
+		zero_cpu_stall_ticks(rdp);
 	}
 }
 
-/*
- * Advance this CPU's callbacks, but only if the current grace period
- * has ended.  This may be called only from the CPU to whom the rdp
- * belongs.
- */
-static void
-rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
+static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
 {
 	unsigned long flags;
 	struct rcu_node *rnp;
 
 	local_irq_save(flags);
 	rnp = rdp->mynode;
-	if (rdp->completed == ACCESS_ONCE(rnp->completed) || /* outside lock. */
+	if ((rdp->gpnum == ACCESS_ONCE(rnp->gpnum) &&
+	     rdp->completed == ACCESS_ONCE(rnp->completed)) || /* w/out lock. */
 	    !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
 		local_irq_restore(flags);
 		return;
 	}
-	__rcu_process_gp_end(rsp, rnp, rdp);
+	__note_gp_changes(rsp, rnp, rdp);
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
 
 /*
- * Do per-CPU grace-period initialization for running CPU.  The caller
- * must hold the lock of the leaf rcu_node structure corresponding to
- * this CPU.
- */
-static void
-rcu_start_gp_per_cpu(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
-{
-	/* Prior grace period ended, so advance callbacks for current CPU. */
-	__rcu_process_gp_end(rsp, rnp, rdp);
-
-	/* Set state so that this CPU will detect the next quiescent state. */
-	__note_new_gpnum(rsp, rnp, rdp);
-}
-
-/*
  * Initialize a new grace period.
  */
 static int rcu_gp_init(struct rcu_state *rsp)
@@ -1444,7 +1355,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
 		WARN_ON_ONCE(rnp->completed != rsp->completed);
 		ACCESS_ONCE(rnp->completed) = rsp->completed;
 		if (rnp == rdp->mynode)
-			rcu_start_gp_per_cpu(rsp, rnp, rdp);
+			__note_gp_changes(rsp, rnp, rdp);
 		rcu_preempt_boost_start_gp(rnp);
 		trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
 					    rnp->level, rnp->grplo,
@@ -1527,7 +1438,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 		ACCESS_ONCE(rnp->completed) = rsp->gpnum;
 		rdp = this_cpu_ptr(rsp->rda);
 		if (rnp == rdp->mynode)
-			__rcu_process_gp_end(rsp, rnp, rdp);
+			__note_gp_changes(rsp, rnp, rdp);
 		nocb += rcu_future_gp_cleanup(rsp, rnp);
 		raw_spin_unlock_irq(&rnp->lock);
 		cond_resched();
@@ -1805,9 +1716,8 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
 static void
 rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
 {
-	/* If there is now a new grace period, record and return. */
-	if (check_for_new_grace_period(rsp, rdp))
-		return;
+	/* Check for grace-period ends and beginnings. */
+	note_gp_changes(rsp, rdp);
 
 	/*
 	 * Does this CPU still need to do its part for current grace period?
@@ -2271,9 +2181,6 @@ __rcu_process_callbacks(struct rcu_state *rsp)
 
 	WARN_ON_ONCE(rdp->beenonline == 0);
 
-	/* Handle the end of a grace period that some other CPU ended.  */
-	rcu_process_gp_end(rsp, rdp);
-
 	/* Update RCU state based on any recent quiescent states. */
 	rcu_check_quiescent_state(rsp, rdp);
 
@@ -2358,8 +2265,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 	if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
 
 		/* Are we ignoring a completed grace period? */
-		rcu_process_gp_end(rsp, rdp);
-		check_for_new_grace_period(rsp, rdp);
+		note_gp_changes(rsp, rdp);
 
 		/* Start a new grace period if one not already started. */
 		if (!rcu_gp_in_progress(rsp)) {
@@ -3004,7 +2910,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
  * can accept some slop in the rsp->completed access due to the fact
  * that this CPU cannot possibly have any RCU callbacks in flight yet.
  */
-static void __cpuinit
+static void
 rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
 {
 	unsigned long flags;
@@ -3056,7 +2962,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
 	mutex_unlock(&rsp->onoff_mutex);
 }
 
-static void __cpuinit rcu_prepare_cpu(int cpu)
+static void rcu_prepare_cpu(int cpu)
 {
 	struct rcu_state *rsp;
 
@@ -3068,7 +2974,7 @@ static void rcu_prepare_cpu(int cpu)
 /*
  * Handle CPU online/offline notification events.
  */
-static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
+static int rcu_cpu_notify(struct notifier_block *self,
 				    unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
@@ -3120,7 +3026,7 @@ static int __init rcu_spawn_gp_kthread(void)
 	struct task_struct *t;
 
 	for_each_rcu_flavor(rsp) {
-		t = kthread_run(rcu_gp_kthread, rsp, rsp->name);
+		t = kthread_run(rcu_gp_kthread, rsp, "%s", rsp->name);
 		BUG_ON(IS_ERR(t));
 		rnp = rcu_get_root(rsp);
 		raw_spin_lock_irqsave(&rnp->lock, flags);
@@ -3265,11 +3171,25 @@ static void __init rcu_init_one(struct rcu_state *rsp,
  */
 static void __init rcu_init_geometry(void)
 {
+	ulong d;
 	int i;
 	int j;
 	int n = nr_cpu_ids;
 	int rcu_capacity[MAX_RCU_LVLS + 1];
 
+	/*
+	 * Initialize any unspecified boot parameters.
+	 * The default values of jiffies_till_first_fqs and
+	 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
+	 * value, which is a function of HZ, then adding one for each
+	 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
+	 */
+	d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
+	if (jiffies_till_first_fqs == ULONG_MAX)
+		jiffies_till_first_fqs = d;
+	if (jiffies_till_next_fqs == ULONG_MAX)
+		jiffies_till_next_fqs = d;
+
 	/* If the compile-time values are accurate, just leave. */
 	if (rcu_fanout_leaf == CONFIG_RCU_FANOUT_LEAF &&
 	    nr_cpu_ids == NR_CPUS)
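
The jiffies_till_first_fqs/jiffies_till_next_fqs change works as a pair with the rcu_init_geometry() hunk at the end of the diff: ULONG_MAX serves as a "not set on the boot line" sentinel, and boot-time code replaces it with RCU_JIFFIES_TILL_FORCE_QS plus one jiffy per RCU_JIFFIES_FQS_DIV possible CPUs, so the force-quiescent-state interval scales with machine size. Below is a minimal standalone sketch of that sentinel-default pattern; the macros and names are illustrative stand-ins, not the kernel's:

#include <limits.h>
#include <stdio.h>
#include <unistd.h>

#define JIFFIES_TILL_FORCE_QS 3	/* stand-in for RCU_JIFFIES_TILL_FORCE_QS */
#define FQS_DIV 256		/* stand-in for RCU_JIFFIES_FQS_DIV */

/* "Boot parameters": ULONG_MAX means "the user did not set this". */
static unsigned long till_first_fqs = ULONG_MAX;
static unsigned long till_next_fqs = ULONG_MAX;

static void init_fqs_defaults(long ncpus)
{
	/* Base interval, plus one tick per FQS_DIV possible CPUs. */
	unsigned long d = JIFFIES_TILL_FORCE_QS + ncpus / FQS_DIV;

	if (till_first_fqs == ULONG_MAX)
		till_first_fqs = d;
	if (till_next_fqs == ULONG_MAX)
		till_next_fqs = d;
}

int main(void)
{
	long ncpus = sysconf(_SC_NPROCESSORS_CONF);

	init_fqs_defaults(ncpus > 0 ? ncpus : 1);
	printf("first=%lu next=%lu\n", till_first_fqs, till_next_fqs);
	return 0;
}

Deferring the computation to init time matters because the CPU count is only known at boot, not when the static initializers are compiled.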
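
The consolidated note_gp_changes() keeps the lockless fastpath that both of the deleted callers had: read the leaf rcu_node's ->gpnum and ->completed without the lock (the kernel uses ACCESS_ONCE for those reads), return if nothing changed, and otherwise use raw_spin_trylock() rather than spinning, deferring the update to a later call if the lock is busy. A userspace sketch of the same shape, assuming pthreads in place of the rcu_node spinlock and hypothetical struct and field names:

#include <pthread.h>
#include <stdio.h>

struct node {			/* analog of the shared rcu_node state */
	pthread_mutex_t lock;
	unsigned long gpnum;
	unsigned long completed;
};

struct cpu_data {		/* analog of the per-CPU rcu_data snapshot */
	unsigned long gpnum;
	unsigned long completed;
};

/* Returns 1 if the snapshot was refreshed, 0 if unchanged or lock busy. */
static int note_changes(struct node *n, struct cpu_data *d)
{
	/*
	 * Unlocked peek: a stale read only defers the refresh to a
	 * later call, so missing an update here is harmless.
	 */
	if ((d->gpnum == n->gpnum && d->completed == n->completed) ||
	    pthread_mutex_trylock(&n->lock) != 0)
		return 0;
	/* Re-read under the lock and update the local snapshot. */
	d->gpnum = n->gpnum;
	d->completed = n->completed;
	pthread_mutex_unlock(&n->lock);
	return 1;
}

int main(void)
{
	struct node n = { PTHREAD_MUTEX_INITIALIZER, 1, 0 };
	struct cpu_data d = { 0, 0 };

	printf("refreshed=%d gpnum=%lu\n", note_changes(&n, &d), d.gpnum);
	printf("refreshed=%d (no change)\n", note_changes(&n, &d));
	return 0;
}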
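
The kthread_run() hunk is a format-string hardening fix: rsp->name was previously passed where a printf-style format is expected, so a '%' in the name would be interpreted as a conversion specifier. Passing a literal "%s" format makes the name plain data. The same rule in miniature:

#include <stdio.h>

int main(void)
{
	const char *name = "rcu_%s";	/* a '%' that is data, not a format */

	/* Bad: the string itself would be parsed for conversions. */
	/* printf(name); */

	/* Good: the string is an argument to a fixed format. */
	printf("%s\n", name);
	return 0;
}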
