rcu: Process offlining and onlining only at grace-period start
Races between CPU hotplug and grace periods can be difficult to resolve,
so the ->onoff_mutex is used to exclude the two events.  Unfortunately,
this means that it is impossible for an outgoing CPU to perform the
last bits of its offlining from its last pass through the idle loop,
because sleeplocks cannot be acquired in that context.

This commit avoids these problems by buffering online and offline events
in a new ->qsmaskinitnext field in the leaf rcu_node structures.  When a
grace period starts, the events accumulated in this mask are applied to
the ->qsmaskinit field, and, if needed, up the rcu_node tree.  The special
case of all CPUs corresponding to a given leaf rcu_node structure being
offline while there are still elements in that structure's ->blkd_tasks
list is handled using a new ->wait_blkd_tasks field.  In this case,
propagating the offline bits up the tree is deferred until the beginning
of the grace period after all of the tasks have exited their RCU read-side
critical sections and removed themselves from the list, at which point
the ->wait_blkd_tasks flag is cleared.  If one of that leaf rcu_node
structure's CPUs comes back online before the list empties, then the
->wait_blkd_tasks flag is simply cleared.
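
As an editorial aside, the grace-period-start bookkeeping described above
amounts to roughly the following sketch.  It is simplified from the
rcu_gp_init() hunk in the diff below: the locking, memory ordering, and the
surrounding rcu_for_each_leaf_node() loop are omitted, and the helper name
apply_buffered_hotplug_events() is invented purely for illustration.

	/* Sketch only: fold buffered hotplug events into one leaf rcu_node. */
	static void apply_buffered_hotplug_events(struct rcu_node *rnp)
	{
		unsigned long oldmask = rnp->qsmaskinit;

		if (oldmask == rnp->qsmaskinitnext && !rnp->wait_blkd_tasks)
			return;				/* No buffered events. */

		/* Apply the buffered online/offline events. */
		rnp->qsmaskinit = rnp->qsmaskinitnext;

		/* If zero-ness of ->qsmaskinit changed, propagate up the tree. */
		if (!oldmask != !rnp->qsmaskinit) {
			if (!oldmask)				/* First CPU online. */
				rcu_init_new_rnp(rnp);
			else if (rcu_preempt_has_tasks(rnp))	/* Blocked tasks remain. */
				rnp->wait_blkd_tasks = true;
			else					/* Last CPU offline. */
				rcu_cleanup_dead_rnp(rnp);
		}

		/* Resolve a deferred offline once the blocked tasks are gone. */
		if (rnp->wait_blkd_tasks &&
		    (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
			rnp->wait_blkd_tasks = false;
			rcu_cleanup_dead_rnp(rnp);	/* No-op if a CPU came back. */
		}
	}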

This of course means that RCU's notion of which CPUs are offline can be
out of date.  This is OK because RCU need only wait on CPUs that were
online at the time that the grace period started.  In addition, RCU's
force-quiescent-state actions will handle the case where a CPU goes
offline after the grace period starts.
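
For completeness, the hotplug-side half of this scheme can be sketched as
follows.  The function names rcu_set_cpu_online_bit() and
rcu_clear_cpu_online_bit() are invented for illustration only; in this
commit the corresponding updates are made inline in rcu_init_percpu_data()
and rcu_cleanup_dead_cpu().  The point is that only the leaf rcu_node
->lock is required, not the sleeping ->onoff_mutex:

	/* Sketch only: buffer an online event for the next grace period. */
	static void rcu_set_cpu_online_bit(struct rcu_data *rdp)
	{
		unsigned long flags;
		struct rcu_node *rnp = rdp->mynode;	/* This CPU's leaf rcu_node. */

		raw_spin_lock_irqsave(&rnp->lock, flags);
		rnp->qsmaskinitnext |= rdp->grpmask;	/* Seen at next GP start. */
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}

	/* Sketch only: buffer an offline event for the next grace period. */
	static void rcu_clear_cpu_online_bit(struct rcu_data *rdp)
	{
		unsigned long flags;
		struct rcu_node *rnp = rdp->mynode;

		raw_spin_lock_irqsave(&rnp->lock, flags);
		rnp->qsmaskinitnext &= ~rdp->grpmask;	/* Seen at next GP start. */
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}

Because these updates take only a raw spinlock, they are the sort of
bookkeeping that could eventually be done from the outgoing CPU's last pass
through the idle loop, which the ->onoff_mutex previously ruled out.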

Signed-off-by: Paul E. McKenney <[email protected]>
paulmck committed Mar 12, 2015
1 parent cc99a31 commit 0aa04b0
Showing 4 changed files with 136 additions and 53 deletions.
154 changes: 121 additions & 33 deletions kernel/rcu/tree.c
@@ -152,6 +152,8 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
*/
static int rcu_scheduler_fully_active __read_mostly;

static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
@@ -178,6 +180,17 @@ module_param(gp_init_delay, int, 0644);
unsigned long rcutorture_testseq;
unsigned long rcutorture_vernum;

/*
* Compute the mask of online CPUs for the specified rcu_node structure.
* This will not be stable unless the rcu_node structure's ->lock is
* held, but the bit corresponding to the current CPU will be stable
* in most contexts.
*/
unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
{
return ACCESS_ONCE(rnp->qsmaskinitnext);
}

/*
* Return true if an RCU grace period is in progress. The ACCESS_ONCE()s
* permit this function to be invoked without holding the root rcu_node
@@ -960,7 +973,7 @@ bool rcu_lockdep_current_cpu_online(void)
preempt_disable();
rdp = this_cpu_ptr(&rcu_sched_data);
rnp = rdp->mynode;
ret = (rdp->grpmask & rnp->qsmaskinit) ||
ret = (rdp->grpmask & rcu_rnp_online_cpus(rnp)) ||
!rcu_scheduler_fully_active;
preempt_enable();
return ret;
@@ -1710,6 +1723,7 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
*/
static int rcu_gp_init(struct rcu_state *rsp)
{
unsigned long oldmask;
struct rcu_data *rdp;
struct rcu_node *rnp = rcu_get_root(rsp);

@@ -1744,6 +1758,55 @@ static int rcu_gp_init(struct rcu_state *rsp)
mutex_lock(&rsp->onoff_mutex);
smp_mb__after_unlock_lock(); /* ->gpnum increment before GP! */

/*
* Apply per-leaf buffered online and offline operations to the
* rcu_node tree. Note that this new grace period need not wait
* for subsequent online CPUs, and that quiescent-state forcing
* will handle subsequent offline CPUs.
*/
rcu_for_each_leaf_node(rsp, rnp) {
raw_spin_lock_irq(&rnp->lock);
smp_mb__after_unlock_lock();
if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
!rnp->wait_blkd_tasks) {
/* Nothing to do on this leaf rcu_node structure. */
raw_spin_unlock_irq(&rnp->lock);
continue;
}

/* Record old state, apply changes to ->qsmaskinit field. */
oldmask = rnp->qsmaskinit;
rnp->qsmaskinit = rnp->qsmaskinitnext;

/* If zero-ness of ->qsmaskinit changed, propagate up tree. */
if (!oldmask != !rnp->qsmaskinit) {
if (!oldmask) /* First online CPU for this rcu_node. */
rcu_init_new_rnp(rnp);
else if (rcu_preempt_has_tasks(rnp)) /* blocked tasks */
rnp->wait_blkd_tasks = true;
else /* Last offline CPU and can propagate. */
rcu_cleanup_dead_rnp(rnp);
}

/*
* If all waited-on tasks from prior grace period are
* done, and if all this rcu_node structure's CPUs are
* still offline, propagate up the rcu_node tree and
* clear ->wait_blkd_tasks. Otherwise, if one of this
* rcu_node structure's CPUs has since come back online,
* simply clear ->wait_blkd_tasks (but rcu_cleanup_dead_rnp()
* checks for this, so just call it unconditionally).
*/
if (rnp->wait_blkd_tasks &&
(!rcu_preempt_has_tasks(rnp) ||
rnp->qsmaskinit)) {
rnp->wait_blkd_tasks = false;
rcu_cleanup_dead_rnp(rnp);
}

raw_spin_unlock_irq(&rnp->lock);
}

/*
* Set the quiescent-state-needed bits in all the rcu_node
* structures for all currently online CPUs in breadth-first order,
@@ -2133,7 +2196,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
* irqs disabled, and this lock is released upon return, but irqs remain
* disabled.
*/
static void __maybe_unused rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
struct rcu_node *rnp, unsigned long flags)
__releases(rnp->lock)
{
@@ -2409,6 +2472,7 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
raw_spin_lock(&rnp->lock); /* irqs already disabled. */
smp_mb__after_unlock_lock(); /* GP memory ordering. */
rnp->qsmaskinit &= ~mask;
rnp->qsmask &= ~mask;
if (rnp->qsmaskinit) {
raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
return;
@@ -2427,6 +2491,7 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
{
unsigned long flags;
unsigned long mask;
struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */

@@ -2443,12 +2508,12 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
raw_spin_unlock_irqrestore(&rsp->orphan_lock, flags);

/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
mask = rdp->grpmask;
raw_spin_lock_irqsave(&rnp->lock, flags);
smp_mb__after_unlock_lock(); /* Enforce GP memory-order guarantee. */
rnp->qsmaskinit &= ~rdp->grpmask;
if (rnp->qsmaskinit == 0 && !rcu_preempt_has_tasks(rnp))
rcu_cleanup_dead_rnp(rnp);
rcu_report_qs_rnp(rdp->grpmask, rsp, rnp, flags); /* Rlses rnp->lock. */
rnp->qsmaskinitnext &= ~mask;
raw_spin_unlock_irqrestore(&rnp->lock, flags);

WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL,
"rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n",
cpu, rdp->qlen, rdp->nxtlist);
@@ -2654,12 +2719,21 @@ static void force_qs_rnp(struct rcu_state *rsp,
}
}
if (mask != 0) {

/* rcu_report_qs_rnp() releases rnp->lock. */
/* Idle/offline CPUs, report. */
rcu_report_qs_rnp(mask, rsp, rnp, flags);
continue;
} else if (rnp->parent &&
list_empty(&rnp->blkd_tasks) &&
!rnp->qsmask &&
(rnp->parent->qsmask & rnp->grpmask)) {
/*
* Race between grace-period initialization and task
* exiting RCU read-side critical section, report.
*/
rcu_report_unblock_qs_rnp(rsp, rnp, flags);
} else {
/* Nothing to do here, so just drop the lock. */
raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
}

@@ -3568,6 +3642,28 @@ void rcu_barrier_sched(void)
}
EXPORT_SYMBOL_GPL(rcu_barrier_sched);

/*
* Propagate ->qsmaskinit bits up the rcu_node tree to account for the
* first CPU in a given leaf rcu_node structure coming online. The caller
* must hold the corresponding leaf rcu_node ->lock with interrupts
* disabled.
*/
static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
{
long mask;
struct rcu_node *rnp = rnp_leaf;

for (;;) {
mask = rnp->grpmask;
rnp = rnp->parent;
if (rnp == NULL)
return;
raw_spin_lock(&rnp->lock); /* Interrupts already disabled. */
rnp->qsmaskinit |= mask;
raw_spin_unlock(&rnp->lock); /* Interrupts remain disabled. */
}
}

/*
* Do boot-time initialization of a CPU's per-CPU RCU data.
*/
@@ -3620,31 +3716,23 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
(atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */

/* Add CPU to rcu_node bitmasks. */
/*
* Add CPU to leaf rcu_node pending-online bitmask. Any needed
* propagation up the rcu_node tree will happen at the beginning
* of the next grace period.
*/
rnp = rdp->mynode;
mask = rdp->grpmask;
do {
/* Exclude any attempts to start a new GP on small systems. */
raw_spin_lock(&rnp->lock); /* irqs already disabled. */
rnp->qsmaskinit |= mask;
mask = rnp->grpmask;
if (rnp == rdp->mynode) {
/*
* If there is a grace period in progress, we will
* set up to wait for it next time we run the
* RCU core code.
*/
rdp->gpnum = rnp->completed;
rdp->completed = rnp->completed;
rdp->passed_quiesce = 0;
rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
rdp->qs_pending = 0;
trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
}
raw_spin_unlock(&rnp->lock); /* irqs already disabled. */
rnp = rnp->parent;
} while (rnp != NULL && !(rnp->qsmaskinit & mask));
local_irq_restore(flags);
raw_spin_lock(&rnp->lock); /* irqs already disabled. */
smp_mb__after_unlock_lock();
rnp->qsmaskinitnext |= mask;
rdp->gpnum = rnp->completed; /* Make CPU later note any new GP. */
rdp->completed = rnp->completed;
rdp->passed_quiesce = false;
rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
rdp->qs_pending = false;
trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
raw_spin_unlock_irqrestore(&rnp->lock, flags);

mutex_unlock(&rsp->onoff_mutex);
}
9 changes: 9 additions & 0 deletions kernel/rcu/tree.h
@@ -141,12 +141,20 @@ struct rcu_node {
/* complete (only for PREEMPT_RCU). */
unsigned long qsmaskinit;
/* Per-GP initial value for qsmask & expmask. */
/* Initialized from ->qsmaskinitnext at the */
/* beginning of each grace period. */
unsigned long qsmaskinitnext;
/* Online CPUs for next grace period. */
unsigned long grpmask; /* Mask to apply to parent qsmask. */
/* Only one bit will be set in this mask. */
int grplo; /* lowest-numbered CPU or group here. */
int grphi; /* highest-numbered CPU or group here. */
u8 grpnum; /* CPU/group number for next level up. */
u8 level; /* root is at level 0. */
bool wait_blkd_tasks;/* Necessary to wait for blocked tasks to */
/* exit RCU read-side critical sections */
/* before propagating offline up the */
/* rcu_node tree? */
struct rcu_node *parent;
struct list_head blkd_tasks;
/* Tasks blocked in RCU read-side critical */
@@ -559,6 +567,7 @@ static void rcu_prepare_kthreads(int cpu);
static void rcu_cleanup_after_idle(void);
static void rcu_prepare_for_idle(void);
static void rcu_idle_count_callbacks_posted(void);
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
static void print_cpu_stall_info_begin(void);
static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
static void print_cpu_stall_info_end(void);
22 changes: 4 additions & 18 deletions kernel/rcu/tree_plugin.h
@@ -180,7 +180,7 @@ static void rcu_preempt_note_context_switch(void)
* But first, note that the current CPU must still be
* on line!
*/
WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0);
WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
@@ -263,7 +263,6 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
*/
void rcu_read_unlock_special(struct task_struct *t)
{
bool empty;
bool empty_exp;
bool empty_norm;
bool empty_exp_now;
@@ -319,7 +318,6 @@ void rcu_read_unlock_special(struct task_struct *t)
break;
raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
}
empty = !rcu_preempt_has_tasks(rnp);
empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
empty_exp = !rcu_preempted_readers_exp(rnp);
smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
@@ -339,14 +337,6 @@ void rcu_read_unlock_special(struct task_struct *t)
drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
#endif /* #ifdef CONFIG_RCU_BOOST */

/*
* If this was the last task on the list, go see if we
* need to propagate ->qsmaskinit bit clearing up the
* rcu_node tree.
*/
if (!empty && !rcu_preempt_has_tasks(rnp))
rcu_cleanup_dead_rnp(rnp);

/*
* If this was the last task on the current list, and if
* we aren't waiting on any CPUs, report the quiescent state.
@@ -868,8 +858,6 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/*
* Because there is no preemptible RCU, there can be no readers blocked.
*/
@@ -878,8 +866,6 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
return false;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
* Because preemptible RCU does not exist, we never have to check for
* tasks blocked within RCU read-side critical sections.
@@ -1179,7 +1165,7 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
* Returns zero if all is well, a negated errno otherwise.
*/
static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
struct rcu_node *rnp)
struct rcu_node *rnp)
{
int rnp_index = rnp - &rsp->node[0];
unsigned long flags;
@@ -1189,7 +1175,7 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
if (&rcu_preempt_state != rsp)
return 0;

if (!rcu_scheduler_fully_active || rnp->qsmaskinit == 0)
if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0)
return 0;

rsp->boost = 1;
@@ -1282,7 +1268,7 @@ static void rcu_cpu_kthread(unsigned int cpu)
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
struct task_struct *t = rnp->boost_kthread_task;
unsigned long mask = rnp->qsmaskinit;
unsigned long mask = rcu_rnp_online_cpus(rnp);
cpumask_var_t cm;
int cpu;

4 changes: 2 additions & 2 deletions kernel/rcu/tree_trace.c
@@ -283,8 +283,8 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
seq_puts(m, "\n");
level = rnp->level;
}
seq_printf(m, "%lx/%lx %c%c>%c %d:%d ^%d ",
rnp->qsmask, rnp->qsmaskinit,
seq_printf(m, "%lx/%lx->%lx %c%c>%c %d:%d ^%d ",
rnp->qsmask, rnp->qsmaskinit, rnp->qsmaskinitnext,
".G"[rnp->gp_tasks != NULL],
".E"[rnp->exp_tasks != NULL],
".T"[!list_empty(&rnp->blkd_tasks)],