rcu/context_tracking: Move dynticks_nesting to context tracking
The RCU eqs tracking is going to be performed by the context tracking
subsystem. The related nesting counters thus need to be moved to the
context tracking structure.

Acked-by: Paul E. McKenney <[email protected]>
Signed-off-by: Frederic Weisbecker <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Neeraj Upadhyay <[email protected]>
Cc: Uladzislau Rezki <[email protected]>
Cc: Joel Fernandes <[email protected]>
Cc: Boqun Feng <[email protected]>
Cc: Nicolas Saenz Julienne <[email protected]>
Cc: Marcelo Tosatti <[email protected]>
Cc: Xiongfeng Wang <[email protected]>
Cc: Yu Liao <[email protected]>
Cc: Phil Auld <[email protected]>
Cc: Paul Gortmaker <[email protected]>
Cc: Alex Belits <[email protected]>
Signed-off-by: Paul E. McKenney <[email protected]>
Reviewed-by: Nicolas Saenz Julienne <[email protected]>
Tested-by: Nicolas Saenz Julienne <[email protected]>
Frederic Weisbecker authored and paulmckrcu committed Jul 5, 2022
1 parent 62e2412 commit 904e600
Showing 5 changed files with 31 additions and 17 deletions.
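For illustration only (not part of this commit): once the counter lives in struct context_tracking, RCU-side code reads the process-level nesting depth through the new accessors instead of through rcu_data. A minimal, hypothetical sketch of a caller using the ct_dynticks_nesting_cpu() helper added below:

	/* Hypothetical helper, shown only to illustrate the new accessor. */
	static bool cpu_nesting_says_idle(int cpu)
	{
		/*
		 * Zero process-level nesting means the CPU is in an extended
		 * quiescent state from RCU's perspective, mirroring the
		 * this-CPU check in rcu_is_cpu_rrupt_from_idle() below.
		 */
		return ct_dynticks_nesting_cpu(cpu) == 0;
	}
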
13 changes: 13 additions & 0 deletions include/linux/context_tracking_state.h
@@ -27,6 +27,7 @@ struct context_tracking {
 #endif
 #ifdef CONFIG_CONTEXT_TRACKING_IDLE
 	atomic_t dynticks;		/* Even value for idle, else odd. */
+	long dynticks_nesting;		/* Track process nesting level. */
 #endif
 };

@@ -53,6 +54,18 @@ static __always_inline int ct_dynticks_cpu_acquire(int cpu)
 
 	return atomic_read_acquire(&ct->dynticks);
 }
+
+static __always_inline long ct_dynticks_nesting(void)
+{
+	return __this_cpu_read(context_tracking.dynticks_nesting);
+}
+
+static __always_inline long ct_dynticks_nesting_cpu(int cpu)
+{
+	struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
+
+	return ct->dynticks_nesting;
+}
 #endif /* #ifdef CONFIG_CONTEXT_TRACKING_IDLE */
 
 #ifdef CONFIG_CONTEXT_TRACKING_USER
1 change: 1 addition & 0 deletions kernel/context_tracking.c
@@ -25,6 +25,7 @@
 
 DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
#ifdef CONFIG_CONTEXT_TRACKING_IDLE
+	.dynticks_nesting = 1,
 	.dynticks = ATOMIC_INIT(1),
 #endif
 };
31 changes: 16 additions & 15 deletions kernel/rcu/tree.c
@@ -75,7 +75,6 @@
 /* Data structures. */
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
-	.dynticks_nesting = 1,
 	.dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
 #ifdef CONFIG_RCU_NOCB_CPU
 	.cblist.flags = SEGCBLIST_RCU_CORE,
@@ -436,7 +435,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
 	lockdep_assert_irqs_disabled();
 
 	/* Check for counter underflows */
-	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) < 0,
+	RCU_LOCKDEP_WARN(ct_dynticks_nesting() < 0,
			 "RCU dynticks_nesting counter underflow!");
 	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) <= 0,
			 "RCU dynticks_nmi_nesting counter underflow/zero!");
@@ -452,7 +451,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
 	WARN_ON_ONCE(!nesting && !is_idle_task(current));
 
 	/* Does CPU appear to be idle from an RCU standpoint? */
-	return __this_cpu_read(rcu_data.dynticks_nesting) == 0;
+	return ct_dynticks_nesting() == 0;
 }
 
 #define DEFAULT_RCU_BLIMIT (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 1000 : 10)
@@ -619,24 +618,24 @@ static noinstr void rcu_eqs_enter(bool user)
 	WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE);
 	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0);
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-		     rdp->dynticks_nesting == 0);
-	if (rdp->dynticks_nesting != 1) {
+		     ct_dynticks_nesting() == 0);
+	if (ct_dynticks_nesting() != 1) {
 		// RCU will still be watching, so just do accounting and leave.
-		rdp->dynticks_nesting--;
+		ct->dynticks_nesting--;
 		return;
 	}
 
 	instrumentation_begin();
 	lockdep_assert_irqs_disabled();
-	trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, ct_dynticks());
+	trace_rcu_dyntick(TPS("Start"), ct_dynticks_nesting(), 0, ct_dynticks());
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
 	rcu_preempt_deferred_qs(current);
 
 	// instrumentation for the noinstr rcu_dynticks_eqs_enter()
 	instrument_atomic_write(&ct->dynticks, sizeof(ct->dynticks));
 
 	instrumentation_end();
-	WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
+	WRITE_ONCE(ct->dynticks_nesting, 0); /* Avoid irq-access tearing. */
 	// RCU is watching here ...
 	rcu_dynticks_eqs_enter();
 	// ... but is no longer watching here.
@@ -793,7 +792,7 @@ void rcu_irq_exit_check_preempt(void)
 {
 	lockdep_assert_irqs_disabled();
 
-	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
+	RCU_LOCKDEP_WARN(ct_dynticks_nesting() <= 0,
			 "RCU dynticks_nesting counter underflow/zero!");
 	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) !=
			 DYNTICK_IRQ_NONIDLE,
@@ -819,11 +818,11 @@ static void noinstr rcu_eqs_exit(bool user)
 
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !raw_irqs_disabled());
 	rdp = this_cpu_ptr(&rcu_data);
-	oldval = rdp->dynticks_nesting;
+	oldval = ct_dynticks_nesting();
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
 	if (oldval) {
 		// RCU was already watching, so just do accounting and leave.
-		rdp->dynticks_nesting++;
+		ct->dynticks_nesting++;
 		return;
 	}
 	rcu_dynticks_task_exit();
@@ -835,9 +834,9 @@ static void noinstr rcu_eqs_exit(bool user)
 	// instrumentation for the noinstr rcu_dynticks_eqs_exit()
 	instrument_atomic_write(&ct->dynticks, sizeof(ct->dynticks));
 
-	trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, ct_dynticks());
+	trace_rcu_dyntick(TPS("End"), ct_dynticks_nesting(), 1, ct_dynticks());
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
-	WRITE_ONCE(rdp->dynticks_nesting, 1);
+	WRITE_ONCE(ct->dynticks_nesting, 1);
 	WARN_ON_ONCE(rdp->dynticks_nmi_nesting);
 	WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
 	instrumentation_end();
@@ -4134,12 +4133,13 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
 static void __init
 rcu_boot_init_percpu_data(int cpu)
 {
+	struct context_tracking *ct = this_cpu_ptr(&context_tracking);
 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 
 	/* Set up local state, ensuring consistent view of global state. */
 	rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
 	INIT_WORK(&rdp->strict_work, strict_work_handler);
-	WARN_ON_ONCE(rdp->dynticks_nesting != 1);
+	WARN_ON_ONCE(ct->dynticks_nesting != 1);
 	WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(cpu)));
 	rdp->barrier_seq_snap = rcu_state.barrier_sequence;
 	rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
@@ -4164,6 +4164,7 @@ rcu_boot_init_percpu_data(int cpu)
 int rcutree_prepare_cpu(unsigned int cpu)
 {
 	unsigned long flags;
+	struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 	struct rcu_node *rnp = rcu_get_root();
 
@@ -4172,7 +4173,7 @@ int rcutree_prepare_cpu(unsigned int cpu)
 	rdp->qlen_last_fqs_check = 0;
 	rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
 	rdp->blimit = blimit;
-	rdp->dynticks_nesting = 1;	/* CPU not up, no tearing. */
+	ct->dynticks_nesting = 1;	/* CPU not up, no tearing. */
 	raw_spin_unlock_rcu_node(rnp);		/* irqs remain disabled. */
 
 	/*
1 change: 0 additions & 1 deletion kernel/rcu/tree.h
@@ -187,7 +187,6 @@ struct rcu_data {
 
 	/* 3) dynticks interface. */
 	int dynticks_snap;		/* Per-GP tracking for dynticks. */
-	long dynticks_nesting;		/* Track process nesting level. */
 	long dynticks_nmi_nesting;	/* Track irq/NMI nesting level. */
 	bool rcu_need_heavy_qs;		/* GP old, so heavy quiescent state! */
 	bool rcu_urgent_qs;		/* GP old need light quiescent state. */
2 changes: 1 addition & 1 deletion kernel/rcu/tree_stall.h
@@ -479,7 +479,7 @@ static void print_cpu_stall_info(int cpu)
 	       "!."[!delta],
 	       ticks_value, ticks_title,
 	       rcu_dynticks_snap(cpu) & 0xfff,
-	       rdp->dynticks_nesting, rdp->dynticks_nmi_nesting,
+	       ct_dynticks_nesting_cpu(cpu), rdp->dynticks_nmi_nesting,
 	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
 	       data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
 	       rcuc_starved ? buf : "",
