Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  rcu: Fix whitespace inconsistencies
  rcu: Fix thinko, actually initialize full tree
  rcu: Apply results of code inspection of kernel/rcutree_plugin.h
  rcu: Add WARN_ON_ONCE() consistency checks covering state transitions
  rcu: Fix synchronize_rcu() for TREE_PREEMPT_RCU
  rcu: Simplify rcu_read_unlock_special() quiescent-state accounting
  rcu: Add debug checks to TREE_PREEMPT_RCU for premature grace periods
  rcu: Kconfig help needs to say that TREE_PREEMPT_RCU scales down
  rcutorture: Occasionally delay readers enough to make RCU force_quiescent_state
  rcu: Initialize multi-level RCU grace periods holding locks
  rcu: Need to update rnp->gpnum if preemptable RCU is to be reliable
torvalds committed Sep 21, 2009
2 parents f4eccb6 + a71fca5 commit b8c7f1d
Showing 11 changed files with 195 additions and 156 deletions.
2 changes: 1 addition & 1 deletion include/linux/rculist_nulls.h
@@ -102,7 +102,7 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
*/
#define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \
for (pos = rcu_dereference((head)->first); \
-		(!is_a_nulls(pos)) && \
+		(!is_a_nulls(pos)) && \
({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
pos = rcu_dereference(pos->next))
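
As a usage sketch (struct my_obj, its key field, and my_lookup below are
hypothetical, not from this commit): the macro walks an RCU-protected
hlist_nulls list, and the caller must be inside rcu_read_lock().

	struct my_obj {
		struct hlist_nulls_node node;
		int key;
	};

	static struct my_obj *my_lookup(struct hlist_nulls_head *head, int key)
	{
		struct my_obj *tpos;
		struct hlist_nulls_node *pos;

		/* Iteration stops when pos becomes a "nulls" marker. */
		hlist_nulls_for_each_entry_rcu(tpos, pos, head, node)
			if (tpos->key == key)
				return tpos;
		return NULL;
	}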

29 changes: 8 additions & 21 deletions include/linux/rcupdate.h
@@ -1,5 +1,5 @@
/*
- * Read-Copy Update mechanism for mutual exclusion
+ * Read-Copy Update mechanism for mutual exclusion
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -18,15 +18,15 @@
* Copyright IBM Corporation, 2001
*
* Author: Dipankar Sarma <[email protected]>
- *
+ *
* Based on the original work by Paul McKenney <[email protected]>
* and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
* Papers:
* http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
* http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
*
* For detailed explanation of Read-Copy Update mechanism see -
- * http://lse.sourceforge.net/locking/rcupdate.html
+ * http://lse.sourceforge.net/locking/rcupdate.html
*
*/

@@ -52,8 +52,13 @@ struct rcu_head {
};

/* Exported common interfaces */
+#ifdef CONFIG_TREE_PREEMPT_RCU
extern void synchronize_rcu(void);
+#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+#define synchronize_rcu synchronize_sched
+#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
extern void synchronize_rcu_bh(void);
+extern void synchronize_sched(void);
extern void rcu_barrier(void);
extern void rcu_barrier_bh(void);
extern void rcu_barrier_sched(void);
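
The #else mapping above works because, with TREE_PREEMPT_RCU=n, rcu_read_lock()
boils down to preempt_disable() (see the rcutree.h hunk below), so an rcu-sched
grace period is also a full RCU grace period and synchronize_rcu() can simply be
synchronize_sched(). Writer code is unchanged either way; a minimal update-side
sketch (my_data, global_ptr, and my_update are hypothetical, and a single
updater is assumed):

	struct my_data { int val; };
	static struct my_data *global_ptr;

	static void my_update(struct my_data *new)
	{
		struct my_data *old = global_ptr;	/* sole updater */

		rcu_assign_pointer(global_ptr, new);	/* publish new version */
		synchronize_rcu();		/* wait for pre-existing readers */
		kfree(old);			/* now safe to reclaim */
	}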
@@ -261,24 +266,6 @@ struct rcu_synchronize {

extern void wakeme_after_rcu(struct rcu_head *head);

-/**
- * synchronize_sched - block until all CPUs have exited any non-preemptive
- * kernel code sequences.
- *
- * This means that all preempt_disable code sequences, including NMI and
- * hardware-interrupt handlers, in progress on entry will have completed
- * before this primitive returns. However, this does not guarantee that
- * softirq handlers will have completed, since in some kernels, these
- * handlers can run in process context, and can block.
- *
- * This primitive provides the guarantees made by the (now removed)
- * synchronize_kernel() API. In contrast, synchronize_rcu() only
- * guarantees that rcu_read_lock() sections will have completed.
- * In "classic RCU", these two guarantees happen to be one and
- * the same, but can differ in realtime RCU implementations.
- */
-#define synchronize_sched() __synchronize_sched()

/**
* call_rcu - Queue an RCU callback for invocation after a grace period.
* @head: structure to be used for queueing the RCU updates.
6 changes: 3 additions & 3 deletions include/linux/rcutree.h
@@ -24,7 +24,7 @@
* and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
*
* For detailed explanation of Read-Copy Update mechanism see -
- * Documentation/RCU
+ * Documentation/RCU
*/

#ifndef __LINUX_RCUTREE_H
@@ -53,6 +53,8 @@ static inline void __rcu_read_unlock(void)
preempt_enable();
}

+#define __synchronize_sched() synchronize_rcu()

static inline void exit_rcu(void)
{
}
@@ -68,8 +70,6 @@ static inline void __rcu_read_unlock_bh(void)
local_bh_enable();
}

-#define __synchronize_sched() synchronize_rcu()

extern void call_rcu_sched(struct rcu_head *head,
void (*func)(struct rcu_head *rcu));

1 change: 0 additions & 1 deletion include/linux/sched.h
@@ -1755,7 +1755,6 @@ extern cputime_t task_gtime(struct task_struct *p);

#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
-#define RCU_READ_UNLOCK_GOT_QS (1 << 2) /* CPU has responded to RCU core. */

static inline void rcu_copy_process(struct task_struct *p)
{
3 changes: 2 additions & 1 deletion init/Kconfig
@@ -331,7 +331,8 @@ config TREE_PREEMPT_RCU
This option selects the RCU implementation that is
designed for very large SMP systems with hundreds or
thousands of CPUs, but for which real-time response
-	  is also required.
+	  is also required. It also scales down nicely to
+	  smaller systems.

endchoice

48 changes: 45 additions & 3 deletions kernel/rcupdate.c
@@ -19,15 +19,15 @@
*
* Authors: Dipankar Sarma <[email protected]>
* Manfred Spraul <[email protected]>
- *
+ *
* Based on the original work by Paul McKenney <[email protected]>
* and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
* Papers:
* http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
* http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
*
* For detailed explanation of Read-Copy Update mechanism see -
- * http://lse.sourceforge.net/locking/rcupdate.html
+ * http://lse.sourceforge.net/locking/rcupdate.html
*
*/
#include <linux/types.h>
@@ -74,6 +74,8 @@ void wakeme_after_rcu(struct rcu_head *head)
complete(&rcu->completion);
}

+#ifdef CONFIG_TREE_PREEMPT_RCU

/**
* synchronize_rcu - wait until a grace period has elapsed.
*
@@ -87,7 +89,7 @@ void synchronize_rcu(void)
{
struct rcu_synchronize rcu;

-	if (rcu_blocking_is_gp())
+	if (!rcu_scheduler_active)
return;

init_completion(&rcu.completion);
@@ -98,6 +100,46 @@ void synchronize_rcu(void)
}
EXPORT_SYMBOL_GPL(synchronize_rcu);

+#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */

+/**
+ * synchronize_sched - wait until an rcu-sched grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full rcu-sched
+ * grace period has elapsed, in other words after all currently executing
+ * rcu-sched read-side critical sections have completed. These read-side
+ * critical sections are delimited by rcu_read_lock_sched() and
+ * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(),
+ * local_irq_disable(), and so on may be used in place of
+ * rcu_read_lock_sched().
+ *
+ * This means that all preempt_disable code sequences, including NMI and
+ * hardware-interrupt handlers, in progress on entry will have completed
+ * before this primitive returns. However, this does not guarantee that
+ * softirq handlers will have completed, since in some kernels, these
+ * handlers can run in process context, and can block.
+ *
+ * This primitive provides the guarantees made by the (now removed)
+ * synchronize_kernel() API. In contrast, synchronize_rcu() only
+ * guarantees that rcu_read_lock() sections will have completed.
+ * In "classic RCU", these two guarantees happen to be one and
+ * the same, but can differ in realtime RCU implementations.
+ */
+void synchronize_sched(void)
+{
+	struct rcu_synchronize rcu;
+
+	if (rcu_blocking_is_gp())
+		return;
+
+	init_completion(&rcu.completion);
+	/* Will wake me after RCU finished. */
+	call_rcu_sched(&rcu.head, wakeme_after_rcu);
+	/* Wait for it. */
+	wait_for_completion(&rcu.completion);
+}
+EXPORT_SYMBOL_GPL(synchronize_sched);
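
A usage sketch matching the reader/writer pairing described in the new comment
(my_data, global_ptr, my_read, and my_replace are hypothetical, not from this
commit):

	struct my_data { int val; };
	static struct my_data *global_ptr;

	static int my_read(void)
	{
		struct my_data *p;
		int val = -1;

		rcu_read_lock_sched();		/* or just preempt_disable() */
		p = rcu_dereference(global_ptr);
		if (p)
			val = p->val;
		rcu_read_unlock_sched();
		return val;
	}

	static void my_replace(struct my_data *new)	/* single updater */
	{
		struct my_data *old = global_ptr;

		rcu_assign_pointer(global_ptr, new);
		synchronize_sched();	/* all preempt-disabled readers done */
		kfree(old);
	}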

/**
* synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
*
43 changes: 24 additions & 19 deletions kernel/rcutorture.c
@@ -18,7 +18,7 @@
* Copyright (C) IBM Corporation, 2005, 2006
*
* Authors: Paul E. McKenney <[email protected]>
- *	  Josh Triplett <[email protected]>
+ *	  Josh Triplett <[email protected]>
*
* See also: Documentation/RCU/torture.txt
*/
@@ -50,7 +50,7 @@

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <[email protected]> and "
"Josh Triplett <[email protected]>");
"Josh Triplett <[email protected]>");

static int nreaders = -1; /* # reader threads, defaults to 2*ncpus */
static int nfakewriters = 4; /* # fake writer threads */
@@ -110,8 +110,8 @@ struct rcu_torture {
};

static LIST_HEAD(rcu_torture_freelist);
-static struct rcu_torture *rcu_torture_current = NULL;
-static long rcu_torture_current_version = 0;
+static struct rcu_torture *rcu_torture_current;
+static long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
@@ -124,11 +124,11 @@ static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
-static long n_rcu_torture_timers = 0;
+static long n_rcu_torture_timers;
static struct list_head rcu_torture_removed;
static cpumask_var_t shuffle_tmp_mask;

-static int stutter_pause_test = 0;
+static int stutter_pause_test;

#if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE)
#define RCUTORTURE_RUNNABLE_INIT 1
@@ -267,7 +267,8 @@ struct rcu_torture_ops {
int irq_capable;
char *name;
};
-static struct rcu_torture_ops *cur_ops = NULL;
+
+static struct rcu_torture_ops *cur_ops;

/*
* Definitions for rcu torture testing.
@@ -281,14 +282,17 @@ static int rcu_torture_read_lock(void) __acquires(RCU)

static void rcu_read_delay(struct rcu_random_state *rrsp)
{
-	long delay;
-	const long longdelay = 200;
+	const unsigned long shortdelay_us = 200;
+	const unsigned long longdelay_ms = 50;

-	/* We want there to be long-running readers, but not all the time. */
+	/* We want a short delay sometimes to make a reader delay the grace
+	 * period, and we want a long delay occasionally to trigger
+	 * force_quiescent_state. */

-	delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay);
-	if (!delay)
-		udelay(longdelay);
+	if (!(rcu_random(rrsp) % (nrealreaders * 2000 * longdelay_ms)))
+		mdelay(longdelay_ms);
+	if (!(rcu_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
+		udelay(shortdelay_us);
}
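
To put rough numbers on the new delays (a worked example, assuming the default
nrealreaders of 2*ncpus on a 4-CPU box, so nrealreaders = 8): rcu_random() % N
is zero about once per N calls, so the 50 ms mdelay() fires about once per
8 * 2000 * 50 = 800,000 reads, and the 200 us udelay() about once per
8 * 2 * 200 = 3,200 reads. Readers stay fast almost all the time, but the rare
long delay holds a grace period open long enough to provoke
force_quiescent_state().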

static void rcu_torture_read_unlock(int idx) __releases(RCU)
@@ -339,8 +343,8 @@ static struct rcu_torture_ops rcu_ops = {
.sync = synchronize_rcu,
.cb_barrier = rcu_barrier,
.stats = NULL,
-	.irq_capable = 1,
-	.name = "rcu"
+	.irq_capable = 1,
+	.name = "rcu"
};

static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
@@ -638,7 +642,8 @@ rcu_torture_writer(void *arg)

do {
schedule_timeout_uninterruptible(1);
-		if ((rp = rcu_torture_alloc()) == NULL)
+		rp = rcu_torture_alloc();
+		if (rp == NULL)
continue;
rp->rtort_pipe_count = 0;
udelay(rcu_random(&rand) & 0x3ff);
@@ -1110,7 +1115,7 @@ rcu_torture_init(void)
printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n",
torture_type);
mutex_unlock(&fullstop_mutex);
-		return (-EINVAL);
+		return -EINVAL;
}
if (cur_ops->init)
cur_ops->init(); /* no "goto unwind" prior to this point!!! */
@@ -1161,7 +1166,7 @@
goto unwind;
}
fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
-				       GFP_KERNEL);
+				       GFP_KERNEL);
if (fakewriter_tasks == NULL) {
VERBOSE_PRINTK_ERRSTRING("out of memory");
firsterr = -ENOMEM;
@@ -1170,7 +1175,7 @@
for (i = 0; i < nfakewriters; i++) {
VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task");
fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL,
"rcu_torture_fakewriter");
"rcu_torture_fakewriter");
if (IS_ERR(fakewriter_tasks[i])) {
firsterr = PTR_ERR(fakewriter_tasks[i]);
VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter");
