sched: Isolate preempt counting in its own config option
Create a new CONFIG_PREEMPT_COUNT that handles the inc/dec of the
preempt count independently, so that the count can be updated by
preempt_disable() and preempt_enable() even without CONFIG_PREEMPT
being set.

This prepares for making CONFIG_DEBUG_SPINLOCK_SLEEP work with
!CONFIG_PREEMPT, where it currently doesn't detect code that sleeps
inside explicitly preemption-disabled sections.

Signed-off-by: Frederic Weisbecker <[email protected]>
Acked-by: Paul E. McKenney <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Peter Zijlstra <[email protected]>
fweisbec committed Jun 10, 2011
1 parent 2da8c8b commit bdd4e85
Showing 8 changed files with 33 additions and 22 deletions.
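
The core idea: once the preempt count is maintained whenever CONFIG_PREEMPT_COUNT
is set, a might_sleep()-style debug check can rely on preempt_count() being
non-zero inside preempt_disable() sections even on !CONFIG_PREEMPT kernels.
A minimal sketch of such a check follows; it is illustrative only, and the
helper name debug_check_sleep_in_atomic() is invented here, not part of this
commit.

/*
 * Illustrative sketch, not part of this commit. Assumes a kernel with
 * CONFIG_PREEMPT_COUNT set, so that preempt_disable() really increments
 * the per-task preempt count.
 */
static inline void debug_check_sleep_in_atomic(void)
{
        /*
         * A non-zero preempt count or disabled interrupts means the
         * caller is in an atomic section and must not sleep.
         */
        if (preempt_count() != 0 || irqs_disabled())
                printk(KERN_ERR "BUG: sleeping while atomic\n");
}
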
2 changes: 1 addition & 1 deletion include/linux/bit_spinlock.h
@@ -88,7 +88,7 @@ static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
 {
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
 	return test_bit(bitnum, addr);
-#elif defined CONFIG_PREEMPT
+#elif defined CONFIG_PREEMPT_COUNT
 	return preempt_count();
 #else
 	return 1;
4 changes: 2 additions & 2 deletions include/linux/hardirq.h
@@ -93,7 +93,7 @@
  */
 #define in_nmi() (preempt_count() & NMI_MASK)
 
-#if defined(CONFIG_PREEMPT)
+#if defined(CONFIG_PREEMPT_COUNT)
 # define PREEMPT_CHECK_OFFSET 1
 #else
 # define PREEMPT_CHECK_OFFSET 0
@@ -115,7 +115,7 @@
 #define in_atomic_preempt_off() \
 	((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 # define preemptible() (preempt_count() == 0 && !irqs_disabled())
 # define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
 #else
4 changes: 2 additions & 2 deletions include/linux/pagemap.h
@@ -134,7 +134,7 @@ static inline int page_cache_get_speculative(struct page *page)
 	VM_BUG_ON(in_interrupt());
 
 #if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
-# ifdef CONFIG_PREEMPT
+# ifdef CONFIG_PREEMPT_COUNT
 	VM_BUG_ON(!in_atomic());
 # endif
 	/*
@@ -172,7 +172,7 @@ static inline int page_cache_add_speculative(struct page *page, int count)
 	VM_BUG_ON(in_interrupt());
 
 #if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
-# ifdef CONFIG_PREEMPT
+# ifdef CONFIG_PREEMPT_COUNT
 	VM_BUG_ON(!in_atomic());
 # endif
 	VM_BUG_ON(page_count(page) == 0);
26 changes: 17 additions & 9 deletions include/linux/preempt.h
@@ -27,6 +27,21 @@
 
 asmlinkage void preempt_schedule(void);
 
+#define preempt_check_resched() \
+do { \
+	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
+		preempt_schedule(); \
+} while (0)
+
+#else /* !CONFIG_PREEMPT */
+
+#define preempt_check_resched() do { } while (0)
+
+#endif /* CONFIG_PREEMPT */
+
+
+#ifdef CONFIG_PREEMPT_COUNT
+
 #define preempt_disable() \
 do { \
 	inc_preempt_count(); \
@@ -39,12 +54,6 @@ do { \
 	dec_preempt_count(); \
 } while (0)
 
-#define preempt_check_resched() \
-do { \
-	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
-		preempt_schedule(); \
-} while (0)
-
 #define preempt_enable() \
 do { \
 	preempt_enable_no_resched(); \
@@ -80,18 +89,17 @@ do { \
 	preempt_check_resched(); \
 } while (0)
 
-#else
+#else /* !CONFIG_PREEMPT_COUNT */
 
 #define preempt_disable() do { } while (0)
 #define preempt_enable_no_resched() do { } while (0)
 #define preempt_enable() do { } while (0)
-#define preempt_check_resched() do { } while (0)
 
 #define preempt_disable_notrace() do { } while (0)
 #define preempt_enable_no_resched_notrace() do { } while (0)
 #define preempt_enable_notrace() do { } while (0)
 
-#endif
+#endif /* CONFIG_PREEMPT_COUNT */
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 
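
In practice, on a kernel built with CONFIG_PREEMPT_COUNT but without
CONFIG_PREEMPT, preempt_disable()/preempt_enable() still adjust the per-task
preempt count while preempt_check_resched() stays a no-op. A hedged usage
sketch (the helper and the data it protects are hypothetical):

/* Sketch only: a hypothetical helper touching per-CPU state. */
static void touch_percpu_state(void)
{
        preempt_disable();      /* count goes up even on !CONFIG_PREEMPT */
        /* ... access per-CPU data here; sleeping is forbidden ... */
        preempt_enable();       /* count drops; resched check only if CONFIG_PREEMPT */
}
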
12 changes: 6 additions & 6 deletions include/linux/rcupdate.h
@@ -239,7 +239,7 @@ extern int rcu_read_lock_bh_held(void);
  * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
  * and while lockdep is disabled.
  */
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 static inline int rcu_read_lock_sched_held(void)
 {
 	int lockdep_opinion = 0;
@@ -250,12 +250,12 @@ static inline int rcu_read_lock_sched_held(void)
 		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
 	return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
 }
-#else /* #ifdef CONFIG_PREEMPT */
+#else /* #ifdef CONFIG_PREEMPT_COUNT */
 static inline int rcu_read_lock_sched_held(void)
 {
 	return 1;
 }
-#endif /* #else #ifdef CONFIG_PREEMPT */
+#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */
 
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
@@ -276,17 +276,17 @@ static inline int rcu_read_lock_bh_held(void)
 	return 1;
 }
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 static inline int rcu_read_lock_sched_held(void)
 {
 	return preempt_count() != 0 || irqs_disabled();
 }
-#else /* #ifdef CONFIG_PREEMPT */
+#else /* #ifdef CONFIG_PREEMPT_COUNT */
 static inline int rcu_read_lock_sched_held(void)
 {
 	return 1;
 }
-#endif /* #else #ifdef CONFIG_PREEMPT */
+#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */
 
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
2 changes: 1 addition & 1 deletion include/linux/sched.h
@@ -2502,7 +2502,7 @@ extern int _cond_resched(void);
 
 extern int __cond_resched_lock(spinlock_t *lock);
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 #define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
 #else
 #define PREEMPT_LOCK_OFFSET 0
3 changes: 3 additions & 0 deletions kernel/Kconfig.preempt
@@ -35,6 +35,7 @@ config PREEMPT_VOLUNTARY
 
 config PREEMPT
 	bool "Preemptible Kernel (Low-Latency Desktop)"
+	select PREEMPT_COUNT
 	help
 	  This option reduces the latency of the kernel by making
 	  all kernel code (that is not executing in a critical section)
@@ -52,3 +53,5 @@ config PREEMPT
 
 endchoice
 
+config PREEMPT_COUNT
+	bool
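
PREEMPT_COUNT is deliberately a silent bool with no prompt: it is only meant to
be selected by other options, CONFIG_PREEMPT here and, as the changelog
suggests, potentially a debug option later. A hedged Kconfig sketch of such a
follow-up (illustrative only, not part of this commit; the exact option text is
assumed):

config DEBUG_SPINLOCK_SLEEP
	bool "Spinlock debugging: sleep-inside-spinlock checking"
	depends on DEBUG_KERNEL
	select PREEMPT_COUNT
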
2 changes: 1 addition & 1 deletion kernel/sched.c
@@ -2843,7 +2843,7 @@ void sched_fork(struct task_struct *p)
 #if defined(CONFIG_SMP)
 	p->on_cpu = 0;
 #endif
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 	/* Want to start with kernel preemption disabled. */
 	task_thread_info(p)->preempt_count = 1;
 #endif
