Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking changes from Ingo Molnar:
 "Various updates:

   - Futex scalability improvements: remove page lock use for shared
     futex get_futex_key(), which speeds up 'perf bench futex hash'
     benchmarks by over 40% on a 60-core Westmere.  This makes anon-mem
     shared futexes perform close to private futexes.  (Mel Gorman)

   - lockdep hash collision detection and fix (Alfredo Alvarez
     Fernandez)

   - lockdep testing enhancements (Alfredo Alvarez Fernandez)

   - robustify lockdep init by using hlists (Andrew Morton, Andrey
     Ryabinin)

   - mutex and csd_lock micro-optimizations (Davidlohr Bueso)

   - small x86 barriers tweaks (Michael S Tsirkin)

   - qspinlock updates (Waiman Long)"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (25 commits)
  locking/csd_lock: Use smp_cond_acquire() in csd_lock_wait()
  locking/csd_lock: Explicitly inline csd_lock*() helpers
  futex: Replace barrier() in unqueue_me() with READ_ONCE()
  locking/lockdep: Detect chain_key collisions
  locking/lockdep: Prevent chain_key collisions
  tools/lib/lockdep: Fix link creation warning
  tools/lib/lockdep: Add tests for AA and ABBA locking
  tools/lib/lockdep: Add userspace version of READ_ONCE()
  tools/lib/lockdep: Fix the build on recent kernels
  locking/qspinlock: Move __ARCH_SPIN_LOCK_UNLOCKED to qspinlock_types.h
  locking/mutex: Allow next waiter lockless wakeup
  locking/pvqspinlock: Enable slowpath locking count tracking
  locking/qspinlock: Use smp_cond_acquire() in pending code
  locking/pvqspinlock: Move lock stealing count tracking code into pv_queued_spin_steal_lock()
  locking/mcs: Fix mcs_spin_lock() ordering
  futex: Remove requirement for lock_page() in get_futex_key()
  futex: Rename barrier references in ordering guarantees
  locking/atomics: Update comment about READ_ONCE() and structures
  locking/lockdep: Eliminate lockdep_init()
  locking/lockdep: Convert hash tables to hlists
  ...
torvalds committed Mar 14, 2016
2 parents d37a14b + 38460a2 commit fbed0bc
Showing 32 changed files with 334 additions and 214 deletions.
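
Two of the commits above (the csd_lock change and the qspinlock pending-code change) switch open-coded spin-wait loops to smp_cond_acquire(). For reference, a sketch of that primitive as defined in <linux/compiler.h> around this release (illustrative; check the tree for the authoritative version):

#define smp_cond_acquire(cond)	do {		\
	while (!(cond))				\
		cpu_relax();			\
	smp_rmb(); /* ctrl + rmb := acquire */	\
} while (0)

/* e.g. the csd_lock commit reduces csd_lock_wait() to: */
static __always_inline void csd_lock_wait(struct call_single_data *csd)
{
	smp_cond_acquire(!(csd->flags & CSD_FLAG_LOCK));
}

The control dependency from the loop plus the read barrier upgrade the busy-wait to an ACQUIRE, so code after the wait cannot observe stale data from before the flag was cleared.
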
2 changes: 0 additions & 2 deletions arch/c6x/kernel/setup.c
@@ -281,8 +281,6 @@ notrace void __init machine_init(unsigned long dt_ptr)
 	 */
 	set_ist(_vectors_start);
 
-	lockdep_init();
-
 	/*
 	 * dtb is passed in from bootloader.
 	 * fdt is linked in blob.
2 changes: 0 additions & 2 deletions arch/microblaze/kernel/setup.c
@@ -130,8 +130,6 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
 	memset(__bss_start, 0, __bss_stop-__bss_start);
 	memset(_ssbss, 0, _esbss-_ssbss);
 
-	lockdep_init();
-
 	/* initialize device tree for usage in early_printk */
 	early_init_devtree(_fdt_start);
 
2 changes: 0 additions & 2 deletions arch/powerpc/kernel/setup_32.c
@@ -114,8 +114,6 @@ extern unsigned int memset_nocache_branch; /* Insn to be replaced by NOP */
 
 notrace void __init machine_init(u64 dt_ptr)
 {
-	lockdep_init();
-
 	/* Enable early debugging if any specified (see udbg.h) */
 	udbg_early_init();
 
3 changes: 0 additions & 3 deletions arch/powerpc/kernel/setup_64.c
@@ -255,9 +255,6 @@ void __init early_setup(unsigned long dt_ptr)
 	setup_paca(&boot_paca);
 	fixup_boot_paca();
 
-	/* Initialize lockdep early or else spinlocks will blow */
-	lockdep_init();
-
 	/* -------- printk is now safe to use ------- */
 
 	/* Enable early debugging if any specified (see udbg.h) */
1 change: 0 additions & 1 deletion arch/s390/kernel/early.c
@@ -448,7 +448,6 @@ void __init startup_init(void)
 	rescue_initrd();
 	clear_bss_section();
 	init_kernel_storage_key();
-	lockdep_init();
 	lockdep_off();
 	setup_lowcore_early();
 	setup_facility_list();
8 changes: 0 additions & 8 deletions arch/sparc/kernel/head_64.S
@@ -696,14 +696,6 @@ tlb_fixup_done:
 	call	__bzero
 	 sub	%o1, %o0, %o1
 
-#ifdef CONFIG_LOCKDEP
-	/* We have this call this super early, as even prom_init can grab
-	 * spinlocks and thus call into the lockdep code.
-	 */
-	call	lockdep_init
-	 nop
-#endif
-
 	call	prom_init
 	 mov	%l7, %o0	! OpenPROM cif handler
 
15 changes: 7 additions & 8 deletions arch/x86/include/asm/barrier.h
@@ -6,18 +6,17 @@
 
 /*
  * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
+ * And yes, this might be required on UP too when we're talking
  * to devices.
  */
 
 #ifdef CONFIG_X86_32
-/*
- * Some non-Intel clones support out of order store. wmb() ceases to be a
- * nop for these.
- */
-#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
-#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
-#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+#define mb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "mfence", \
+				      X86_FEATURE_XMM2) ::: "memory", "cc")
+#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "lfence", \
+				       X86_FEATURE_XMM2) ::: "memory", "cc")
+#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "sfence", \
+				       X86_FEATURE_XMM2) ::: "memory", "cc")
 #else
 #define mb()	asm volatile("mfence":::"memory")
 #define rmb()	asm volatile("lfence":::"memory")
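
On 32-bit kernels these macros patch, at boot, between a locked read-modify-write on the stack (for pre-SSE2 CPUs) and the dedicated fence instructions; the rewrite also spells out the "memory" and "cc" clobbers directly in the macro. A minimal userspace sketch of the two sequences mb() alternates between (assuming GCC/Clang on 32-bit x86; this is not the kernel macro itself):

static inline void mb_fallback(void)
{
	/* Any locked RMW acts as a full memory barrier on x86. */
	asm volatile("lock; addl $0,0(%%esp)" ::: "memory", "cc");
}

static inline void mb_sse2(void)
{
	/* Patched in by ALTERNATIVE() when X86_FEATURE_XMM2 is set. */
	asm volatile("mfence" ::: "memory");
}
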
4 changes: 2 additions & 2 deletions arch/x86/kernel/process.c
@@ -418,9 +418,9 @@ static void mwait_idle(void)
 	if (!current_set_polling_and_test()) {
 		trace_cpu_idle_rcuidle(1, smp_processor_id());
 		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
-			smp_mb(); /* quirk */
+			mb(); /* quirk */
 			clflush((void *)&current_thread_info()->flags);
-			smp_mb(); /* quirk */
+			mb(); /* quirk */
 		}
 
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
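
Context for this change: with CONFIG_SMP=n, smp_mb() compiles down to a compiler-only barrier(), while the CLFLUSH/MONITOR quirk needs a real fence even on uniprocessor kernels, hence mb(). A simplified sketch of the distinction, as assumed from the barrier headers of this era:

#ifdef CONFIG_SMP
#define smp_mb()	mb()		/* real fence; orders against other CPUs */
#else
#define smp_mb()	barrier()	/* compiler-only barrier on UP builds */
#endif
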
6 changes: 0 additions & 6 deletions arch/x86/lguest/boot.c
@@ -1520,12 +1520,6 @@ __init void lguest_init(void)
 	 */
 	reserve_top_address(lguest_data.reserve_mem);
 
-	/*
-	 * If we don't initialize the lock dependency checker now, it crashes
-	 * atomic_notifier_chain_register, then paravirt_disable_iospace.
-	 */
-	lockdep_init();
-
 	/* Hook in our special panic hypercall code. */
 	atomic_notifier_chain_register(&panic_notifier_list, &paniced);
 
5 changes: 0 additions & 5 deletions include/asm-generic/qspinlock.h
@@ -119,11 +119,6 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock)
 }
 #endif
 
-/*
- * Initializier
- */
-#define __ARCH_SPIN_LOCK_UNLOCKED	{ ATOMIC_INIT(0) }
-
 /*
  * Remapping spinlock architecture specific functions to the corresponding
  * queued spinlock functions.
5 changes: 5 additions & 0 deletions include/asm-generic/qspinlock_types.h
@@ -32,6 +32,11 @@ typedef struct qspinlock {
 	atomic_t	val;
 } arch_spinlock_t;
 
+/*
+ * Initializier
+ */
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ ATOMIC_INIT(0) }
+
 /*
  * Bitfields in the atomic value:
  *
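
With the initializer living next to the type, code that needs a statically initialized qspinlock only has to pull in the types header. A minimal sketch, assuming an arch where qspinlock provides arch_spinlock_t (demo_lock is a hypothetical name):

#include <asm-generic/qspinlock_types.h>

/* A zero val is the unlocked state, so this is pure build-time init. */
static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;
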
5 changes: 3 additions & 2 deletions include/linux/compiler.h
@@ -263,8 +263,9 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int size)
  * In contrast to ACCESS_ONCE these two macros will also work on aggregate
  * data types like structs or unions. If the size of the accessed data
  * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
- * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
- * compile-time warning.
+ * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There's at
+ * least two memcpy()s: one for the __builtin_memcpy() and then one for
+ * the macro doing the copy of variable - '__u' allocated on the stack.
  *
  * Their two major use cases are: (1) Mediating communication between
  * process-level code and irq/NMI handlers, all running on the same CPU,
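
A hedged userspace approximation of the aggregate path this new comment describes (READ_ONCE_SKETCH, struct pair and snapshot() are made-up names; the real macro lives in include/linux/compiler.h):

struct pair { int a, b; };

#define READ_ONCE_SKETCH(x)						\
({									\
	union { __typeof__(x) __val; char __c[sizeof(x)]; } __u;	\
	/* copy #1: __builtin_memcpy() out of the source */		\
	__builtin_memcpy(__u.__c, (const void *)&(x), sizeof(x));	\
	__u.__val;	/* copy #2: the union value is read back out */	\
})

static struct pair shared;

struct pair snapshot(void)
{
	/* Works on a whole struct, which a plain volatile cast cannot do. */
	return READ_ONCE_SKETCH(shared);
}
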
2 changes: 0 additions & 2 deletions include/linux/lockdep.h
@@ -261,7 +261,6 @@ struct held_lock {
 /*
  * Initialization, self-test and debugging-output methods:
  */
-extern void lockdep_init(void);
 extern void lockdep_info(void);
 extern void lockdep_reset(void);
 extern void lockdep_reset_lock(struct lockdep_map *lock);
@@ -392,7 +391,6 @@ static inline void lockdep_on(void)
 # define lockdep_set_current_reclaim_state(g)	do { } while (0)
 # define lockdep_clear_current_reclaim_state()	do { } while (0)
 # define lockdep_trace_alloc(g)			do { } while (0)
-# define lockdep_init()				do { } while (0)
 # define lockdep_info()				do { } while (0)
 # define lockdep_init_map(lock, name, key, sub) \
 		do { (void)(name); (void)(key); } while (0)
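
The declaration can disappear because, per the 'Convert hash tables to hlists' commit in the list above, lockdep's hash tables are now arrays of hlist heads, and an hlist head is a single pointer whose empty state is all-zeroes, so static storage is already initialized at build time. A sketch of the pattern (table name and size are illustrative, not the exact lockdep symbols):

#include <linux/list.h>

/* A zeroed BSS array of single-pointer heads is already a valid,
 * empty hash table; no early lockdep_init() pass is needed. */
static struct hlist_head chainhash_table[1 << 15];
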
5 changes: 0 additions & 5 deletions init/main.c
@@ -499,11 +499,6 @@ asmlinkage __visible void __init start_kernel(void)
 	char *command_line;
 	char *after_dashes;
 
-	/*
-	 * Need to run as early as possible, to initialize the
-	 * lockdep hash:
-	 */
-	lockdep_init();
 	set_task_stack_end_magic(&init_task);
 	smp_setup_processor_id();
 	debug_objects_early_init();