Skip to content

Commit

Permalink
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Browse files Browse the repository at this point in the history

Pull core locking changes from Ingo Molnar:
 "Main changes:

   - jump label asm preparatory work for PowerPC (Anton Blanchard)

   - rwsem optimizations and cleanups (Davidlohr Bueso)

   - mutex optimizations and cleanups (Jason Low)

   - futex fix (Oleg Nesterov)

   - remove broken atomicity checks from {READ,WRITE}_ONCE() (Peter
     Zijlstra)"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  powerpc, jump_label: Include linux/jump_label.h to get HAVE_JUMP_LABEL define
  jump_label: Allow jump labels to be used in assembly
  jump_label: Allow asm/jump_label.h to be included in assembly
  locking/mutex: Further simplify mutex_spin_on_owner()
  locking: Remove atomicy checks from {READ,WRITE}_ONCE
  locking/rtmutex: Rename argument in the rt_mutex_adjust_prio_chain() documentation as well
  locking/rwsem: Fix lock optimistic spinning when owner is not running
  locking: Remove ACCESS_ONCE() usage
  locking/rwsem: Check for active lock before bailing on spinning
  locking/rwsem: Avoid deceiving lock spinners
  locking/rwsem: Set lock ownership ASAP
  locking/rwsem: Document barrier need when waking tasks
  locking/futex: Check PF_KTHREAD rather than !p->mm to filter out kthreads
  locking/mutex: Refactor mutex_spin_on_owner()
  locking/mutex: In mutex_spin_on_owner(), return true when owner changes
  • Loading branch information
torvalds committed Apr 13, 2015
2 parents 9c65e12 + 58995a9 commit cc76ee7
Show file tree
Hide file tree
Showing 23 changed files with 160 additions and 147 deletions.
1 change: 1 addition & 0 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -779,6 +779,7 @@ KBUILD_ARFLAGS := $(call ar-option,D)
# check for 'asm goto'
ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
endif

include $(srctree)/scripts/Makefile.kasan
Expand Down
5 changes: 2 additions & 3 deletions arch/arm/include/asm/jump_label.h
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
#ifndef _ASM_ARM_JUMP_LABEL_H
#define _ASM_ARM_JUMP_LABEL_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/types.h>

Expand All @@ -27,8 +27,6 @@ static __always_inline bool arch_static_branch(struct static_key *key)
return true;
}

#endif /* __KERNEL__ */

typedef u32 jump_label_t;

struct jump_entry {
Expand All @@ -37,4 +35,5 @@ struct jump_entry {
jump_label_t key;
};

#endif /* __ASSEMBLY__ */
#endif
8 changes: 4 additions & 4 deletions arch/arm64/include/asm/jump_label.h
Original file line number Diff line number Diff line change
Expand Up @@ -18,11 +18,12 @@
*/
#ifndef __ASM_JUMP_LABEL_H
#define __ASM_JUMP_LABEL_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <asm/insn.h>

#ifdef __KERNEL__

#define JUMP_LABEL_NOP_SIZE AARCH64_INSN_SIZE

static __always_inline bool arch_static_branch(struct static_key *key)
Expand All @@ -39,8 +40,6 @@ static __always_inline bool arch_static_branch(struct static_key *key)
return true;
}

#endif /* __KERNEL__ */

typedef u64 jump_label_t;

struct jump_entry {
Expand All @@ -49,4 +48,5 @@ struct jump_entry {
jump_label_t key;
};

#endif /* __ASSEMBLY__ */
#endif /* __ASM_JUMP_LABEL_H */
7 changes: 3 additions & 4 deletions arch/mips/include/asm/jump_label.h
Original file line number Diff line number Diff line change
Expand Up @@ -8,9 +8,9 @@
#ifndef _ASM_MIPS_JUMP_LABEL_H
#define _ASM_MIPS_JUMP_LABEL_H

#include <linux/types.h>
#ifndef __ASSEMBLY__

#ifdef __KERNEL__
#include <linux/types.h>

#define JUMP_LABEL_NOP_SIZE 4

Expand Down Expand Up @@ -39,8 +39,6 @@ static __always_inline bool arch_static_branch(struct static_key *key)
return true;
}

#endif /* __KERNEL__ */

#ifdef CONFIG_64BIT
typedef u64 jump_label_t;
#else
Expand All @@ -53,4 +51,5 @@ struct jump_entry {
jump_label_t key;
};

#endif /* __ASSEMBLY__ */
#endif /* _ASM_MIPS_JUMP_LABEL_H */
2 changes: 1 addition & 1 deletion arch/powerpc/platforms/powernv/opal-wrappers.S
Original file line number Diff line number Diff line change
Expand Up @@ -9,11 +9,11 @@
* 2 of the License, or (at your option) any later version.
*/

#include <linux/jump_label.h>
#include <asm/ppc_asm.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/opal.h>
#include <asm/jump_label.h>

.section ".text"

Expand Down
2 changes: 1 addition & 1 deletion arch/powerpc/platforms/pseries/hvCall.S
Original file line number Diff line number Diff line change
Expand Up @@ -7,12 +7,12 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/jump_label.h>
#include <asm/hvcall.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/jump_label.h>

.section ".text"

Expand Down
2 changes: 1 addition & 1 deletion arch/powerpc/platforms/pseries/lpar.c
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@
#include <linux/dma-mapping.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/static_key.h>
#include <linux/jump_label.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/page.h>
Expand Down
3 changes: 3 additions & 0 deletions arch/s390/include/asm/jump_label.h
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
#ifndef _ASM_S390_JUMP_LABEL_H
#define _ASM_S390_JUMP_LABEL_H

#ifndef __ASSEMBLY__

#include <linux/types.h>

#define JUMP_LABEL_NOP_SIZE 6
Expand Down Expand Up @@ -39,4 +41,5 @@ struct jump_entry {
jump_label_t key;
};

#endif /* __ASSEMBLY__ */
#endif
5 changes: 2 additions & 3 deletions arch/sparc/include/asm/jump_label.h
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
#ifndef _ASM_SPARC_JUMP_LABEL_H
#define _ASM_SPARC_JUMP_LABEL_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/types.h>

Expand All @@ -22,8 +22,6 @@ static __always_inline bool arch_static_branch(struct static_key *key)
return true;
}

#endif /* __KERNEL__ */

typedef u32 jump_label_t;

struct jump_entry {
Expand All @@ -32,4 +30,5 @@ struct jump_entry {
jump_label_t key;
};

#endif /* __ASSEMBLY__ */
#endif
5 changes: 2 additions & 3 deletions arch/x86/include/asm/jump_label.h
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
#ifndef _ASM_X86_JUMP_LABEL_H
#define _ASM_X86_JUMP_LABEL_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/stringify.h>
#include <linux/types.h>
Expand Down Expand Up @@ -30,8 +30,6 @@ static __always_inline bool arch_static_branch(struct static_key *key)
return true;
}

#endif /* __KERNEL__ */

#ifdef CONFIG_X86_64
typedef u64 jump_label_t;
#else
Expand All @@ -44,4 +42,5 @@ struct jump_entry {
jump_label_t key;
};

#endif /* __ASSEMBLY__ */
#endif
16 changes: 0 additions & 16 deletions include/linux/compiler.h
Original file line number Diff line number Diff line change
Expand Up @@ -192,29 +192,16 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);

#include <uapi/linux/types.h>

static __always_inline void data_access_exceeds_word_size(void)
#ifdef __compiletime_warning
__compiletime_warning("data access exceeds word size and won't be atomic")
#endif
;

static __always_inline void data_access_exceeds_word_size(void)
{
}

static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
{
switch (size) {
case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
#ifdef CONFIG_64BIT
case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
#endif
default:
barrier();
__builtin_memcpy((void *)res, (const void *)p, size);
data_access_exceeds_word_size();
barrier();
}
}
Expand All @@ -225,13 +212,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
#ifdef CONFIG_64BIT
case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
#endif
default:
barrier();
__builtin_memcpy((void *)p, (const void *)res, size);
data_access_exceeds_word_size();
barrier();
}
}
Expand Down
21 changes: 17 additions & 4 deletions include/linux/jump_label.h
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,12 @@
* same as using STATIC_KEY_INIT_FALSE.
*/

#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
# define HAVE_JUMP_LABEL
#endif

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/bug.h>
Expand All @@ -55,7 +61,7 @@ extern bool static_key_initialized;
"%s used before call to jump_label_init", \
__func__)

#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
#ifdef HAVE_JUMP_LABEL

struct static_key {
atomic_t enabled;
Expand All @@ -66,13 +72,18 @@ struct static_key {
#endif
};

# include <asm/jump_label.h>
# define HAVE_JUMP_LABEL
#else
struct static_key {
atomic_t enabled;
};
#endif /* CC_HAVE_ASM_GOTO && CONFIG_JUMP_LABEL */
#endif /* HAVE_JUMP_LABEL */
#endif /* __ASSEMBLY__ */

#ifdef HAVE_JUMP_LABEL
#include <asm/jump_label.h>
#endif

#ifndef __ASSEMBLY__

enum jump_label_type {
JUMP_LABEL_DISABLE = 0,
Expand Down Expand Up @@ -203,3 +214,5 @@ static inline bool static_key_enabled(struct static_key *key)
}

#endif /* _LINUX_JUMP_LABEL_H */

#endif /* __ASSEMBLY__ */
6 changes: 3 additions & 3 deletions include/linux/seqlock.h
Original file line number Diff line number Diff line change
Expand Up @@ -108,7 +108,7 @@ static inline unsigned __read_seqcount_begin(const seqcount_t *s)
unsigned ret;

repeat:
ret = ACCESS_ONCE(s->sequence);
ret = READ_ONCE(s->sequence);
if (unlikely(ret & 1)) {
cpu_relax();
goto repeat;
Expand All @@ -127,7 +127,7 @@ static inline unsigned __read_seqcount_begin(const seqcount_t *s)
*/
static inline unsigned raw_read_seqcount(const seqcount_t *s)
{
unsigned ret = ACCESS_ONCE(s->sequence);
unsigned ret = READ_ONCE(s->sequence);
smp_rmb();
return ret;
}
Expand Down Expand Up @@ -179,7 +179,7 @@ static inline unsigned read_seqcount_begin(const seqcount_t *s)
*/
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
unsigned ret = ACCESS_ONCE(s->sequence);
unsigned ret = READ_ONCE(s->sequence);
smp_rmb();
return ret & ~1;
}
Expand Down
2 changes: 1 addition & 1 deletion kernel/futex.c
Original file line number Diff line number Diff line change
Expand Up @@ -900,7 +900,7 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
if (!p)
return -ESRCH;

if (!p->mm) {
if (unlikely(p->flags & PF_KTHREAD)) {
put_task_struct(p);
return -EPERM;
}
Expand Down
6 changes: 3 additions & 3 deletions kernel/locking/mcs_spinlock.h
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
*/
return;
}
ACCESS_ONCE(prev->next) = node;
WRITE_ONCE(prev->next, node);

/* Wait until the lock holder passes the lock down. */
arch_mcs_spin_lock_contended(&node->locked);
Expand All @@ -91,7 +91,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
static inline
void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
struct mcs_spinlock *next = ACCESS_ONCE(node->next);
struct mcs_spinlock *next = READ_ONCE(node->next);

if (likely(!next)) {
/*
Expand All @@ -100,7 +100,7 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
if (likely(cmpxchg(lock, node, NULL) == node))
return;
/* Wait until the next pointer is set */
while (!(next = ACCESS_ONCE(node->next)))
while (!(next = READ_ONCE(node->next)))
cpu_relax_lowlatency();
}

Expand Down
Loading

0 comments on commit cc76ee7

Please sign in to comment.