Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Catalin Marinas:

 - smp_mb__before_spinlock() changed to smp_mb() on arm64, since the
   generic definition based on smp_wmb() is not sufficient (an
   illustration of the ordering at stake follows the spinlock.h diff
   below)

 - avoid a recursive loop with the graph tracer by using
   preempt_(enable|disable)_notrace in _percpu_(read|write) (a toy
   model of the recursion follows the percpu.h diff below)

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: use preempt_disable_notrace in _percpu_read/write
  arm64: spinlocks: implement smp_mb__before_spinlock() as smp_mb()
torvalds committed Sep 9, 2016
2 parents 2771fc8 + 2b97434 commit e45eeb4
Showing 2 changed files with 14 additions and 4 deletions.
8 changes: 4 additions & 4 deletions arch/arm64/include/asm/percpu.h
@@ -199,19 +199,19 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
 #define _percpu_read(pcp)                                          \
 ({                                                                 \
         typeof(pcp) __retval;                                      \
-        preempt_disable();                                         \
+        preempt_disable_notrace();                                 \
         __retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)), \
                                               sizeof(pcp));        \
-        preempt_enable();                                          \
+        preempt_enable_notrace();                                  \
         __retval;                                                  \
 })
 
 #define _percpu_write(pcp, val)                                    \
 do {                                                               \
-        preempt_disable();                                         \
+        preempt_disable_notrace();                                 \
         __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val),  \
                        sizeof(pcp));                               \
-        preempt_enable();                                          \
+        preempt_enable_notrace();                                  \
 } while(0)                                                         \
 
 #define _pcp_protect(operation, pcp, val)                          \
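As an aside on the percpu.h change above: the function graph tracer's hooks run on every instrumented call, and per this fix they themselves end up going through the per-CPU accessors; if the preempt_disable()/preempt_enable() pair inside _percpu_read() is traced too, each hook invocation re-enters the hook. The following is a minimal userspace toy model of that loop, not kernel code; every name in it (graph_tracer_hook, traced_preempt_disable, and so on) is a hypothetical stand-in. The _notrace path does the same bookkeeping without calling back into the tracer, which is what breaks the cycle.

/* Toy model: a tracer hook that itself performs a "per-CPU" read. */
#include <stdio.h>

static int depth;          /* tracer re-entry depth, for the demo   */
static int percpu_counter; /* stands in for a per-CPU variable      */

static void graph_tracer_hook(const char *fn);

/* "Traced" preempt helpers: each call fires the tracer hook. */
static void traced_preempt_disable(void) { graph_tracer_hook("preempt_disable"); }
static void traced_preempt_enable(void)  { graph_tracer_hook("preempt_enable"); }

/* notrace variants: same effect, invisible to the tracer. */
static void notrace_preempt_disable(void) { }
static void notrace_preempt_enable(void)  { }

static int percpu_read_traced(void)
{
        int v;
        traced_preempt_disable();   /* re-enters the tracer: recursion */
        v = percpu_counter;
        traced_preempt_enable();
        return v;
}

static int percpu_read_notrace(void)
{
        int v;
        notrace_preempt_disable();  /* no hook call: the loop is broken */
        v = percpu_counter;
        notrace_preempt_enable();
        return v;
}

static void graph_tracer_hook(const char *fn)
{
        static int reported;

        if (++depth > 3) {          /* cap what would recurse forever */
                if (!reported) {
                        printf("recursive loop at depth %d (via %s)\n",
                               depth, fn);
                        reported = 1;
                }
                depth--;
                return;
        }
        /* The hook reads per-CPU state; if that read is itself traced,
         * control comes straight back here. */
        (void)percpu_read_traced();
        depth--;
}

int main(void)
{
        percpu_read_traced();       /* demonstrates the loop */
        depth = 0;
        percpu_read_notrace();      /* the fixed path: no tracer re-entry */
        return 0;
}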
10 changes: 10 additions & 0 deletions arch/arm64/include/asm/spinlock.h
@@ -363,4 +363,14 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 #define arch_read_relax(lock)  cpu_relax()
 #define arch_write_relax(lock) cpu_relax()
 
+/*
+ * Accesses appearing in program order before a spin_lock() operation
+ * can be reordered with accesses inside the critical section, by virtue
+ * of arch_spin_lock being constructed using acquire semantics.
+ *
+ * In cases where this is problematic (e.g. try_to_wake_up), an
+ * smp_mb__before_spinlock() can restore the required ordering.
+ */
+#define smp_mb__before_spinlock()       smp_mb()
+
 #endif /* __ASM_SPINLOCK_H */
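On the spinlock.h change: the comment above describes the hazard in try_to_wake_up(), where the waker stores the wakeup condition before taking pi_lock and then loads the sleeper's state inside the critical section. The lock's acquire semantics (or a store-only barrier, as in the old generic smp_wmb()-based definition) do not stop that store from being reordered after the load, so the waker can read a stale "not sleeping" state while the sleeper reads a stale "no condition" value, losing the wakeup; a full smp_mb() forbids this. Below is a hedged userspace C11 model of that store-buffering pattern, with stand-in names (cond_var, task_state) rather than kernel code: with both fences at memory_order_seq_cst the "lost wakeup" outcome can never occur, whereas weakening the waker's fence (e.g. to memory_order_release, the closest C11 analogue of a write-only barrier) would permit it.

/* Store-buffering model of the try_to_wake_up() ordering requirement.
 * Build with: cc -pthread sb.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int cond_var;   /* "CONDITION" published by the waker  */
static atomic_int task_state; /* "p->state" published by the sleeper */
static int r_waker, r_sleeper;

static void *waker(void *arg)
{
        /* Publish the condition... */
        atomic_store_explicit(&cond_var, 1, memory_order_relaxed);
        /* smp_mb__before_spinlock() as a full barrier: without it, the
         * load below could be satisfied before the store above becomes
         * visible (store->load reordering). */
        atomic_thread_fence(memory_order_seq_cst);
        /* ...then inspect the sleeper's state (in the kernel this load
         * sits inside pi_lock; the lock's acquire alone would not stop
         * the reordering). */
        r_waker = atomic_load_explicit(&task_state, memory_order_relaxed);
        return NULL;
}

static void *sleeper(void *arg)
{
        /* set_current_state(TASK_INTERRUPTIBLE)-style store... */
        atomic_store_explicit(&task_state, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);  /* smp_mb() */
        /* ...then re-check the condition before deciding to sleep. */
        r_sleeper = atomic_load_explicit(&cond_var, memory_order_relaxed);
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, waker, NULL);
        pthread_create(&b, NULL, sleeper, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);

        /* With both seq_cst fences at least one thread must observe the
         * other's store, so this line is unreachable.  (A single run
         * rarely exhibits reordering anyway; a real litmus test loops.) */
        if (r_waker == 0 && r_sleeper == 0)
                printf("lost wakeup: both sides read stale values\n");
        return 0;
}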
