locking/rwsem: Use acquire/release semantics
As of 654672d (locking/atomics: Add _{acquire|release|relaxed}()
variants of some atomic operations) and 6d79ef2 (locking, asm-generic:
Add _{relaxed|acquire|release}() variants for 'atomic_long_t'), weakly
ordered archs can benefit from more relaxed use of barriers when locking
and unlocking, instead of regular full-barrier semantics. While currently
only arm64 supports such optimizations, updating the corresponding locking
primitives now means that other archs benefit immediately once the
necessary machinery is implemented.
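
[Editor's note, not part of the commit: a minimal userspace sketch of the pattern this patch adopts, using C11 atomics as an analogy for the kernel's _acquire/_release atomic variants. The lock path uses ACQUIRE ordering, the unlock path uses RELEASE ordering, and no full barriers are issued. The toy_lock()/toy_unlock() names and the simple spinlock shape are invented for illustration; this is not the rwsem implementation.]

/* Illustrative sketch only: acquire on lock, release on unlock (C11 atomics). */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int locked = 0;	/* 0 = free, 1 = held */
static int shared_data;

static void toy_lock(void)
{
	/* ACQUIRE: critical-section accesses cannot move before this. */
	while (atomic_exchange_explicit(&locked, 1, memory_order_acquire))
		;	/* spin */
}

static void toy_unlock(void)
{
	/* RELEASE: critical-section accesses cannot move past this. */
	atomic_store_explicit(&locked, 0, memory_order_release);
}

int main(void)
{
	toy_lock();
	shared_data = 42;	/* protected access */
	toy_unlock();
	printf("%d\n", shared_data);
	return 0;
}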

Signed-off-by: Davidlohr Bueso <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Thomas Gleixner <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
Davidlohr Bueso authored and Ingo Molnar committed Oct 6, 2015
1 parent 3552a07 commit 00eb4ba
Showing 2 changed files with 17 additions and 9 deletions.
21 changes: 14 additions & 7 deletions include/asm-generic/rwsem.h
@@ -33,7 +33,7 @@
  */
 static inline void __down_read(struct rw_semaphore *sem)
 {
-	if (unlikely(atomic_long_inc_return((atomic_long_t *)&sem->count) <= 0))
+	if (unlikely(atomic_long_inc_return_acquire((atomic_long_t *)&sem->count) <= 0))
 		rwsem_down_read_failed(sem);
 }
 
@@ -42,7 +42,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 	long tmp;
 
 	while ((tmp = sem->count) >= 0) {
-		if (tmp == cmpxchg(&sem->count, tmp,
+		if (tmp == cmpxchg_acquire(&sem->count, tmp,
 				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
 			return 1;
 		}
@@ -57,7 +57,7 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
 	long tmp;
 
-	tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS,
+	tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
 				     (atomic_long_t *)&sem->count);
 	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
 		rwsem_down_write_failed(sem);
@@ -72,7 +72,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
 	long tmp;
 
-	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
+	tmp = cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
 		      RWSEM_ACTIVE_WRITE_BIAS);
 	return tmp == RWSEM_UNLOCKED_VALUE;
 }
@@ -84,7 +84,7 @@ static inline void __up_read(struct rw_semaphore *sem)
 {
 	long tmp;
 
-	tmp = atomic_long_dec_return((atomic_long_t *)&sem->count);
+	tmp = atomic_long_dec_return_release((atomic_long_t *)&sem->count);
 	if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
 		rwsem_wake(sem);
 }
@@ -94,7 +94,7 @@ static inline void __up_read(struct rw_semaphore *sem)
  */
 static inline void __up_write(struct rw_semaphore *sem)
 {
-	if (unlikely(atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
+	if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
 				 (atomic_long_t *)&sem->count) < 0))
 		rwsem_wake(sem);
 }
@@ -114,7 +114,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 {
 	long tmp;
 
-	tmp = atomic_long_add_return(-RWSEM_WAITING_BIAS,
+	/*
+	 * When downgrading from exclusive to shared ownership,
+	 * anything inside the write-locked region cannot leak
+	 * into the read side. In contrast, anything in the
+	 * read-locked region is ok to be re-ordered into the
+	 * write side. As such, rely on RELEASE semantics.
+	 */
+	tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS,
 					(atomic_long_t *)&sem->count);
 	if (tmp < 0)
 		rwsem_downgrade_wake(sem);
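
[Editor's note, not part of the commit: to make the __downgrade_write() comment above concrete, here is a hedged userspace analogy with C11 atomics and pthreads, not kernel code. The downgrade acts as a publication point: a store-release on the count pairs with the readers' load-acquire, so everything written while write-locked is visible to readers, while accesses after the downgrade may harmlessly move earlier. The payload/published names are invented for the example.]

/* Illustrative sketch only: why RELEASE suffices when publishing to readers. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int payload;			/* written inside the "write-locked" region */
static atomic_int published = 0;	/* stands in for the downgraded sem->count */

static void *writer(void *arg)
{
	(void)arg;
	payload = 42;				/* write-side work */
	atomic_store_explicit(&published, 1,	/* "downgrade": RELEASE only */
			      memory_order_release);
	return NULL;
}

static void *reader(void *arg)
{
	(void)arg;
	while (!atomic_load_explicit(&published, memory_order_acquire))
		;				/* "read lock": ACQUIRE */
	printf("%d\n", payload);		/* guaranteed to see 42 */
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}
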
5 changes: 3 additions & 2 deletions kernel/locking/rwsem-xadd.c
@@ -262,7 +262,7 @@ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
 	 * to reduce unnecessary expensive cmpxchg() operations.
 	 */
 	if (count == RWSEM_WAITING_BIAS &&
-	    cmpxchg(&sem->count, RWSEM_WAITING_BIAS,
+	    cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS,
 		    RWSEM_ACTIVE_WRITE_BIAS) == RWSEM_WAITING_BIAS) {
 		if (!list_is_singular(&sem->wait_list))
 			rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);
@@ -285,7 +285,8 @@ static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
 	if (!(count == 0 || count == RWSEM_WAITING_BIAS))
 		return false;
 
-	old = cmpxchg(&sem->count, count, count + RWSEM_ACTIVE_WRITE_BIAS);
+	old = cmpxchg_acquire(&sem->count, count,
+			      count + RWSEM_ACTIVE_WRITE_BIAS);
 	if (old == count) {
 		rwsem_set_owner(sem);
 		return true;
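
[Editor's note, not part of the commit: in the slowpath cmpxchg()s above, only the successful transition actually takes the lock, so ACQUIRE ordering on the cmpxchg is sufficient, and a failed attempt grants no ownership and needs no ordering. A hedged userspace sketch of that pattern with C11 compare-exchange (success = acquire, failure = relaxed); try_write_lock() and the plain 0/1 counter are assumptions for illustration.]

/* Illustrative sketch only: trylock with ACQUIRE on success, relaxed on failure. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_long count = 0;	/* 0 = unlocked, 1 = write-locked (toy bias) */

static bool try_write_lock(void)
{
	long expected = 0;

	/* Success needs ACQUIRE so the critical section cannot float above it;
	 * failure grants no ownership, so relaxed ordering is enough. */
	return atomic_compare_exchange_strong_explicit(&count, &expected, 1,
						       memory_order_acquire,
						       memory_order_relaxed);
}

static void write_unlock(void)
{
	atomic_store_explicit(&count, 0, memory_order_release);
}

int main(void)
{
	if (try_write_lock()) {
		/* ... exclusive work ... */
		write_unlock();
	}
	printf("done\n");
	return 0;
}
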
