atomics: eliminate mb_read/mb_set
qatomic_mb_read and qatomic_mb_set were the very first atomic primitives
introduced for QEMU; their semantics are unclear and they provide a false
sense of safety.

The last use of qatomic_mb_read() has been removed, so delete it.
qatomic_mb_set() instead can survive as an optimized
qatomic_set()+smp_mb(), similar to Linux's smp_store_mb(), but
rename it to qatomic_set_mb() to match the order of the two
operations.

Reviewed-by: Richard Henderson <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
bonzini committed Jun 6, 2023
1 parent 09a49af commit 0683100
Showing 11 changed files with 20 additions and 46 deletions.
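
For readers unfamiliar with the primitive: qatomic_set_mb(ptr, val) is QEMU's counterpart of Linux's smp_store_mb(), a store followed by a full memory barrier. The following C11 sketch is illustrative only; the variable and function names are hypothetical and nothing below is part of this patch:

    #include <stdatomic.h>

    static _Atomic int ready;

    /* Roughly the ordering qatomic_set_mb(&ready, val) provides: the
     * store is performed, and no later load in this thread can be
     * reordered before it. */
    static void set_mb_sketch(int val)
    {
        atomic_store_explicit(&ready, val, memory_order_release);
        atomic_thread_fence(memory_order_seq_cst);
    }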
2 changes: 1 addition & 1 deletion accel/tcg/cpu-exec.c
@@ -774,7 +774,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
      * Ensure zeroing happens before reading cpu->exit_request or
      * cpu->interrupt_request (see also smp_wmb in cpu_exit())
      */
-    qatomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0);
+    qatomic_set_mb(&cpu_neg(cpu)->icount_decr.u16.high, 0);
 
     if (unlikely(qatomic_read(&cpu->interrupt_request))) {
         int interrupt_request;
2 changes: 1 addition & 1 deletion accel/tcg/tcg-accel-ops-mttcg.c
@@ -119,7 +119,7 @@ static void *mttcg_cpu_thread_fn(void *arg)
             }
         }
 
-        qatomic_mb_set(&cpu->exit_request, 0);
+        qatomic_set_mb(&cpu->exit_request, 0);
         qemu_wait_io_event(cpu);
     } while (!cpu->unplug || cpu_can_run(cpu));
 
4 changes: 2 additions & 2 deletions accel/tcg/tcg-accel-ops-rr.c
@@ -244,7 +244,7 @@ static void *rr_cpu_thread_fn(void *arg)
 
         while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) {
             /* Store rr_current_cpu before evaluating cpu_can_run(). */
-            qatomic_mb_set(&rr_current_cpu, cpu);
+            qatomic_set_mb(&rr_current_cpu, cpu);
 
             current_cpu = cpu;
 
@@ -287,7 +287,7 @@ static void *rr_cpu_thread_fn(void *arg)
         qatomic_set(&rr_current_cpu, NULL);
 
         if (cpu && cpu->exit_request) {
-            qatomic_mb_set(&cpu->exit_request, 0);
+            qatomic_set_mb(&cpu->exit_request, 0);
         }
 
         if (icount_enabled() && all_cpu_threads_idle()) {
27 changes: 4 additions & 23 deletions docs/devel/atomics.rst
@@ -102,28 +102,10 @@ Similar operations return the new value of ``*ptr``::
   typeof(*ptr) qatomic_or_fetch(ptr, val)
   typeof(*ptr) qatomic_xor_fetch(ptr, val)
 
-``qemu/atomic.h`` also provides loads and stores that cannot be reordered
-with each other::
+``qemu/atomic.h`` also provides an optimized shortcut for
+``qatomic_set`` followed by ``smp_mb``::
 
-  typeof(*ptr) qatomic_mb_read(ptr)
-  void qatomic_mb_set(ptr, val)
-
-However these do not provide sequential consistency and, in particular,
-they do not participate in the total ordering enforced by
-sequentially-consistent operations.  For this reason they are deprecated.
-They should instead be replaced with any of the following (ordered from
-easiest to hardest):
-
-- accesses inside a mutex or spinlock
-
-- lightweight synchronization primitives such as ``QemuEvent``
-
-- RCU operations (``qatomic_rcu_read``, ``qatomic_rcu_set``) when publishing
-  or accessing a new version of a data structure
-
-- other atomic accesses: ``qatomic_read`` and ``qatomic_load_acquire`` for
-  loads, ``qatomic_set`` and ``qatomic_store_release`` for stores, ``smp_mb``
-  to forbid reordering subsequent loads before a store.
+  void qatomic_set_mb(ptr, val)
 
 
 Weak atomic access and manual memory barriers
@@ -523,8 +505,7 @@ and memory barriers, and the equivalents in QEMU:
     |   ::                           |
     |                                |
     |     a = qatomic_read(&x);      |
-    |     qatomic_set(&x, a + 2);    |
-    |     smp_mb();                  |
+    |     qatomic_set_mb(&x, a + 2); |
     |     b = qatomic_read(&y);      |
     +--------------------------------+

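The rewritten table entry above mirrors Linux's smp_store_mb() idiom. The reason the trailing barrier matters is the store-buffering (Dekker) pattern: if two threads each store with qatomic_set_mb() and then read the variable the other thread wrote, at least one of them must observe the other's store. A C11 sketch of that guarantee, with hypothetical names, assuming nothing beyond standard atomics:

    #include <stdatomic.h>

    static _Atomic int x, y;

    static int thread_a(void)   /* returns the value it saw in y */
    {
        atomic_store_explicit(&x, 1, memory_order_release);
        atomic_thread_fence(memory_order_seq_cst);  /* qatomic_set_mb(&x, 1) */
        return atomic_load_explicit(&y, memory_order_relaxed);
    }

    static int thread_b(void)   /* returns the value it saw in x */
    {
        atomic_store_explicit(&y, 1, memory_order_release);
        atomic_thread_fence(memory_order_seq_cst);  /* qatomic_set_mb(&y, 1) */
        return atomic_load_explicit(&x, memory_order_relaxed);
    }

    /* If both run concurrently, they cannot both return 0. */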
17 changes: 5 additions & 12 deletions include/qemu/atomic.h
@@ -259,24 +259,17 @@
 # define smp_mb__after_rmw() smp_mb()
 #endif
 
-/* qatomic_mb_read/set semantics map Java volatile variables. They are
- * less expensive on some platforms (notably POWER) than fully
- * sequentially consistent operations.
- *
- * As long as they are used as paired operations they are safe to
- * use. See docs/devel/atomics.rst for more discussion.
+/*
+ * On some architectures, qatomic_set_mb is more efficient than a store
+ * plus a fence.
  */
 
-#define qatomic_mb_read(ptr) \
-    qatomic_load_acquire(ptr)
-
 #if !defined(QEMU_SANITIZE_THREAD) && \
     (defined(__i386__) || defined(__x86_64__) || defined(__s390x__))
-/* This is more efficient than a store plus a fence. */
-# define qatomic_mb_set(ptr, i) \
+# define qatomic_set_mb(ptr, i) \
     ({ (void)qatomic_xchg(ptr, i); smp_mb__after_rmw(); })
 #else
-# define qatomic_mb_set(ptr, i) \
+# define qatomic_set_mb(ptr, i) \
     ({ qatomic_store_release(ptr, i); smp_mb(); })
 #endif
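The two expansions above exist because on i386/x86_64 and s390x an atomic exchange is already a full barrier, so no separate fence instruction is needed after it, whereas the portable fallback pays for an explicit smp_mb(). A C11 sketch of the same trade-off (illustrative, hypothetical names; the instruction mapping in the comments describes x86):

    #include <stdatomic.h>

    static _Atomic int v;

    /* Exchange-based variant: a seq_cst exchange compiles to XCHG on
     * x86, which implies a full barrier, so no fence is emitted. */
    static void set_mb_xchg(int i)
    {
        (void)atomic_exchange_explicit(&v, i, memory_order_seq_cst);
    }

    /* Store-plus-fence variant: a plain store followed by an explicit
     * full fence (MFENCE on x86). */
    static void set_mb_store_fence(int i)
    {
        atomic_store_explicit(&v, i, memory_order_release);
        atomic_thread_fence(memory_order_seq_cst);
    }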
2 changes: 1 addition & 1 deletion monitor/qmp.c
@@ -246,7 +246,7 @@ static QMPRequest *monitor_qmp_dispatcher_pop_any(void)
      *
      * Clear qmp_dispatcher_co_busy before reading request.
      */
-    qatomic_mb_set(&qmp_dispatcher_co_busy, false);
+    qatomic_set_mb(&qmp_dispatcher_co_busy, false);
 
     WITH_QEMU_LOCK_GUARD(&monitor_lock) {
         QMPRequest *req_obj;
2 changes: 1 addition & 1 deletion softmmu/cpus.c
@@ -405,7 +405,7 @@ static void qemu_cpu_stop(CPUState *cpu, bool exit)
 
 void qemu_wait_io_event_common(CPUState *cpu)
 {
-    qatomic_mb_set(&cpu->thread_kicked, false);
+    qatomic_set_mb(&cpu->thread_kicked, false);
     if (cpu->stop) {
         qemu_cpu_stop(cpu, false);
     }
2 changes: 1 addition & 1 deletion softmmu/physmem.c
@@ -3132,7 +3132,7 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
     bounce.buffer = NULL;
     memory_region_unref(bounce.mr);
     /* Clear in_use before reading map_client_list. */
-    qatomic_mb_set(&bounce.in_use, false);
+    qatomic_set_mb(&bounce.in_use, false);
     cpu_notify_map_clients();
 }
 
2 changes: 1 addition & 1 deletion target/arm/hvf/hvf.c
@@ -1229,7 +1229,7 @@ static void hvf_wait_for_ipi(CPUState *cpu, struct timespec *ts)
      * Use pselect to sleep so that other threads can IPI us while we're
      * sleeping.
      */
-    qatomic_mb_set(&cpu->thread_kicked, false);
+    qatomic_set_mb(&cpu->thread_kicked, false);
     qemu_mutex_unlock_iothread();
     pselect(0, 0, 0, 0, ts, &cpu->hvf->unblock_ipi_mask);
     qemu_mutex_lock_iothread();
2 changes: 1 addition & 1 deletion tests/unit/test-aio-multithread.c
@@ -154,7 +154,7 @@ static coroutine_fn void test_multi_co_schedule_entry(void *opaque)
         n = g_test_rand_int_range(0, NUM_CONTEXTS);
         schedule_next(n);
 
-        qatomic_mb_set(&to_schedule[id], qemu_coroutine_self());
+        qatomic_set_mb(&to_schedule[id], qemu_coroutine_self());
         /* finish_cb can run here. */
         qemu_coroutine_yield();
         g_assert(to_schedule[id] == NULL);
4 changes: 2 additions & 2 deletions util/qemu-coroutine-lock.c
@@ -202,7 +202,7 @@ static void coroutine_fn qemu_co_mutex_lock_slowpath(AioContext *ctx,
     push_waiter(mutex, &w);
 
     /*
-     * Add waiter before reading mutex->handoff. Pairs with qatomic_mb_set
+     * Add waiter before reading mutex->handoff. Pairs with qatomic_set_mb
      * in qemu_co_mutex_unlock.
      */
     smp_mb__after_rmw();
@@ -310,7 +310,7 @@ void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)
 
         our_handoff = mutex->sequence;
         /* Set handoff before checking for waiters. */
-        qatomic_mb_set(&mutex->handoff, our_handoff);
+        qatomic_set_mb(&mutex->handoff, our_handoff);
         if (!has_waiters(mutex)) {
             /* The concurrent lock has not added itself yet, so it
              * will be able to pick our handoff.
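The pairing called out in the comments is the same Dekker-style pattern: the unlocker publishes mutex->handoff with a trailing barrier and then checks for waiters, while the locker registers a waiter (an atomic RMW followed by smp_mb__after_rmw()) and then reads mutex->handoff. With a full barrier between store and load on both sides, at least one side must see the other, so the handoff cannot be lost. A simplified, hypothetical C11 sketch of the protocol's shape (the real logic lives in qemu_co_mutex_unlock() and qemu_co_mutex_lock_slowpath()):

    #include <stdatomic.h>

    static _Atomic unsigned handoff;   /* stands in for mutex->handoff   */
    static _Atomic unsigned waiters;   /* stands in for the waiter queue */

    static _Bool unlock_side(unsigned seq)
    {
        /* qatomic_set_mb(&mutex->handoff, our_handoff) */
        atomic_store_explicit(&handoff, seq, memory_order_release);
        atomic_thread_fence(memory_order_seq_cst);
        /* has_waiters(): if no waiter is visible here, the concurrent
         * locker is guaranteed to see the handoff store instead. */
        return atomic_load_explicit(&waiters, memory_order_relaxed) != 0;
    }

    static _Bool lock_side(unsigned seq)
    {
        /* push_waiter() is an RMW; smp_mb__after_rmw() follows it. */
        atomic_fetch_add_explicit(&waiters, 1, memory_order_seq_cst);
        /* If the handoff is visible, the lock can be taken directly. */
        return atomic_load_explicit(&handoff, memory_order_relaxed) == seq;
    }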
