ovs-atomic: Use raw types, not structs, when locks are required.
Until now, the GCC 4+ and pthreads implementations of atomics have used
struct wrappers for their atomic types.  This had the advantage of allowing
a mutex to be wrapped in where one was needed, and of better type-checking
by preventing stray uses of atomic variables other than through one of the
atomic_*() functions or macros.  However, the mutex meant that an
atomic_destroy() function-like macro needed to be used.  The struct wrapper
also made it impossible to define new atomic types that were compatible
with each other without using a typedef.  For example, one could not simply
define a macro like
    #define ATOMIC(TYPE) struct { TYPE value; }
and then have two declarations like:
    ATOMIC(void *) x;
    ATOMIC(void *) y;
and then do anything with these objects that requires type-compatibility,
even "&x == &y", because the two structs are not compatible.  One can do it
through a typedef:
    typedef ATOMIC(void *) atomic_voidp;
    atomic_voidp x, y;
but that is inconvenient, especially because of the need to invent a name
for the type.

This commit aims to ease the problem by getting rid of the wrapper structs
in the implementations that used them.  Where locking is still needed, it
replaces the embedded mutexes with a global array of mutexes, indexed by a
hash of the atomic variable's address.
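
As an illustration, the locked fallback might expand roughly as follows.
This is only a sketch, not the committed code: the real macros live in
lib/ovs-atomic-locked.h, which this page does not show; atomic_lock__()
and atomic_unlock__() are the helpers added in lib/ovs-atomic-locked.c
below:

    /* Sketch only.  atomic_lock__() hashes the object's address to
     * choose one mutex from the global pool and acquires it. */
    #define atomic_store_locked(DST, SRC)       \
        (atomic_lock__(DST),                    \
         *(DST) = (SRC),                        \
         atomic_unlock__(DST),                  \
         (void) 0)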

This commit also defines the ATOMIC macro described above and documents
its use in ovs-atomic.h.
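
With ATOMIC(TYPE) expanding to a raw type (under the GCC 4+ implementation
in this commit, simply to TYPE itself), the earlier example works with no
typedef:

    ATOMIC(void *) x;
    ATOMIC(void *) y;

    /* OK: x and y are both plain "void *", so their types match. */
    bool same = (&x == &y);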

Signed-off-by: Ben Pfaff <[email protected]>
Acked-by: Andy Zhou <[email protected]>
blp committed Mar 13, 2014
1 parent 7d53f6b commit 1bd2c9e
Showing 7 changed files with 175 additions and 258 deletions.
3 changes: 2 additions & 1 deletion lib/automake.mk
@@ -139,9 +139,10 @@ lib_libopenvswitch_la_SOURCES = \
	lib/ovs-atomic-c11.h \
	lib/ovs-atomic-clang.h \
	lib/ovs-atomic-flag-gcc4.7+.h \
	lib/ovs-atomic-gcc4+.c \
	lib/ovs-atomic-gcc4+.h \
	lib/ovs-atomic-gcc4.7+.h \
	lib/ovs-atomic-locked.c \
	lib/ovs-atomic-locked.h \
	lib/ovs-atomic-pthreads.c \
	lib/ovs-atomic-pthreads.h \
	lib/ovs-atomic-types.h \
68 changes: 0 additions & 68 deletions lib/ovs-atomic-gcc4+.c

This file was deleted.

198 changes: 53 additions & 145 deletions lib/ovs-atomic-gcc4+.h
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2013 Nicira, Inc.
* Copyright (c) 2013, 2014 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,88 +19,19 @@
#error "This header should only be included indirectly via ovs-atomic.h."
#endif

#include "ovs-atomic-locked.h"
#define OVS_ATOMIC_GCC4P_IMPL 1

#define DEFINE_LOCKLESS_ATOMIC(TYPE, NAME) typedef struct { TYPE value; } NAME
#define ATOMIC(TYPE) TYPE
#include "ovs-atomic-types.h"

#define ATOMIC_BOOL_LOCK_FREE 2
DEFINE_LOCKLESS_ATOMIC(bool, atomic_bool);

#define ATOMIC_CHAR_LOCK_FREE 2
DEFINE_LOCKLESS_ATOMIC(char, atomic_char);
DEFINE_LOCKLESS_ATOMIC(signed char, atomic_schar);
DEFINE_LOCKLESS_ATOMIC(unsigned char, atomic_uchar);

#define ATOMIC_SHORT_LOCK_FREE 2
DEFINE_LOCKLESS_ATOMIC(short, atomic_short);
DEFINE_LOCKLESS_ATOMIC(unsigned short, atomic_ushort);

#define ATOMIC_INT_LOCK_FREE 2
DEFINE_LOCKLESS_ATOMIC(int, atomic_int);
DEFINE_LOCKLESS_ATOMIC(unsigned int, atomic_uint);

#if ULONG_MAX <= UINTPTR_MAX
#define ATOMIC_LONG_LOCK_FREE 2
DEFINE_LOCKLESS_ATOMIC(long, atomic_long);
DEFINE_LOCKLESS_ATOMIC(unsigned long, atomic_ulong);
#elif ULONG_MAX == UINT64_MAX
#define ATOMIC_LONG_LOCK_FREE 0
typedef struct locked_int64 atomic_long;
typedef struct locked_uint64 atomic_ulong;
#else
#error "not implemented"
#endif

#if ULLONG_MAX <= UINTPTR_MAX
#define ATOMIC_LLONG_LOCK_FREE 2
DEFINE_LOCKLESS_ATOMIC(long long, atomic_llong);
DEFINE_LOCKLESS_ATOMIC(unsigned long long, atomic_ullong);
#elif ULLONG_MAX == UINT64_MAX
#define ATOMIC_LLONG_LOCK_FREE 0
typedef struct locked_int64 atomic_llong;
typedef struct locked_uint64 atomic_ullong;
#else
#error "not implemented"
#endif

#if SIZE_MAX <= UINTPTR_MAX
DEFINE_LOCKLESS_ATOMIC(size_t, atomic_size_t);
DEFINE_LOCKLESS_ATOMIC(ptrdiff_t, atomic_ptrdiff_t);
#elif SIZE_MAX == UINT64_MAX
typedef struct locked_uint64 atomic_size_t;
typedef struct locked_int64 atomic_ptrdiff_t;
#else
#error "not implemented"
#endif

#if UINTMAX_MAX <= UINTPTR_MAX
DEFINE_LOCKLESS_ATOMIC(intmax_t, atomic_intmax_t);
DEFINE_LOCKLESS_ATOMIC(uintmax_t, atomic_uintmax_t);
#elif UINTMAX_MAX == UINT64_MAX
typedef struct locked_int64 atomic_intmax_t;
typedef struct locked_uint64 atomic_uintmax_t;
#else
#error "not implemented"
#endif

#define ATOMIC_LONG_LOCK_FREE (ULONG_MAX <= UINTPTR_MAX ? 2 : 0)
#define ATOMIC_LLONG_LOCK_FREE (ULLONG_MAX <= UINTPTR_MAX ? 2 : 0)
#define ATOMIC_POINTER_LOCK_FREE 2
DEFINE_LOCKLESS_ATOMIC(intptr_t, atomic_intptr_t);
DEFINE_LOCKLESS_ATOMIC(uintptr_t, atomic_uintptr_t);

/* Nonstandard atomic types. */
DEFINE_LOCKLESS_ATOMIC(uint8_t, atomic_uint8_t);
DEFINE_LOCKLESS_ATOMIC(uint16_t, atomic_uint16_t);
DEFINE_LOCKLESS_ATOMIC(uint32_t, atomic_uint32_t);
DEFINE_LOCKLESS_ATOMIC(int8_t, atomic_int8_t);
DEFINE_LOCKLESS_ATOMIC(int16_t, atomic_int16_t);
DEFINE_LOCKLESS_ATOMIC(int32_t, atomic_int32_t);
#if UINT64_MAX <= UINTPTR_MAX
DEFINE_LOCKLESS_ATOMIC(uint64_t, atomic_uint64_t);
DEFINE_LOCKLESS_ATOMIC(int64_t, atomic_int64_t);
#else
typedef struct locked_uint64 atomic_uint64_t;
typedef struct locked_int64 atomic_int64_t;
#endif

typedef enum {
    memory_order_relaxed,
@@ -111,45 +42,10 @@ typedef enum {
    memory_order_seq_cst
} memory_order;

/* locked_uint64. */

#define IF_LOCKED_UINT64(OBJECT, THEN, ELSE)                            \
    __builtin_choose_expr(                                              \
        __builtin_types_compatible_p(typeof(OBJECT), struct locked_uint64), \
        (THEN), (ELSE))
#define AS_LOCKED_UINT64(OBJECT) ((struct locked_uint64 *) (void *) (OBJECT))
#define AS_UINT64(OBJECT) ((uint64_t *) (OBJECT))
struct locked_uint64 {
    uint64_t value;
};

uint64_t locked_uint64_load(const struct locked_uint64 *);
void locked_uint64_store(struct locked_uint64 *, uint64_t);
uint64_t locked_uint64_add(struct locked_uint64 *, uint64_t arg);
uint64_t locked_uint64_sub(struct locked_uint64 *, uint64_t arg);
uint64_t locked_uint64_or(struct locked_uint64 *, uint64_t arg);
uint64_t locked_uint64_xor(struct locked_uint64 *, uint64_t arg);
uint64_t locked_uint64_and(struct locked_uint64 *, uint64_t arg);

#define IF_LOCKED_INT64(OBJECT, THEN, ELSE)                             \
    __builtin_choose_expr(                                              \
        __builtin_types_compatible_p(typeof(OBJECT), struct locked_int64), \
        (THEN), (ELSE))
#define AS_LOCKED_INT64(OBJECT) ((struct locked_int64 *) (void *) (OBJECT))
#define AS_INT64(OBJECT) ((int64_t *) (OBJECT))
struct locked_int64 {
    int64_t value;
};
int64_t locked_int64_load(const struct locked_int64 *);
void locked_int64_store(struct locked_int64 *, int64_t);
int64_t locked_int64_add(struct locked_int64 *, int64_t arg);
int64_t locked_int64_sub(struct locked_int64 *, int64_t arg);
int64_t locked_int64_or(struct locked_int64 *, int64_t arg);
int64_t locked_int64_xor(struct locked_int64 *, int64_t arg);
int64_t locked_int64_and(struct locked_int64 *, int64_t arg);
#define IS_LOCKLESS_ATOMIC(OBJECT) (sizeof(OBJECT) <= sizeof(void *))

#define ATOMIC_VAR_INIT(VALUE) { .value = (VALUE) }
#define atomic_init(OBJECT, VALUE) ((OBJECT)->value = (VALUE), (void) 0)
#define ATOMIC_VAR_INIT(VALUE) VALUE
#define atomic_init(OBJECT, VALUE) (*(OBJECT) = (VALUE), (void) 0)
#define atomic_destroy(OBJECT) ((void) (OBJECT))

static inline void
@@ -176,44 +72,56 @@ atomic_signal_fence(memory_order order OVS_UNUSED)
    }
}

#define ATOMIC_SWITCH(OBJECT, LOCKLESS_CASE,                        \
                      LOCKED_UINT64_CASE, LOCKED_INT64_CASE)        \
    IF_LOCKED_UINT64(OBJECT, LOCKED_UINT64_CASE,                    \
                     IF_LOCKED_INT64(OBJECT, LOCKED_INT64_CASE,     \
                                     LOCKLESS_CASE))

#define atomic_is_lock_free(OBJ)                \
    ((void) (OBJ)->value,                       \
     ATOMIC_SWITCH(OBJ, true, false, false))
    ((void) *(OBJ),                             \
     IF_LOCKLESS_ATOMIC(OBJ, true, false))

#define atomic_store(DST, SRC) \
    atomic_store_explicit(DST, SRC, memory_order_seq_cst)
#define atomic_store_explicit(DST, SRC, ORDER)                      \
    (ATOMIC_SWITCH(DST,                                             \
                   (atomic_thread_fence(ORDER),                     \
                    (DST)->value = (SRC),                           \
                    atomic_thread_fence_if_seq_cst(ORDER)),         \
                   locked_uint64_store(AS_LOCKED_UINT64(DST), SRC), \
                   locked_int64_store(AS_LOCKED_INT64(DST), SRC)),  \
     (void) 0)

#define atomic_store_explicit(DST, SRC, ORDER)          \
    ({                                                  \
        typeof(DST) dst__ = (DST);                      \
        typeof(SRC) src__ = (SRC);                      \
        memory_order order__ = (ORDER);                 \
                                                        \
        if (IS_LOCKLESS_ATOMIC(*dst__)) {               \
            atomic_thread_fence(order__);               \
            *dst__ = src__;                             \
            atomic_thread_fence_if_seq_cst(order__);    \
        } else {                                        \
            atomic_store_locked(DST, SRC);              \
        }                                               \
        (void) 0;                                       \
    })
#define atomic_read(SRC, DST) \
    atomic_read_explicit(SRC, DST, memory_order_seq_cst)
#define atomic_read_explicit(SRC, DST, ORDER)                          \
    (ATOMIC_SWITCH(SRC,                                                \
                   (atomic_thread_fence_if_seq_cst(ORDER),             \
                    *(DST) = (SRC)->value,                             \
                    atomic_thread_fence(ORDER)),                       \
                   *(DST) = locked_uint64_load(AS_LOCKED_UINT64(SRC)), \
                   *(DST) = locked_int64_load(AS_LOCKED_INT64(SRC))),  \
     (void) 0)

#define atomic_op__(RMW, OP, ARG, ORIG)                                      \
    (ATOMIC_SWITCH(RMW,                                                      \
                   *(ORIG) = __sync_fetch_and_##OP(&(RMW)->value, ARG),      \
                   *(ORIG) = locked_uint64_##OP(AS_LOCKED_UINT64(RMW), ARG), \
                   *(ORIG) = locked_int64_##OP(AS_LOCKED_INT64(RMW), ARG)),  \
     (void) 0)
#define atomic_read_explicit(SRC, DST, ORDER)           \
    ({                                                  \
        typeof(DST) dst__ = (DST);                      \
        typeof(SRC) src__ = (SRC);                      \
        memory_order order__ = (ORDER);                 \
                                                        \
        if (IS_LOCKLESS_ATOMIC(*src__)) {               \
            atomic_thread_fence_if_seq_cst(order__);    \
            *dst__ = *src__;                            \
        } else {                                        \
            atomic_read_locked(SRC, DST);               \
        }                                               \
        (void) 0;                                       \
    })

#define atomic_op__(RMW, OP, ARG, ORIG)                     \
    ({                                                      \
        typeof(RMW) rmw__ = (RMW);                          \
        typeof(ARG) arg__ = (ARG);                          \
        typeof(ORIG) orig__ = (ORIG);                       \
                                                            \
        if (IS_LOCKLESS_ATOMIC(*rmw__)) {                   \
            *orig__ = __sync_fetch_and_##OP(rmw__, arg__);  \
        } else {                                            \
            atomic_op_locked(RMW, OP, ARG, ORIG);           \
        }                                                   \
    })

#define atomic_add(RMW, ARG, ORIG) atomic_op__(RMW, add, ARG, ORIG)
#define atomic_sub(RMW, ARG, ORIG) atomic_op__(RMW, sub, ARG, ORIG)
58 changes: 58 additions & 0 deletions lib/ovs-atomic-locked.c
@@ -0,0 +1,58 @@
/*
* Copyright (c) 2013, 2014 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <config.h>

#include "ovs-atomic.h"
#include "hash.h"
#include "ovs-thread.h"

#ifdef OVS_ATOMIC_LOCKED_IMPL
static struct ovs_mutex *
mutex_for_pointer(void *p)
{
    OVS_ALIGNED_STRUCT(CACHE_LINE_SIZE, aligned_mutex) {
        struct ovs_mutex mutex;
        char pad[PAD_SIZE(sizeof(struct ovs_mutex), CACHE_LINE_SIZE)];
    };

    static struct aligned_mutex atomic_mutexes[] = {
#define MUTEX_INIT { .mutex = OVS_MUTEX_INITIALIZER }
#define MUTEX_INIT4  MUTEX_INIT,  MUTEX_INIT,  MUTEX_INIT,  MUTEX_INIT
#define MUTEX_INIT16 MUTEX_INIT4, MUTEX_INIT4, MUTEX_INIT4, MUTEX_INIT4
        MUTEX_INIT16, MUTEX_INIT16,
    };
    BUILD_ASSERT_DECL(IS_POW2(ARRAY_SIZE(atomic_mutexes)));

    uint32_t hash = hash_pointer(p, 0);
    uint32_t indx = hash & (ARRAY_SIZE(atomic_mutexes) - 1);
    return &atomic_mutexes[indx].mutex;
}

void
atomic_lock__(void *p)
    OVS_ACQUIRES(mutex_for_pointer(p))
{
    ovs_mutex_lock(mutex_for_pointer(p));
}

void
atomic_unlock__(void *p)
    OVS_RELEASES(mutex_for_pointer(p))
{
    ovs_mutex_unlock(mutex_for_pointer(p));
}
#endif /* OVS_ATOMIC_LOCKED_IMPL */
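
For context, a minimal usage sketch of the resulting API (an illustration,
not part of the commit):

    #include "ovs-atomic.h"

    /* Under the GCC 4+ implementation, "counter" is a plain uint64_t. */
    static ATOMIC(uint64_t) counter = ATOMIC_VAR_INIT(0);

    void
    counter_bump(void)
    {
        uint64_t orig;

        /* Expands to __sync_fetch_and_add() when uint64_t fits in a
         * pointer; otherwise it goes through the mutex pool above. */
        atomic_add(&counter, 1, &orig);
    }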