Mirror of https://github.com/isar/libmdbx.git (synced 2025-09-16 15:42:19 +08:00)
mdbx: new clang-format settings (cosmetics).
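The hunks below are purely cosmetic: declarations, macro bodies, and preprocessor conditions that were previously wrapped at roughly 80 columns are reflowed onto longer single lines. This matches a raised column limit in the project's .clang-format; a hypothetical fragment of such a change (the actual settings file is not shown in this diff, so the value is a guess) might look like:

  # .clang-format -- illustrative guess, not the committed file
  ColumnLimit: 120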
@@ -8,43 +8,36 @@
 #ifndef __cplusplus
 
 #ifdef MDBX_HAVE_C11ATOMICS
-#define osal_memory_fence(order, write) \
-  atomic_thread_fence((write) ? mo_c11_store(order) : mo_c11_load(order))
+#define osal_memory_fence(order, write) atomic_thread_fence((write) ? mo_c11_store(order) : mo_c11_load(order))
 #else /* MDBX_HAVE_C11ATOMICS */
-#define osal_memory_fence(order, write) \
-  do { \
-    osal_compiler_barrier(); \
-    if (write && order > (MDBX_CPU_WRITEBACK_INCOHERENT ? mo_Relaxed \
-                                                        : mo_AcquireRelease)) \
-      osal_memory_barrier(); \
+#define osal_memory_fence(order, write) \
+  do { \
+    osal_compiler_barrier(); \
+    if (write && order > (MDBX_CPU_WRITEBACK_INCOHERENT ? mo_Relaxed : mo_AcquireRelease)) \
+      osal_memory_barrier(); \
   } while (0)
 #endif /* MDBX_HAVE_C11ATOMICS */
 
 #if defined(MDBX_HAVE_C11ATOMICS) && defined(__LCC__)
-#define atomic_store32(p, value, order) \
-  ({ \
-    const uint32_t value_to_store = (value); \
-    atomic_store_explicit(MDBX_c11a_rw(uint32_t, p), value_to_store, \
-                          mo_c11_store(order)); \
-    value_to_store; \
+#define atomic_store32(p, value, order) \
+  ({ \
+    const uint32_t value_to_store = (value); \
+    atomic_store_explicit(MDBX_c11a_rw(uint32_t, p), value_to_store, mo_c11_store(order)); \
+    value_to_store; \
   })
-#define atomic_load32(p, order) \
-  atomic_load_explicit(MDBX_c11a_ro(uint32_t, p), mo_c11_load(order))
-#define atomic_store64(p, value, order) \
-  ({ \
-    const uint64_t value_to_store = (value); \
-    atomic_store_explicit(MDBX_c11a_rw(uint64_t, p), value_to_store, \
-                          mo_c11_store(order)); \
-    value_to_store; \
+#define atomic_load32(p, order) atomic_load_explicit(MDBX_c11a_ro(uint32_t, p), mo_c11_load(order))
+#define atomic_store64(p, value, order) \
+  ({ \
+    const uint64_t value_to_store = (value); \
+    atomic_store_explicit(MDBX_c11a_rw(uint64_t, p), value_to_store, mo_c11_store(order)); \
+    value_to_store; \
   })
-#define atomic_load64(p, order) \
-  atomic_load_explicit(MDBX_c11a_ro(uint64_t, p), mo_c11_load(order))
+#define atomic_load64(p, order) atomic_load_explicit(MDBX_c11a_ro(uint64_t, p), mo_c11_load(order))
 #endif /* LCC && MDBX_HAVE_C11ATOMICS */
 
 #ifndef atomic_store32
-MDBX_MAYBE_UNUSED static __always_inline uint32_t
-atomic_store32(mdbx_atomic_uint32_t *p, const uint32_t value,
-               enum mdbx_memory_order order) {
+MDBX_MAYBE_UNUSED static __always_inline uint32_t atomic_store32(mdbx_atomic_uint32_t *p, const uint32_t value,
+                                                                  enum mdbx_memory_order order) {
   STATIC_ASSERT(sizeof(mdbx_atomic_uint32_t) == 4);
 #ifdef MDBX_HAVE_C11ATOMICS
   assert(atomic_is_lock_free(MDBX_c11a_rw(uint32_t, p)));
@@ -60,8 +53,8 @@ atomic_store32(mdbx_atomic_uint32_t *p, const uint32_t value,
 #endif /* atomic_store32 */
 
 #ifndef atomic_load32
-MDBX_MAYBE_UNUSED static __always_inline uint32_t atomic_load32(
-    const volatile mdbx_atomic_uint32_t *p, enum mdbx_memory_order order) {
+MDBX_MAYBE_UNUSED static __always_inline uint32_t atomic_load32(const volatile mdbx_atomic_uint32_t *p,
+                                                                enum mdbx_memory_order order) {
   STATIC_ASSERT(sizeof(mdbx_atomic_uint32_t) == 4);
 #ifdef MDBX_HAVE_C11ATOMICS
   assert(atomic_is_lock_free(MDBX_c11a_ro(uint32_t, p)));
@@ -90,9 +83,8 @@ MDBX_MAYBE_UNUSED static __always_inline uint32_t atomic_load32(
 #endif /* xMDBX_TXNID_STEP */
 
 #ifndef atomic_store64
-MDBX_MAYBE_UNUSED static __always_inline uint64_t
-atomic_store64(mdbx_atomic_uint64_t *p, const uint64_t value,
-               enum mdbx_memory_order order) {
+MDBX_MAYBE_UNUSED static __always_inline uint64_t atomic_store64(mdbx_atomic_uint64_t *p, const uint64_t value,
+                                                                  enum mdbx_memory_order order) {
   STATIC_ASSERT(sizeof(mdbx_atomic_uint64_t) == 8);
 #if MDBX_64BIT_ATOMIC
 #if __GNUC_PREREQ(11, 0)
@@ -124,8 +116,7 @@ MDBX_MAYBE_UNUSED static
     __always_inline
 #endif /* MDBX_64BIT_ATOMIC */
     uint64_t
-    atomic_load64(const volatile mdbx_atomic_uint64_t *p,
-                  enum mdbx_memory_order order) {
+    atomic_load64(const volatile mdbx_atomic_uint64_t *p, enum mdbx_memory_order order) {
   STATIC_ASSERT(sizeof(mdbx_atomic_uint64_t) == 8);
 #if MDBX_64BIT_ATOMIC
 #ifdef MDBX_HAVE_C11ATOMICS
@@ -142,15 +133,13 @@ MDBX_MAYBE_UNUSED static
   osal_compiler_barrier();
   uint64_t value = (uint64_t)atomic_load32(&p->high, order) << 32;
   jitter4testing(true);
-  value |= atomic_load32(&p->low, (order == mo_Relaxed) ? mo_Relaxed
-                                                        : mo_AcquireRelease);
+  value |= atomic_load32(&p->low, (order == mo_Relaxed) ? mo_Relaxed : mo_AcquireRelease);
   jitter4testing(true);
   for (;;) {
     osal_compiler_barrier();
     uint64_t again = (uint64_t)atomic_load32(&p->high, order) << 32;
     jitter4testing(true);
-    again |= atomic_load32(&p->low, (order == mo_Relaxed) ? mo_Relaxed
-                                                          : mo_AcquireRelease);
+    again |= atomic_load32(&p->low, (order == mo_Relaxed) ? mo_Relaxed : mo_AcquireRelease);
     jitter4testing(true);
     if (likely(value == again))
       return value;
@@ -171,19 +160,16 @@ MDBX_MAYBE_UNUSED static __always_inline void atomic_yield(void) {
 #else
   __asm__ __volatile__("hint @pause");
 #endif
-#elif defined(__aarch64__) || (defined(__ARM_ARCH) && __ARM_ARCH > 6) || \
-    defined(__ARM_ARCH_6K__)
+#elif defined(__aarch64__) || (defined(__ARM_ARCH) && __ARM_ARCH > 6) || defined(__ARM_ARCH_6K__)
 #ifdef __CC_ARM
   __yield();
 #else
   __asm__ __volatile__("yield");
 #endif
-#elif (defined(__mips64) || defined(__mips64__)) && defined(__mips_isa_rev) && \
-    __mips_isa_rev >= 2
+#elif (defined(__mips64) || defined(__mips64__)) && defined(__mips_isa_rev) && __mips_isa_rev >= 2
   __asm__ __volatile__("pause");
-#elif defined(__mips) || defined(__mips__) || defined(__mips64) || \
-    defined(__mips64__) || defined(_M_MRX000) || defined(_MIPS_) || \
-    defined(__MWERKS__) || defined(__sgi)
+#elif defined(__mips) || defined(__mips__) || defined(__mips64) || defined(__mips64__) || defined(_M_MRX000) || \
+    defined(_MIPS_) || defined(__MWERKS__) || defined(__sgi)
   __asm__ __volatile__(".word 0x00000140");
 #elif defined(__linux__) || defined(__gnu_linux__) || defined(_UNIX03_SOURCE)
   sched_yield();
@@ -193,8 +179,7 @@ MDBX_MAYBE_UNUSED static __always_inline void atomic_yield(void) {
 }
 
 #if MDBX_64BIT_CAS
-MDBX_MAYBE_UNUSED static __always_inline bool
-atomic_cas64(mdbx_atomic_uint64_t *p, uint64_t c, uint64_t v) {
+MDBX_MAYBE_UNUSED static __always_inline bool atomic_cas64(mdbx_atomic_uint64_t *p, uint64_t c, uint64_t v) {
 #ifdef MDBX_HAVE_C11ATOMICS
   STATIC_ASSERT(sizeof(long long) >= sizeof(uint64_t));
   assert(atomic_is_lock_free(MDBX_c11a_rw(uint64_t, p)));
@@ -202,8 +187,7 @@ atomic_cas64(mdbx_atomic_uint64_t *p, uint64_t c, uint64_t v) {
 #elif defined(__GNUC__) || defined(__clang__)
   return __sync_bool_compare_and_swap(&p->weak, c, v);
 #elif defined(_MSC_VER)
-  return c == (uint64_t)_InterlockedCompareExchange64(
-                  (volatile __int64 *)&p->weak, v, c);
+  return c == (uint64_t)_InterlockedCompareExchange64((volatile __int64 *)&p->weak, v, c);
 #elif defined(__APPLE__)
   return OSAtomicCompareAndSwap64Barrier(c, v, &p->weak);
 #else
@@ -212,8 +196,7 @@ atomic_cas64(mdbx_atomic_uint64_t *p, uint64_t c, uint64_t v) {
 }
 #endif /* MDBX_64BIT_CAS */
 
-MDBX_MAYBE_UNUSED static __always_inline bool
-atomic_cas32(mdbx_atomic_uint32_t *p, uint32_t c, uint32_t v) {
+MDBX_MAYBE_UNUSED static __always_inline bool atomic_cas32(mdbx_atomic_uint32_t *p, uint32_t c, uint32_t v) {
 #ifdef MDBX_HAVE_C11ATOMICS
   STATIC_ASSERT(sizeof(int) >= sizeof(uint32_t));
   assert(atomic_is_lock_free(MDBX_c11a_rw(uint32_t, p)));
@@ -222,8 +205,7 @@ atomic_cas32(mdbx_atomic_uint32_t *p, uint32_t c, uint32_t v) {
   return __sync_bool_compare_and_swap(&p->weak, c, v);
 #elif defined(_MSC_VER)
   STATIC_ASSERT(sizeof(volatile long) == sizeof(volatile uint32_t));
-  return c ==
-         (uint32_t)_InterlockedCompareExchange((volatile long *)&p->weak, v, c);
+  return c == (uint32_t)_InterlockedCompareExchange((volatile long *)&p->weak, v, c);
 #elif defined(__APPLE__)
   return OSAtomicCompareAndSwap32Barrier(c, v, &p->weak);
 #else
@@ -231,8 +213,7 @@ atomic_cas32(mdbx_atomic_uint32_t *p, uint32_t c, uint32_t v) {
 #endif
 }
 
-MDBX_MAYBE_UNUSED static __always_inline uint32_t
-atomic_add32(mdbx_atomic_uint32_t *p, uint32_t v) {
+MDBX_MAYBE_UNUSED static __always_inline uint32_t atomic_add32(mdbx_atomic_uint32_t *p, uint32_t v) {
 #ifdef MDBX_HAVE_C11ATOMICS
   STATIC_ASSERT(sizeof(int) >= sizeof(uint32_t));
   assert(atomic_is_lock_free(MDBX_c11a_rw(uint32_t, p)));
@@ -251,8 +232,7 @@ atomic_add32(mdbx_atomic_uint32_t *p, uint32_t v) {
 
 #define atomic_sub32(p, v) atomic_add32(p, 0 - (v))
 
-MDBX_MAYBE_UNUSED static __always_inline uint64_t
-safe64_txnid_next(uint64_t txnid) {
+MDBX_MAYBE_UNUSED static __always_inline uint64_t safe64_txnid_next(uint64_t txnid) {
   txnid += xMDBX_TXNID_STEP;
 #if !MDBX_64BIT_CAS
   /* avoid overflow of low-part in safe64_reset() */
@@ -262,8 +242,7 @@ safe64_txnid_next(uint64_t txnid) {
 }
 
 /* Atomically make target value >= SAFE64_INVALID_THRESHOLD */
-MDBX_MAYBE_UNUSED static __always_inline void
-safe64_reset(mdbx_atomic_uint64_t *p, bool single_writer) {
+MDBX_MAYBE_UNUSED static __always_inline void safe64_reset(mdbx_atomic_uint64_t *p, bool single_writer) {
   if (single_writer) {
 #if MDBX_64BIT_ATOMIC && MDBX_WORDBITS >= 64
     atomic_store64(p, UINT64_MAX, mo_AcquireRelease);
@@ -290,8 +269,7 @@ safe64_reset(mdbx_atomic_uint64_t *p, bool single_writer) {
   jitter4testing(true);
 }
 
-MDBX_MAYBE_UNUSED static __always_inline bool
-safe64_reset_compare(mdbx_atomic_uint64_t *p, uint64_t compare) {
+MDBX_MAYBE_UNUSED static __always_inline bool safe64_reset_compare(mdbx_atomic_uint64_t *p, uint64_t compare) {
   /* LY: This function is used to reset `txnid` from hsr-handler in case
    * the asynchronously cancellation of read transaction. Therefore,
    * there may be a collision between the cleanup performed here and
@@ -307,8 +285,7 @@ safe64_reset_compare(mdbx_atomic_uint64_t *p, uint64_t compare) {
   bool rc = false;
   if (likely(atomic_load32(&p->low, mo_AcquireRelease) == (uint32_t)compare &&
              atomic_cas32(&p->high, (uint32_t)(compare >> 32), UINT32_MAX))) {
-    if (unlikely(atomic_load32(&p->low, mo_AcquireRelease) !=
-                 (uint32_t)compare))
+    if (unlikely(atomic_load32(&p->low, mo_AcquireRelease) != (uint32_t)compare))
       atomic_cas32(&p->high, UINT32_MAX, (uint32_t)(compare >> 32));
     else
       rc = true;
@@ -318,8 +295,7 @@ safe64_reset_compare(mdbx_atomic_uint64_t *p, uint64_t compare) {
   return rc;
 }
 
-MDBX_MAYBE_UNUSED static __always_inline void
-safe64_write(mdbx_atomic_uint64_t *p, const uint64_t v) {
+MDBX_MAYBE_UNUSED static __always_inline void safe64_write(mdbx_atomic_uint64_t *p, const uint64_t v) {
   assert(p->weak >= SAFE64_INVALID_THRESHOLD);
 #if MDBX_64BIT_ATOMIC && MDBX_64BIT_CAS
   atomic_store64(p, v, mo_AcquireRelease);
@@ -336,8 +312,7 @@ safe64_write(mdbx_atomic_uint64_t *p, const uint64_t v) {
   jitter4testing(true);
 }
 
-MDBX_MAYBE_UNUSED static __always_inline uint64_t
-safe64_read(const mdbx_atomic_uint64_t *p) {
+MDBX_MAYBE_UNUSED static __always_inline uint64_t safe64_read(const mdbx_atomic_uint64_t *p) {
   jitter4testing(true);
   uint64_t v;
   do
@@ -366,8 +341,7 @@ MDBX_MAYBE_UNUSED static __always_inline bool
 #endif /* unused for now */
 
 /* non-atomic write with safety for reading a half-updated value */
-MDBX_MAYBE_UNUSED static __always_inline void
-safe64_update(mdbx_atomic_uint64_t *p, const uint64_t v) {
+MDBX_MAYBE_UNUSED static __always_inline void safe64_update(mdbx_atomic_uint64_t *p, const uint64_t v) {
 #if MDBX_64BIT_ATOMIC
   atomic_store64(p, v, mo_Relaxed);
 #else
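For readers unfamiliar with the safe64_* helpers reformatted above: on targets without native 64-bit atomics, a 64-bit value is kept as two 32-bit halves, writers first park the high word at UINT32_MAX (raising the value to at least SAFE64_INVALID_THRESHOLD) before publishing the halves, and readers re-read until two consecutive snapshots agree. Below is a simplified, self-contained C11 sketch of that idea only; the names split64_t, split64_write, split64_read, and SPLIT64_INVALID_THRESHOLD are hypothetical, and the memory orderings are illustrative rather than libmdbx's exact choices.

/* Illustrative sketch only -- not the libmdbx implementation. */
#include <stdatomic.h>
#include <stdint.h>

typedef struct {
  _Atomic uint32_t low;
  _Atomic uint32_t high;
} split64_t;

/* Any value whose high word is UINT32_MAX is treated as "invalid / update in progress". */
#define SPLIT64_INVALID_THRESHOLD ((uint64_t)UINT32_MAX << 32)

/* Writer: first make the value observably invalid, then publish the halves,
 * so a reader can never assemble a torn value that still looks valid. */
static void split64_write(split64_t *p, uint64_t v) {
  atomic_store_explicit(&p->high, UINT32_MAX, memory_order_release);
  atomic_store_explicit(&p->low, (uint32_t)v, memory_order_release);
  atomic_store_explicit(&p->high, (uint32_t)(v >> 32), memory_order_release);
}

/* Reader: retry until two consecutive snapshots agree and the value is valid. */
static uint64_t split64_read(split64_t *p) {
  for (;;) {
    uint64_t value = (uint64_t)atomic_load_explicit(&p->high, memory_order_acquire) << 32;
    value |= atomic_load_explicit(&p->low, memory_order_acquire);
    uint64_t again = (uint64_t)atomic_load_explicit(&p->high, memory_order_acquire) << 32;
    again |= atomic_load_explicit(&p->low, memory_order_acquire);
    if (value == again && value < SPLIT64_INVALID_THRESHOLD)
      return value;
  }
}

The same double-read-and-compare loop is what the 32-bit fallback of atomic_load64 in the @@ -142,15 hunk above implements.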