Mirror of https://github.com/isar/libmdbx.git (synced 2024-10-30 11:29:19 +08:00)
mdbx: fix non-alloy build for windows.

Change-Id: Id93eb069543f081c3b1c9734ed65f4afd80a444d

parent 4703dac49d
commit b7110eb4d4
src/core.c (-151 lines)
@@ -823,157 +823,6 @@ size_t __hot mdbx_e2k_strnlen_bug_workaround(const char *s, size_t maxlen) {

/*------------------------------------------------------------------------------
 * safe read/write volatile 64-bit fields on 32-bit architectures. */

enum MDBX_memory_order {
  mo_Relaxed,
  mo_AcquireRelease,
  mo_SequentialConsistency
};
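The enum collapses C11's six memory orders into the three cases this code actually needs: relaxed for statistics-style fields, acquire/release for publish/consume handshakes, and sequential consistency for cross-thread decisions. A minimal usage sketch built on the atomic_* helpers defined later in this diff; the demo_* names and the slot/counter semantics are hypothetical, not mdbx API:

/* Hypothetical usage sketch, not part of this diff: picking an order. */
static void demo_publish_txnid(MDBX_atomic_uint64_t *slot, uint64_t txnid) {
  /* ordering required: prior writes must be visible before the new txnid */
  atomic_store64(slot, txnid, mo_AcquireRelease);
}

static void demo_bump_counter(MDBX_atomic_uint32_t *counter) {
  /* a plain statistic: no ordering needed, mo_Relaxed suffices */
  atomic_store32(counter, atomic_load32(counter, mo_Relaxed) + 1, mo_Relaxed);
}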
#ifdef MDBX_HAVE_C11ATOMICS

/* Crutches for C11 atomic compiler's bugs */
#if defined(__e2k__) && defined(__LCC__) && __LCC__ < /* FIXME */ 127
#define MDBX_c11a_ro(type, ptr) (&(ptr)->weak)
#define MDBX_c11a_rw(type, ptr) (&(ptr)->weak)
#elif defined(__clang__) && __clang__ < 8
#define MDBX_c11a_ro(type, ptr) ((volatile _Atomic(type) *)&(ptr)->c11a)
#define MDBX_c11a_rw(type, ptr) (&(ptr)->c11a)
#else
#define MDBX_c11a_ro(type, ptr) (&(ptr)->c11a)
#define MDBX_c11a_rw(type, ptr) (&(ptr)->c11a)
#endif /* Crutches for C11 atomic compiler's bugs */
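The MDBX_c11a_ro/rw macros select which view of the atomic union a C11 operation targets: buggy LCC releases fall back to the plain volatile member weak, pre-8 clang needs read-only access laundered through a volatile _Atomic cast, and conforming compilers use the _Atomic member c11a directly. A standalone model of the conforming case; the demo_* names are mine, not mdbx's:

#include <stdatomic.h>
#include <stdint.h>

/* Standalone model of the union the macros dispatch over (illustrative): */
typedef union {
  volatile uint32_t weak; /* plain volatile view, for broken compilers */
  _Atomic uint32_t c11a;  /* proper C11 view, used on conforming compilers */
} demo_atomic_u32;

static uint32_t demo_load_acquire(demo_atomic_u32 *p) {
  /* conforming path: MDBX_c11a_ro(uint32_t, p) expands to &p->c11a */
  return atomic_load_explicit(&p->c11a, memory_order_acquire);
}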
static __always_inline memory_order mo_c11_store(enum MDBX_memory_order fence) {
  switch (fence) {
  default:
    assert(false);
    __unreachable();
  case mo_Relaxed:
    return memory_order_relaxed;
  case mo_AcquireRelease:
    return memory_order_release;
  case mo_SequentialConsistency:
    return memory_order_seq_cst;
  }
}

static __always_inline memory_order mo_c11_load(enum MDBX_memory_order fence) {
  switch (fence) {
  default:
    assert(false);
    __unreachable();
  case mo_Relaxed:
    return memory_order_relaxed;
  case mo_AcquireRelease:
    return memory_order_acquire;
  case mo_SequentialConsistency:
    return memory_order_seq_cst;
  }
}
#endif /* MDBX_HAVE_C11ATOMICS */
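Note the deliberate asymmetry: mo_AcquireRelease maps to memory_order_release on the store side but memory_order_acquire on the load side, which is exactly the pairing a publish/consume handshake needs. A minimal self-contained illustration in plain C11; the demo_* names are mine:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static uint64_t demo_payload;  /* ordinary data, written before publishing */
static _Atomic bool demo_ready;

static void demo_producer(void) {
  demo_payload = 42;
  /* what mo_AcquireRelease becomes on the store side: */
  atomic_store_explicit(&demo_ready, true, memory_order_release);
}

static bool demo_consumer(uint64_t *out) {
  /* ...and on the load side: */
  if (!atomic_load_explicit(&demo_ready, memory_order_acquire))
    return false;
  *out = demo_payload; /* guaranteed to see 42 once the flag is observed */
  return true;
}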
static __maybe_unused __always_inline void
mdbx_memory_fence(enum MDBX_memory_order order, bool write) {
#ifdef MDBX_HAVE_C11ATOMICS
  atomic_thread_fence(write ? mo_c11_store(order) : mo_c11_load(order));
#else /* MDBX_HAVE_C11ATOMICS */
  mdbx_compiler_barrier();
  if (write &&
      order > (MDBX_CPU_WRITEBACK_INCOHERENT ? mo_Relaxed : mo_AcquireRelease))
    mdbx_memory_barrier();
#endif /* MDBX_HAVE_C11ATOMICS */
}
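Without C11 atomics the fence degrades to a compiler-only barrier, escalating to a real hardware barrier just for writes on targets with incoherent write-back caches. The mdbx_compiler_barrier/mdbx_memory_barrier macros are defined elsewhere in the tree; a plausible GCC/Clang x86 rendition, offered as an assumption rather than the actual definitions:

/* Assumed shapes, for orientation only (not the mdbx definitions): */
#define demo_compiler_barrier() __asm__ __volatile__("" ::: "memory")
#define demo_memory_barrier() __sync_synchronize() /* full hardware fence */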
static __maybe_unused __always_inline uint32_t
atomic_store32(MDBX_atomic_uint32_t *p, const uint32_t value,
               enum MDBX_memory_order order) {
  STATIC_ASSERT(sizeof(MDBX_atomic_uint32_t) == 4);
#ifdef MDBX_HAVE_C11ATOMICS
  assert(atomic_is_lock_free(MDBX_c11a_rw(uint32_t, p)));
  atomic_store_explicit(MDBX_c11a_rw(uint32_t, p), value, mo_c11_store(order));
#else /* MDBX_HAVE_C11ATOMICS */
  if (order != mo_Relaxed)
    mdbx_compiler_barrier();
  p->weak = value;
  mdbx_memory_fence(order, true);
#endif /* MDBX_HAVE_C11ATOMICS */
  return value;
}
static __maybe_unused __always_inline uint32_t
atomic_load32(const MDBX_atomic_uint32_t *p, enum MDBX_memory_order order) {
  STATIC_ASSERT(sizeof(MDBX_atomic_uint32_t) == 4);
#ifdef MDBX_HAVE_C11ATOMICS
  assert(atomic_is_lock_free(MDBX_c11a_ro(uint32_t, p)));
  return atomic_load_explicit(MDBX_c11a_ro(uint32_t, p), mo_c11_load(order));
#else /* MDBX_HAVE_C11ATOMICS */
  mdbx_memory_fence(order, false);
  const uint32_t value = p->weak;
  if (order != mo_Relaxed)
    mdbx_compiler_barrier();
  return value;
#endif /* MDBX_HAVE_C11ATOMICS */
}
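Together these two helpers form a checked pair: the C11 path asserts lock-freedom and forwards the mapped order, while the fallback brackets a plain volatile access with the fences above. A hedged sketch of a reader-slot handshake built on them; the slot semantics and demo_* names are hypothetical, not mdbx API:

#include <stdbool.h>

/* Hypothetical usage, not mdbx code: */
static void demo_slot_acquire(MDBX_atomic_uint32_t *slot, uint32_t pid) {
  atomic_store32(slot, pid, mo_AcquireRelease); /* publish ownership */
}

static bool demo_slot_is_free(const MDBX_atomic_uint32_t *slot) {
  return atomic_load32(slot, mo_AcquireRelease) == 0;
}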
static __always_inline uint64_t atomic_store64(MDBX_atomic_uint64_t *p,
                                               const uint64_t value,
                                               enum MDBX_memory_order order) {
  STATIC_ASSERT(sizeof(MDBX_atomic_uint64_t) == 8);
#if MDBX_64BIT_ATOMIC
#ifdef MDBX_HAVE_C11ATOMICS
  assert(atomic_is_lock_free(MDBX_c11a_rw(uint64_t, p)));
  atomic_store_explicit(MDBX_c11a_rw(uint64_t, p), value, mo_c11_store(order));
#else /* MDBX_HAVE_C11ATOMICS */
  if (order != mo_Relaxed)
    mdbx_compiler_barrier();
  p->weak = value;
  mdbx_memory_fence(order, true);
#endif /* MDBX_HAVE_C11ATOMICS */
#else /* !MDBX_64BIT_ATOMIC */
  mdbx_compiler_barrier();
  atomic_store32(&p->low, (uint32_t)value, mo_Relaxed);
  mdbx_jitter4testing(true);
  atomic_store32(&p->high, (uint32_t)(value >> 32), order);
  mdbx_jitter4testing(true);
#endif /* !MDBX_64BIT_ATOMIC */
  return value;
}
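On builds without native 64-bit atomics the store is split so that the high half goes last and carries the caller's ordering: writing low (relaxed) then high (release) makes the high word the publication point of the pair, while mdbx_jitter4testing widens the race window so tests actually hit torn states. The same protocol in plain C11, on a hypothetical halves type (byte order and naming simplified, not mdbx's definition):

#include <stdatomic.h>
#include <stdint.h>

typedef struct {
  _Atomic uint32_t low, high; /* simplified: ignores byte-order variants */
} demo_u64_halves;

static void demo_store64(demo_u64_halves *p, uint64_t v) {
  atomic_store_explicit(&p->low, (uint32_t)v, memory_order_relaxed);
  /* high half last, with release: publishes the pair */
  atomic_store_explicit(&p->high, (uint32_t)(v >> 32), memory_order_release);
}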
static __always_inline uint64_t atomic_load64(const MDBX_atomic_uint64_t *p,
                                              enum MDBX_memory_order order) {
  STATIC_ASSERT(sizeof(MDBX_atomic_uint64_t) == 8);
#if MDBX_64BIT_ATOMIC
#ifdef MDBX_HAVE_C11ATOMICS
  assert(atomic_is_lock_free(MDBX_c11a_ro(uint64_t, p)));
  return atomic_load_explicit(MDBX_c11a_ro(uint64_t, p), mo_c11_load(order));
#else /* MDBX_HAVE_C11ATOMICS */
  mdbx_memory_fence(order, false);
  const uint64_t value = p->weak;
  if (order != mo_Relaxed)
    mdbx_compiler_barrier();
  return value;
#endif /* MDBX_HAVE_C11ATOMICS */
#else /* !MDBX_64BIT_ATOMIC */
  mdbx_compiler_barrier();
  uint64_t value = (uint64_t)atomic_load32(&p->high, order) << 32;
  mdbx_jitter4testing(true);
  value |= atomic_load32(&p->low, (order == mo_Relaxed) ? mo_Relaxed
                                                        : mo_AcquireRelease);
  mdbx_jitter4testing(true);
  for (;;) {
    mdbx_compiler_barrier();
    uint64_t again = (uint64_t)atomic_load32(&p->high, order) << 32;
    mdbx_jitter4testing(true);
    again |= atomic_load32(&p->low, (order == mo_Relaxed) ? mo_Relaxed
                                                          : mo_AcquireRelease);
    mdbx_jitter4testing(true);
    if (likely(value == again))
      return value;
    value = again;
  }
#endif /* !MDBX_64BIT_ATOMIC */
}
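The reader half of that protocol is the retry loop above: compose high/low, re-read, and return only when two consecutive snapshots agree, so a store landing between the two half-reads is detected rather than returned torn. The matching reader for the demo_u64_halves sketch shown earlier (again illustrative, not mdbx code):

static uint64_t demo_load64(demo_u64_halves *p) {
  uint64_t value =
      (uint64_t)atomic_load_explicit(&p->high, memory_order_acquire) << 32;
  value |= atomic_load_explicit(&p->low, memory_order_acquire);
  for (;;) {
    uint64_t again =
        (uint64_t)atomic_load_explicit(&p->high, memory_order_acquire) << 32;
    again |= atomic_load_explicit(&p->low, memory_order_acquire);
    if (value == again)
      return value; /* two matching snapshots: treated as consistent,
                       as in the mdbx loop */
    value = again;
  }
}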
static __always_inline void atomic_yield(void) {
#if defined(_WIN32) || defined(_WIN64)
  YieldProcessor();
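atomic_yield is the spin-wait hint; the diff view is truncated after its Windows branch, where YieldProcessor() emits a pause-style instruction. A hedged sketch of a bounded spin built on it; the loop and demo_* names are mine, not mdbx API:

#include <stdbool.h>

/* Hypothetical bounded spin-wait, not mdbx code: */
static bool demo_spin_until_nonzero(MDBX_atomic_uint32_t *flag,
                                    unsigned spins) {
  while (spins-- > 0) {
    if (atomic_load32(flag, mo_AcquireRelease) != 0)
      return true;
    atomic_yield(); /* CPU hint; on Windows this is YieldProcessor() */
  }
  return false; /* caller should fall back to an OS wait */
}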
src/internals.h (+162 lines)

@@ -184,7 +184,13 @@ extern LIBMDBX_API const char *const mdbx_sourcery_anchor;
#include "options.h"
|
||||
|
||||
/*----------------------------------------------------------------------------*/
|
||||
/* Basic constants and types */
|
||||
/* Atomics */
|
||||
|
||||
enum MDBX_memory_order {
|
||||
mo_Relaxed,
|
||||
mo_AcquireRelease,
|
||||
mo_SequentialConsistency
|
||||
};
|
||||
|
||||
typedef union {
|
||||
volatile uint32_t weak;
|
||||
@@ -211,6 +217,160 @@ typedef union {
#endif
} MDBX_atomic_uint64_t;
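Only the tail of MDBX_atomic_uint64_t is visible in this hunk, but the accessors reference ->weak, ->c11a, ->low and ->high, so its shape can be inferred roughly as below. This is a reconstruction, not the verbatim mdbx definition (the real one also has to account for byte order):

/* Inferred shape, not verbatim from the tree: */
typedef union {
  volatile uint64_t weak;
#ifdef MDBX_HAVE_C11ATOMICS
  volatile _Atomic uint64_t c11a;
#endif
  struct {
    MDBX_atomic_uint32_t low, high; /* halves used when MDBX_64BIT_ATOMIC=0 */
  };
} demo_atomic_uint64_t;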
#ifdef MDBX_HAVE_C11ATOMICS

/* Crutches for C11 atomic compiler's bugs */
#if defined(__e2k__) && defined(__LCC__) && __LCC__ < /* FIXME */ 127
#define MDBX_c11a_ro(type, ptr) (&(ptr)->weak)
#define MDBX_c11a_rw(type, ptr) (&(ptr)->weak)
#elif defined(__clang__) && __clang__ < 8
#define MDBX_c11a_ro(type, ptr) ((volatile _Atomic(type) *)&(ptr)->c11a)
#define MDBX_c11a_rw(type, ptr) (&(ptr)->c11a)
#else
#define MDBX_c11a_ro(type, ptr) (&(ptr)->c11a)
#define MDBX_c11a_rw(type, ptr) (&(ptr)->c11a)
#endif /* Crutches for C11 atomic compiler's bugs */
static __always_inline memory_order mo_c11_store(enum MDBX_memory_order fence) {
  switch (fence) {
  default:
    assert(false);
    __unreachable();
  case mo_Relaxed:
    return memory_order_relaxed;
  case mo_AcquireRelease:
    return memory_order_release;
  case mo_SequentialConsistency:
    return memory_order_seq_cst;
  }
}

static __always_inline memory_order mo_c11_load(enum MDBX_memory_order fence) {
  switch (fence) {
  default:
    assert(false);
    __unreachable();
  case mo_Relaxed:
    return memory_order_relaxed;
  case mo_AcquireRelease:
    return memory_order_acquire;
  case mo_SequentialConsistency:
    return memory_order_seq_cst;
  }
}
#endif /* MDBX_HAVE_C11ATOMICS */
#ifndef __cplusplus

static __inline void mdbx_jitter4testing(bool tiny);
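mdbx_jitter4testing is only forward-declared here; its role in the functions below is to randomly perturb timing between paired half-word accesses so the torn-read retry loop actually gets exercised. A plausible test-build shape, offered purely as an assumption, with demo_* naming to make that explicit:

#include <sched.h>
#include <stdbool.h>
#include <stdlib.h>
#include <unistd.h>

/* Assumed illustration, not the real definition from core.c: */
static inline void demo_jitter4testing(bool tiny) {
  if (rand() % 4 == 0) {
    if (tiny)
      sched_yield();          /* small perturbation */
    else
      usleep(rand() % 1000);  /* larger stall, up to 1 ms */
  }
}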
static __maybe_unused __always_inline void
mdbx_memory_fence(enum MDBX_memory_order order, bool write) {
#ifdef MDBX_HAVE_C11ATOMICS
  atomic_thread_fence(write ? mo_c11_store(order) : mo_c11_load(order));
#else /* MDBX_HAVE_C11ATOMICS */
  mdbx_compiler_barrier();
  if (write &&
      order > (MDBX_CPU_WRITEBACK_INCOHERENT ? mo_Relaxed : mo_AcquireRelease))
    mdbx_memory_barrier();
#endif /* MDBX_HAVE_C11ATOMICS */
}
static __maybe_unused __always_inline uint32_t
atomic_store32(MDBX_atomic_uint32_t *p, const uint32_t value,
               enum MDBX_memory_order order) {
  STATIC_ASSERT(sizeof(MDBX_atomic_uint32_t) == 4);
#ifdef MDBX_HAVE_C11ATOMICS
  assert(atomic_is_lock_free(MDBX_c11a_rw(uint32_t, p)));
  atomic_store_explicit(MDBX_c11a_rw(uint32_t, p), value, mo_c11_store(order));
#else /* MDBX_HAVE_C11ATOMICS */
  if (order != mo_Relaxed)
    mdbx_compiler_barrier();
  p->weak = value;
  mdbx_memory_fence(order, true);
#endif /* MDBX_HAVE_C11ATOMICS */
  return value;
}
static __maybe_unused __always_inline uint32_t
atomic_load32(const MDBX_atomic_uint32_t *p, enum MDBX_memory_order order) {
  STATIC_ASSERT(sizeof(MDBX_atomic_uint32_t) == 4);
#ifdef MDBX_HAVE_C11ATOMICS
  assert(atomic_is_lock_free(MDBX_c11a_ro(uint32_t, p)));
  return atomic_load_explicit(MDBX_c11a_ro(uint32_t, p), mo_c11_load(order));
#else /* MDBX_HAVE_C11ATOMICS */
  mdbx_memory_fence(order, false);
  const uint32_t value = p->weak;
  if (order != mo_Relaxed)
    mdbx_compiler_barrier();
  return value;
#endif /* MDBX_HAVE_C11ATOMICS */
}
static __maybe_unused __always_inline uint64_t
atomic_store64(MDBX_atomic_uint64_t *p, const uint64_t value,
               enum MDBX_memory_order order) {
  STATIC_ASSERT(sizeof(MDBX_atomic_uint64_t) == 8);
#if MDBX_64BIT_ATOMIC
#ifdef MDBX_HAVE_C11ATOMICS
  assert(atomic_is_lock_free(MDBX_c11a_rw(uint64_t, p)));
  atomic_store_explicit(MDBX_c11a_rw(uint64_t, p), value, mo_c11_store(order));
#else /* MDBX_HAVE_C11ATOMICS */
  if (order != mo_Relaxed)
    mdbx_compiler_barrier();
  p->weak = value;
  mdbx_memory_fence(order, true);
#endif /* MDBX_HAVE_C11ATOMICS */
#else /* !MDBX_64BIT_ATOMIC */
  mdbx_compiler_barrier();
  atomic_store32(&p->low, (uint32_t)value, mo_Relaxed);
  mdbx_jitter4testing(true);
  atomic_store32(&p->high, (uint32_t)(value >> 32), order);
  mdbx_jitter4testing(true);
#endif /* !MDBX_64BIT_ATOMIC */
  return value;
}
static __maybe_unused __always_inline uint64_t
atomic_load64(const MDBX_atomic_uint64_t *p, enum MDBX_memory_order order) {
  STATIC_ASSERT(sizeof(MDBX_atomic_uint64_t) == 8);
#if MDBX_64BIT_ATOMIC
#ifdef MDBX_HAVE_C11ATOMICS
  assert(atomic_is_lock_free(MDBX_c11a_ro(uint64_t, p)));
  return atomic_load_explicit(MDBX_c11a_ro(uint64_t, p), mo_c11_load(order));
#else /* MDBX_HAVE_C11ATOMICS */
  mdbx_memory_fence(order, false);
  const uint64_t value = p->weak;
  if (order != mo_Relaxed)
    mdbx_compiler_barrier();
  return value;
#endif /* MDBX_HAVE_C11ATOMICS */
#else /* !MDBX_64BIT_ATOMIC */
  mdbx_compiler_barrier();
  uint64_t value = (uint64_t)atomic_load32(&p->high, order) << 32;
  mdbx_jitter4testing(true);
  value |= atomic_load32(&p->low, (order == mo_Relaxed) ? mo_Relaxed
                                                        : mo_AcquireRelease);
  mdbx_jitter4testing(true);
  for (;;) {
    mdbx_compiler_barrier();
    uint64_t again = (uint64_t)atomic_load32(&p->high, order) << 32;
    mdbx_jitter4testing(true);
    again |= atomic_load32(&p->low, (order == mo_Relaxed) ? mo_Relaxed
                                                          : mo_AcquireRelease);
    mdbx_jitter4testing(true);
    if (likely(value == again))
      return value;
    value = again;
  }
#endif /* !MDBX_64BIT_ATOMIC */
}
#endif /* !__cplusplus */
/*----------------------------------------------------------------------------*/
/* Basic constants and types */

/* A stamp that identifies a file as an MDBX file.
 * There's nothing special about this value other than that it is easily
 * recognizable, and it will reflect any byte order mismatches. */