mdbx: rework functions for meta-pages, split-off non-volatile, more const.

Леонид Юрьев (Leonid Yuriev) 2022-02-09 18:10:08 +03:00
parent ef7b4289c0
commit 2b6fd968d2
3 changed files with 217 additions and 214 deletions
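For context, the diff splits the meta-page readers into two flavors: functions such as meta_txnid() and meta_prefer_last() take a volatile const MDBX_meta * because the page they read may be rewritten concurrently, while the constmeta_* counterparts take a plain const MDBX_meta * for stable snapshots and can therefore be marked MDBX_NOTHROW_PURE_FUNCTION. The minimal, self-contained C sketch below illustrates that pattern; the demo_* names are hypothetical stand-ins, not the actual libmdbx types or functions.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the two-phase txnid pair stored in a meta-page. */
struct demo_meta {
  uint64_t txnid_a; /* written first during a two-phase update */
  uint64_t txnid_b; /* written last; equals txnid_a once the update completes */
};

/* Volatile flavor: reads a meta-page that a writer may be updating right now,
 * so both halves are fetched through a volatile-qualified pointer and a torn
 * value (a != b) is reported as 0 ("not valid yet"). */
static inline uint64_t demo_meta_txnid(volatile const struct demo_meta *meta) {
  const uint64_t a = meta->txnid_a;
  const uint64_t b = meta->txnid_b;
  return (a == b) ? a : 0;
}

/* Non-volatile flavor: operates on a stable snapshot (a local copy or a page
 * owned by the writer), so the compiler is free to cache the loads and the
 * function can be treated as pure. */
static inline uint64_t demo_constmeta_txnid(const struct demo_meta *meta) {
  const uint64_t a = meta->txnid_a;
  const uint64_t b = meta->txnid_b;
  return (a == b) ? a : 0;
}

int main(void) {
  struct demo_meta m = {.txnid_a = 42, .txnid_b = 42};
  printf("volatile read: %llu, snapshot read: %llu\n",
         (unsigned long long)demo_meta_txnid(&m),
         (unsigned long long)demo_constmeta_txnid(&m));
  return 0;
}

The bodies are intentionally identical; only the qualification of the pointer parameter differs, which is what lets the const variants carry the pure-function attribute in the commit.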


@ -222,7 +222,7 @@ MDBX_NOTHROW_PURE_FUNCTION static __always_inline uint64_t unaligned_peek_u64(
static __always_inline uint64_t
unaligned_peek_u64_volatile(const unsigned expected_alignment,
const volatile void *const __restrict ptr) {
volatile const void *const __restrict ptr) {
assert((uintptr_t)ptr % expected_alignment == 0);
assert(expected_alignment % sizeof(uint32_t) == 0);
if (MDBX_UNALIGNED_OK >= 8 || (expected_alignment % sizeof(uint64_t)) == 0)
@ -230,11 +230,11 @@ unaligned_peek_u64_volatile(const unsigned expected_alignment,
else {
#if defined(__unaligned) || defined(_M_ARM) || defined(_M_ARM64) || \
defined(_M_X64) || defined(_M_IA64)
return *(const volatile __unaligned uint64_t *)ptr;
return *(volatile const __unaligned uint64_t *)ptr;
#else
const uint32_t lo = ((const volatile uint32_t *)
const uint32_t lo = ((volatile const uint32_t *)
ptr)[__BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__];
const uint32_t hi = ((const volatile uint32_t *)
const uint32_t hi = ((volatile const uint32_t *)
ptr)[__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__];
return lo | (uint64_t)hi << 32;
#endif /* _MSC_VER || __unaligned */
@ -5538,7 +5538,7 @@ static int mdbx_cursor_spill(MDBX_cursor *mc, const MDBX_val *key,
/*----------------------------------------------------------------------------*/
static __always_inline bool meta_bootid_match(const MDBX_meta *meta) {
static bool meta_bootid_match(const MDBX_meta *meta) {
return memcmp(&meta->mm_bootid, &bootid, 16) == 0 &&
(bootid.x | bootid.y) != 0;
}
@ -5554,18 +5554,18 @@ static bool meta_weak_acceptable(const MDBX_env *env, const MDBX_meta *meta,
#define METAPAGE(env, n) page_meta(pgno2page(env, n))
#define METAPAGE_END(env) METAPAGE(env, NUM_METAS)
static __inline txnid_t mdbx_meta_txnid_stable(const MDBX_env *env,
const MDBX_meta *meta) {
MDBX_NOTHROW_PURE_FUNCTION static __inline txnid_t
constmeta_txnid(const MDBX_env *env, const MDBX_meta *meta) {
mdbx_memory_fence(mo_AcquireRelease, false);
txnid_t a = unaligned_peek_u64(4, &meta->mm_txnid_a);
txnid_t b = unaligned_peek_u64(4, &meta->mm_txnid_b);
mdbx_assert(env, a == b);
(void)env;
return a;
return (a == b) ? a : 0;
}
static __inline txnid_t mdbx_meta_txnid_fluid(const MDBX_env *env,
volatile const MDBX_meta *meta) {
static __inline txnid_t meta_txnid(const MDBX_env *env,
volatile const MDBX_meta *meta) {
(void)env;
mdbx_memory_fence(mo_AcquireRelease, false);
txnid_t a = unaligned_peek_u64_volatile(4, &meta->mm_txnid_a);
@ -5573,8 +5573,8 @@ static __inline txnid_t mdbx_meta_txnid_fluid(const MDBX_env *env,
return (a == b) ? a : 0;
}
static __inline void mdbx_meta_update_begin(const MDBX_env *env,
MDBX_meta *meta, txnid_t txnid) {
static __inline void meta_update_begin(const MDBX_env *env, MDBX_meta *meta,
txnid_t txnid) {
mdbx_assert(env, meta >= METAPAGE(env, 0) && meta < METAPAGE_END(env));
mdbx_assert(env, unaligned_peek_u64(4, meta->mm_txnid_a) < txnid &&
unaligned_peek_u64(4, meta->mm_txnid_b) < txnid);
@ -5584,8 +5584,8 @@ static __inline void mdbx_meta_update_begin(const MDBX_env *env,
unaligned_poke_u64(4, meta->mm_txnid_a, txnid);
}
static __inline void mdbx_meta_update_end(const MDBX_env *env, MDBX_meta *meta,
txnid_t txnid) {
static __inline void meta_update_end(const MDBX_env *env, MDBX_meta *meta,
txnid_t txnid) {
mdbx_assert(env, meta >= METAPAGE(env, 0) && meta < METAPAGE_END(env));
mdbx_assert(env, unaligned_peek_u64(4, meta->mm_txnid_a) == txnid);
mdbx_assert(env, unaligned_peek_u64(4, meta->mm_txnid_b) < txnid);
@ -5596,19 +5596,19 @@ static __inline void mdbx_meta_update_end(const MDBX_env *env, MDBX_meta *meta,
mdbx_memory_fence(mo_AcquireRelease, true);
}
static __inline void mdbx_meta_set_txnid(const MDBX_env *env, MDBX_meta *meta,
txnid_t txnid) {
static __inline void meta_set_txnid(const MDBX_env *env, MDBX_meta *meta,
const txnid_t txnid) {
mdbx_assert(env, !env->me_map || meta < METAPAGE(env, 0) ||
meta >= METAPAGE_END(env));
(void)env;
/* update inconsistent since this function used ONLY for filling meta-image
/* update inconsistently since this function used ONLY for filling meta-image
* for writing, but not the actual meta-page */
memcpy(&meta->mm_bootid, &bootid, 16);
unaligned_poke_u64(4, meta->mm_txnid_a, txnid);
unaligned_poke_u64(4, meta->mm_txnid_b, txnid);
}
static __inline uint64_t mdbx_meta_sign(const MDBX_meta *meta) {
static __inline uint64_t meta_sign(const MDBX_meta *meta) {
uint64_t sign = MDBX_DATASIGN_NONE;
#if 0 /* TODO */
sign = hippeus_hash64(...);
@ -5621,38 +5621,34 @@ static __inline uint64_t mdbx_meta_sign(const MDBX_meta *meta) {
enum meta_choise_mode { prefer_last, prefer_steady };
static __inline bool mdbx_meta_ot(const enum meta_choise_mode mode,
const MDBX_env *env, const MDBX_meta *a,
const MDBX_meta *b) {
static __inline bool meta_ot(const enum meta_choise_mode mode,
const MDBX_env *env, volatile const MDBX_meta *a,
volatile const MDBX_meta *b) {
mdbx_jitter4testing(true);
txnid_t txnid_a = mdbx_meta_txnid_fluid(env, a);
txnid_t txnid_b = mdbx_meta_txnid_fluid(env, b);
const txnid_t txnid_a = meta_txnid(env, a);
mdbx_jitter4testing(true);
const txnid_t txnid_b = meta_txnid(env, b);
mdbx_jitter4testing(true);
const bool is_stead_b = META_IS_STEADY(b);
mdbx_jitter4testing(true);
switch (mode) {
default:
assert(false);
__unreachable();
/* fall through */
__fallthrough;
case prefer_steady:
if (META_IS_STEADY(a) != META_IS_STEADY(b))
return META_IS_STEADY(b);
/* fall through */
__fallthrough;
case prefer_last:
if (mode == prefer_steady) {
mdbx_jitter4testing(true);
if (txnid_a == txnid_b)
return META_IS_STEADY(b);
return txnid_a < txnid_b;
const bool is_stead_a = META_IS_STEADY(a);
if (is_stead_a != is_stead_b)
return is_stead_b;
} else {
mdbx_assert(env, mode == prefer_last);
}
if (txnid_a == txnid_b)
return is_stead_b;
return txnid_a < txnid_b;
}
static __inline bool mdbx_meta_eq(const MDBX_env *env, const MDBX_meta *a,
const MDBX_meta *b) {
static bool meta_eq(const MDBX_env *env, volatile const MDBX_meta *a,
volatile const MDBX_meta *b) {
mdbx_jitter4testing(true);
const txnid_t txnid = mdbx_meta_txnid_fluid(env, a);
if (!txnid || txnid != mdbx_meta_txnid_fluid(env, b))
const txnid_t txnid = meta_txnid(env, a);
if (!txnid || txnid != meta_txnid(env, b))
return false;
mdbx_jitter4testing(true);
@ -5663,80 +5659,90 @@ static __inline bool mdbx_meta_eq(const MDBX_env *env, const MDBX_meta *a,
return true;
}
static int mdbx_meta_eq_mask(const MDBX_env *env) {
MDBX_meta *m0 = METAPAGE(env, 0);
MDBX_meta *m1 = METAPAGE(env, 1);
MDBX_meta *m2 = METAPAGE(env, 2);
static int meta_eq_mask(const MDBX_env *env) {
volatile const MDBX_meta *m0 = METAPAGE(env, 0);
volatile const MDBX_meta *m1 = METAPAGE(env, 1);
volatile const MDBX_meta *m2 = METAPAGE(env, 2);
int rc = mdbx_meta_eq(env, m0, m1) ? 1 : 0;
if (mdbx_meta_eq(env, m1, m2))
int rc = meta_eq(env, m0, m1) ? 1 : 0;
if (meta_eq(env, m1, m2))
rc += 2;
if (mdbx_meta_eq(env, m2, m0))
if (meta_eq(env, m2, m0))
rc += 4;
return rc;
}
static __inline MDBX_meta *mdbx_meta_recent(const enum meta_choise_mode mode,
const MDBX_env *env, MDBX_meta *a,
MDBX_meta *b) {
const bool a_older_that_b = mdbx_meta_ot(mode, env, a, b);
mdbx_assert(env, !mdbx_meta_eq(env, a, b));
static __inline volatile const MDBX_meta *
meta_recent(const enum meta_choise_mode mode, const MDBX_env *env,
volatile const MDBX_meta *a, volatile const MDBX_meta *b) {
const bool a_older_that_b = meta_ot(mode, env, a, b);
mdbx_assert(env, !meta_eq(env, a, b));
return a_older_that_b ? b : a;
}
static __inline MDBX_meta *mdbx_meta_ancient(const enum meta_choise_mode mode,
const MDBX_env *env, MDBX_meta *a,
MDBX_meta *b) {
const bool a_older_that_b = mdbx_meta_ot(mode, env, a, b);
mdbx_assert(env, !mdbx_meta_eq(env, a, b));
static const MDBX_meta *meta_ancient_prefer_weak(const MDBX_env *env,
const MDBX_meta *a,
const MDBX_meta *b) {
const bool a_older_that_b = meta_ot(prefer_steady, env, a, b);
mdbx_assert(env, !meta_eq(env, a, b));
return a_older_that_b ? a : b;
}
static __inline MDBX_meta *
mdbx_meta_mostrecent(const enum meta_choise_mode mode, const MDBX_env *env) {
MDBX_meta *m0 = METAPAGE(env, 0);
MDBX_meta *m1 = METAPAGE(env, 1);
MDBX_meta *m2 = METAPAGE(env, 2);
static __inline volatile const MDBX_meta *
meta_mostrecent(const enum meta_choise_mode mode, const MDBX_env *env) {
volatile const MDBX_meta *m0 = METAPAGE(env, 0);
volatile const MDBX_meta *m1 = METAPAGE(env, 1);
volatile const MDBX_meta *m2 = METAPAGE(env, 2);
MDBX_meta *head = mdbx_meta_recent(mode, env, m0, m1);
head = mdbx_meta_recent(mode, env, head, m2);
volatile const MDBX_meta *head = meta_recent(mode, env, m0, m1);
head = meta_recent(mode, env, head, m2);
return head;
}
static MDBX_meta *mdbx_meta_steady(const MDBX_env *env) {
return mdbx_meta_mostrecent(prefer_steady, env);
static volatile const MDBX_meta *meta_prefer_steady(const MDBX_env *env) {
return meta_mostrecent(prefer_steady, env);
}
static MDBX_meta *mdbx_meta_head(const MDBX_env *env) {
return mdbx_meta_mostrecent(prefer_last, env);
MDBX_NOTHROW_PURE_FUNCTION static const MDBX_meta *
constmeta_prefer_steady(const MDBX_env *env) {
return (const MDBX_meta *)meta_mostrecent(prefer_steady, env);
}
static volatile const MDBX_meta *meta_prefer_last(const MDBX_env *env) {
return meta_mostrecent(prefer_last, env);
}
MDBX_NOTHROW_PURE_FUNCTION static const MDBX_meta *
constmeta_prefer_last(const MDBX_env *env) {
return (const MDBX_meta *)meta_mostrecent(prefer_last, env);
}
static txnid_t mdbx_recent_committed_txnid(const MDBX_env *env) {
while (true) {
const volatile MDBX_meta *head = mdbx_meta_head(env);
const txnid_t recent = mdbx_meta_txnid_fluid(env, head);
volatile const MDBX_meta *head = meta_prefer_last(env);
const txnid_t recent = meta_txnid(env, head);
mdbx_compiler_barrier();
if (likely(head == mdbx_meta_head(env) &&
recent == mdbx_meta_txnid_fluid(env, head)))
if (likely(head == meta_prefer_last(env) &&
recent == meta_txnid(env, head)))
return recent;
}
}
static txnid_t mdbx_recent_steady_txnid(const MDBX_env *env) {
while (true) {
const volatile MDBX_meta *head = mdbx_meta_steady(env);
const txnid_t recent = mdbx_meta_txnid_fluid(env, head);
volatile const MDBX_meta *head = meta_prefer_steady(env);
const txnid_t recent = meta_txnid(env, head);
mdbx_compiler_barrier();
if (likely(head == mdbx_meta_steady(env) &&
recent == mdbx_meta_txnid_fluid(env, head)))
if (likely(head == meta_prefer_steady(env) &&
recent == meta_txnid(env, head)))
return recent;
}
}
static const char *mdbx_durable_str(const MDBX_meta *const meta) {
static const char *mdbx_durable_str(volatile const MDBX_meta *const meta) {
if (META_IS_STEADY(meta))
return (unaligned_peek_u64(4, meta->mm_datasync_sign) ==
mdbx_meta_sign(meta))
return (unaligned_peek_u64_volatile(4, meta->mm_datasync_sign) ==
meta_sign((const MDBX_meta *)meta))
? "Steady"
: "Tainted";
return "Weak";
@ -6256,7 +6262,7 @@ static int mdbx_meta_unsteady(MDBX_env *env, const txnid_t last_steady,
MDBX_meta *const meta, mdbx_filehandle_t fd) {
const uint64_t wipe = MDBX_DATASIGN_NONE;
if (unlikely(META_IS_STEADY(meta)) &&
mdbx_meta_txnid_stable(env, meta) <= last_steady) {
constmeta_txnid(env, meta) <= last_steady) {
mdbx_warning("wipe txn #%" PRIaTXN ", meta %" PRIaPGNO, last_steady,
data_page(meta)->mp_pgno);
if (env->me_flags & MDBX_WRITEMAP)
@ -6661,16 +6667,16 @@ no_loose:
const size_t next = (size_t)pgno + num;
if (flags & MDBX_ALLOC_GC) {
const MDBX_meta *const head = mdbx_meta_head(env);
MDBX_meta *const steady = mdbx_meta_steady(env);
const MDBX_meta *const head = constmeta_prefer_last(env);
const MDBX_meta *const steady = constmeta_prefer_steady(env);
/* does reclaiming stopped at the last steady point? */
if (head != steady && META_IS_STEADY(steady) &&
oldest == mdbx_meta_txnid_stable(env, steady)) {
oldest == constmeta_txnid(env, steady)) {
mdbx_debug("gc-kick-steady: head %" PRIaTXN "-%s, tail %" PRIaTXN
"-%s, oldest %" PRIaTXN,
mdbx_meta_txnid_stable(env, head), mdbx_durable_str(head),
mdbx_meta_txnid_stable(env, steady),
mdbx_durable_str(steady), oldest);
constmeta_txnid(env, head), mdbx_durable_str(head),
constmeta_txnid(env, steady), mdbx_durable_str(steady),
oldest);
ret.err = MDBX_RESULT_TRUE;
const pgno_t autosync_threshold =
atomic_load32(&env->me_lck->mti_autosync_threshold, mo_Relaxed);
@ -6691,7 +6697,7 @@ no_loose:
* without any auto-sync threshold(s). */
ret.err = mdbx_wipe_steady(env, oldest);
mdbx_debug("gc-wipe-steady, rc %d", ret.err);
mdbx_assert(env, steady != mdbx_meta_steady(env));
mdbx_assert(env, steady != meta_prefer_steady(env));
} else if ((flags & MDBX_ALLOC_NEW) == 0 ||
(autosync_threshold &&
atomic_load32(&env->me_lck->mti_unsynced_pages,
@ -6708,7 +6714,7 @@ no_loose:
MDBX_meta meta = *head;
ret.err = mdbx_sync_locked(env, env->me_flags & MDBX_WRITEMAP, &meta);
mdbx_debug("gc-make-steady, rc %d", ret.err);
mdbx_assert(env, steady != mdbx_meta_steady(env));
mdbx_assert(env, steady != meta_prefer_steady(env));
}
if (ret.err == MDBX_SUCCESS) {
if (mdbx_find_oldest(txn) > oldest)
@ -7070,8 +7076,8 @@ retry:;
const pgno_t unsynced_pages =
atomic_load32(&env->me_lck->mti_unsynced_pages, mo_Relaxed);
const MDBX_meta *head = mdbx_meta_head(env);
const txnid_t head_txnid = mdbx_meta_txnid_fluid(env, head);
volatile const MDBX_meta *head = meta_prefer_last(env);
const txnid_t head_txnid = meta_txnid(env, head);
const uint32_t synched_meta_txnid_u32 =
atomic_load32(&env->me_lck->mti_meta_sync_txnid, mo_Relaxed);
if (unsynced_pages == 0 && synched_meta_txnid_u32 == (uint32_t)head_txnid &&
@ -7138,7 +7144,7 @@ retry:;
goto retry;
}
env->me_txn0->mt_txnid = head_txnid;
mdbx_assert(env, head_txnid == mdbx_meta_txnid_fluid(env, head));
mdbx_assert(env, head_txnid == meta_txnid(env, head));
mdbx_assert(env, head_txnid == mdbx_recent_committed_txnid(env));
mdbx_find_oldest(env->me_txn0);
flags |= MDBX_SHRINK_ALLOWED;
@ -7150,7 +7156,7 @@ retry:;
if (!META_IS_STEADY(head) ||
((flags & MDBX_SAFE_NOSYNC) == 0 && unsynced_pages)) {
mdbx_debug("meta-head %" PRIaPGNO ", %s, sync_pending %" PRIaPGNO,
data_page(head)->mp_pgno, mdbx_durable_str(head),
data_page((const void *)head)->mp_pgno, mdbx_durable_str(head),
unsynced_pages);
MDBX_meta meta = *head;
rc = mdbx_sync_locked(env, flags, &meta);
@ -7614,9 +7620,9 @@ static int mdbx_txn_renew0(MDBX_txn *txn, const unsigned flags) {
/* Seek & fetch the last meta */
if (likely(/* not recovery mode */ env->me_stuck_meta < 0)) {
while (1) {
const volatile MDBX_meta *const meta = mdbx_meta_head(env);
volatile const MDBX_meta *const meta = meta_prefer_last(env);
mdbx_jitter4testing(false);
const txnid_t snap = mdbx_meta_txnid_fluid(env, meta);
const txnid_t snap = meta_txnid(env, meta);
mdbx_jitter4testing(false);
if (likely(r)) {
safe64_reset(&r->mr_txnid, false);
@ -7650,8 +7656,8 @@ static int mdbx_txn_renew0(MDBX_txn *txn, const unsigned flags) {
* but it is reasonable paranoia to avoid compiler misoptimization
* and makes clarity for code readers. */
mdbx_compiler_barrier();
if (likely(meta == mdbx_meta_head(env) &&
snap == mdbx_meta_txnid_fluid(env, meta) &&
if (likely(meta == meta_prefer_last(env) &&
snap == meta_txnid(env, meta) &&
snap >= atomic_load64(&env->me_lck->mti_oldest_reader,
mo_AcquireRelease))) {
mdbx_jitter4testing(false);
@ -7661,7 +7667,7 @@ static int mdbx_txn_renew0(MDBX_txn *txn, const unsigned flags) {
} else {
/* r/o recovery mode */
MDBX_meta *const meta = METAPAGE(env, env->me_stuck_meta);
txn->mt_txnid = mdbx_meta_txnid_stable(env, meta);
txn->mt_txnid = constmeta_txnid(env, meta);
txn->mt_geo = meta->mm_geo;
memcpy(txn->mt_dbs, meta->mm_dbs, CORE_DBS * sizeof(MDBX_db));
txn->mt_canary = meta->mm_canary;
@ -7734,10 +7740,10 @@ static int mdbx_txn_renew0(MDBX_txn *txn, const unsigned flags) {
#endif /* Windows */
mdbx_jitter4testing(false);
MDBX_meta *meta = mdbx_meta_head(env);
const MDBX_meta *meta = constmeta_prefer_last(env);
mdbx_jitter4testing(false);
txn->mt_canary = meta->mm_canary;
const txnid_t snap = mdbx_meta_txnid_stable(env, meta);
const txnid_t snap = constmeta_txnid(env, meta);
txn->mt_txnid = safe64_txnid_next(snap);
if (unlikely(txn->mt_txnid > MAX_TXNID)) {
rc = MDBX_TXN_FULL;
@ -8144,21 +8150,22 @@ int mdbx_txn_info(const MDBX_txn *txn, MDBX_txn_info *info, bool scan_rlt) {
info->txn_space_used = pgno2bytes(env, txn->mt_geo.next);
if (txn->mt_flags & MDBX_TXN_RDONLY) {
const MDBX_meta *head_meta;
volatile const MDBX_meta *head_meta;
txnid_t head_txnid;
uint64_t head_retired;
do {
/* fetch info from volatile head */
head_meta = mdbx_meta_head(env);
head_txnid = mdbx_meta_txnid_fluid(env, head_meta);
head_retired = unaligned_peek_u64(4, head_meta->mm_pages_retired);
head_meta = meta_prefer_last(env);
head_txnid = meta_txnid(env, head_meta);
head_retired =
unaligned_peek_u64_volatile(4, head_meta->mm_pages_retired);
info->txn_space_limit_soft = pgno2bytes(env, head_meta->mm_geo.now);
info->txn_space_limit_hard = pgno2bytes(env, head_meta->mm_geo.upper);
info->txn_space_leftover =
pgno2bytes(env, head_meta->mm_geo.now - head_meta->mm_geo.next);
mdbx_compiler_barrier();
} while (unlikely(head_meta != mdbx_meta_head(env) ||
head_txnid != mdbx_meta_txnid_fluid(env, head_meta)));
} while (unlikely(head_meta != meta_prefer_last(env) ||
head_txnid != meta_txnid(env, head_meta)));
info->txn_reader_lag = head_txnid - info->txn_id;
info->txn_space_dirty = info->txn_space_retired = 0;
@ -10266,7 +10273,8 @@ int mdbx_txn_commit_ex(MDBX_txn *txn, MDBX_commit_latency *latency) {
if (likely(rc == MDBX_SUCCESS)) {
MDBX_meta meta, *head = mdbx_meta_head(env);
const MDBX_meta *head = constmeta_prefer_last(env);
MDBX_meta meta;
memcpy(meta.mm_magic_and_version, head->mm_magic_and_version, 8);
meta.mm_extra_flags = head->mm_extra_flags;
meta.mm_validator_id = head->mm_validator_id;
@ -10287,7 +10295,7 @@ int mdbx_txn_commit_ex(MDBX_txn *txn, MDBX_commit_latency *latency) {
? txn->mt_txnid
: txn->mt_dbs[MAIN_DBI].md_mod_txnid;
meta.mm_canary = txn->mt_canary;
mdbx_meta_set_txnid(env, &meta, txn->mt_txnid);
meta_set_txnid(env, &meta, txn->mt_txnid);
rc = mdbx_sync_locked(
env, env->me_flags | txn->mt_flags | MDBX_SHRINK_ALLOWED, &meta);
@ -10373,11 +10381,11 @@ static int mdbx_validate_meta(MDBX_env *env, MDBX_meta *const meta,
/* LY: check signature as a checksum */
if (META_IS_STEADY(meta) &&
unlikely(unaligned_peek_u64(4, &meta->mm_datasync_sign) !=
mdbx_meta_sign(meta))) {
meta_sign(meta))) {
mdbx_warning("meta[%u] has invalid steady-checksum (0x%" PRIx64
" != 0x%" PRIx64 "), skip it",
meta_number, unaligned_peek_u64(4, &meta->mm_datasync_sign),
mdbx_meta_sign(meta));
meta_sign(meta));
return MDBX_RESULT_TRUE;
}
@ -10641,9 +10649,8 @@ __cold static int mdbx_read_header(MDBX_env *env, MDBX_meta *dest,
continue;
if ((env->me_stuck_meta < 0)
? mdbx_meta_ot(meta_bootid_match(meta) ? prefer_last
: prefer_steady,
env, dest, meta)
? meta_ot(meta_bootid_match(meta) ? prefer_last : prefer_steady,
env, dest, meta)
: (meta_number == (unsigned)env->me_stuck_meta)) {
*dest = *meta;
if (!lck_exclusive && !META_IS_STEADY(dest))
@ -10707,9 +10714,8 @@ __cold static MDBX_page *mdbx_meta_model(const MDBX_env *env, MDBX_page *model,
model_meta->mm_dbs[FREE_DBI].md_flags = MDBX_INTEGERKEY;
model_meta->mm_dbs[FREE_DBI].md_root = P_INVALID;
model_meta->mm_dbs[MAIN_DBI].md_root = P_INVALID;
mdbx_meta_set_txnid(env, model_meta, MIN_TXNID + num);
unaligned_poke_u64(4, model_meta->mm_datasync_sign,
mdbx_meta_sign(model_meta));
meta_set_txnid(env, model_meta, MIN_TXNID + num);
unaligned_poke_u64(4, model_meta->mm_datasync_sign, meta_sign(model_meta));
return (MDBX_page *)((uint8_t *)model + env->me_psize);
}
@ -10720,9 +10726,9 @@ __cold static MDBX_meta *mdbx_init_metas(const MDBX_env *env, void *buffer) {
MDBX_page *page1 = mdbx_meta_model(env, page0, 0);
MDBX_page *page2 = mdbx_meta_model(env, page1, 1);
mdbx_meta_model(env, page2, 2);
mdbx_assert(env, !mdbx_meta_eq(env, page_meta(page0), page_meta(page1)));
mdbx_assert(env, !mdbx_meta_eq(env, page_meta(page1), page_meta(page2)));
mdbx_assert(env, !mdbx_meta_eq(env, page_meta(page2), page_meta(page0)));
mdbx_assert(env, !meta_eq(env, page_meta(page0), page_meta(page1)));
mdbx_assert(env, !meta_eq(env, page_meta(page1), page_meta(page2)));
mdbx_assert(env, !meta_eq(env, page_meta(page2), page_meta(page0)));
return page_meta(page2);
}
@ -10743,13 +10749,13 @@ static size_t mdbx_madvise_threshold(const MDBX_env *env,
static int mdbx_sync_locked(MDBX_env *env, unsigned flags,
MDBX_meta *const pending) {
mdbx_assert(env, ((env->me_flags ^ flags) & MDBX_WRITEMAP) == 0);
MDBX_meta *const meta0 = METAPAGE(env, 0);
MDBX_meta *const meta1 = METAPAGE(env, 1);
MDBX_meta *const meta2 = METAPAGE(env, 2);
MDBX_meta *const head = mdbx_meta_head(env);
const MDBX_meta *const meta0 = METAPAGE(env, 0);
const MDBX_meta *const meta1 = METAPAGE(env, 1);
const MDBX_meta *const meta2 = METAPAGE(env, 2);
const MDBX_meta *const head = constmeta_prefer_last(env);
int rc;
mdbx_assert(env, mdbx_meta_eq_mask(env) == 0);
mdbx_assert(env, meta_eq_mask(env) == 0);
mdbx_assert(env,
pending < METAPAGE(env, 0) || pending > METAPAGE(env, NUM_METAS));
mdbx_assert(env, (env->me_flags & (MDBX_RDONLY | MDBX_FATAL_ERROR)) == 0);
@ -10853,12 +10859,12 @@ static int mdbx_sync_locked(MDBX_env *env, unsigned flags,
const pgno_t bottom =
(aligned > pending->mm_geo.lower) ? aligned : pending->mm_geo.lower;
if (pending->mm_geo.now > bottom) {
if (META_IS_STEADY(mdbx_meta_steady(env)))
if (META_IS_STEADY(meta_prefer_steady(env)))
/* force steady, but only if steady-checkpoint is present */
flags &= MDBX_WRITEMAP | MDBX_SHRINK_ALLOWED;
shrink = pending->mm_geo.now - bottom;
pending->mm_geo.now = bottom;
if (unlikely(mdbx_meta_txnid_stable(env, head) ==
if (unlikely(constmeta_txnid(env, head) ==
unaligned_peek_u64(4, pending->mm_txnid_a))) {
const txnid_t txnid =
safe64_txnid_next(unaligned_peek_u64(4, pending->mm_txnid_a));
@ -10871,7 +10877,7 @@ static int mdbx_sync_locked(MDBX_env *env, unsigned flags,
mdbx_error("txnid overflow, raise %d", rc);
goto fail;
}
mdbx_meta_set_txnid(env, pending, txnid);
meta_set_txnid(env, pending, txnid);
}
}
}
@ -10885,7 +10891,7 @@ static int mdbx_sync_locked(MDBX_env *env, unsigned flags,
enum mdbx_syncmode_bits mode_bits = MDBX_SYNC_NONE;
if ((flags & MDBX_SAFE_NOSYNC) == 0) {
mode_bits = MDBX_SYNC_DATA;
if (pending->mm_geo.next > mdbx_meta_steady(env)->mm_geo.now)
if (pending->mm_geo.next > meta_prefer_steady(env)->mm_geo.now)
mode_bits |= MDBX_SYNC_SIZE;
if (flags & MDBX_NOMETASYNC)
mode_bits |= MDBX_SYNC_IODQ;
@ -10909,7 +10915,7 @@ static int mdbx_sync_locked(MDBX_env *env, unsigned flags,
if (rc == MDBX_RESULT_FALSE /* carry steady */) {
atomic_store64(&env->me_lck->mti_sync_timestamp, mdbx_osal_monotime(),
mo_Relaxed);
unaligned_poke_u64(4, pending->mm_datasync_sign, mdbx_meta_sign(pending));
unaligned_poke_u64(4, pending->mm_datasync_sign, meta_sign(pending));
atomic_store32(&env->me_lck->mti_unsynced_pages, 0, mo_Relaxed);
} else {
assert(rc == MDBX_RESULT_TRUE /* carry non-steady */);
@ -10917,7 +10923,7 @@ static int mdbx_sync_locked(MDBX_env *env, unsigned flags,
}
MDBX_meta *target = nullptr;
if (mdbx_meta_txnid_stable(env, head) ==
if (constmeta_txnid(env, head) ==
unaligned_peek_u64(4, pending->mm_txnid_a)) {
mdbx_assert(env, memcmp(&head->mm_dbs, &pending->mm_dbs,
sizeof(head->mm_dbs)) == 0);
@ -10926,19 +10932,19 @@ static int mdbx_sync_locked(MDBX_env *env, unsigned flags,
mdbx_assert(env, memcmp(&head->mm_geo, &pending->mm_geo,
sizeof(pending->mm_geo)) == 0);
if (!META_IS_STEADY(head) && META_IS_STEADY(pending))
target = head;
target = (MDBX_meta *)head;
else {
mdbx_ensure(env, mdbx_meta_eq(env, head, pending));
mdbx_ensure(env, meta_eq(env, head, pending));
mdbx_debug("%s", "skip update meta");
return MDBX_SUCCESS;
}
} else if (head == meta0)
target = mdbx_meta_ancient(prefer_steady, env, meta1, meta2);
target = (MDBX_meta *)meta_ancient_prefer_weak(env, meta1, meta2);
else if (head == meta1)
target = mdbx_meta_ancient(prefer_steady, env, meta0, meta2);
target = (MDBX_meta *)meta_ancient_prefer_weak(env, meta0, meta2);
else {
mdbx_assert(env, head == meta2);
target = mdbx_meta_ancient(prefer_steady, env, meta0, meta1);
target = (MDBX_meta *)meta_ancient_prefer_weak(env, meta0, meta1);
}
/* LY: step#2 - update meta-page. */
@ -10957,30 +10963,30 @@ static int mdbx_sync_locked(MDBX_env *env, unsigned flags,
(meta0 == head) ? "head"
: (meta0 == target) ? "tail"
: "stay",
mdbx_durable_str(meta0), mdbx_meta_txnid_fluid(env, meta0),
mdbx_durable_str(meta0), meta_txnid(env, meta0),
meta0->mm_dbs[MAIN_DBI].md_root, meta0->mm_dbs[FREE_DBI].md_root);
mdbx_debug("meta1: %s, %s, txn_id %" PRIaTXN ", root %" PRIaPGNO
"/%" PRIaPGNO,
(meta1 == head) ? "head"
: (meta1 == target) ? "tail"
: "stay",
mdbx_durable_str(meta1), mdbx_meta_txnid_fluid(env, meta1),
mdbx_durable_str(meta1), meta_txnid(env, meta1),
meta1->mm_dbs[MAIN_DBI].md_root, meta1->mm_dbs[FREE_DBI].md_root);
mdbx_debug("meta2: %s, %s, txn_id %" PRIaTXN ", root %" PRIaPGNO
"/%" PRIaPGNO,
(meta2 == head) ? "head"
: (meta2 == target) ? "tail"
: "stay",
mdbx_durable_str(meta2), mdbx_meta_txnid_fluid(env, meta2),
mdbx_durable_str(meta2), meta_txnid(env, meta2),
meta2->mm_dbs[MAIN_DBI].md_root, meta2->mm_dbs[FREE_DBI].md_root);
mdbx_assert(env, !mdbx_meta_eq(env, pending, meta0));
mdbx_assert(env, !mdbx_meta_eq(env, pending, meta1));
mdbx_assert(env, !mdbx_meta_eq(env, pending, meta2));
mdbx_assert(env, !meta_eq(env, pending, meta0));
mdbx_assert(env, !meta_eq(env, pending, meta1));
mdbx_assert(env, !meta_eq(env, pending, meta2));
mdbx_assert(env, ((env->me_flags ^ flags) & MDBX_WRITEMAP) == 0);
mdbx_ensure(env,
target == head || mdbx_meta_txnid_stable(env, target) <
target == head || constmeta_txnid(env, target) <
unaligned_peek_u64(4, pending->mm_txnid_a));
#if MDBX_ENABLE_PGOP_STAT
env->me_lck->mti_pgop_stat.wops.weak += 1;
@ -10989,8 +10995,8 @@ static int mdbx_sync_locked(MDBX_env *env, unsigned flags,
mdbx_jitter4testing(true);
if (likely(target != head)) {
/* LY: 'invalidate' the meta. */
mdbx_meta_update_begin(env, target,
unaligned_peek_u64(4, pending->mm_txnid_a));
meta_update_begin(env, target,
unaligned_peek_u64(4, pending->mm_txnid_a));
unaligned_poke_u64(4, target->mm_datasync_sign, MDBX_DATASIGN_WEAK);
#ifndef NDEBUG
/* debug: provoke failure to catch a violators, but don't touch mm_psize
@ -11010,13 +11016,12 @@ static int mdbx_sync_locked(MDBX_env *env, unsigned flags,
mdbx_jitter4testing(true);
/* LY: 'commit' the meta */
mdbx_meta_update_end(env, target,
unaligned_peek_u64(4, pending->mm_txnid_b));
meta_update_end(env, target, unaligned_peek_u64(4, pending->mm_txnid_b));
mdbx_jitter4testing(true);
} else {
/* dangerous case (target == head), only mm_datasync_sign could
* me updated, check assertions once again */
mdbx_ensure(env, mdbx_meta_txnid_stable(env, head) ==
mdbx_ensure(env, constmeta_txnid(env, head) ==
unaligned_peek_u64(4, pending->mm_txnid_a) &&
!META_IS_STEADY(head) && META_IS_STEADY(pending));
mdbx_ensure(env, memcmp(&head->mm_geo, &pending->mm_geo,
@ -11308,9 +11313,9 @@ mdbx_env_set_geometry(MDBX_env *env, intptr_t size_lower, intptr_t size_now,
return err;
need_unlock = true;
}
MDBX_meta *head = mdbx_meta_head(env);
const MDBX_meta *head = constmeta_prefer_last(env);
if (!inside_txn) {
env->me_txn0->mt_txnid = mdbx_meta_txnid_stable(env, head);
env->me_txn0->mt_txnid = constmeta_txnid(env, head);
mdbx_find_oldest(env->me_txn0);
}
@ -11522,12 +11527,12 @@ mdbx_env_set_geometry(MDBX_env *env, intptr_t size_lower, intptr_t size_now,
/* apply new params to opened environment */
mdbx_ensure(env, pagesize == (intptr_t)env->me_psize);
MDBX_meta meta;
MDBX_meta *head = nullptr;
const MDBX_meta *head = nullptr;
const MDBX_geo *current_geo;
if (inside_txn) {
current_geo = &env->me_txn->mt_geo;
} else {
head = mdbx_meta_head(env);
head = constmeta_prefer_last(env);
meta = *head;
current_geo = &meta.mm_geo;
}
@ -11602,20 +11607,19 @@ mdbx_env_set_geometry(MDBX_env *env, intptr_t size_lower, intptr_t size_now,
goto bailout;
mdbx_assert(env, (head == nullptr) == inside_txn);
if (head)
head = /* base address could be changed */ mdbx_meta_head(env);
head = /* base address could be changed */ constmeta_prefer_last(env);
}
if (inside_txn) {
env->me_txn->mt_geo = new_geo;
env->me_txn->mt_flags |= MDBX_TXN_DIRTY;
} else {
meta.mm_geo = new_geo;
const txnid_t txnid =
safe64_txnid_next(mdbx_meta_txnid_stable(env, head));
const txnid_t txnid = safe64_txnid_next(constmeta_txnid(env, head));
if (unlikely(txnid > MAX_TXNID)) {
rc = MDBX_TXN_FULL;
mdbx_error("txnid overflow, raise %d", rc);
} else {
mdbx_meta_set_txnid(env, &meta, txnid);
meta_set_txnid(env, &meta, txnid);
rc = mdbx_sync_locked(env, env->me_flags, &meta);
}
}
@ -11908,7 +11912,7 @@ __cold static int mdbx_setup_dxb(MDBX_env *env, const int lck_rc,
}
} else /* not recovery mode */
while (1) {
const unsigned meta_clash_mask = mdbx_meta_eq_mask(env);
const unsigned meta_clash_mask = meta_eq_mask(env);
if (unlikely(meta_clash_mask)) {
mdbx_error("meta-pages are clashed: mask 0x%d", meta_clash_mask);
return MDBX_CORRUPTED;
@ -11917,10 +11921,10 @@ __cold static int mdbx_setup_dxb(MDBX_env *env, const int lck_rc,
if (lck_rc != /* lck exclusive */ MDBX_RESULT_TRUE) {
/* non-exclusive mode,
* meta-pages should be validated by a first process opened the DB */
MDBX_meta *const head = mdbx_meta_head(env);
MDBX_meta *const steady = mdbx_meta_steady(env);
const txnid_t head_txnid = mdbx_meta_txnid_fluid(env, head);
const txnid_t steady_txnid = mdbx_meta_txnid_fluid(env, steady);
volatile const MDBX_meta *const head = meta_prefer_last(env);
volatile const MDBX_meta *const steady = meta_prefer_steady(env);
const txnid_t head_txnid = meta_txnid(env, head);
const txnid_t steady_txnid = meta_txnid(env, steady);
if (head_txnid == steady_txnid)
break;
@ -11940,9 +11944,9 @@ __cold static int mdbx_setup_dxb(MDBX_env *env, const int lck_rc,
/* exclusive mode */
MDBX_meta clone;
MDBX_meta const *const steady = mdbx_meta_steady(env);
MDBX_meta const *const head = mdbx_meta_head(env);
const txnid_t steady_txnid = mdbx_meta_txnid_fluid(env, steady);
const MDBX_meta *const steady = constmeta_prefer_steady(env);
const MDBX_meta *const head = constmeta_prefer_last(env);
const txnid_t steady_txnid = meta_txnid(env, steady);
if (META_IS_STEADY(steady)) {
err = mdbx_validate_meta_copy(env, steady, &clone);
if (unlikely(err != MDBX_SUCCESS)) {
@ -11957,7 +11961,7 @@ __cold static int mdbx_setup_dxb(MDBX_env *env, const int lck_rc,
}
const pgno_t pgno = bytes2pgno(env, (uint8_t *)head - env->me_map);
const txnid_t head_txnid = mdbx_meta_txnid_fluid(env, head);
const txnid_t head_txnid = meta_txnid(env, head);
const bool head_valid =
mdbx_validate_meta_copy(env, head, &clone) == MDBX_SUCCESS;
mdbx_assert(env, !META_IS_STEADY(steady) || head_txnid != steady_txnid);
@ -12020,13 +12024,13 @@ __cold static int mdbx_setup_dxb(MDBX_env *env, const int lck_rc,
pgno, head_txnid, err);
return err;
}
mdbx_ensure(env, 0 == mdbx_meta_txnid_fluid(env, head));
mdbx_ensure(env, 0 == mdbx_meta_eq_mask(env));
mdbx_ensure(env, 0 == meta_txnid(env, head));
mdbx_ensure(env, 0 == meta_eq_mask(env));
}
if (lck_rc == /* lck exclusive */ MDBX_RESULT_TRUE) {
//-------------------------------------------------- shrink DB & update geo
const MDBX_meta *head = mdbx_meta_head(env);
const MDBX_meta *head = constmeta_prefer_last(env);
/* re-check size after mmap */
if ((env->me_dxb_mmap.current & (env->me_os_psize - 1)) != 0 ||
env->me_dxb_mmap.current < used_bytes) {
@ -12054,7 +12058,7 @@ __cold static int mdbx_setup_dxb(MDBX_env *env, const int lck_rc,
meta.mm_geo.lower, meta.mm_geo.now, meta.mm_geo.upper,
pv2pages(meta.mm_geo.shrink_pv), pv2pages(meta.mm_geo.grow_pv));
} else {
const txnid_t txnid = mdbx_meta_txnid_stable(env, head);
const txnid_t txnid = constmeta_txnid(env, head);
const txnid_t next_txnid = safe64_txnid_next(txnid);
if (unlikely(txnid > MAX_TXNID)) {
mdbx_error("txnid overflow, raise %d", MDBX_TXN_FULL);
@ -12072,8 +12076,8 @@ __cold static int mdbx_setup_dxb(MDBX_env *env, const int lck_rc,
pv2pages(meta.mm_geo.shrink_pv),
pv2pages(meta.mm_geo.grow_pv), next_txnid);
mdbx_ensure(env, mdbx_meta_eq(env, &meta, head));
mdbx_meta_set_txnid(env, &meta, next_txnid);
mdbx_ensure(env, meta_eq(env, &meta, head));
meta_set_txnid(env, &meta, next_txnid);
err = mdbx_sync_locked(env, env->me_flags | MDBX_SHRINK_ALLOWED, &meta);
if (err) {
mdbx_error("error %d, while updating meta.geo: "
@ -12100,7 +12104,7 @@ __cold static int mdbx_setup_dxb(MDBX_env *env, const int lck_rc,
MDBX_meta *const pmeta = METAPAGE(env, n);
if (unlikely(unaligned_peek_u64(4, &pmeta->mm_magic_and_version) !=
MDBX_DATA_MAGIC)) {
const txnid_t txnid = mdbx_meta_txnid_fluid(env, pmeta);
const txnid_t txnid = meta_txnid(env, pmeta);
mdbx_notice("%s %s"
"meta[%u], txnid %" PRIaTXN,
"updating db-format signature for",
@ -12431,7 +12435,7 @@ __cold static int __must_check_result mdbx_override_meta(
MDBX_page *const page = env->me_pbuf;
mdbx_meta_model(env, page, target);
MDBX_meta *const model = page_meta(page);
mdbx_meta_set_txnid(env, model, txnid);
meta_set_txnid(env, model, txnid);
if (shape) {
model->mm_extra_flags = shape->mm_extra_flags;
model->mm_validator_id = shape->mm_validator_id;
@ -12442,7 +12446,7 @@ __cold static int __must_check_result mdbx_override_meta(
memcpy(&model->mm_pages_retired, &shape->mm_pages_retired,
sizeof(model->mm_pages_retired));
}
unaligned_poke_u64(4, model->mm_datasync_sign, mdbx_meta_sign(model));
unaligned_poke_u64(4, model->mm_datasync_sign, meta_sign(model));
rc = mdbx_validate_meta(env, model, page, target, nullptr);
if (unlikely(MDBX_IS_ERROR(rc)))
return MDBX_PROBLEM;
@ -12488,8 +12492,7 @@ __cold int mdbx_env_turn_for_recovery(MDBX_env *env, unsigned target) {
return MDBX_EPERM;
const MDBX_meta *target_meta = METAPAGE(env, target);
txnid_t new_txnid =
safe64_txnid_next(mdbx_meta_txnid_stable(env, target_meta));
txnid_t new_txnid = safe64_txnid_next(constmeta_txnid(env, target_meta));
for (unsigned n = 0; n < NUM_METAS; ++n) {
MDBX_page *page = pgno2page(env, n);
MDBX_meta meta = *page_meta(page);
@ -12500,7 +12503,7 @@ __cold int mdbx_env_turn_for_recovery(MDBX_env *env, unsigned target) {
if (unlikely(err != MDBX_SUCCESS))
return err;
} else {
txnid_t txnid = mdbx_meta_txnid_stable(env, &meta);
txnid_t txnid = constmeta_txnid(env, &meta);
if (new_txnid <= txnid)
new_txnid = safe64_txnid_next(txnid);
}
@ -12944,14 +12947,14 @@ __cold int mdbx_env_open(MDBX_env *env, const char *pathname,
#if MDBX_DEBUG
if (rc == MDBX_SUCCESS) {
MDBX_meta *meta = mdbx_meta_head(env);
MDBX_db *db = &meta->mm_dbs[MAIN_DBI];
const MDBX_meta *meta = constmeta_prefer_last(env);
const MDBX_db *db = &meta->mm_dbs[MAIN_DBI];
mdbx_debug("opened database version %u, pagesize %u",
(uint8_t)unaligned_peek_u64(4, meta->mm_magic_and_version),
env->me_psize);
mdbx_debug("using meta page %" PRIaPGNO ", txn %" PRIaTXN,
data_page(meta)->mp_pgno, mdbx_meta_txnid_fluid(env, meta));
data_page(meta)->mp_pgno, meta_txnid(env, meta));
mdbx_debug("depth: %u", db->md_depth);
mdbx_debug("entries: %" PRIu64, db->md_entries);
mdbx_debug("branch pages: %" PRIaPGNO, db->md_branch_pages);
@ -18997,7 +19000,7 @@ __cold static void compact_fixup_meta(MDBX_env *env, MDBX_meta *meta) {
/* Update signature */
assert(meta->mm_geo.now >= meta->mm_geo.next);
unaligned_poke_u64(4, meta->mm_datasync_sign, mdbx_meta_sign(meta));
unaligned_poke_u64(4, meta->mm_datasync_sign, meta_sign(meta));
}
/* Make resizeable */
@ -19021,7 +19024,7 @@ __cold static int mdbx_env_compact(MDBX_env *env, MDBX_txn *read_txn,
uint8_t *const data_buffer =
buffer + ceil_powerof2(meta_bytes, env->me_os_psize);
MDBX_meta *const meta = mdbx_init_metas(env, buffer);
mdbx_meta_set_txnid(env, meta, read_txn->mt_txnid);
meta_set_txnid(env, meta, read_txn->mt_txnid);
if (flags & MDBX_CP_FORCE_DYNAMIC_SIZE)
make_sizeable(meta);
@ -19029,7 +19032,7 @@ __cold static int mdbx_env_compact(MDBX_env *env, MDBX_txn *read_txn,
/* copy canary sequences if present */
if (read_txn->mt_canary.v) {
meta->mm_canary = read_txn->mt_canary;
meta->mm_canary.v = mdbx_meta_txnid_stable(env, meta);
meta->mm_canary.v = constmeta_txnid(env, meta);
}
/* Set metapage 1 with current main DB */
@ -19182,13 +19185,13 @@ __cold static int mdbx_env_copy_asis(MDBX_env *env, MDBX_txn *read_txn,
* but writing ones after the data was flushed */
memcpy(buffer, env->me_map, meta_bytes);
MDBX_meta *const headcopy = /* LY: get pointer to the snapshot copy */
(MDBX_meta *)(buffer + ((uint8_t *)mdbx_meta_head(env) - env->me_map));
(MDBX_meta *)(buffer + ((uint8_t *)meta_prefer_last(env) - env->me_map));
mdbx_txn_unlock(env);
if (flags & MDBX_CP_FORCE_DYNAMIC_SIZE)
make_sizeable(headcopy);
/* Update signature to steady */
unaligned_poke_u64(4, headcopy->mm_datasync_sign, mdbx_meta_sign(headcopy));
unaligned_poke_u64(4, headcopy->mm_datasync_sign, meta_sign(headcopy));
/* Copy the data */
const size_t whole_size = pgno_align2os_bytes(env, read_txn->mt_end_pgno);
@ -19729,13 +19732,13 @@ __cold static int fetch_envinfo_ex(const MDBX_env *env, const MDBX_txn *txn,
if (unlikely(env->me_flags & MDBX_FATAL_ERROR))
return MDBX_PANIC;
const MDBX_meta *const recent_meta = mdbx_meta_head(env);
arg->mi_recent_txnid = mdbx_meta_txnid_fluid(env, recent_meta);
arg->mi_meta0_txnid = mdbx_meta_txnid_fluid(env, meta0);
volatile const MDBX_meta *const recent_meta = meta_prefer_last(env);
arg->mi_recent_txnid = meta_txnid(env, recent_meta);
arg->mi_meta0_txnid = meta_txnid(env, meta0);
arg->mi_meta0_sign = unaligned_peek_u64(4, meta0->mm_datasync_sign);
arg->mi_meta1_txnid = mdbx_meta_txnid_fluid(env, meta1);
arg->mi_meta1_txnid = meta_txnid(env, meta1);
arg->mi_meta1_sign = unaligned_peek_u64(4, meta1->mm_datasync_sign);
arg->mi_meta2_txnid = mdbx_meta_txnid_fluid(env, meta2);
arg->mi_meta2_txnid = meta_txnid(env, meta2);
arg->mi_meta2_sign = unaligned_peek_u64(4, meta2->mm_datasync_sign);
if (likely(bytes > size_before_bootid)) {
memcpy(&arg->mi_bootid.meta0, &meta0->mm_bootid, 16);
@ -19743,7 +19746,7 @@ __cold static int fetch_envinfo_ex(const MDBX_env *env, const MDBX_txn *txn,
memcpy(&arg->mi_bootid.meta2, &meta2->mm_bootid, 16);
}
const MDBX_meta *txn_meta = recent_meta;
volatile const MDBX_meta *txn_meta = recent_meta;
arg->mi_last_pgno = txn_meta->mm_geo.next - 1;
arg->mi_geo.current = pgno2bytes(env, txn_meta->mm_geo.now);
if (txn) {
@ -20487,16 +20490,16 @@ __cold int mdbx_reader_list(const MDBX_env *env, MDBX_reader_list_func *func,
uint64_t lag = 0;
if (txnid) {
retry_header:;
const MDBX_meta *const recent_meta = mdbx_meta_head(env);
volatile const MDBX_meta *const recent_meta = meta_prefer_last(env);
const uint64_t head_pages_retired =
unaligned_peek_u64(4, recent_meta->mm_pages_retired);
const txnid_t head_txnid = mdbx_meta_txnid_fluid(env, recent_meta);
unaligned_peek_u64_volatile(4, recent_meta->mm_pages_retired);
const txnid_t head_txnid = meta_txnid(env, recent_meta);
mdbx_compiler_barrier();
if (unlikely(
recent_meta != mdbx_meta_head(env) ||
head_pages_retired !=
unaligned_peek_u64(4, recent_meta->mm_pages_retired)) ||
head_txnid != mdbx_meta_txnid_fluid(env, recent_meta))
if (unlikely(recent_meta != meta_prefer_last(env) ||
head_pages_retired !=
unaligned_peek_u64_volatile(
4, recent_meta->mm_pages_retired)) ||
head_txnid != meta_txnid(env, recent_meta))
goto retry_header;
lag = (head_txnid - txnid) / xMDBX_TXNID_STEP;
@ -20748,9 +20751,9 @@ __cold static txnid_t mdbx_kick_longlived_readers(MDBX_env *env,
if (safe64_read(&asleep->mr_txnid) != laggard || pid <= 0)
continue;
const MDBX_meta *head_meta = mdbx_meta_head(env);
const MDBX_meta *head_meta = constmeta_prefer_last(env);
const txnid_t gap =
(mdbx_meta_txnid_stable(env, head_meta) - laggard) / xMDBX_TXNID_STEP;
(constmeta_txnid(env, head_meta) - laggard) / xMDBX_TXNID_STEP;
const uint64_t head_retired =
unaligned_peek_u64(4, head_meta->mm_pages_retired);
const size_t space =
@ -20826,16 +20829,16 @@ int mdbx_txn_straggler(const MDBX_txn *txn, int *percent)
return 0;
}
txnid_t recent;
MDBX_meta *meta;
txnid_t recent = 0;
volatile const MDBX_meta *meta = nullptr;
do {
meta = mdbx_meta_head(env);
recent = mdbx_meta_txnid_fluid(env, meta);
meta = meta_prefer_last(env);
recent = meta_txnid(env, meta);
if (percent) {
const pgno_t maxpg = meta->mm_geo.now;
*percent = (int)((meta->mm_geo.next * UINT64_C(100) + maxpg / 2) / maxpg);
}
} while (unlikely(recent != mdbx_meta_txnid_fluid(env, meta)));
} while (unlikely(recent != meta_txnid(env, meta)));
txnid_t lag = (recent - txn->mt_txnid) / xMDBX_TXNID_STEP;
return (lag > INT_MAX) ? INT_MAX : (int)lag;


@ -445,7 +445,7 @@ typedef struct MDBX_meta {
#define MDBX_DATASIGN_WEAK 1u
#define SIGN_IS_STEADY(sign) ((sign) > MDBX_DATASIGN_WEAK)
#define META_IS_STEADY(meta) \
SIGN_IS_STEADY(unaligned_peek_u64(4, (meta)->mm_datasync_sign))
SIGN_IS_STEADY(unaligned_peek_u64_volatile(4, (meta)->mm_datasync_sign))
uint32_t mm_datasync_sign[2];
/* txnid that committed this page, the second of a two-phase-update pair */


@ -935,8 +935,8 @@ static void usage(char *prog) {
exit(EXIT_INTERRUPTED);
}
static __inline bool meta_ot(txnid_t txn_a, uint64_t sign_a, txnid_t txn_b,
uint64_t sign_b, const bool wanna_steady) {
static bool meta_ot(txnid_t txn_a, uint64_t sign_a, txnid_t txn_b,
uint64_t sign_b, const bool wanna_steady) {
if (txn_a == txn_b)
return SIGN_IS_STEADY(sign_b);
@ -946,8 +946,8 @@ static __inline bool meta_ot(txnid_t txn_a, uint64_t sign_a, txnid_t txn_b,
return txn_a < txn_b;
}
static __inline bool meta_eq(txnid_t txn_a, uint64_t sign_a, txnid_t txn_b,
uint64_t sign_b) {
static bool meta_eq(txnid_t txn_a, uint64_t sign_a, txnid_t txn_b,
uint64_t sign_b) {
if (!txn_a || txn_a != txn_b)
return false;
@ -957,7 +957,7 @@ static __inline bool meta_eq(txnid_t txn_a, uint64_t sign_a, txnid_t txn_b,
return true;
}
static __inline int meta_recent(const bool wanna_steady) {
static int meta_recent(const bool wanna_steady) {
if (meta_ot(envinfo.mi_meta0_txnid, envinfo.mi_meta0_sign,
envinfo.mi_meta1_txnid, envinfo.mi_meta1_sign, wanna_steady))
return meta_ot(envinfo.mi_meta2_txnid, envinfo.mi_meta2_sign,
@ -971,7 +971,7 @@ static __inline int meta_recent(const bool wanna_steady) {
: 0;
}
static __inline int meta_tail(int head) {
static int meta_tail(int head) {
switch (head) {
case 0:
return meta_ot(envinfo.mi_meta1_txnid, envinfo.mi_meta1_sign,