mdbx: don't mimic MDBX's features for MDB.

Change-Id: Ie3865434c741da77b9a285e43b7d6a1d9ec0c5e2
This commit is contained in:
Leo Yuriev
2016-05-02 16:25:13 +03:00
parent 3294da86d7
commit 50f5475185
4 changed files with 188 additions and 188 deletions

218
mdb.c
View File

@@ -100,14 +100,14 @@
#include "./midl.h"
#if ! MDBX_MODE_ENABLED
# define MDB_COALESCE 0
# define MDB_LIFORECLAIM 0
# define MDB_DBG_ASSERT 0
# define MDB_DBG_PRINT 0
# define MDB_DBG_TRACE 0
# define MDB_DBG_EXTRA 0
# define MDB_DBG_AUDIT 0
# define MDB_DBG_EDGE 0
# define MDBX_COALESCE 0
# define MDBX_LIFORECLAIM 0
# define MDBX_DBG_ASSERT 0
# define MDBX_DBG_PRINT 0
# define MDBX_DBG_TRACE 0
# define MDBX_DBG_EXTRA 0
# define MDBX_DBG_AUDIT 0
# define MDBX_DBG_EDGE 0
#endif /* ! MDBX_MODE_ENABLED */
#if (BYTE_ORDER == LITTLE_ENDIAN) == (BYTE_ORDER == BIG_ENDIAN)
@@ -979,7 +979,7 @@ struct MDB_env {
uint64_t me_sync_pending; /**< Total dirty/commited bytes since the last mdb_env_sync() */
uint64_t me_sync_threshold; /**< Treshold of above to force synchronous flush */
#if MDBX_MODE_ENABLED
MDB_oom_func *me_oom_func; /**< Callback for kicking laggard readers */
MDBX_oom_func *me_oom_func; /**< Callback for kicking laggard readers */
#endif
#ifdef USE_VALGRIND
int me_valgrind_handle;
@@ -1147,22 +1147,22 @@ mdb_strerror(int err)
#if MDBX_MODE_ENABLED
int mdb_runtime_flags = MDB_DBG_PRINT
int mdb_runtime_flags = MDBX_DBG_PRINT
#if MDB_DEBUG
| MDB_DBG_ASSERT
| MDBX_DBG_ASSERT
#endif
#if MDB_DEBUG > 1
| MDB_DBG_TRACE
| MDBX_DBG_TRACE
#endif
#if MDB_DEBUG > 2
| MDB_DBG_AUDIT
| MDBX_DBG_AUDIT
#endif
#if MDB_DEBUG > 3
| MDB_DBG_EXTRA
| MDBX_DBG_EXTRA
#endif
;
static MDB_debug_func *mdb_debug_logger;
static MDBX_debug_func *mdb_debug_logger;
#else /* MDBX_MODE_ENABLED */
# define mdb_runtime_flags 0
@@ -1182,20 +1182,20 @@ static MDB_debug_func *mdb_debug_logger;
env->me_assert_func(env, msg, func, line);
else {
if (mdb_debug_logger)
mdb_debug_log(MDB_DBG_ASSERT, func, line, "assert: %s\n", msg);
mdb_debug_log(MDBX_DBG_ASSERT, func, line, "assert: %s\n", msg);
__assert_fail(msg, __FILE__, line, func);
}
}
# define mdb_assert_enabled() \
unlikely(mdb_runtime_flags & MDB_DBG_ASSERT)
unlikely(mdb_runtime_flags & MDBX_DBG_ASSERT)
# define mdb_audit_enabled() \
unlikely(mdb_runtime_flags & MDB_DBG_AUDIT)
unlikely(mdb_runtime_flags & MDBX_DBG_AUDIT)
# define mdb_debug_enabled(type) \
unlikely(mdb_runtime_flags & \
(type & (MDB_DBG_TRACE | MDB_DBG_EXTRA)))
(type & (MDBX_DBG_TRACE | MDBX_DBG_EXTRA)))
#else
# define mdb_debug_enabled(type) (0)
@@ -1207,14 +1207,14 @@ static MDB_debug_func *mdb_debug_logger;
#if MDBX_MODE_ENABLED
int __cold
mdbx_setup_debug(int flags, MDB_debug_func* logger, long edge_txn) {
mdbx_setup_debug(int flags, MDBX_debug_func* logger, long edge_txn) {
unsigned ret = mdb_runtime_flags;
if (flags != (int) MDB_DBG_DNT)
if (flags != (int) MDBX_DBG_DNT)
mdb_runtime_flags = flags;
if (logger != (MDB_debug_func*) MDB_DBG_DNT)
if (logger != (MDBX_debug_func*) MDBX_DBG_DNT)
mdb_debug_logger = logger;
#if MDB_DEBUG
if (edge_txn != (long) MDB_DBG_DNT)
if (edge_txn != (long) MDBX_DBG_DNT)
mdb_debug_edge = edge_txn;
#else
(void) edge_txn;
@@ -1241,26 +1241,26 @@ mdb_debug_log(int type, const char *function, int line,
}
#define mdb_print(fmt, ...) \
mdb_debug_log(MDB_DBG_PRINT, NULL, 0, fmt, ##__VA_ARGS__)
mdb_debug_log(MDBX_DBG_PRINT, NULL, 0, fmt, ##__VA_ARGS__)
#define mdb_debug(fmt, ...) do { \
if (mdb_debug_enabled(MDB_DBG_TRACE)) \
mdb_debug_log(MDB_DBG_TRACE, __FUNCTION__, __LINE__, fmt "\n", ##__VA_ARGS__); \
if (mdb_debug_enabled(MDBX_DBG_TRACE)) \
mdb_debug_log(MDBX_DBG_TRACE, __FUNCTION__, __LINE__, fmt "\n", ##__VA_ARGS__); \
} while(0)
#define mdb_debug_print(fmt, ...) do { \
if (mdb_debug_enabled(MDB_DBG_TRACE)) \
mdb_debug_log(MDB_DBG_TRACE, NULL, 0, fmt, ##__VA_ARGS__); \
if (mdb_debug_enabled(MDBX_DBG_TRACE)) \
mdb_debug_log(MDBX_DBG_TRACE, NULL, 0, fmt, ##__VA_ARGS__); \
} while(0)
#define mdb_debug_extra(fmt, ...) do { \
if (mdb_debug_enabled(MDB_DBG_EXTRA)) \
mdb_debug_log(MDB_DBG_EXTRA, __FUNCTION__, __LINE__, fmt, ##__VA_ARGS__); \
if (mdb_debug_enabled(MDBX_DBG_EXTRA)) \
mdb_debug_log(MDBX_DBG_EXTRA, __FUNCTION__, __LINE__, fmt, ##__VA_ARGS__); \
} while(0)
#define mdb_debug_extra_print(fmt, ...) do { \
if (mdb_debug_enabled(MDB_DBG_EXTRA)) \
mdb_debug_log(MDB_DBG_EXTRA, NULL, 0, fmt, ##__VA_ARGS__); \
if (mdb_debug_enabled(MDBX_DBG_EXTRA)) \
mdb_debug_log(MDBX_DBG_EXTRA, NULL, 0, fmt, ##__VA_ARGS__); \
} while(0)
#define mdb_ensure_msg(env, expr, msg) \
@@ -1288,7 +1288,7 @@ mdb_debug_log(int type, const char *function, int line,
mdb_assert((txn)->mt_env, expr)
/** Return the page number of \b mp which may be sub-page, for debug output */
static MDB_INLINE pgno_t
static MDBX_INLINE pgno_t
mdb_dbg_pgno(MDB_page *mp)
{
pgno_t ret;
@@ -1538,7 +1538,7 @@ mdb_page_malloc(MDB_txn *txn, unsigned num)
* Saves single pages to a list, for future reuse.
* (This is not used for multi-page overflow pages.)
*/
static MDB_INLINE void
static MDBX_INLINE void
mdb_page_free(MDB_env *env, MDB_page *mp)
{
mp->mp_next = env->me_dpages;
@@ -1636,7 +1636,7 @@ mdb_page_loose(MDB_cursor *mc, MDB_page *mp)
}
if (loose) {
mdb_debug("loosen db %d page %zu", DDBI(mc), mp->mp_pgno);
if (unlikely(txn->mt_env->me_flags & MDB_PAGEPERTURB))
if (unlikely(txn->mt_env->me_flags & MDBX_PAGEPERTURB))
mdb_kill_page(txn->mt_env, pgno);
NEXT_LOOSE_PAGE(mp) = txn->mt_loose_pgs;
txn->mt_loose_pgs = mp;
@@ -1852,7 +1852,7 @@ bailout:
return rc;
}
static MDB_INLINE uint64_t
static MDBX_INLINE uint64_t
mdb_meta_sign(MDB_meta *meta) {
uint64_t sign = MDB_DATASIGN_NONE;
#if 0 /* TODO */
@@ -1868,7 +1868,7 @@ mdb_meta_sign(MDB_meta *meta) {
return (sign > MDB_DATASIGN_WEAK) ? sign : ~sign;
}
static MDB_INLINE MDB_meta*
static MDBX_INLINE MDB_meta*
mdb_meta_head_w(MDB_env *env) {
MDB_meta* a = METAPAGE_1(env);
MDB_meta* b = METAPAGE_2(env);
@@ -1903,7 +1903,7 @@ mdb_meta_head_r(MDB_env *env) {
h = b;
} else {
/* LY: seems got a collision with mdb_env_sync0() */
mdb_coherent_barrier();
mdbx_coherent_barrier();
head_txnid = env->me_txns->mti_txnid;
mdb_assert(env, a->mm_txnid != b->mm_txnid || head_txnid == 0);
@@ -1927,12 +1927,12 @@ mdb_meta_head_r(MDB_env *env) {
return h;
}
static MDB_INLINE MDB_meta*
static MDBX_INLINE MDB_meta*
mdb_env_meta_flipflop(const MDB_env *env, MDB_meta* meta) {
return (meta == METAPAGE_1(env)) ? METAPAGE_2(env) : METAPAGE_1(env);
}
static MDB_INLINE int
static MDBX_INLINE int
mdb_meta_lt(MDB_meta* a, MDB_meta* b) {
return (META_IS_STEADY(a) == META_IS_STEADY(b))
? a->mm_txnid < b->mm_txnid : META_IS_STEADY(b);
@@ -2019,7 +2019,7 @@ mdb_oomkick(MDB_env *env, txnid_t oldest)
if (rc > 1) {
r->mr_tid = 0;
r->mr_pid = 0;
mdb_coherent_barrier();
mdbx_coherent_barrier();
}
}
}
@@ -2067,10 +2067,10 @@ mdb_page_dirty(MDB_txn *txn, MDB_page *mp)
* @return 0 on success, non-zero on failure.
*/
#define MDB_ALLOC_CACHE 1
#define MDB_ALLOC_GC 2
#define MDB_ALLOC_NEW 4
#define MDB_ALLOC_ALL (MDB_ALLOC_CACHE|MDB_ALLOC_GC|MDB_ALLOC_NEW)
#define MDBX_ALLOC_CACHE 1
#define MDBX_ALLOC_GC 2
#define MDBX_ALLOC_NEW 4
#define MDBX_ALLOC_ALL (MDBX_ALLOC_CACHE|MDBX_ALLOC_GC|MDBX_ALLOC_NEW)
static int
mdb_page_alloc(MDB_cursor *mc, int num, MDB_page **mp, int flags)
@@ -2086,16 +2086,16 @@ mdb_page_alloc(MDB_cursor *mc, int num, MDB_page **mp, int flags)
MDB_cursor m2;
int found_old;
if (likely(flags & MDB_ALLOC_GC)) {
flags |= env->me_flags & (MDB_COALESCE | MDB_LIFORECLAIM);
if (likely(flags & MDBX_ALLOC_GC)) {
flags |= env->me_flags & (MDBX_COALESCE | MDBX_LIFORECLAIM);
if (unlikely(mc->mc_flags & C_RECLAIMING)) {
/* If mc is updating the freeDB, then the freelist cannot play
* catch-up with itself by growing while trying to save it. */
flags &= ~(MDB_ALLOC_GC | MDB_COALESCE | MDB_LIFORECLAIM);
flags &= ~(MDBX_ALLOC_GC | MDBX_COALESCE | MDBX_LIFORECLAIM);
}
}
if (likely(flags & MDB_ALLOC_CACHE)) {
if (likely(flags & MDBX_ALLOC_CACHE)) {
/* If there are any loose pages, just use them */
assert(mp && num);
if (likely(num == 1 && txn->mt_loose_pgs)) {
@@ -2116,7 +2116,7 @@ mdb_page_alloc(MDB_cursor *mc, int num, MDB_page **mp, int flags)
for (;;) { /* oom-kick retry loop */
found_old = 0;
for (op = MDB_FIRST;; op = (flags & MDB_LIFORECLAIM) ? MDB_PREV : MDB_NEXT) {
for (op = MDB_FIRST;; op = (flags & MDBX_LIFORECLAIM) ? MDB_PREV : MDB_NEXT) {
MDB_val key, data;
MDB_node *leaf;
pgno_t *idl;
@@ -2124,9 +2124,9 @@ mdb_page_alloc(MDB_cursor *mc, int num, MDB_page **mp, int flags)
/* Seek a big enough contiguous page range. Prefer
* pages at the tail, just truncating the list.
*/
if (likely(flags & MDB_ALLOC_CACHE)
if (likely(flags & MDBX_ALLOC_CACHE)
&& mop_len > n2
&& ( !(flags & MDB_COALESCE) || op == MDB_FIRST)) {
&& ( !(flags & MDBX_COALESCE) || op == MDB_FIRST)) {
i = mop_len;
do {
pgno = mop[i];
@@ -2137,12 +2137,12 @@ mdb_page_alloc(MDB_cursor *mc, int num, MDB_page **mp, int flags)
if (op == MDB_FIRST) { /* 1st iteration */
/* Prepare to fetch more and coalesce */
if (unlikely( !(flags & MDB_ALLOC_GC) ))
if (unlikely( !(flags & MDBX_ALLOC_GC) ))
break;
oldest = env->me_pgoldest;
mdb_cursor_init(&m2, txn, FREE_DBI, NULL);
if (flags & MDB_LIFORECLAIM) {
if (flags & MDBX_LIFORECLAIM) {
if (env->me_pglast > 1) {
/* Continue lookup from env->me_pglast to lower/first */
last = env->me_pglast - 1;
@@ -2167,7 +2167,7 @@ mdb_page_alloc(MDB_cursor *mc, int num, MDB_page **mp, int flags)
key.mv_size = sizeof(last);
}
if (! (flags & MDB_LIFORECLAIM) ) {
if (! (flags & MDBX_LIFORECLAIM) ) {
/* Do not fetch more if the record will be too recent */
if (op != MDB_FIRST && ++last >= oldest) {
if (!found_old) {
@@ -2181,7 +2181,7 @@ mdb_page_alloc(MDB_cursor *mc, int num, MDB_page **mp, int flags)
}
rc = mdb_cursor_get(&m2, &key, NULL, op);
if (rc == MDB_NOTFOUND && (flags & MDB_LIFORECLAIM)) {
if (rc == MDB_NOTFOUND && (flags & MDBX_LIFORECLAIM)) {
if (op == MDB_SET_RANGE)
continue;
env->me_pgoldest = mdb_find_oldest(env, NULL);
@@ -2209,13 +2209,13 @@ mdb_page_alloc(MDB_cursor *mc, int num, MDB_page **mp, int flags)
found_old = 1;
}
if (oldest <= last) {
if (flags & MDB_LIFORECLAIM)
if (flags & MDBX_LIFORECLAIM)
continue;
break;
}
}
if (flags & MDB_LIFORECLAIM) {
if (flags & MDBX_LIFORECLAIM) {
if (txn->mt_lifo_reclaimed) {
for(j = txn->mt_lifo_reclaimed[0]; j > 0; --j)
if (txn->mt_lifo_reclaimed[j] == last)
@@ -2230,7 +2230,7 @@ mdb_page_alloc(MDB_cursor *mc, int num, MDB_page **mp, int flags)
if (unlikely((rc = mdb_node_read(txn, leaf, &data)) != MDB_SUCCESS))
goto fail;
if ((flags & MDB_LIFORECLAIM) && !txn->mt_lifo_reclaimed) {
if ((flags & MDBX_LIFORECLAIM) && !txn->mt_lifo_reclaimed) {
txn->mt_lifo_reclaimed = mdb_midl_alloc(env->me_maxfree_1pg);
if (unlikely(!txn->mt_lifo_reclaimed)) {
rc = ENOMEM;
@@ -2251,13 +2251,13 @@ mdb_page_alloc(MDB_cursor *mc, int num, MDB_page **mp, int flags)
goto fail;
mop = env->me_pghead;
}
if (flags & MDB_LIFORECLAIM) {
if (flags & MDBX_LIFORECLAIM) {
if ((rc = mdb_midl_append(&txn->mt_lifo_reclaimed, last)) != 0)
goto fail;
}
env->me_pglast = last;
if (mdb_debug_enabled(MDB_DBG_EXTRA)) {
if (mdb_debug_enabled(MDBX_DBG_EXTRA)) {
mdb_debug_extra("IDL read txn %zu root %zu num %u, IDL",
last, txn->mt_dbs[FREE_DBI].md_root, i);
for (j = i; j; j--)
@@ -2269,7 +2269,7 @@ mdb_page_alloc(MDB_cursor *mc, int num, MDB_page **mp, int flags)
mdb_midl_xmerge(mop, idl);
mop_len = mop[0];
if (unlikely((flags & MDB_ALLOC_CACHE) == 0)) {
if (unlikely((flags & MDBX_ALLOC_CACHE) == 0)) {
/* force gc reclaim mode */
return MDB_SUCCESS;
}
@@ -2277,14 +2277,14 @@ mdb_page_alloc(MDB_cursor *mc, int num, MDB_page **mp, int flags)
/* Don't try to coalesce too much. */
if (mop_len > MDB_IDL_UM_SIZE / 2)
break;
if (flags & MDB_COALESCE) {
if (flags & MDBX_COALESCE) {
if (mop_len /* current size */ >= env->me_maxfree_1pg / 2
|| i /* prev size */ >= env->me_maxfree_1pg / 4)
flags &= ~MDB_COALESCE;
flags &= ~MDBX_COALESCE;
}
}
if ((flags & (MDB_COALESCE|MDB_ALLOC_CACHE)) == (MDB_COALESCE|MDB_ALLOC_CACHE)
if ((flags & (MDBX_COALESCE|MDBX_ALLOC_CACHE)) == (MDBX_COALESCE|MDBX_ALLOC_CACHE)
&& mop_len > n2) {
i = mop_len;
do {
@@ -2296,7 +2296,7 @@ mdb_page_alloc(MDB_cursor *mc, int num, MDB_page **mp, int flags)
i = 0;
rc = MDB_NOTFOUND;
if (likely(flags & MDB_ALLOC_NEW)) {
if (likely(flags & MDBX_ALLOC_NEW)) {
/* Use new pages from the map when nothing suitable in the freeDB */
pgno = txn->mt_next_pgno;
if (likely(pgno + num <= env->me_maxpg))
@@ -2305,7 +2305,7 @@ mdb_page_alloc(MDB_cursor *mc, int num, MDB_page **mp, int flags)
rc = MDB_MAP_FULL;
}
if (flags & MDB_ALLOC_GC) {
if (flags & MDBX_ALLOC_GC) {
MDB_meta* head = mdb_meta_head_w(env);
MDB_meta* tail = mdb_env_meta_flipflop(env, head);
@@ -2328,8 +2328,8 @@ mdb_page_alloc(MDB_cursor *mc, int num, MDB_page **mp, int flags)
oldest, env->me_txns->mt1.mtb.mtb_txnid );
int flags = env->me_flags & MDB_WRITEMAP;
if ((env->me_flags & MDB_UTTERLY_NOSYNC) == MDB_UTTERLY_NOSYNC)
flags |= MDB_UTTERLY_NOSYNC;
if ((env->me_flags & MDBX_UTTERLY_NOSYNC) == MDBX_UTTERLY_NOSYNC)
flags |= MDBX_UTTERLY_NOSYNC;
mdb_assert(env, env->me_sync_pending > 0);
if (mdb_env_sync0(env, flags, &meta) == MDB_SUCCESS) {
@@ -2373,7 +2373,7 @@ done:
txn->mt_next_pgno = pgno + num;
}
if (env->me_flags & MDB_PAGEPERTURB)
if (env->me_flags & MDBX_PAGEPERTURB)
memset(np, 0x71 /* 'q', 113 */, env->me_psize * num);
VALGRIND_MAKE_MEM_UNDEFINED(np, env->me_psize * num);
@@ -2496,7 +2496,7 @@ mdb_page_touch(MDB_cursor *mc)
goto done;
}
if (unlikely((rc = mdb_midl_need(&txn->mt_free_pgs, 1)) ||
(rc = mdb_page_alloc(mc, 1, &np, MDB_ALLOC_ALL))))
(rc = mdb_page_alloc(mc, 1, &np, MDBX_ALLOC_ALL))))
goto fail;
pgno = np->mp_pgno;
mdb_debug("touched db %d page %zu -> %zu", DDBI(mc), mp->mp_pgno, pgno);
@@ -2830,7 +2830,7 @@ mdb_txn_renew0(MDB_txn *txn, unsigned flags)
r->mr_pid = 0;
r->mr_txnid = ~(txnid_t)0;
r->mr_tid = tid;
mdb_coherent_barrier();
mdbx_coherent_barrier();
#ifdef __SANITIZE_THREAD__
pthread_mutex_lock(&tsan_mutex);
#endif
@@ -2856,7 +2856,7 @@ mdb_txn_renew0(MDB_txn *txn, unsigned flags)
MDB_meta *meta = mdb_meta_head_r(txn->mt_env);
txnid_t lead = meta->mm_txnid;
r->mr_txnid = lead;
mdb_coherent_barrier();
mdbx_coherent_barrier();
txnid_t snap = txn->mt_env->me_txns->mti_txnid;
/* LY: Retry on a race, ITS#7970. */
@@ -2890,9 +2890,9 @@ mdb_txn_renew0(MDB_txn *txn, unsigned flags)
#if MDB_DEBUG
if (unlikely(txn->mt_txnid == mdb_debug_edge)) {
if (! mdb_debug_logger)
mdb_runtime_flags |= MDB_DBG_TRACE | MDB_DBG_EXTRA
| MDB_DBG_AUDIT | MDB_DBG_ASSERT;
mdb_debug_log(MDB_DBG_EDGE, __FUNCTION__, __LINE__,
mdb_runtime_flags |= MDBX_DBG_TRACE | MDBX_DBG_EXTRA
| MDBX_DBG_AUDIT | MDBX_DBG_ASSERT;
mdb_debug_log(MDBX_DBG_EDGE, __FUNCTION__, __LINE__,
"on/off edge (txn %zu)", txn->mt_txnid);
}
#endif
@@ -3189,7 +3189,7 @@ mdb_txn_end(MDB_txn *txn, unsigned mode)
pthread_mutex_unlock(&tsan_mutex);
#endif
}
mdb_coherent_barrier();
mdbx_coherent_barrier();
txn->mt_numdbs = 0; /* prevent further DBI activity */
txn->mt_flags |= MDB_TXN_FINISHED;
@@ -3275,7 +3275,7 @@ mdb_txn_abort(MDB_txn *txn)
return mdb_txn_end(txn, MDB_END_ABORT|MDB_END_SLOT|MDB_END_FREE);
}
static MDB_INLINE int
static MDBX_INLINE int
mdb_backlog_size(MDB_txn *txn)
{
int reclaimed = txn->mt_env->me_pghead ? txn->mt_env->me_pghead[0] : 0;
@@ -3289,7 +3289,7 @@ static int
mdb_prep_backlog(MDB_txn *txn, MDB_cursor *mc)
{
/* LY: extra page(s) for b-tree rebalancing */
const int extra = (txn->mt_env->me_flags & MDB_LIFORECLAIM) ? 2 : 1;
const int extra = (txn->mt_env->me_flags & MDBX_LIFORECLAIM) ? 2 : 1;
if (mdb_backlog_size(txn) < mc->mc_db->md_depth + extra) {
int rc = mdb_cursor_touch(mc);
@@ -3297,7 +3297,7 @@ mdb_prep_backlog(MDB_txn *txn, MDB_cursor *mc)
return rc;
while (unlikely(mdb_backlog_size(txn) < extra)) {
rc = mdb_page_alloc(mc, 1, NULL, MDB_ALLOC_GC);
rc = mdb_page_alloc(mc, 1, NULL, MDBX_ALLOC_GC);
if (unlikely(rc)) {
if (unlikely(rc != MDB_NOTFOUND))
return rc;
@@ -3326,7 +3326,7 @@ mdb_freelist_save(MDB_txn *txn)
pgno_t freecnt = 0, *free_pgs, *mop;
ssize_t head_room = 0, total_room = 0, mop_len, clean_limit;
unsigned cleanup_idx = 0, refill_idx = 0;
const int lifo = (env->me_flags & MDB_LIFORECLAIM) != 0;
const int lifo = (env->me_flags & MDBX_LIFORECLAIM) != 0;
mdb_cursor_init(&mc, txn, FREE_DBI, NULL);
@@ -3422,7 +3422,7 @@ again:
mdb_midl_sort(free_pgs);
memcpy(data.mv_data, free_pgs, data.mv_size);
if (mdb_debug_enabled(MDB_DBG_EXTRA)) {
if (mdb_debug_enabled(MDBX_DBG_EXTRA)) {
unsigned i = free_pgs[0];
mdb_debug_extra("IDL write txn %zu root %zu num %u, IDL",
txn->mt_txnid, txn->mt_dbs[FREE_DBI].md_root, i);
@@ -3456,7 +3456,7 @@ again:
if (lifo) {
if (refill_idx > (txn->mt_lifo_reclaimed ? txn->mt_lifo_reclaimed[0] : 0)) {
/* LY: need more just a txn-id for save page list. */
rc = mdb_page_alloc(&mc, 0, NULL, MDB_ALLOC_GC);
rc = mdb_page_alloc(&mc, 0, NULL, MDBX_ALLOC_GC);
if (likely(rc == 0))
/* LY: ok, reclaimed from freedb. */
continue;
@@ -4196,7 +4196,7 @@ mdb_env_sync0(MDB_env *env, unsigned flags, MDB_meta *pending)
pending->mm_datasync_sign = mdb_meta_sign(pending);
} else {
pending->mm_datasync_sign =
(flags & MDB_UTTERLY_NOSYNC) == MDB_UTTERLY_NOSYNC
(flags & MDBX_UTTERLY_NOSYNC) == MDBX_UTTERLY_NOSYNC
? MDB_DATASIGN_NONE : MDB_DATASIGN_WEAK;
}
mdb_debug("writing meta %d, root %zu, txn_id %zu, %s",
@@ -4354,7 +4354,7 @@ mdb_env_map(MDB_env *env, void *addr, size_t usedsize)
#endif
#ifdef MADV_DONTDUMP
if (! (flags & MDB_PAGEPERTURB)) {
if (! (flags & MDBX_PAGEPERTURB)) {
(void) madvise(env->me_map, env->me_mapsize, MADV_DONTDUMP);
}
#endif
@@ -4583,9 +4583,9 @@ void mdb_env_reader_destr(void *ptr)
mdb_ensure(NULL, reader->mr_rthc == rthc);
rthc->rc_reader = NULL;
reader->mr_rthc = NULL;
mdb_compiler_barrier();
mdbx_compiler_barrier();
reader->mr_pid = 0;
mdb_coherent_barrier();
mdbx_coherent_barrier();
}
mdb_ensure(NULL, pthread_mutex_unlock(&mdb_rthc_lock) == 0);
free(rthc);
@@ -4847,9 +4847,9 @@ mdb_env_setup_locks(MDB_env *env, char *lpath, int mode, int *excl)
* environment and re-opening it with the new flags.
*/
#define CHANGEABLE (MDB_NOSYNC|MDB_NOMETASYNC|MDB_MAPASYNC| \
MDB_NOMEMINIT|MDB_COALESCE|MDB_PAGEPERTURB)
MDB_NOMEMINIT|MDBX_COALESCE|MDBX_PAGEPERTURB)
#define CHANGELESS (MDB_FIXEDMAP|MDB_NOSUBDIR|MDB_RDONLY| \
MDB_WRITEMAP|MDB_NOTLS|MDB_NORDAHEAD|MDB_LIFORECLAIM)
MDB_WRITEMAP|MDB_NOTLS|MDB_NORDAHEAD|MDBX_LIFORECLAIM)
#if VALID_FLAGS & PERSISTENT_FLAGS & (CHANGEABLE|CHANGELESS)
# error "Persistent DB flags & env flags overlap, but both go in mm_flags"
@@ -4870,12 +4870,12 @@ mdbx_env_open_ex(MDB_env *env, const char *path, unsigned flags, mode_t mode, in
if (unlikely(env->me_signature != MDBX_ME_SIGNATURE))
return MDB_VERSION_MISMATCH;
#if MDB_LIFORECLAIM
#if MDBX_LIFORECLAIM
/* LY: don't allow LIFO with just NOMETASYNC */
if ((flags & (MDB_NOMETASYNC | MDB_LIFORECLAIM | MDB_NOSYNC))
== (MDB_NOMETASYNC | MDB_LIFORECLAIM))
if ((flags & (MDB_NOMETASYNC | MDBX_LIFORECLAIM | MDB_NOSYNC))
== (MDB_NOMETASYNC | MDBX_LIFORECLAIM))
return EINVAL;
#endif /* MDB_LIFORECLAIM */
#endif /* MDBX_LIFORECLAIM */
if (env->me_fd != INVALID_HANDLE_VALUE || (flags & ~(CHANGEABLE|CHANGELESS)))
return EINVAL;
@@ -4904,7 +4904,7 @@ mdbx_env_open_ex(MDB_env *env, const char *path, unsigned flags, mode_t mode, in
if (flags & MDB_RDONLY) {
/* LY: silently ignore irrelevant flags when we're only getting read access */
flags &= ~(MDB_WRITEMAP | MDB_MAPASYNC | MDB_NOSYNC | MDB_NOMETASYNC
| MDB_COALESCE | MDB_LIFORECLAIM | MDB_NOMEMINIT);
| MDBX_COALESCE | MDBX_LIFORECLAIM | MDB_NOMEMINIT);
} else {
if (!((env->me_free_pgs = mdb_midl_alloc(MDB_IDL_UM_MAX))
&& (env->me_dirty_list = calloc(MDB_IDL_UM_SIZE, sizeof(MDB_ID2)))))
@@ -5078,7 +5078,7 @@ mdb_env_close0(MDB_env *env)
reader->mr_pid = 0;
}
}
mdb_coherent_barrier();
mdbx_coherent_barrier();
mdb_ensure(env, pthread_mutex_unlock(&mdb_rthc_lock) == 0);
}
@@ -5740,7 +5740,7 @@ release:
* @param[out] data Updated to point to the node's data.
* @return 0 on success, non-zero on failure.
*/
static MDB_INLINE int
static MDBX_INLINE int
mdb_node_read(MDB_txn *txn, MDB_node *leaf, MDB_val *data)
{
MDB_page *omp; /* overflow page */
@@ -6825,7 +6825,7 @@ prep_subDB:
dummy.md_entries = NUMKEYS(fp);
xdata.mv_size = sizeof(MDB_db);
xdata.mv_data = &dummy;
if ((rc = mdb_page_alloc(mc, 1, &mp, MDB_ALLOC_ALL)))
if ((rc = mdb_page_alloc(mc, 1, &mp, MDBX_ALLOC_ALL)))
return rc;
offset = env->me_psize - olddata.mv_size;
flags |= F_DUPDATA|F_SUBDATA;
@@ -7199,7 +7199,7 @@ mdb_page_new(MDB_cursor *mc, uint32_t flags, int num, MDB_page **mp)
MDB_page *np;
int rc;
if (unlikely((rc = mdb_page_alloc(mc, num, &np, MDB_ALLOC_ALL))))
if (unlikely((rc = mdb_page_alloc(mc, num, &np, MDBX_ALLOC_ALL))))
return rc;
mdb_debug("allocated new mpage %zu, page size %u",
np->mp_pgno, mc->mc_txn->mt_env->me_psize);
@@ -7231,7 +7231,7 @@ mdb_page_new(MDB_cursor *mc, uint32_t flags, int num, MDB_page **mp)
* @param[in] data The data for the node.
* @return The number of bytes needed to store the node.
*/
static MDB_INLINE size_t
static MDBX_INLINE size_t
mdb_leaf_size(MDB_env *env, MDB_val *key, MDB_val *data)
{
size_t sz;
@@ -7255,7 +7255,7 @@ mdb_leaf_size(MDB_env *env, MDB_val *key, MDB_val *data)
* @param[in] key The key for the node.
* @return The number of bytes needed to store the node.
*/
static MDB_INLINE size_t
static MDBX_INLINE size_t
mdb_branch_size(MDB_env *env, MDB_val *key)
{
size_t sz;
@@ -9614,12 +9614,12 @@ mdb_env_set_flags(MDB_env *env, unsigned flags, int onoff)
else
flags = env->me_flags & ~flags;
#if MDB_LIFORECLAIM
#if MDBX_LIFORECLAIM
/* LY: don't allow LIFO with just NOMETASYNC */
if ((flags & (MDB_NOMETASYNC | MDB_LIFORECLAIM | MDB_NOSYNC))
== (MDB_NOMETASYNC | MDB_LIFORECLAIM))
if ((flags & (MDB_NOMETASYNC | MDBX_LIFORECLAIM | MDB_NOSYNC))
== (MDB_NOMETASYNC | MDBX_LIFORECLAIM))
return EINVAL;
#endif /* MDB_LIFORECLAIM */
#endif /* MDBX_LIFORECLAIM */
env->me_flags = flags;
mdb_mutex_unlock(env, mutex);
@@ -10370,7 +10370,7 @@ mdb_reader_check0(MDB_env *env, int rlocked, int *dead)
mdb_debug("clear stale reader pid %u txn %zd",
(unsigned) pid, mr[j].mr_txnid);
mr[j].mr_rthc = NULL;
mdb_compiler_barrier();
mdbx_compiler_barrier();
mr[j].mr_pid = 0;
count++;
}
@@ -10453,13 +10453,13 @@ static void mdb_mutex_unlock(MDB_env *env, pthread_mutex_t *mutex) {
#if MDBX_MODE_ENABLED
void __cold
mdbx_env_set_oomfunc(MDB_env *env, MDB_oom_func *oomfunc)
mdbx_env_set_oomfunc(MDB_env *env, MDBX_oom_func *oomfunc)
{
if (likely(env && env->me_signature == MDBX_ME_SIGNATURE))
env->me_oom_func = oomfunc;
}
MDB_oom_func* __cold
MDBX_oom_func* __cold
mdbx_env_get_oomfunc(MDB_env *env)
{
return likely(env && env->me_signature == MDBX_ME_SIGNATURE)
@@ -10469,7 +10469,7 @@ mdbx_env_get_oomfunc(MDB_env *env)
struct mdb_walk_ctx {
MDB_txn *mw_txn;
void *mw_user;
MDB_pgvisitor_func *mw_visitor;
MDBX_pgvisitor_func *mw_visitor;
};
typedef struct mdb_walk_ctx mdb_walk_ctx_t;
@@ -10601,7 +10601,7 @@ mdb_env_walk(mdb_walk_ctx_t *ctx, const char* dbi, pgno_t pg, int flags, int dee
}
int __cold
mdbx_env_pgwalk(MDB_txn *txn, MDB_pgvisitor_func* visitor, void* user)
mdbx_env_pgwalk(MDB_txn *txn, MDBX_pgvisitor_func* visitor, void* user)
{
mdb_walk_ctx_t ctx;
int rc;