mdbx: new clang-format settings (cosmetics).

Леонид Юрьев (Leonid Yuriev) 2024-12-11 21:22:04 +03:00
parent 3c4d019d00
commit 8867c2ddc2
129 changed files with 6727 additions and 12640 deletions

.clang-format

@@ -1,3 +1,3 @@
 BasedOnStyle: LLVM
-Standard: Cpp11
-ReflowComments: true
+Standard: c++20
+ColumnLimit: 120
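A minimal sketch of what the new ColumnLimit changes in practice, using the mdbx_cursor_get declaration that also appears in the cursor diff below (assuming the previous limit was LLVM's default of 80 columns):

/* old wrapping at the LLVM-default 80 columns */
int mdbx_cursor_get(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data,
                    MDBX_cursor_op op);

/* new wrapping with ColumnLimit: 120, the whole declaration now fits on one line */
int mdbx_cursor_get(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data, MDBX_cursor_op op);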


@ -18,8 +18,7 @@
* <http://www.OpenLDAP.org/license.html>. * <http://www.OpenLDAP.org/license.html>.
*/ */
#if (defined(__MINGW__) || defined(__MINGW32__) || defined(__MINGW64__)) && \ #if (defined(__MINGW__) || defined(__MINGW32__) || defined(__MINGW64__)) && !defined(__USE_MINGW_ANSI_STDIO)
!defined(__USE_MINGW_ANSI_STDIO)
#define __USE_MINGW_ANSI_STDIO 1 #define __USE_MINGW_ANSI_STDIO 1
#endif /* MinGW */ #endif /* MinGW */
@ -59,33 +58,23 @@ int main(int argc, char *argv[]) {
pagesize_min, pagesize_max, pagesize_default); pagesize_min, pagesize_max, pagesize_default);
printf("\tKey size: minimum %zu, maximum ≈¼ pagesize (%zu bytes for default" printf("\tKey size: minimum %zu, maximum ≈¼ pagesize (%zu bytes for default"
" %zuK pagesize, %zu bytes for %zuK pagesize).\n", " %zuK pagesize, %zu bytes for %zuK pagesize).\n",
(size_t)0, mdbx_limits_keysize_max(-1, MDBX_DB_DEFAULTS), (size_t)0, mdbx_limits_keysize_max(-1, MDBX_DB_DEFAULTS), pagesize_default / 1024,
pagesize_default / 1024, mdbx_limits_keysize_max(pagesize_max, MDBX_DB_DEFAULTS), pagesize_max / 1024);
mdbx_limits_keysize_max(pagesize_max, MDBX_DB_DEFAULTS),
pagesize_max / 1024);
printf("\tValue size: minimum %zu, maximum %zu (0x%08zX) bytes for maps," printf("\tValue size: minimum %zu, maximum %zu (0x%08zX) bytes for maps,"
" ≈¼ pagesize for multimaps (%zu bytes for default %zuK pagesize," " ≈¼ pagesize for multimaps (%zu bytes for default %zuK pagesize,"
" %zu bytes for %zuK pagesize).\n", " %zu bytes for %zuK pagesize).\n",
(size_t)0, mdbx_limits_valsize_max(pagesize_min, MDBX_DB_DEFAULTS), (size_t)0, mdbx_limits_valsize_max(pagesize_min, MDBX_DB_DEFAULTS),
mdbx_limits_valsize_max(pagesize_min, MDBX_DB_DEFAULTS), mdbx_limits_valsize_max(pagesize_min, MDBX_DB_DEFAULTS), mdbx_limits_valsize_max(-1, MDBX_DUPSORT),
mdbx_limits_valsize_max(-1, MDBX_DUPSORT), pagesize_default / 1024, pagesize_default / 1024, mdbx_limits_valsize_max(pagesize_max, MDBX_DUPSORT), pagesize_max / 1024);
mdbx_limits_valsize_max(pagesize_max, MDBX_DUPSORT),
pagesize_max / 1024);
printf("\tWrite transaction size: up to %zu (0x%zX) pages (%f %s for default " printf("\tWrite transaction size: up to %zu (0x%zX) pages (%f %s for default "
"%zuK pagesize, %f %s for %zuK pagesize).\n", "%zuK pagesize, %f %s for %zuK pagesize).\n",
mdbx_limits_txnsize_max(pagesize_min) / pagesize_min, mdbx_limits_txnsize_max(pagesize_min) / pagesize_min, mdbx_limits_txnsize_max(pagesize_min) / pagesize_min,
mdbx_limits_txnsize_max(pagesize_min) / pagesize_min, mdbx_limits_txnsize_max(-1) / scale_factor, scale_unit, pagesize_default / 1024,
mdbx_limits_txnsize_max(-1) / scale_factor, scale_unit, mdbx_limits_txnsize_max(pagesize_max) / scale_factor, scale_unit, pagesize_max / 1024);
pagesize_default / 1024,
mdbx_limits_txnsize_max(pagesize_max) / scale_factor, scale_unit,
pagesize_max / 1024);
printf("\tDatabase size: up to %zu pages (%f %s for default %zuK " printf("\tDatabase size: up to %zu pages (%f %s for default %zuK "
"pagesize, %f %s for %zuK pagesize).\n", "pagesize, %f %s for %zuK pagesize).\n",
mdbx_limits_dbsize_max(pagesize_min) / pagesize_min, mdbx_limits_dbsize_max(pagesize_min) / pagesize_min, mdbx_limits_dbsize_max(-1) / scale_factor, scale_unit,
mdbx_limits_dbsize_max(-1) / scale_factor, scale_unit, pagesize_default / 1024, mdbx_limits_dbsize_max(pagesize_max) / scale_factor, scale_unit, pagesize_max / 1024);
pagesize_default / 1024,
mdbx_limits_dbsize_max(pagesize_max) / scale_factor, scale_unit,
pagesize_max / 1024);
printf("\tMaximum sub-databases: %u.\n", MDBX_MAX_DBI); printf("\tMaximum sub-databases: %u.\n", MDBX_MAX_DBI);
printf("-----\n"); printf("-----\n");
@ -94,8 +83,7 @@ int main(int argc, char *argv[]) {
fprintf(stderr, "mdbx_env_create: (%d) %s\n", rc, mdbx_strerror(rc)); fprintf(stderr, "mdbx_env_create: (%d) %s\n", rc, mdbx_strerror(rc));
goto bailout; goto bailout;
} }
rc = mdbx_env_open(env, "./example-db", MDBX_NOSUBDIR | MDBX_LIFORECLAIM, rc = mdbx_env_open(env, "./example-db", MDBX_NOSUBDIR | MDBX_LIFORECLAIM, 0664);
0664);
if (rc != MDBX_SUCCESS) { if (rc != MDBX_SUCCESS) {
fprintf(stderr, "mdbx_env_open: (%d) %s\n", rc, mdbx_strerror(rc)); fprintf(stderr, "mdbx_env_open: (%d) %s\n", rc, mdbx_strerror(rc));
goto bailout; goto bailout;
@ -143,9 +131,8 @@ int main(int argc, char *argv[]) {
int found = 0; int found = 0;
while ((rc = mdbx_cursor_get(cursor, &key, &data, MDBX_NEXT)) == 0) { while ((rc = mdbx_cursor_get(cursor, &key, &data, MDBX_NEXT)) == 0) {
printf("key: %p %.*s, data: %p %.*s\n", key.iov_base, (int)key.iov_len, printf("key: %p %.*s, data: %p %.*s\n", key.iov_base, (int)key.iov_len, (char *)key.iov_base, data.iov_base,
(char *)key.iov_base, data.iov_base, (int)data.iov_len, (int)data.iov_len, (char *)data.iov_base);
(char *)data.iov_base);
found += 1; found += 1;
} }
if (rc != MDBX_NOTFOUND || found == 0) { if (rc != MDBX_NOTFOUND || found == 0) {

mdbx.h: 735 changed lines (file diff suppressed because it is too large).

mdbx.h++: 3149 changed lines (file diff suppressed because it is too large).


@ -14,30 +14,24 @@ MDBX_cursor *mdbx_cursor_create(void *context) {
couple->userctx = context; couple->userctx = context;
couple->outer.top_and_flags = z_poor_mark; couple->outer.top_and_flags = z_poor_mark;
couple->inner.cursor.top_and_flags = z_poor_mark | z_inner; couple->inner.cursor.top_and_flags = z_poor_mark | z_inner;
VALGRIND_MAKE_MEM_DEFINED(&couple->outer.backup, VALGRIND_MAKE_MEM_DEFINED(&couple->outer.backup, sizeof(couple->outer.backup));
sizeof(couple->outer.backup));
VALGRIND_MAKE_MEM_DEFINED(&couple->outer.tree, sizeof(couple->outer.tree)); VALGRIND_MAKE_MEM_DEFINED(&couple->outer.tree, sizeof(couple->outer.tree));
VALGRIND_MAKE_MEM_DEFINED(&couple->outer.clc, sizeof(couple->outer.clc)); VALGRIND_MAKE_MEM_DEFINED(&couple->outer.clc, sizeof(couple->outer.clc));
VALGRIND_MAKE_MEM_DEFINED(&couple->outer.dbi_state, VALGRIND_MAKE_MEM_DEFINED(&couple->outer.dbi_state, sizeof(couple->outer.dbi_state));
sizeof(couple->outer.dbi_state)); VALGRIND_MAKE_MEM_DEFINED(&couple->outer.subcur, sizeof(couple->outer.subcur));
VALGRIND_MAKE_MEM_DEFINED(&couple->outer.subcur,
sizeof(couple->outer.subcur));
VALGRIND_MAKE_MEM_DEFINED(&couple->outer.txn, sizeof(couple->outer.txn)); VALGRIND_MAKE_MEM_DEFINED(&couple->outer.txn, sizeof(couple->outer.txn));
return &couple->outer; return &couple->outer;
} }
int mdbx_cursor_renew(const MDBX_txn *txn, MDBX_cursor *mc) { int mdbx_cursor_renew(const MDBX_txn *txn, MDBX_cursor *mc) {
return likely(mc) return likely(mc) ? mdbx_cursor_bind(txn, mc, (kvx_t *)mc->clc - txn->env->kvs) : LOG_IFERR(MDBX_EINVAL);
? mdbx_cursor_bind(txn, mc, (kvx_t *)mc->clc - txn->env->kvs)
: LOG_IFERR(MDBX_EINVAL);
} }
int mdbx_cursor_reset(MDBX_cursor *mc) { int mdbx_cursor_reset(MDBX_cursor *mc) {
if (unlikely(!mc)) if (unlikely(!mc))
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
if (unlikely(mc->signature != cur_signature_ready4dispose && if (unlikely(mc->signature != cur_signature_ready4dispose && mc->signature != cur_signature_live))
mc->signature != cur_signature_live))
return LOG_IFERR(MDBX_EBADSIGN); return LOG_IFERR(MDBX_EBADSIGN);
cursor_couple_t *couple = (cursor_couple_t *)mc; cursor_couple_t *couple = (cursor_couple_t *)mc;
@ -50,8 +44,7 @@ int mdbx_cursor_bind(const MDBX_txn *txn, MDBX_cursor *mc, MDBX_dbi dbi) {
if (unlikely(!mc)) if (unlikely(!mc))
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
if (unlikely(mc->signature != cur_signature_ready4dispose && if (unlikely(mc->signature != cur_signature_ready4dispose && mc->signature != cur_signature_live))
mc->signature != cur_signature_live))
return LOG_IFERR(MDBX_EBADSIGN); return LOG_IFERR(MDBX_EBADSIGN);
int rc = check_txn(txn, MDBX_TXN_BLOCKED); int rc = check_txn(txn, MDBX_TXN_BLOCKED);
@ -68,16 +61,14 @@ int mdbx_cursor_bind(const MDBX_txn *txn, MDBX_cursor *mc, MDBX_dbi dbi) {
if (unlikely(mc->backup)) /* Cursor from parent transaction */ { if (unlikely(mc->backup)) /* Cursor from parent transaction */ {
cASSERT(mc, mc->signature == cur_signature_live); cASSERT(mc, mc->signature == cur_signature_live);
if (unlikely(cursor_dbi(mc) != dbi || if (unlikely(cursor_dbi(mc) != dbi ||
/* paranoia */ mc->signature != cur_signature_live || /* paranoia */ mc->signature != cur_signature_live || mc->txn != txn))
mc->txn != txn))
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
cASSERT(mc, mc->tree == &txn->dbs[dbi]); cASSERT(mc, mc->tree == &txn->dbs[dbi]);
cASSERT(mc, mc->clc == &txn->env->kvs[dbi].clc); cASSERT(mc, mc->clc == &txn->env->kvs[dbi].clc);
cASSERT(mc, cursor_dbi(mc) == dbi); cASSERT(mc, cursor_dbi(mc) == dbi);
return likely(cursor_dbi(mc) == dbi && return likely(cursor_dbi(mc) == dbi &&
/* paranoia */ mc->signature == cur_signature_live && /* paranoia */ mc->signature == cur_signature_live && mc->txn == txn)
mc->txn == txn)
? MDBX_SUCCESS ? MDBX_SUCCESS
: LOG_IFERR(MDBX_EINVAL) /* Disallow change DBI in nested : LOG_IFERR(MDBX_EINVAL) /* Disallow change DBI in nested
transactions */ transactions */
@ -105,9 +96,7 @@ int mdbx_cursor_unbind(MDBX_cursor *mc) {
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
if (unlikely(mc->signature != cur_signature_live)) if (unlikely(mc->signature != cur_signature_live))
return (mc->signature == cur_signature_ready4dispose) return (mc->signature == cur_signature_ready4dispose) ? MDBX_SUCCESS : LOG_IFERR(MDBX_EBADSIGN);
? MDBX_SUCCESS
: LOG_IFERR(MDBX_EBADSIGN);
if (unlikely(mc->backup)) /* Cursor from parent transaction */ if (unlikely(mc->backup)) /* Cursor from parent transaction */
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
@ -116,9 +105,7 @@ int mdbx_cursor_unbind(MDBX_cursor *mc) {
cASSERT(mc, mc->signature == cur_signature_live); cASSERT(mc, mc->signature == cur_signature_live);
cASSERT(mc, !mc->backup); cASSERT(mc, !mc->backup);
if (unlikely(!mc->txn || mc->txn->signature != txn_signature)) { if (unlikely(!mc->txn || mc->txn->signature != txn_signature)) {
ERROR("Wrong cursor's transaction %p 0x%x", ERROR("Wrong cursor's transaction %p 0x%x", __Wpedantic_format_voidptr(mc->txn), mc->txn ? mc->txn->signature : 0);
__Wpedantic_format_voidptr(mc->txn),
mc->txn ? mc->txn->signature : 0);
return LOG_IFERR(MDBX_PROBLEM); return LOG_IFERR(MDBX_PROBLEM);
} }
if (mc->next != mc) { if (mc->next != mc) {
@ -160,8 +147,7 @@ int mdbx_cursor_open(const MDBX_txn *txn, MDBX_dbi dbi, MDBX_cursor **ret) {
void mdbx_cursor_close(MDBX_cursor *mc) { void mdbx_cursor_close(MDBX_cursor *mc) {
if (likely(mc)) { if (likely(mc)) {
ENSURE(nullptr, mc->signature == cur_signature_live || ENSURE(nullptr, mc->signature == cur_signature_live || mc->signature == cur_signature_ready4dispose);
mc->signature == cur_signature_ready4dispose);
MDBX_txn *const txn = mc->txn; MDBX_txn *const txn = mc->txn;
if (!mc->backup) { if (!mc->backup) {
mc->txn = nullptr; mc->txn = nullptr;
@ -194,9 +180,7 @@ int mdbx_cursor_copy(const MDBX_cursor *src, MDBX_cursor *dest) {
if (unlikely(!src)) if (unlikely(!src))
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
if (unlikely(src->signature != cur_signature_live)) if (unlikely(src->signature != cur_signature_live))
return LOG_IFERR((src->signature == cur_signature_ready4dispose) return LOG_IFERR((src->signature == cur_signature_ready4dispose) ? MDBX_EINVAL : MDBX_EBADSIGN);
? MDBX_EINVAL
: MDBX_EBADSIGN);
int rc = mdbx_cursor_bind(src->txn, dest, cursor_dbi(src)); int rc = mdbx_cursor_bind(src->txn, dest, cursor_dbi(src));
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
@ -229,8 +213,7 @@ int mdbx_txn_release_all_cursors(const MDBX_txn *txn, bool unbind) {
TXN_FOREACH_DBI_FROM(txn, i, MAIN_DBI) { TXN_FOREACH_DBI_FROM(txn, i, MAIN_DBI) {
while (txn->cursors[i]) { while (txn->cursors[i]) {
MDBX_cursor *mc = txn->cursors[i]; MDBX_cursor *mc = txn->cursors[i];
ENSURE(nullptr, mc->signature == cur_signature_live && ENSURE(nullptr, mc->signature == cur_signature_live && (mc->next != mc) && !mc->backup);
(mc->next != mc) && !mc->backup);
rc = likely(rc < INT_MAX) ? rc + 1 : rc; rc = likely(rc < INT_MAX) ? rc + 1 : rc;
txn->cursors[i] = mc->next; txn->cursors[i] = mc->next;
mc->next = mc; mc->next = mc;
@ -250,8 +233,7 @@ int mdbx_txn_release_all_cursors(const MDBX_txn *txn, bool unbind) {
return rc; return rc;
} }
int mdbx_cursor_compare(const MDBX_cursor *l, const MDBX_cursor *r, int mdbx_cursor_compare(const MDBX_cursor *l, const MDBX_cursor *r, bool ignore_multival) {
bool ignore_multival) {
const int incomparable = INT16_MAX + 1; const int incomparable = INT16_MAX + 1;
if (unlikely(!l)) if (unlikely(!l))
return r ? -incomparable * 9 : 0; return r ? -incomparable * 9 : 0;
@ -267,8 +249,7 @@ int mdbx_cursor_compare(const MDBX_cursor *l, const MDBX_cursor *r,
if (l->txn->env != r->txn->env) if (l->txn->env != r->txn->env)
return (l->txn->env > r->txn->env) ? incomparable * 7 : -incomparable * 7; return (l->txn->env > r->txn->env) ? incomparable * 7 : -incomparable * 7;
if (l->txn->txnid != r->txn->txnid) if (l->txn->txnid != r->txn->txnid)
return (l->txn->txnid > r->txn->txnid) ? incomparable * 6 return (l->txn->txnid > r->txn->txnid) ? incomparable * 6 : -incomparable * 6;
: -incomparable * 6;
return (l->clc > r->clc) ? incomparable * 5 : -incomparable * 5; return (l->clc > r->clc) ? incomparable * 5 : -incomparable * 5;
} }
assert(cursor_dbi(l) == cursor_dbi(r)); assert(cursor_dbi(l) == cursor_dbi(r));
@ -333,9 +314,7 @@ int mdbx_cursor_count(const MDBX_cursor *mc, size_t *countp) {
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
if (unlikely(mc->signature != cur_signature_live)) if (unlikely(mc->signature != cur_signature_live))
return LOG_IFERR((mc->signature == cur_signature_ready4dispose) return LOG_IFERR((mc->signature == cur_signature_ready4dispose) ? MDBX_EINVAL : MDBX_EBADSIGN);
? MDBX_EINVAL
: MDBX_EBADSIGN);
int rc = check_txn(mc->txn, MDBX_TXN_BLOCKED); int rc = check_txn(mc->txn, MDBX_TXN_BLOCKED);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
@ -349,9 +328,8 @@ int mdbx_cursor_count(const MDBX_cursor *mc, size_t *countp) {
const page_t *mp = mc->pg[mc->top]; const page_t *mp = mc->pg[mc->top];
const node_t *node = page_node(mp, mc->ki[mc->top]); const node_t *node = page_node(mp, mc->ki[mc->top]);
cASSERT(mc, node_flags(node) & N_DUP); cASSERT(mc, node_flags(node) & N_DUP);
*countp = unlikely(mc->subcur->nested_tree.items > PTRDIFF_MAX) *countp =
? PTRDIFF_MAX unlikely(mc->subcur->nested_tree.items > PTRDIFF_MAX) ? PTRDIFF_MAX : (size_t)mc->subcur->nested_tree.items;
: (size_t)mc->subcur->nested_tree.items;
} }
} }
return MDBX_SUCCESS; return MDBX_SUCCESS;
@ -362,9 +340,7 @@ int mdbx_cursor_on_first(const MDBX_cursor *mc) {
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
if (unlikely(mc->signature != cur_signature_live)) if (unlikely(mc->signature != cur_signature_live))
return LOG_IFERR((mc->signature == cur_signature_ready4dispose) return LOG_IFERR((mc->signature == cur_signature_ready4dispose) ? MDBX_EINVAL : MDBX_EBADSIGN);
? MDBX_EINVAL
: MDBX_EBADSIGN);
for (intptr_t i = 0; i <= mc->top; ++i) { for (intptr_t i = 0; i <= mc->top; ++i) {
if (mc->ki[i]) if (mc->ki[i])
@ -379,9 +355,7 @@ int mdbx_cursor_on_first_dup(const MDBX_cursor *mc) {
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
if (unlikely(mc->signature != cur_signature_live)) if (unlikely(mc->signature != cur_signature_live))
return LOG_IFERR((mc->signature == cur_signature_ready4dispose) return LOG_IFERR((mc->signature == cur_signature_ready4dispose) ? MDBX_EINVAL : MDBX_EBADSIGN);
? MDBX_EINVAL
: MDBX_EBADSIGN);
if (is_filled(mc) && mc->subcur) { if (is_filled(mc) && mc->subcur) {
mc = &mc->subcur->cursor; mc = &mc->subcur->cursor;
@ -399,9 +373,7 @@ int mdbx_cursor_on_last(const MDBX_cursor *mc) {
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
if (unlikely(mc->signature != cur_signature_live)) if (unlikely(mc->signature != cur_signature_live))
return LOG_IFERR((mc->signature == cur_signature_ready4dispose) return LOG_IFERR((mc->signature == cur_signature_ready4dispose) ? MDBX_EINVAL : MDBX_EBADSIGN);
? MDBX_EINVAL
: MDBX_EBADSIGN);
for (intptr_t i = 0; i <= mc->top; ++i) { for (intptr_t i = 0; i <= mc->top; ++i) {
size_t nkeys = page_numkeys(mc->pg[i]); size_t nkeys = page_numkeys(mc->pg[i]);
@ -417,9 +389,7 @@ int mdbx_cursor_on_last_dup(const MDBX_cursor *mc) {
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
if (unlikely(mc->signature != cur_signature_live)) if (unlikely(mc->signature != cur_signature_live))
return LOG_IFERR((mc->signature == cur_signature_ready4dispose) return LOG_IFERR((mc->signature == cur_signature_ready4dispose) ? MDBX_EINVAL : MDBX_EBADSIGN);
? MDBX_EINVAL
: MDBX_EBADSIGN);
if (is_filled(mc) && mc->subcur) { if (is_filled(mc) && mc->subcur) {
mc = &mc->subcur->cursor; mc = &mc->subcur->cursor;
@ -438,22 +408,17 @@ int mdbx_cursor_eof(const MDBX_cursor *mc) {
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
if (unlikely(mc->signature != cur_signature_live)) if (unlikely(mc->signature != cur_signature_live))
return LOG_IFERR((mc->signature == cur_signature_ready4dispose) return LOG_IFERR((mc->signature == cur_signature_ready4dispose) ? MDBX_EINVAL : MDBX_EBADSIGN);
? MDBX_EINVAL
: MDBX_EBADSIGN);
return is_eof(mc) ? MDBX_RESULT_TRUE : MDBX_RESULT_FALSE; return is_eof(mc) ? MDBX_RESULT_TRUE : MDBX_RESULT_FALSE;
} }
int mdbx_cursor_get(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data, int mdbx_cursor_get(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data, MDBX_cursor_op op) {
MDBX_cursor_op op) {
if (unlikely(mc == nullptr)) if (unlikely(mc == nullptr))
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
if (unlikely(mc->signature != cur_signature_live)) if (unlikely(mc->signature != cur_signature_live))
return LOG_IFERR((mc->signature == cur_signature_ready4dispose) return LOG_IFERR((mc->signature == cur_signature_ready4dispose) ? MDBX_EINVAL : MDBX_EBADSIGN);
? MDBX_EINVAL
: MDBX_EBADSIGN);
int rc = check_txn(mc->txn, MDBX_TXN_BLOCKED); int rc = check_txn(mc->txn, MDBX_TXN_BLOCKED);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
@ -465,8 +430,7 @@ int mdbx_cursor_get(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data,
return LOG_IFERR(cursor_ops(mc, key, data, op)); return LOG_IFERR(cursor_ops(mc, key, data, op));
} }
__hot static int scan_confinue(MDBX_cursor *mc, MDBX_predicate_func *predicate, __hot static int scan_confinue(MDBX_cursor *mc, MDBX_predicate_func *predicate, void *context, void *arg, MDBX_val *key,
void *context, void *arg, MDBX_val *key,
MDBX_val *value, MDBX_cursor_op turn_op) { MDBX_val *value, MDBX_cursor_op turn_op) {
int rc; int rc;
switch (turn_op) { switch (turn_op) {
@ -528,22 +492,19 @@ __hot static int scan_confinue(MDBX_cursor *mc, MDBX_predicate_func *predicate,
} }
} }
int mdbx_cursor_scan(MDBX_cursor *mc, MDBX_predicate_func *predicate, int mdbx_cursor_scan(MDBX_cursor *mc, MDBX_predicate_func *predicate, void *context, MDBX_cursor_op start_op,
void *context, MDBX_cursor_op start_op,
MDBX_cursor_op turn_op, void *arg) { MDBX_cursor_op turn_op, void *arg) {
if (unlikely(!predicate)) if (unlikely(!predicate))
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
const unsigned valid_start_mask = const unsigned valid_start_mask = 1 << MDBX_FIRST | 1 << MDBX_FIRST_DUP | 1 << MDBX_LAST | 1 << MDBX_LAST_DUP |
1 << MDBX_FIRST | 1 << MDBX_FIRST_DUP | 1 << MDBX_LAST | 1 << MDBX_GET_CURRENT | 1 << MDBX_GET_MULTIPLE;
1 << MDBX_LAST_DUP | 1 << MDBX_GET_CURRENT | 1 << MDBX_GET_MULTIPLE;
if (unlikely(start_op > 30 || ((1 << start_op) & valid_start_mask) == 0)) if (unlikely(start_op > 30 || ((1 << start_op) & valid_start_mask) == 0))
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
const unsigned valid_turn_mask = const unsigned valid_turn_mask = 1 << MDBX_NEXT | 1 << MDBX_NEXT_DUP | 1 << MDBX_NEXT_NODUP | 1 << MDBX_PREV |
1 << MDBX_NEXT | 1 << MDBX_NEXT_DUP | 1 << MDBX_NEXT_NODUP | 1 << MDBX_PREV_DUP | 1 << MDBX_PREV_NODUP | 1 << MDBX_NEXT_MULTIPLE |
1 << MDBX_PREV | 1 << MDBX_PREV_DUP | 1 << MDBX_PREV_NODUP | 1 << MDBX_PREV_MULTIPLE;
1 << MDBX_NEXT_MULTIPLE | 1 << MDBX_PREV_MULTIPLE;
if (unlikely(turn_op > 30 || ((1 << turn_op) & valid_turn_mask) == 0)) if (unlikely(turn_op > 30 || ((1 << turn_op) & valid_turn_mask) == 0))
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
@ -551,28 +512,22 @@ int mdbx_cursor_scan(MDBX_cursor *mc, MDBX_predicate_func *predicate,
int rc = mdbx_cursor_get(mc, &key, &value, start_op); int rc = mdbx_cursor_get(mc, &key, &value, start_op);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
return LOG_IFERR(rc); return LOG_IFERR(rc);
return LOG_IFERR( return LOG_IFERR(scan_confinue(mc, predicate, context, arg, &key, &value, turn_op));
scan_confinue(mc, predicate, context, arg, &key, &value, turn_op));
} }
int mdbx_cursor_scan_from(MDBX_cursor *mc, MDBX_predicate_func *predicate, int mdbx_cursor_scan_from(MDBX_cursor *mc, MDBX_predicate_func *predicate, void *context, MDBX_cursor_op from_op,
void *context, MDBX_cursor_op from_op, MDBX_val *key, MDBX_val *key, MDBX_val *value, MDBX_cursor_op turn_op, void *arg) {
MDBX_val *value, MDBX_cursor_op turn_op, void *arg) {
if (unlikely(!predicate || !key)) if (unlikely(!predicate || !key))
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
const unsigned valid_start_mask = const unsigned valid_start_mask = 1 << MDBX_GET_BOTH | 1 << MDBX_GET_BOTH_RANGE | 1 << MDBX_SET_KEY |
1 << MDBX_GET_BOTH | 1 << MDBX_GET_BOTH_RANGE | 1 << MDBX_SET_KEY | 1 << MDBX_GET_MULTIPLE | 1 << MDBX_SET_LOWERBOUND | 1 << MDBX_SET_UPPERBOUND;
1 << MDBX_GET_MULTIPLE | 1 << MDBX_SET_LOWERBOUND | if (unlikely(from_op < MDBX_TO_KEY_LESSER_THAN && ((1 << from_op) & valid_start_mask) == 0))
1 << MDBX_SET_UPPERBOUND;
if (unlikely(from_op < MDBX_TO_KEY_LESSER_THAN &&
((1 << from_op) & valid_start_mask) == 0))
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
const unsigned valid_turn_mask = const unsigned valid_turn_mask = 1 << MDBX_NEXT | 1 << MDBX_NEXT_DUP | 1 << MDBX_NEXT_NODUP | 1 << MDBX_PREV |
1 << MDBX_NEXT | 1 << MDBX_NEXT_DUP | 1 << MDBX_NEXT_NODUP | 1 << MDBX_PREV_DUP | 1 << MDBX_PREV_NODUP | 1 << MDBX_NEXT_MULTIPLE |
1 << MDBX_PREV | 1 << MDBX_PREV_DUP | 1 << MDBX_PREV_NODUP | 1 << MDBX_PREV_MULTIPLE;
1 << MDBX_NEXT_MULTIPLE | 1 << MDBX_PREV_MULTIPLE;
if (unlikely(turn_op > 30 || ((1 << turn_op) & valid_turn_mask) == 0)) if (unlikely(turn_op > 30 || ((1 << turn_op) & valid_turn_mask) == 0))
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
@ -588,12 +543,10 @@ int mdbx_cursor_scan_from(MDBX_cursor *mc, MDBX_predicate_func *predicate,
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
return LOG_IFERR(rc); return LOG_IFERR(rc);
} }
return LOG_IFERR( return LOG_IFERR(scan_confinue(mc, predicate, context, arg, key, value, turn_op));
scan_confinue(mc, predicate, context, arg, key, value, turn_op));
} }
int mdbx_cursor_get_batch(MDBX_cursor *mc, size_t *count, MDBX_val *pairs, int mdbx_cursor_get_batch(MDBX_cursor *mc, size_t *count, MDBX_val *pairs, size_t limit, MDBX_cursor_op op) {
size_t limit, MDBX_cursor_op op) {
if (unlikely(!count)) if (unlikely(!count))
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
@ -602,9 +555,7 @@ int mdbx_cursor_get_batch(MDBX_cursor *mc, size_t *count, MDBX_val *pairs,
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
if (unlikely(mc->signature != cur_signature_live)) if (unlikely(mc->signature != cur_signature_live))
return LOG_IFERR((mc->signature == cur_signature_ready4dispose) return LOG_IFERR((mc->signature == cur_signature_ready4dispose) ? MDBX_EINVAL : MDBX_EBADSIGN);
? MDBX_EINVAL
: MDBX_EBADSIGN);
int rc = check_txn(mc->txn, MDBX_TXN_BLOCKED); int rc = check_txn(mc->txn, MDBX_TXN_BLOCKED);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
@ -661,11 +612,9 @@ int mdbx_cursor_get_batch(MDBX_cursor *mc, size_t *count, MDBX_val *pairs,
} }
mp = mc->pg[mc->top]; mp = mc->pg[mc->top];
DEBUG("next page is %" PRIaPGNO ", key index %u", mp->pgno, DEBUG("next page is %" PRIaPGNO ", key index %u", mp->pgno, mc->ki[mc->top]);
mc->ki[mc->top]);
if (!MDBX_DISABLE_VALIDATION && unlikely(!check_leaf_type(mc, mp))) { if (!MDBX_DISABLE_VALIDATION && unlikely(!check_leaf_type(mc, mp))) {
ERROR("unexpected leaf-page #%" PRIaPGNO " type 0x%x seen by cursor", ERROR("unexpected leaf-page #%" PRIaPGNO " type 0x%x seen by cursor", mp->pgno, mp->flags);
mp->pgno, mp->flags);
rc = MDBX_CORRUPTED; rc = MDBX_CORRUPTED;
goto bailout; goto bailout;
} }
@ -686,8 +635,7 @@ int mdbx_cursor_set_userctx(MDBX_cursor *mc, void *ctx) {
if (unlikely(!mc)) if (unlikely(!mc))
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
if (unlikely(mc->signature != cur_signature_ready4dispose && if (unlikely(mc->signature != cur_signature_ready4dispose && mc->signature != cur_signature_live))
mc->signature != cur_signature_live))
return LOG_IFERR(MDBX_EBADSIGN); return LOG_IFERR(MDBX_EBADSIGN);
cursor_couple_t *couple = container_of(mc, cursor_couple_t, outer); cursor_couple_t *couple = container_of(mc, cursor_couple_t, outer);
@ -699,8 +647,7 @@ void *mdbx_cursor_get_userctx(const MDBX_cursor *mc) {
if (unlikely(!mc)) if (unlikely(!mc))
return nullptr; return nullptr;
if (unlikely(mc->signature != cur_signature_ready4dispose && if (unlikely(mc->signature != cur_signature_ready4dispose && mc->signature != cur_signature_live))
mc->signature != cur_signature_live))
return nullptr; return nullptr;
cursor_couple_t *couple = container_of(mc, cursor_couple_t, outer); cursor_couple_t *couple = container_of(mc, cursor_couple_t, outer);
@ -726,15 +673,12 @@ MDBX_dbi mdbx_cursor_dbi(const MDBX_cursor *mc) {
/*----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*/
int mdbx_cursor_put(MDBX_cursor *mc, const MDBX_val *key, MDBX_val *data, int mdbx_cursor_put(MDBX_cursor *mc, const MDBX_val *key, MDBX_val *data, MDBX_put_flags_t flags) {
MDBX_put_flags_t flags) {
if (unlikely(mc == nullptr || key == nullptr || data == nullptr)) if (unlikely(mc == nullptr || key == nullptr || data == nullptr))
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
if (unlikely(mc->signature != cur_signature_live)) if (unlikely(mc->signature != cur_signature_live))
return LOG_IFERR((mc->signature == cur_signature_ready4dispose) return LOG_IFERR((mc->signature == cur_signature_ready4dispose) ? MDBX_EINVAL : MDBX_EBADSIGN);
? MDBX_EINVAL
: MDBX_EBADSIGN);
int rc = check_txn_rw(mc->txn, MDBX_TXN_BLOCKED); int rc = check_txn_rw(mc->txn, MDBX_TXN_BLOCKED);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
@ -754,12 +698,9 @@ int mdbx_cursor_put(MDBX_cursor *mc, const MDBX_val *key, MDBX_val *data,
const size_t dcount = data[1].iov_len; const size_t dcount = data[1].iov_len;
if (unlikely(dcount < 2 || data->iov_len == 0)) if (unlikely(dcount < 2 || data->iov_len == 0))
return LOG_IFERR(MDBX_BAD_VALSIZE); return LOG_IFERR(MDBX_BAD_VALSIZE);
if (unlikely(mc->tree->dupfix_size != data->iov_len) && if (unlikely(mc->tree->dupfix_size != data->iov_len) && mc->tree->dupfix_size)
mc->tree->dupfix_size)
return LOG_IFERR(MDBX_BAD_VALSIZE); return LOG_IFERR(MDBX_BAD_VALSIZE);
if (unlikely(dcount > if (unlikely(dcount > MAX_MAPSIZE / 2 / (BRANCH_NODE_MAX(MDBX_MAX_PAGESIZE) - NODESIZE))) {
MAX_MAPSIZE / 2 /
(BRANCH_NODE_MAX(MDBX_MAX_PAGESIZE) - NODESIZE))) {
/* checking for multiplication overflow */ /* checking for multiplication overflow */
if (unlikely(dcount > MAX_MAPSIZE / 2 / data->iov_len)) if (unlikely(dcount > MAX_MAPSIZE / 2 / data->iov_len))
return LOG_IFERR(MDBX_TOO_LARGE); return LOG_IFERR(MDBX_TOO_LARGE);
@ -767,15 +708,13 @@ int mdbx_cursor_put(MDBX_cursor *mc, const MDBX_val *key, MDBX_val *data,
} }
if (flags & MDBX_RESERVE) { if (flags & MDBX_RESERVE) {
if (unlikely(mc->tree->flags & (MDBX_DUPSORT | MDBX_REVERSEDUP | if (unlikely(mc->tree->flags & (MDBX_DUPSORT | MDBX_REVERSEDUP | MDBX_INTEGERDUP | MDBX_DUPFIXED)))
MDBX_INTEGERDUP | MDBX_DUPFIXED)))
return LOG_IFERR(MDBX_INCOMPATIBLE); return LOG_IFERR(MDBX_INCOMPATIBLE);
data->iov_base = nullptr; data->iov_base = nullptr;
} }
if (unlikely(mc->txn->flags & (MDBX_TXN_RDONLY | MDBX_TXN_BLOCKED))) if (unlikely(mc->txn->flags & (MDBX_TXN_RDONLY | MDBX_TXN_BLOCKED)))
return LOG_IFERR((mc->txn->flags & MDBX_TXN_RDONLY) ? MDBX_EACCESS return LOG_IFERR((mc->txn->flags & MDBX_TXN_RDONLY) ? MDBX_EACCESS : MDBX_BAD_TXN);
: MDBX_BAD_TXN);
return LOG_IFERR(cursor_put_checklen(mc, key, data, flags)); return LOG_IFERR(cursor_put_checklen(mc, key, data, flags));
} }
@ -785,9 +724,7 @@ int mdbx_cursor_del(MDBX_cursor *mc, MDBX_put_flags_t flags) {
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
if (unlikely(mc->signature != cur_signature_live)) if (unlikely(mc->signature != cur_signature_live))
return LOG_IFERR((mc->signature == cur_signature_ready4dispose) return LOG_IFERR((mc->signature == cur_signature_ready4dispose) ? MDBX_EINVAL : MDBX_EBADSIGN);
? MDBX_EINVAL
: MDBX_EBADSIGN);
int rc = check_txn_rw(mc->txn, MDBX_TXN_BLOCKED); int rc = check_txn_rw(mc->txn, MDBX_TXN_BLOCKED);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
@ -804,9 +741,7 @@ __cold int mdbx_cursor_ignord(MDBX_cursor *mc) {
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
if (unlikely(mc->signature != cur_signature_live)) if (unlikely(mc->signature != cur_signature_live))
return LOG_IFERR((mc->signature == cur_signature_ready4dispose) return LOG_IFERR((mc->signature == cur_signature_ready4dispose) ? MDBX_EINVAL : MDBX_EBADSIGN);
? MDBX_EINVAL
: MDBX_EBADSIGN);
mc->checking |= z_ignord; mc->checking |= z_ignord;
if (mc->subcur) if (mc->subcur)


@ -7,8 +7,7 @@ __cold static intptr_t reasonable_db_maxsize(void) {
static intptr_t cached_result; static intptr_t cached_result;
if (cached_result == 0) { if (cached_result == 0) {
intptr_t pagesize, total_ram_pages; intptr_t pagesize, total_ram_pages;
if (unlikely(mdbx_get_sysraminfo(&pagesize, &total_ram_pages, nullptr) != if (unlikely(mdbx_get_sysraminfo(&pagesize, &total_ram_pages, nullptr) != MDBX_SUCCESS))
MDBX_SUCCESS))
/* the 32-bit limit is good enough for fallback */ /* the 32-bit limit is good enough for fallback */
return cached_result = MAX_MAPSIZE32; return cached_result = MAX_MAPSIZE32;
@ -24,8 +23,7 @@ __cold static intptr_t reasonable_db_maxsize(void) {
const size_t floor = floor_powerof2(cached_result, unit); const size_t floor = floor_powerof2(cached_result, unit);
const size_t ceil = ceil_powerof2(cached_result, unit); const size_t ceil = ceil_powerof2(cached_result, unit);
const size_t threshold = (size_t)cached_result >> 4; const size_t threshold = (size_t)cached_result >> 4;
const bool down = const bool down = cached_result - floor < ceil - cached_result || ceil > MAX_MAPSIZE;
cached_result - floor < ceil - cached_result || ceil > MAX_MAPSIZE;
if (threshold < (down ? cached_result - floor : ceil - cached_result)) if (threshold < (down ? cached_result - floor : ceil - cached_result))
break; break;
cached_result = down ? floor : ceil; cached_result = down ? floor : ceil;
@ -39,14 +37,12 @@ __cold static int check_alternative_lck_absent(const pathchar_t *lck_pathname) {
if (unlikely(err != MDBX_RESULT_FALSE)) { if (unlikely(err != MDBX_RESULT_FALSE)) {
if (err == MDBX_RESULT_TRUE) if (err == MDBX_RESULT_TRUE)
err = MDBX_DUPLICATED_CLK; err = MDBX_DUPLICATED_CLK;
ERROR("Alternative/Duplicate LCK-file '%" MDBX_PRIsPATH "' error %d", ERROR("Alternative/Duplicate LCK-file '%" MDBX_PRIsPATH "' error %d", lck_pathname, err);
lck_pathname, err);
} }
return err; return err;
} }
__cold static int env_handle_pathname(MDBX_env *env, const pathchar_t *pathname, __cold static int env_handle_pathname(MDBX_env *env, const pathchar_t *pathname, const mdbx_mode_t mode) {
const mdbx_mode_t mode) {
memset(&env->pathname, 0, sizeof(env->pathname)); memset(&env->pathname, 0, sizeof(env->pathname));
if (unlikely(!pathname || !*pathname)) if (unlikely(!pathname || !*pathname))
return MDBX_EINVAL; return MDBX_EINVAL;
@ -63,8 +59,7 @@ __cold static int env_handle_pathname(MDBX_env *env, const pathchar_t *pathname,
return rc; return rc;
/* auto-create directory if requested */ /* auto-create directory if requested */
if ((env->flags & MDBX_NOSUBDIR) == 0 && if ((env->flags & MDBX_NOSUBDIR) == 0 && !CreateDirectoryW(pathname, nullptr)) {
!CreateDirectoryW(pathname, nullptr)) {
rc = GetLastError(); rc = GetLastError();
if (rc != ERROR_ALREADY_EXISTS) if (rc != ERROR_ALREADY_EXISTS)
return rc; return rc;
@ -87,8 +82,7 @@ __cold static int env_handle_pathname(MDBX_env *env, const pathchar_t *pathname,
/* auto-create directory if requested */ /* auto-create directory if requested */
const mdbx_mode_t dir_mode = const mdbx_mode_t dir_mode =
(/* inherit read/write permissions for group and others */ mode & (/* inherit read/write permissions for group and others */ mode & (S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH)) |
(S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH)) |
/* always add read/write/search for owner */ S_IRWXU | /* always add read/write/search for owner */ S_IRWXU |
((mode & S_IRGRP) ? /* +search if readable by group */ S_IXGRP : 0) | ((mode & S_IRGRP) ? /* +search if readable by group */ S_IXGRP : 0) |
((mode & S_IROTH) ? /* +search if readable by others */ S_IXOTH : 0); ((mode & S_IROTH) ? /* +search if readable by others */ S_IXOTH : 0);
@ -120,15 +114,11 @@ __cold static int env_handle_pathname(MDBX_env *env, const pathchar_t *pathname,
size_t base_len = pathname_len; size_t base_len = pathname_len;
static const size_t dxb_name_len = ARRAY_LENGTH(dxb_name) - 1; static const size_t dxb_name_len = ARRAY_LENGTH(dxb_name) - 1;
if (env->flags & MDBX_NOSUBDIR) { if (env->flags & MDBX_NOSUBDIR) {
if (base_len > dxb_name_len && if (base_len > dxb_name_len && osal_pathequal(pathname + base_len - dxb_name_len, dxb_name, dxb_name_len)) {
osal_pathequal(pathname + base_len - dxb_name_len, dxb_name,
dxb_name_len)) {
env->flags -= MDBX_NOSUBDIR; env->flags -= MDBX_NOSUBDIR;
base_len -= dxb_name_len; base_len -= dxb_name_len;
} else if (base_len == dxb_name_len - 1 && osal_isdirsep(dxb_name[0]) && } else if (base_len == dxb_name_len - 1 && osal_isdirsep(dxb_name[0]) && osal_isdirsep(lck_name[0]) &&
osal_isdirsep(lck_name[0]) && osal_pathequal(pathname + base_len - dxb_name_len + 1, dxb_name + 1, dxb_name_len - 1)) {
osal_pathequal(pathname + base_len - dxb_name_len + 1,
dxb_name + 1, dxb_name_len - 1)) {
env->flags -= MDBX_NOSUBDIR; env->flags -= MDBX_NOSUBDIR;
base_len -= dxb_name_len - 1; base_len -= dxb_name_len - 1;
} }
@ -136,11 +126,9 @@ __cold static int env_handle_pathname(MDBX_env *env, const pathchar_t *pathname,
const size_t suflen_with_NOSUBDIR = sizeof(lock_suffix) + sizeof(pathchar_t); const size_t suflen_with_NOSUBDIR = sizeof(lock_suffix) + sizeof(pathchar_t);
const size_t suflen_without_NOSUBDIR = sizeof(lck_name) + sizeof(dxb_name); const size_t suflen_without_NOSUBDIR = sizeof(lck_name) + sizeof(dxb_name);
const size_t enough4any = (suflen_with_NOSUBDIR > suflen_without_NOSUBDIR) const size_t enough4any =
? suflen_with_NOSUBDIR (suflen_with_NOSUBDIR > suflen_without_NOSUBDIR) ? suflen_with_NOSUBDIR : suflen_without_NOSUBDIR;
: suflen_without_NOSUBDIR; const size_t bytes_needed = sizeof(pathchar_t) * (base_len * 2 + pathname_len + 1) + enough4any;
const size_t bytes_needed =
sizeof(pathchar_t) * (base_len * 2 + pathname_len + 1) + enough4any;
env->pathname.buffer = osal_malloc(bytes_needed); env->pathname.buffer = osal_malloc(bytes_needed);
if (!env->pathname.buffer) if (!env->pathname.buffer)
return MDBX_ENOMEM; return MDBX_ENOMEM;
@ -153,8 +141,7 @@ __cold static int env_handle_pathname(MDBX_env *env, const pathchar_t *pathname,
if (base_len) { if (base_len) {
memcpy(buf, pathname, sizeof(pathchar_t) * pathname_len); memcpy(buf, pathname, sizeof(pathchar_t) * pathname_len);
if (env->flags & MDBX_NOSUBDIR) { if (env->flags & MDBX_NOSUBDIR) {
const pathchar_t *const lck_ext = const pathchar_t *const lck_ext = osal_fileext(lck_name, ARRAY_LENGTH(lck_name));
osal_fileext(lck_name, ARRAY_LENGTH(lck_name));
if (lck_ext) { if (lck_ext) {
pathchar_t *pathname_ext = osal_fileext(buf, pathname_len); pathchar_t *pathname_ext = osal_fileext(buf, pathname_len);
memcpy(pathname_ext ? pathname_ext : buf + pathname_len, lck_ext, memcpy(pathname_ext ? pathname_ext : buf + pathname_len, lck_ext,
@ -181,14 +168,11 @@ __cold static int env_handle_pathname(MDBX_env *env, const pathchar_t *pathname,
memcpy(buf + dxb_name_len - 1, lock_suffix, sizeof(lock_suffix)); memcpy(buf + dxb_name_len - 1, lock_suffix, sizeof(lock_suffix));
rc = check_alternative_lck_absent(buf); rc = check_alternative_lck_absent(buf);
memcpy(env->pathname.dxb, dxb_name + 1, memcpy(env->pathname.dxb, dxb_name + 1, sizeof(dxb_name) - sizeof(pathchar_t));
sizeof(dxb_name) - sizeof(pathchar_t)); memcpy(env->pathname.lck, lck_name + 1, sizeof(lck_name) - sizeof(pathchar_t));
memcpy(env->pathname.lck, lck_name + 1,
sizeof(lck_name) - sizeof(pathchar_t));
} }
memcpy(env->pathname.specified, pathname, memcpy(env->pathname.specified, pathname, sizeof(pathchar_t) * (pathname_len + 1));
sizeof(pathchar_t) * (pathname_len + 1));
return rc; return rc;
} }
@ -212,8 +196,7 @@ __cold int mdbx_env_create(MDBX_env **penv) {
#endif /* MDBX_64BIT_ATOMIC */ #endif /* MDBX_64BIT_ATOMIC */
#endif /* MDBX_HAVE_C11ATOMICS */ #endif /* MDBX_HAVE_C11ATOMICS */
if (unlikely(!is_powerof2(globals.sys_pagesize) || if (unlikely(!is_powerof2(globals.sys_pagesize) || globals.sys_pagesize < MDBX_MIN_PAGESIZE)) {
globals.sys_pagesize < MDBX_MIN_PAGESIZE)) {
ERROR("unsuitable system pagesize %u", globals.sys_pagesize); ERROR("unsuitable system pagesize %u", globals.sys_pagesize);
return LOG_IFERR(MDBX_INCOMPATIBLE); return LOG_IFERR(MDBX_INCOMPATIBLE);
} }
@ -222,10 +205,8 @@ __cold int mdbx_env_create(MDBX_env **penv) {
if (unlikely(globals.linux_kernel_version < 0x04000000)) { if (unlikely(globals.linux_kernel_version < 0x04000000)) {
/* 2022-09-01: Прошло уже более двух лет после окончания какой-либо /* 2022-09-01: Прошло уже более двух лет после окончания какой-либо
* поддержки самого "долгоиграющего" ядра 3.16.85 ветки 3.x */ * поддержки самого "долгоиграющего" ядра 3.16.85 ветки 3.x */
ERROR("too old linux kernel %u.%u.%u.%u, the >= 4.0.0 is required", ERROR("too old linux kernel %u.%u.%u.%u, the >= 4.0.0 is required", globals.linux_kernel_version >> 24,
globals.linux_kernel_version >> 24, (globals.linux_kernel_version >> 16) & 255, (globals.linux_kernel_version >> 8) & 255,
(globals.linux_kernel_version >> 16) & 255,
(globals.linux_kernel_version >> 8) & 255,
globals.linux_kernel_version & 255); globals.linux_kernel_version & 255);
return LOG_IFERR(MDBX_INCOMPATIBLE); return LOG_IFERR(MDBX_INCOMPATIBLE);
} }
@ -237,14 +218,11 @@ __cold int mdbx_env_create(MDBX_env **penv) {
env->max_readers = DEFAULT_READERS; env->max_readers = DEFAULT_READERS;
env->max_dbi = env->n_dbi = CORE_DBS; env->max_dbi = env->n_dbi = CORE_DBS;
env->lazy_fd = env->dsync_fd = env->fd4meta = env->lck_mmap.fd = env->lazy_fd = env->dsync_fd = env->fd4meta = env->lck_mmap.fd = INVALID_HANDLE_VALUE;
INVALID_HANDLE_VALUE;
env->stuck_meta = -1; env->stuck_meta = -1;
env_options_init(env); env_options_init(env);
env_setup_pagesize(env, (globals.sys_pagesize < MDBX_MAX_PAGESIZE) env_setup_pagesize(env, (globals.sys_pagesize < MDBX_MAX_PAGESIZE) ? globals.sys_pagesize : MDBX_MAX_PAGESIZE);
? globals.sys_pagesize
: MDBX_MAX_PAGESIZE);
int rc = osal_fastmutex_init(&env->dbi_lock); int rc = osal_fastmutex_init(&env->dbi_lock);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
@ -318,8 +296,7 @@ __cold int mdbx_env_turn_for_recovery(MDBX_env *env, unsigned target) {
return LOG_IFERR(meta_override(env, target, new_txnid, target_meta)); return LOG_IFERR(meta_override(env, target, new_txnid, target_meta));
} }
__cold int mdbx_env_open_for_recovery(MDBX_env *env, const char *pathname, __cold int mdbx_env_open_for_recovery(MDBX_env *env, const char *pathname, unsigned target_meta, bool writeable) {
unsigned target_meta, bool writeable) {
#if defined(_WIN32) || defined(_WIN64) #if defined(_WIN32) || defined(_WIN64)
wchar_t *pathnameW = nullptr; wchar_t *pathnameW = nullptr;
int rc = osal_mb2w(pathname, &pathnameW); int rc = osal_mb2w(pathname, &pathnameW);
@ -330,8 +307,7 @@ __cold int mdbx_env_open_for_recovery(MDBX_env *env, const char *pathname,
return LOG_IFERR(rc); return LOG_IFERR(rc);
} }
__cold int mdbx_env_open_for_recoveryW(MDBX_env *env, const wchar_t *pathname, __cold int mdbx_env_open_for_recoveryW(MDBX_env *env, const wchar_t *pathname, unsigned target_meta, bool writeable) {
unsigned target_meta, bool writeable) {
#endif /* Windows */ #endif /* Windows */
if (unlikely(target_meta >= NUM_METAS)) if (unlikely(target_meta >= NUM_METAS))
@ -349,8 +325,7 @@ __cold int mdbx_env_open_for_recoveryW(MDBX_env *env, const wchar_t *pathname,
#else #else
mdbx_env_open mdbx_env_open
#endif /* Windows */ #endif /* Windows */
(env, pathname, writeable ? MDBX_EXCLUSIVE : MDBX_EXCLUSIVE | MDBX_RDONLY, (env, pathname, writeable ? MDBX_EXCLUSIVE : MDBX_EXCLUSIVE | MDBX_RDONLY, 0);
0);
} }
__cold int mdbx_env_delete(const char *pathname, MDBX_env_delete_mode_t mode) { __cold int mdbx_env_delete(const char *pathname, MDBX_env_delete_mode_t mode) {
@ -364,8 +339,7 @@ __cold int mdbx_env_delete(const char *pathname, MDBX_env_delete_mode_t mode) {
return LOG_IFERR(rc); return LOG_IFERR(rc);
} }
__cold int mdbx_env_deleteW(const wchar_t *pathname, __cold int mdbx_env_deleteW(const wchar_t *pathname, MDBX_env_delete_mode_t mode) {
MDBX_env_delete_mode_t mode) {
#endif /* Windows */ #endif /* Windows */
switch (mode) { switch (mode) {
@ -383,22 +357,18 @@ __cold int mdbx_env_deleteW(const wchar_t *pathname,
MDBX_env dummy_env_silo, *const dummy_env = &dummy_env_silo; MDBX_env dummy_env_silo, *const dummy_env = &dummy_env_silo;
#endif #endif
memset(dummy_env, 0, sizeof(*dummy_env)); memset(dummy_env, 0, sizeof(*dummy_env));
dummy_env->flags = dummy_env->flags = (mode == MDBX_ENV_ENSURE_UNUSED) ? MDBX_EXCLUSIVE : MDBX_ENV_DEFAULTS;
(mode == MDBX_ENV_ENSURE_UNUSED) ? MDBX_EXCLUSIVE : MDBX_ENV_DEFAULTS;
dummy_env->ps = (unsigned)mdbx_default_pagesize(); dummy_env->ps = (unsigned)mdbx_default_pagesize();
STATIC_ASSERT(sizeof(dummy_env->flags) == sizeof(MDBX_env_flags_t)); STATIC_ASSERT(sizeof(dummy_env->flags) == sizeof(MDBX_env_flags_t));
int rc = MDBX_RESULT_TRUE, err = env_handle_pathname(dummy_env, pathname, 0); int rc = MDBX_RESULT_TRUE, err = env_handle_pathname(dummy_env, pathname, 0);
if (likely(err == MDBX_SUCCESS)) { if (likely(err == MDBX_SUCCESS)) {
mdbx_filehandle_t clk_handle = INVALID_HANDLE_VALUE, mdbx_filehandle_t clk_handle = INVALID_HANDLE_VALUE, dxb_handle = INVALID_HANDLE_VALUE;
dxb_handle = INVALID_HANDLE_VALUE;
if (mode > MDBX_ENV_JUST_DELETE) { if (mode > MDBX_ENV_JUST_DELETE) {
err = osal_openfile(MDBX_OPEN_DELETE, dummy_env, dummy_env->pathname.dxb, err = osal_openfile(MDBX_OPEN_DELETE, dummy_env, dummy_env->pathname.dxb, &dxb_handle, 0);
&dxb_handle, 0);
err = (err == MDBX_ENOFILE) ? MDBX_SUCCESS : err; err = (err == MDBX_ENOFILE) ? MDBX_SUCCESS : err;
if (err == MDBX_SUCCESS) { if (err == MDBX_SUCCESS) {
err = osal_openfile(MDBX_OPEN_DELETE, dummy_env, err = osal_openfile(MDBX_OPEN_DELETE, dummy_env, dummy_env->pathname.lck, &clk_handle, 0);
dummy_env->pathname.lck, &clk_handle, 0);
err = (err == MDBX_ENOFILE) ? MDBX_SUCCESS : err; err = (err == MDBX_ENOFILE) ? MDBX_SUCCESS : err;
} }
if (err == MDBX_SUCCESS && clk_handle != INVALID_HANDLE_VALUE) if (err == MDBX_SUCCESS && clk_handle != INVALID_HANDLE_VALUE)
@ -425,8 +395,7 @@ __cold int mdbx_env_deleteW(const wchar_t *pathname,
if (err == MDBX_SUCCESS && !(dummy_env->flags & MDBX_NOSUBDIR) && if (err == MDBX_SUCCESS && !(dummy_env->flags & MDBX_NOSUBDIR) &&
(/* pathname != "." */ pathname[0] != '.' || pathname[1] != 0) && (/* pathname != "." */ pathname[0] != '.' || pathname[1] != 0) &&
(/* pathname != ".." */ pathname[0] != '.' || pathname[1] != '.' || (/* pathname != ".." */ pathname[0] != '.' || pathname[1] != '.' || pathname[2] != 0)) {
pathname[2] != 0)) {
err = osal_removedirectory(pathname); err = osal_removedirectory(pathname);
if (err == MDBX_SUCCESS) if (err == MDBX_SUCCESS)
rc = MDBX_SUCCESS; rc = MDBX_SUCCESS;
@ -445,8 +414,7 @@ __cold int mdbx_env_deleteW(const wchar_t *pathname,
return LOG_IFERR((err == MDBX_SUCCESS) ? rc : err); return LOG_IFERR((err == MDBX_SUCCESS) ? rc : err);
} }
__cold int mdbx_env_open(MDBX_env *env, const char *pathname, __cold int mdbx_env_open(MDBX_env *env, const char *pathname, MDBX_env_flags_t flags, mdbx_mode_t mode) {
MDBX_env_flags_t flags, mdbx_mode_t mode) {
#if defined(_WIN32) || defined(_WIN64) #if defined(_WIN32) || defined(_WIN64)
wchar_t *pathnameW = nullptr; wchar_t *pathnameW = nullptr;
int rc = osal_mb2w(pathname, &pathnameW); int rc = osal_mb2w(pathname, &pathnameW);
@ -460,8 +428,7 @@ __cold int mdbx_env_open(MDBX_env *env, const char *pathname,
return LOG_IFERR(rc); return LOG_IFERR(rc);
} }
__cold int mdbx_env_openW(MDBX_env *env, const wchar_t *pathname, __cold int mdbx_env_openW(MDBX_env *env, const wchar_t *pathname, MDBX_env_flags_t flags, mdbx_mode_t mode) {
MDBX_env_flags_t flags, mdbx_mode_t mode) {
#endif /* Windows */ #endif /* Windows */
int rc = check_env(env, false); int rc = check_env(env, false);
@ -471,8 +438,7 @@ __cold int mdbx_env_openW(MDBX_env *env, const wchar_t *pathname,
if (unlikely(flags & ~ENV_USABLE_FLAGS)) if (unlikely(flags & ~ENV_USABLE_FLAGS))
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
if (unlikely(env->lazy_fd != INVALID_HANDLE_VALUE || if (unlikely(env->lazy_fd != INVALID_HANDLE_VALUE || (env->flags & ENV_ACTIVE) != 0 || env->dxb_mmap.base))
(env->flags & ENV_ACTIVE) != 0 || env->dxb_mmap.base))
return LOG_IFERR(MDBX_EPERM); return LOG_IFERR(MDBX_EPERM);
/* Pickup previously mdbx_env_set_flags(), /* Pickup previously mdbx_env_set_flags(),
@ -482,9 +448,8 @@ __cold int mdbx_env_openW(MDBX_env *env, const wchar_t *pathname,
if (flags & MDBX_RDONLY) { if (flags & MDBX_RDONLY) {
/* Silently ignore irrelevant flags when we're only getting read access */ /* Silently ignore irrelevant flags when we're only getting read access */
flags &= ~(MDBX_WRITEMAP | DEPRECATED_MAPASYNC | MDBX_SAFE_NOSYNC | flags &= ~(MDBX_WRITEMAP | DEPRECATED_MAPASYNC | MDBX_SAFE_NOSYNC | MDBX_NOMETASYNC | DEPRECATED_COALESCE |
MDBX_NOMETASYNC | DEPRECATED_COALESCE | MDBX_LIFORECLAIM | MDBX_LIFORECLAIM | MDBX_NOMEMINIT | MDBX_ACCEDE);
MDBX_NOMEMINIT | MDBX_ACCEDE);
mode = 0; mode = 0;
} else { } else {
#if MDBX_MMAP_INCOHERENT_FILE_WRITE #if MDBX_MMAP_INCOHERENT_FILE_WRITE
@ -520,16 +485,14 @@ __cold int mdbx_env_openW(MDBX_env *env, const wchar_t *pathname,
MDBX_txn *txn = nullptr; MDBX_txn *txn = nullptr;
const intptr_t bitmap_bytes = const intptr_t bitmap_bytes =
#if MDBX_ENABLE_DBI_SPARSE #if MDBX_ENABLE_DBI_SPARSE
ceil_powerof2(env->max_dbi, CHAR_BIT * sizeof(txn->dbi_sparse[0])) / ceil_powerof2(env->max_dbi, CHAR_BIT * sizeof(txn->dbi_sparse[0])) / CHAR_BIT;
CHAR_BIT;
#else #else
0; 0;
#endif /* MDBX_ENABLE_DBI_SPARSE */ #endif /* MDBX_ENABLE_DBI_SPARSE */
const size_t base = sizeof(MDBX_txn) + sizeof(cursor_couple_t); const size_t base = sizeof(MDBX_txn) + sizeof(cursor_couple_t);
const size_t size = const size_t size = base + bitmap_bytes +
base + bitmap_bytes + env->max_dbi * (sizeof(txn->dbs[0]) + sizeof(txn->cursors[0]) + sizeof(txn->dbi_seqs[0]) +
env->max_dbi * (sizeof(txn->dbs[0]) + sizeof(txn->cursors[0]) + sizeof(txn->dbi_state[0]));
sizeof(txn->dbi_seqs[0]) + sizeof(txn->dbi_state[0]));
txn = osal_calloc(1, size); txn = osal_calloc(1, size);
if (unlikely(!txn)) { if (unlikely(!txn)) {
@ -538,10 +501,8 @@ __cold int mdbx_env_openW(MDBX_env *env, const wchar_t *pathname,
} }
txn->dbs = ptr_disp(txn, base); txn->dbs = ptr_disp(txn, base);
txn->cursors = ptr_disp(txn->dbs, env->max_dbi * sizeof(txn->dbs[0])); txn->cursors = ptr_disp(txn->dbs, env->max_dbi * sizeof(txn->dbs[0]));
txn->dbi_seqs = txn->dbi_seqs = ptr_disp(txn->cursors, env->max_dbi * sizeof(txn->cursors[0]));
ptr_disp(txn->cursors, env->max_dbi * sizeof(txn->cursors[0])); txn->dbi_state = ptr_disp(txn, size - env->max_dbi * sizeof(txn->dbi_state[0]));
txn->dbi_state =
ptr_disp(txn, size - env->max_dbi * sizeof(txn->dbi_state[0]));
#if MDBX_ENABLE_DBI_SPARSE #if MDBX_ENABLE_DBI_SPARSE
txn->dbi_sparse = ptr_disp(txn->dbi_state, -bitmap_bytes); txn->dbi_sparse = ptr_disp(txn->dbi_state, -bitmap_bytes);
#endif /* MDBX_ENABLE_DBI_SPARSE */ #endif /* MDBX_ENABLE_DBI_SPARSE */
@ -566,10 +527,9 @@ __cold int mdbx_env_openW(MDBX_env *env, const wchar_t *pathname,
const meta_ptr_t head = meta_recent(env, &troika); const meta_ptr_t head = meta_recent(env, &troika);
const tree_t *db = &head.ptr_c->trees.main; const tree_t *db = &head.ptr_c->trees.main;
DEBUG("opened database version %u, pagesize %u", DEBUG("opened database version %u, pagesize %u", (uint8_t)unaligned_peek_u64(4, head.ptr_c->magic_and_version),
(uint8_t)unaligned_peek_u64(4, head.ptr_c->magic_and_version), env->ps); env->ps);
DEBUG("using meta page %" PRIaPGNO ", txn %" PRIaTXN, DEBUG("using meta page %" PRIaPGNO ", txn %" PRIaTXN, data_page(head.ptr_c)->pgno, head.txnid);
data_page(head.ptr_c)->pgno, head.txnid);
DEBUG("depth: %u", db->height); DEBUG("depth: %u", db->height);
DEBUG("entries: %" PRIu64, db->items); DEBUG("entries: %" PRIu64, db->items);
DEBUG("branch pages: %" PRIaPGNO, db->branch_pages); DEBUG("branch pages: %" PRIaPGNO, db->branch_pages);
@ -651,8 +611,7 @@ __cold int mdbx_env_close_ex(MDBX_env *env, bool dont_sync) {
env->flags |= ENV_FATAL_ERROR; env->flags |= ENV_FATAL_ERROR;
#endif /* MDBX_ENV_CHECKPID */ #endif /* MDBX_ENV_CHECKPID */
if (env->dxb_mmap.base && if (env->dxb_mmap.base && (env->flags & (MDBX_RDONLY | ENV_FATAL_ERROR)) == 0 && env->basal_txn) {
(env->flags & (MDBX_RDONLY | ENV_FATAL_ERROR)) == 0 && env->basal_txn) {
if (env->basal_txn->owner && env->basal_txn->owner != osal_thread_self()) if (env->basal_txn->owner && env->basal_txn->owner != osal_thread_self())
return LOG_IFERR(MDBX_BUSY); return LOG_IFERR(MDBX_BUSY);
} else } else
@ -675,8 +634,8 @@ __cold int mdbx_env_close_ex(MDBX_env *env, bool dont_sync) {
rc = errno; rc = errno;
else if (st.st_nlink > 0 /* don't sync deleted files */) { else if (st.st_nlink > 0 /* don't sync deleted files */) {
rc = env_sync(env, true, true); rc = env_sync(env, true, true);
rc = (rc == MDBX_BUSY || rc == EAGAIN || rc == EACCES || rc == EBUSY || rc = (rc == MDBX_BUSY || rc == EAGAIN || rc == EACCES || rc == EBUSY || rc == EWOULDBLOCK ||
rc == EWOULDBLOCK || rc == MDBX_RESULT_TRUE) rc == MDBX_RESULT_TRUE)
? MDBX_SUCCESS ? MDBX_SUCCESS
: rc; : rc;
} }
@ -717,8 +676,7 @@ __cold int mdbx_env_close_ex(MDBX_env *env, bool dont_sync) {
/*----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*/
static int env_info_snap(const MDBX_env *env, const MDBX_txn *txn, static int env_info_snap(const MDBX_env *env, const MDBX_txn *txn, MDBX_envinfo *out, const size_t bytes,
MDBX_envinfo *out, const size_t bytes,
troika_t *const troika) { troika_t *const troika) {
const size_t size_before_bootid = offsetof(MDBX_envinfo, mi_bootid); const size_t size_before_bootid = offsetof(MDBX_envinfo, mi_bootid);
const size_t size_before_pgop_stat = offsetof(MDBX_envinfo, mi_pgop_stat); const size_t size_before_pgop_stat = offsetof(MDBX_envinfo, mi_pgop_stat);
@ -752,8 +710,7 @@ static int env_info_snap(const MDBX_env *env, const MDBX_txn *txn,
#endif #endif
} }
*troika = *troika = (txn && !(txn->flags & MDBX_TXN_RDONLY)) ? txn->tw.troika : meta_tap(env);
(txn && !(txn->flags & MDBX_TXN_RDONLY)) ? txn->tw.troika : meta_tap(env);
const meta_ptr_t head = meta_recent(env, troika); const meta_ptr_t head = meta_recent(env, troika);
const meta_t *const meta0 = METAPAGE(env, 0); const meta_t *const meta0 = METAPAGE(env, 0);
const meta_t *const meta1 = METAPAGE(env, 1); const meta_t *const meta1 = METAPAGE(env, 1);
@ -780,9 +737,7 @@ static int env_info_snap(const MDBX_env *env, const MDBX_txn *txn,
out->mi_last_pgno = txn->geo.first_unallocated - 1; out->mi_last_pgno = txn->geo.first_unallocated - 1;
out->mi_geo.current = pgno2bytes(env, txn->geo.end_pgno); out->mi_geo.current = pgno2bytes(env, txn->geo.end_pgno);
const txnid_t wanna_meta_txnid = (txn->flags & MDBX_TXN_RDONLY) const txnid_t wanna_meta_txnid = (txn->flags & MDBX_TXN_RDONLY) ? txn->txnid : txn->txnid - xMDBX_TXNID_STEP;
? txn->txnid
: txn->txnid - xMDBX_TXNID_STEP;
txn_meta = (out->mi_meta_txnid[0] == wanna_meta_txnid) ? meta0 : txn_meta; txn_meta = (out->mi_meta_txnid[0] == wanna_meta_txnid) ? meta0 : txn_meta;
txn_meta = (out->mi_meta_txnid[1] == wanna_meta_txnid) ? meta1 : txn_meta; txn_meta = (out->mi_meta_txnid[1] == wanna_meta_txnid) ? meta1 : txn_meta;
txn_meta = (out->mi_meta_txnid[2] == wanna_meta_txnid) ? meta2 : txn_meta; txn_meta = (out->mi_meta_txnid[2] == wanna_meta_txnid) ? meta2 : txn_meta;
@ -795,30 +750,23 @@ static int env_info_snap(const MDBX_env *env, const MDBX_txn *txn,
const lck_t *const lck = env->lck; const lck_t *const lck = env->lck;
out->mi_maxreaders = env->max_readers; out->mi_maxreaders = env->max_readers;
out->mi_numreaders = env->lck_mmap.lck out->mi_numreaders = env->lck_mmap.lck ? atomic_load32(&lck->rdt_length, mo_Relaxed) : INT32_MAX;
? atomic_load32(&lck->rdt_length, mo_Relaxed)
: INT32_MAX;
out->mi_dxb_pagesize = env->ps; out->mi_dxb_pagesize = env->ps;
out->mi_sys_pagesize = globals.sys_pagesize; out->mi_sys_pagesize = globals.sys_pagesize;
if (likely(bytes > size_before_bootid)) { if (likely(bytes > size_before_bootid)) {
const uint64_t unsynced_pages = const uint64_t unsynced_pages =
atomic_load64(&lck->unsynced_pages, mo_Relaxed) + atomic_load64(&lck->unsynced_pages, mo_Relaxed) +
((uint32_t)out->mi_recent_txnid != ((uint32_t)out->mi_recent_txnid != atomic_load32(&lck->meta_sync_txnid, mo_Relaxed));
atomic_load32(&lck->meta_sync_txnid, mo_Relaxed));
out->mi_unsync_volume = pgno2bytes(env, (size_t)unsynced_pages); out->mi_unsync_volume = pgno2bytes(env, (size_t)unsynced_pages);
const uint64_t monotime_now = osal_monotime(); const uint64_t monotime_now = osal_monotime();
uint64_t ts = atomic_load64(&lck->eoos_timestamp, mo_Relaxed); uint64_t ts = atomic_load64(&lck->eoos_timestamp, mo_Relaxed);
out->mi_since_sync_seconds16dot16 = out->mi_since_sync_seconds16dot16 = ts ? osal_monotime_to_16dot16_noUnderflow(monotime_now - ts) : 0;
ts ? osal_monotime_to_16dot16_noUnderflow(monotime_now - ts) : 0;
ts = atomic_load64(&lck->readers_check_timestamp, mo_Relaxed); ts = atomic_load64(&lck->readers_check_timestamp, mo_Relaxed);
out->mi_since_reader_check_seconds16dot16 = out->mi_since_reader_check_seconds16dot16 = ts ? osal_monotime_to_16dot16_noUnderflow(monotime_now - ts) : 0;
ts ? osal_monotime_to_16dot16_noUnderflow(monotime_now - ts) : 0; out->mi_autosync_threshold = pgno2bytes(env, atomic_load32(&lck->autosync_threshold, mo_Relaxed));
out->mi_autosync_threshold =
pgno2bytes(env, atomic_load32(&lck->autosync_threshold, mo_Relaxed));
out->mi_autosync_period_seconds16dot16 = out->mi_autosync_period_seconds16dot16 =
osal_monotime_to_16dot16_noUnderflow( osal_monotime_to_16dot16_noUnderflow(atomic_load64(&lck->autosync_period, mo_Relaxed));
atomic_load64(&lck->autosync_period, mo_Relaxed));
out->mi_bootid.current.x = globals.bootid.x; out->mi_bootid.current.x = globals.bootid.x;
out->mi_bootid.current.y = globals.bootid.y; out->mi_bootid.current.y = globals.bootid.y;
out->mi_mode = env->lck_mmap.lck ? lck->envmode.weak : env->flags; out->mi_mode = env->lck_mmap.lck ? lck->envmode.weak : env->flags;
@ -834,8 +782,7 @@ static int env_info_snap(const MDBX_env *env, const MDBX_txn *txn,
out->mi_pgop_stat.spill = atomic_load64(&lck->pgops.spill, mo_Relaxed); out->mi_pgop_stat.spill = atomic_load64(&lck->pgops.spill, mo_Relaxed);
out->mi_pgop_stat.unspill = atomic_load64(&lck->pgops.unspill, mo_Relaxed); out->mi_pgop_stat.unspill = atomic_load64(&lck->pgops.unspill, mo_Relaxed);
out->mi_pgop_stat.wops = atomic_load64(&lck->pgops.wops, mo_Relaxed); out->mi_pgop_stat.wops = atomic_load64(&lck->pgops.wops, mo_Relaxed);
out->mi_pgop_stat.prefault = out->mi_pgop_stat.prefault = atomic_load64(&lck->pgops.prefault, mo_Relaxed);
atomic_load64(&lck->pgops.prefault, mo_Relaxed);
out->mi_pgop_stat.mincore = atomic_load64(&lck->pgops.mincore, mo_Relaxed); out->mi_pgop_stat.mincore = atomic_load64(&lck->pgops.mincore, mo_Relaxed);
out->mi_pgop_stat.msync = atomic_load64(&lck->pgops.msync, mo_Relaxed); out->mi_pgop_stat.msync = atomic_load64(&lck->pgops.msync, mo_Relaxed);
out->mi_pgop_stat.fsync = atomic_load64(&lck->pgops.fsync, mo_Relaxed); out->mi_pgop_stat.fsync = atomic_load64(&lck->pgops.fsync, mo_Relaxed);
@ -865,8 +812,7 @@ static int env_info_snap(const MDBX_env *env, const MDBX_txn *txn,
return MDBX_SUCCESS; return MDBX_SUCCESS;
} }
__cold int env_info(const MDBX_env *env, const MDBX_txn *txn, MDBX_envinfo *out, __cold int env_info(const MDBX_env *env, const MDBX_txn *txn, MDBX_envinfo *out, size_t bytes, troika_t *troika) {
size_t bytes, troika_t *troika) {
MDBX_envinfo snap; MDBX_envinfo snap;
int rc = env_info_snap(env, txn, &snap, sizeof(snap), troika); int rc = env_info_snap(env, txn, &snap, sizeof(snap), troika);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
@ -878,24 +824,22 @@ __cold int env_info(const MDBX_env *env, const MDBX_txn *txn, MDBX_envinfo *out,
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
return rc; return rc;
snap.mi_since_sync_seconds16dot16 = out->mi_since_sync_seconds16dot16; snap.mi_since_sync_seconds16dot16 = out->mi_since_sync_seconds16dot16;
snap.mi_since_reader_check_seconds16dot16 = snap.mi_since_reader_check_seconds16dot16 = out->mi_since_reader_check_seconds16dot16;
out->mi_since_reader_check_seconds16dot16;
if (likely(memcmp(&snap, out, bytes) == 0)) if (likely(memcmp(&snap, out, bytes) == 0))
return MDBX_SUCCESS; return MDBX_SUCCESS;
memcpy(&snap, out, bytes); memcpy(&snap, out, bytes);
} }
} }
__cold int mdbx_env_info_ex(const MDBX_env *env, const MDBX_txn *txn, __cold int mdbx_env_info_ex(const MDBX_env *env, const MDBX_txn *txn, MDBX_envinfo *arg, size_t bytes) {
MDBX_envinfo *arg, size_t bytes) {
if (unlikely((env == nullptr && txn == nullptr) || arg == nullptr)) if (unlikely((env == nullptr && txn == nullptr) || arg == nullptr))
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
const size_t size_before_bootid = offsetof(MDBX_envinfo, mi_bootid); const size_t size_before_bootid = offsetof(MDBX_envinfo, mi_bootid);
const size_t size_before_pgop_stat = offsetof(MDBX_envinfo, mi_pgop_stat); const size_t size_before_pgop_stat = offsetof(MDBX_envinfo, mi_pgop_stat);
const size_t size_before_dxbid = offsetof(MDBX_envinfo, mi_dxbid); const size_t size_before_dxbid = offsetof(MDBX_envinfo, mi_dxbid);
if (unlikely(bytes != sizeof(MDBX_envinfo)) && bytes != size_before_bootid && if (unlikely(bytes != sizeof(MDBX_envinfo)) && bytes != size_before_bootid && bytes != size_before_pgop_stat &&
bytes != size_before_pgop_stat && bytes != size_before_dxbid) bytes != size_before_dxbid)
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
if (txn) { if (txn) {
@ -917,8 +861,7 @@ __cold int mdbx_env_info_ex(const MDBX_env *env, const MDBX_txn *txn,
return LOG_IFERR(env_info(env, txn, arg, bytes, &troika)); return LOG_IFERR(env_info(env, txn, arg, bytes, &troika));
} }
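
These hunks only re-wrap declarations to the 120-column limit, but they do show the full signature of mdbx_env_info_ex() and the accepted `bytes` values. The sketch below is illustrative only (not part of the commit); the helper name is invented and an already-opened environment is assumed.

  #include <inttypes.h>
  #include <stdio.h>
  #include "mdbx.h"

  /* Illustrative only: query environment info, honouring the size checks shown
   * above (bytes must be sizeof(MDBX_envinfo) or one of the size_before_* prefixes). */
  static void print_env_info(const MDBX_env *env) {
    MDBX_envinfo info;
    int rc = mdbx_env_info_ex(env, /*txn=*/NULL, &info, sizeof(info));
    if (rc != MDBX_SUCCESS) {
      fprintf(stderr, "mdbx_env_info_ex: %s\n", mdbx_strerror(rc));
      return;
    }
    printf("pagesize %u, readers %u/%u, recent txnid %" PRIu64 "\n", info.mi_dxb_pagesize, info.mi_numreaders,
           info.mi_maxreaders, info.mi_recent_txnid);
  }
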
__cold int mdbx_preopen_snapinfo(const char *pathname, MDBX_envinfo *out, __cold int mdbx_preopen_snapinfo(const char *pathname, MDBX_envinfo *out, size_t bytes) {
size_t bytes) {
#if defined(_WIN32) || defined(_WIN64) #if defined(_WIN32) || defined(_WIN64)
wchar_t *pathnameW = nullptr; wchar_t *pathnameW = nullptr;
int rc = osal_mb2w(pathname, &pathnameW); int rc = osal_mb2w(pathname, &pathnameW);
@ -929,8 +872,7 @@ __cold int mdbx_preopen_snapinfo(const char *pathname, MDBX_envinfo *out,
return LOG_IFERR(rc); return LOG_IFERR(rc);
} }
__cold int mdbx_preopen_snapinfoW(const wchar_t *pathname, MDBX_envinfo *out, __cold int mdbx_preopen_snapinfoW(const wchar_t *pathname, MDBX_envinfo *out, size_t bytes) {
size_t bytes) {
#endif /* Windows */ #endif /* Windows */
if (unlikely(!out)) if (unlikely(!out))
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
@ -938,8 +880,8 @@ __cold int mdbx_preopen_snapinfoW(const wchar_t *pathname, MDBX_envinfo *out,
const size_t size_before_bootid = offsetof(MDBX_envinfo, mi_bootid); const size_t size_before_bootid = offsetof(MDBX_envinfo, mi_bootid);
const size_t size_before_pgop_stat = offsetof(MDBX_envinfo, mi_pgop_stat); const size_t size_before_pgop_stat = offsetof(MDBX_envinfo, mi_pgop_stat);
const size_t size_before_dxbid = offsetof(MDBX_envinfo, mi_dxbid); const size_t size_before_dxbid = offsetof(MDBX_envinfo, mi_dxbid);
if (unlikely(bytes != sizeof(MDBX_envinfo)) && bytes != size_before_bootid && if (unlikely(bytes != sizeof(MDBX_envinfo)) && bytes != size_before_bootid && bytes != size_before_pgop_stat &&
bytes != size_before_pgop_stat && bytes != size_before_dxbid) bytes != size_before_dxbid)
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
memset(out, 0, bytes); memset(out, 0, bytes);
@ -951,8 +893,7 @@ __cold int mdbx_preopen_snapinfoW(const wchar_t *pathname, MDBX_envinfo *out,
MDBX_env env; MDBX_env env;
memset(&env, 0, sizeof(env)); memset(&env, 0, sizeof(env));
env.pid = osal_getpid(); env.pid = osal_getpid();
if (unlikely(!is_powerof2(globals.sys_pagesize) || if (unlikely(!is_powerof2(globals.sys_pagesize) || globals.sys_pagesize < MDBX_MIN_PAGESIZE)) {
globals.sys_pagesize < MDBX_MIN_PAGESIZE)) {
ERROR("unsuitable system pagesize %u", globals.sys_pagesize); ERROR("unsuitable system pagesize %u", globals.sys_pagesize);
return LOG_IFERR(MDBX_INCOMPATIBLE); return LOG_IFERR(MDBX_INCOMPATIBLE);
} }
@ -972,8 +913,7 @@ __cold int mdbx_preopen_snapinfoW(const wchar_t *pathname, MDBX_envinfo *out,
int rc = env_handle_pathname(&env, pathname, 0); int rc = env_handle_pathname(&env, pathname, 0);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
goto bailout; goto bailout;
rc = osal_openfile(MDBX_OPEN_DXB_READ, &env, env.pathname.dxb, &env.lazy_fd, rc = osal_openfile(MDBX_OPEN_DXB_READ, &env, env.pathname.dxb, &env.lazy_fd, 0);
0);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
goto bailout; goto bailout;
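
The mdbx_preopen_snapinfo()/mdbx_preopen_snapinfoW() declarations now fit their parameter lists on one line. A hedged sketch of the intended use, probing a database file before opening it; the path handling, helper name, and the assumption that the printed field is populated by the snapshot are all illustrative, not taken from the commit.

  #include <stdio.h>
  #include "mdbx.h"

  /* Illustrative only: read a pre-open snapshot of the file's MDBX_envinfo. */
  static int probe_db(const char *pathname) {
    MDBX_envinfo info;
    int rc = mdbx_preopen_snapinfo(pathname, &info, sizeof(info));
    if (rc != MDBX_SUCCESS)
      return rc; /* bad size argument, unreadable file, etc. */
    printf("%s: on-disk pagesize %u\n", pathname, info.mi_dxb_pagesize);
    return MDBX_SUCCESS;
  }
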
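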
@ -1006,10 +946,8 @@ bailout:
/*----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*/
__cold int mdbx_env_set_geometry(MDBX_env *env, intptr_t size_lower, __cold int mdbx_env_set_geometry(MDBX_env *env, intptr_t size_lower, intptr_t size_now, intptr_t size_upper,
intptr_t size_now, intptr_t size_upper, intptr_t growth_step, intptr_t shrink_threshold, intptr_t pagesize) {
intptr_t growth_step,
intptr_t shrink_threshold, intptr_t pagesize) {
int rc = check_env(env, false); int rc = check_env(env, false);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
return LOG_IFERR(rc); return LOG_IFERR(rc);
@ -1038,8 +976,7 @@ __cold int mdbx_env_set_geometry(MDBX_env *env, intptr_t size_lower,
should_unlock = true; should_unlock = true;
env->basal_txn->tw.troika = meta_tap(env); env->basal_txn->tw.troika = meta_tap(env);
eASSERT(env, !env->txn && !env->basal_txn->nested); eASSERT(env, !env->txn && !env->basal_txn->nested);
env->basal_txn->txnid = env->basal_txn->txnid = env->basal_txn->tw.troika.txnid[env->basal_txn->tw.troika.recent];
env->basal_txn->tw.troika.txnid[env->basal_txn->tw.troika.recent];
txn_snapshot_oldest(env->basal_txn); txn_snapshot_oldest(env->basal_txn);
} }
@ -1047,9 +984,7 @@ __cold int mdbx_env_set_geometry(MDBX_env *env, intptr_t size_lower,
if (pagesize <= 0 || pagesize >= INT_MAX) if (pagesize <= 0 || pagesize >= INT_MAX)
pagesize = env->ps; pagesize = env->ps;
const geo_t *const geo = const geo_t *const geo =
inside_txn inside_txn ? &env->txn->geo : &meta_recent(env, &env->basal_txn->tw.troika).ptr_c->geometry;
? &env->txn->geo
: &meta_recent(env, &env->basal_txn->tw.troika).ptr_c->geometry;
if (size_lower < 0) if (size_lower < 0)
size_lower = pgno2bytes(env, geo->lower); size_lower = pgno2bytes(env, geo->lower);
if (size_now < 0) if (size_now < 0)
@ -1065,8 +1000,7 @@ __cold int mdbx_env_set_geometry(MDBX_env *env, intptr_t size_lower,
rc = MDBX_EINVAL; rc = MDBX_EINVAL;
goto bailout; goto bailout;
} }
const size_t usedbytes = const size_t usedbytes = pgno2bytes(env, mvcc_snapshot_largest(env, geo->first_unallocated));
pgno2bytes(env, mvcc_snapshot_largest(env, geo->first_unallocated));
if ((size_t)size_upper < usedbytes) { if ((size_t)size_upper < usedbytes) {
rc = MDBX_MAP_FULL; rc = MDBX_MAP_FULL;
goto bailout; goto bailout;
@ -1101,14 +1035,12 @@ __cold int mdbx_env_set_geometry(MDBX_env *env, intptr_t size_lower,
else if (top >= (intptr_t)MAX_MAPSIZE /* maximal */) else if (top >= (intptr_t)MAX_MAPSIZE /* maximal */)
top = MAX_MAPSIZE; top = MAX_MAPSIZE;
while (top > pagesize * (int64_t)(MAX_PAGENO + 1) && while (top > pagesize * (int64_t)(MAX_PAGENO + 1) && pagesize < MDBX_MAX_PAGESIZE)
pagesize < MDBX_MAX_PAGESIZE)
pagesize <<= 1; pagesize <<= 1;
} }
} }
if (pagesize < (intptr_t)MDBX_MIN_PAGESIZE || if (pagesize < (intptr_t)MDBX_MIN_PAGESIZE || pagesize > (intptr_t)MDBX_MAX_PAGESIZE || !is_powerof2(pagesize)) {
pagesize > (intptr_t)MDBX_MAX_PAGESIZE || !is_powerof2(pagesize)) {
rc = MDBX_EINVAL; rc = MDBX_EINVAL;
goto bailout; goto bailout;
} }
@ -1140,13 +1072,10 @@ __cold int mdbx_env_set_geometry(MDBX_env *env, intptr_t size_lower,
size_upper = size_now; size_upper = size_now;
else if (size_now >= reasonable_db_maxsize() / 2) else if (size_now >= reasonable_db_maxsize() / 2)
size_upper = reasonable_db_maxsize(); size_upper = reasonable_db_maxsize();
else if ((size_t)size_now >= MAX_MAPSIZE32 / 2 && else if ((size_t)size_now >= MAX_MAPSIZE32 / 2 && (size_t)size_now <= MAX_MAPSIZE32 / 4 * 3)
(size_t)size_now <= MAX_MAPSIZE32 / 4 * 3)
size_upper = MAX_MAPSIZE32; size_upper = MAX_MAPSIZE32;
else { else {
size_upper = ceil_powerof2(((size_t)size_now < MAX_MAPSIZE / 4) size_upper = ceil_powerof2(((size_t)size_now < MAX_MAPSIZE / 4) ? size_now + size_now : size_now + size_now / 2,
? size_now + size_now
: size_now + size_now / 2,
MEGABYTE * MDBX_WORDBITS * MDBX_WORDBITS / 32); MEGABYTE * MDBX_WORDBITS * MDBX_WORDBITS / 32);
if ((size_t)size_upper > MAX_MAPSIZE) if ((size_t)size_upper > MAX_MAPSIZE)
size_upper = MAX_MAPSIZE; size_upper = MAX_MAPSIZE;
@ -1174,15 +1103,12 @@ __cold int mdbx_env_set_geometry(MDBX_env *env, intptr_t size_lower,
size_now = size_lower; size_now = size_lower;
} }
if (unlikely((size_t)size_upper > MAX_MAPSIZE || if (unlikely((size_t)size_upper > MAX_MAPSIZE || (uint64_t)size_upper / pagesize > MAX_PAGENO + 1)) {
(uint64_t)size_upper / pagesize > MAX_PAGENO + 1)) {
rc = MDBX_TOO_LARGE; rc = MDBX_TOO_LARGE;
goto bailout; goto bailout;
} }
const size_t unit = (globals.sys_pagesize > (size_t)pagesize) const size_t unit = (globals.sys_pagesize > (size_t)pagesize) ? globals.sys_pagesize : (size_t)pagesize;
? globals.sys_pagesize
: (size_t)pagesize;
size_lower = ceil_powerof2(size_lower, unit); size_lower = ceil_powerof2(size_lower, unit);
size_upper = ceil_powerof2(size_upper, unit); size_upper = ceil_powerof2(size_upper, unit);
size_now = ceil_powerof2(size_now, unit); size_now = ceil_powerof2(size_now, unit);
@ -1190,10 +1116,8 @@ __cold int mdbx_env_set_geometry(MDBX_env *env, intptr_t size_lower,
/* LY: pick a suitable size_upper value: /* LY: pick a suitable size_upper value:
* - a multiple of the page size * - a multiple of the page size
* - without violating MAX_MAPSIZE and MAX_PAGENO */ * - without violating MAX_MAPSIZE and MAX_PAGENO */
while (unlikely((size_t)size_upper > MAX_MAPSIZE || while (unlikely((size_t)size_upper > MAX_MAPSIZE || (uint64_t)size_upper / pagesize > MAX_PAGENO + 1)) {
(uint64_t)size_upper / pagesize > MAX_PAGENO + 1)) { if ((size_t)size_upper < unit + MIN_MAPSIZE || (size_t)size_upper < (size_t)pagesize * (MIN_PAGENO + 1)) {
if ((size_t)size_upper < unit + MIN_MAPSIZE ||
(size_t)size_upper < (size_t)pagesize * (MIN_PAGENO + 1)) {
/* paranoia in case of overflow on improbable values */ /* paranoia in case of overflow on improbable values */
rc = MDBX_EINVAL; rc = MDBX_EINVAL;
goto bailout; goto bailout;
@ -1235,10 +1159,8 @@ __cold int mdbx_env_set_geometry(MDBX_env *env, intptr_t size_lower,
env->geo_in_bytes.lower = size_lower; env->geo_in_bytes.lower = size_lower;
env->geo_in_bytes.now = size_now; env->geo_in_bytes.now = size_now;
env->geo_in_bytes.upper = size_upper; env->geo_in_bytes.upper = size_upper;
env->geo_in_bytes.grow = env->geo_in_bytes.grow = pgno2bytes(env, pv2pages(pages2pv(bytes2pgno(env, growth_step))));
pgno2bytes(env, pv2pages(pages2pv(bytes2pgno(env, growth_step)))); env->geo_in_bytes.shrink = pgno2bytes(env, pv2pages(pages2pv(bytes2pgno(env, shrink_threshold))));
env->geo_in_bytes.shrink =
pgno2bytes(env, pv2pages(pages2pv(bytes2pgno(env, shrink_threshold))));
env_options_adjust_defaults(env); env_options_adjust_defaults(env);
ENSURE(env, env->geo_in_bytes.lower >= MIN_MAPSIZE); ENSURE(env, env->geo_in_bytes.lower >= MIN_MAPSIZE);
@ -1290,15 +1212,13 @@ __cold int mdbx_env_set_geometry(MDBX_env *env, intptr_t size_lower,
meta_set_txnid(env, &meta, txnid); meta_set_txnid(env, &meta, txnid);
} }
const geo_t *const current_geo = const geo_t *const current_geo = &(env->txn ? env->txn : env->basal_txn)->geo;
&(env->txn ? env->txn : env->basal_txn)->geo;
/* update env-geo to avoid influences */ /* update env-geo to avoid influences */
env->geo_in_bytes.now = pgno2bytes(env, current_geo->now); env->geo_in_bytes.now = pgno2bytes(env, current_geo->now);
env->geo_in_bytes.lower = pgno2bytes(env, current_geo->lower); env->geo_in_bytes.lower = pgno2bytes(env, current_geo->lower);
env->geo_in_bytes.upper = pgno2bytes(env, current_geo->upper); env->geo_in_bytes.upper = pgno2bytes(env, current_geo->upper);
env->geo_in_bytes.grow = pgno2bytes(env, pv2pages(current_geo->grow_pv)); env->geo_in_bytes.grow = pgno2bytes(env, pv2pages(current_geo->grow_pv));
env->geo_in_bytes.shrink = env->geo_in_bytes.shrink = pgno2bytes(env, pv2pages(current_geo->shrink_pv));
pgno2bytes(env, pv2pages(current_geo->shrink_pv));
geo_t new_geo; geo_t new_geo;
new_geo.lower = bytes2pgno(env, size_lower); new_geo.lower = bytes2pgno(env, size_lower);
@ -1326,8 +1246,7 @@ __cold int mdbx_env_set_geometry(MDBX_env *env, intptr_t size_lower,
#if defined(_WIN32) || defined(_WIN64) #if defined(_WIN32) || defined(_WIN64)
/* Was DB shrinking disabled before and now it will be enabled? */ /* Was DB shrinking disabled before and now it will be enabled? */
if (new_geo.lower < new_geo.upper && new_geo.shrink_pv && if (new_geo.lower < new_geo.upper && new_geo.shrink_pv &&
!(current_geo->lower < current_geo->upper && !(current_geo->lower < current_geo->upper && current_geo->shrink_pv)) {
current_geo->shrink_pv)) {
if (!env->lck_mmap.lck) { if (!env->lck_mmap.lck) {
rc = MDBX_EPERM; rc = MDBX_EPERM;
goto bailout; goto bailout;
@ -1341,9 +1260,7 @@ __cold int mdbx_env_set_geometry(MDBX_env *env, intptr_t size_lower,
/* Check if there are any reading threads that do not use the SRWL */ /* Check if there are any reading threads that do not use the SRWL */
const size_t CurrentTid = GetCurrentThreadId(); const size_t CurrentTid = GetCurrentThreadId();
const reader_slot_t *const begin = env->lck_mmap.lck->rdt; const reader_slot_t *const begin = env->lck_mmap.lck->rdt;
const reader_slot_t *const end = const reader_slot_t *const end = begin + atomic_load32(&env->lck_mmap.lck->rdt_length, mo_AcquireRelease);
begin +
atomic_load32(&env->lck_mmap.lck->rdt_length, mo_AcquireRelease);
for (const reader_slot_t *reader = begin; reader < end; ++reader) { for (const reader_slot_t *reader = begin; reader < end; ++reader) {
if (reader->pid.weak == env->pid && reader->tid.weak != CurrentTid) { if (reader->pid.weak == env->pid && reader->tid.weak != CurrentTid) {
/* At least one thread may not use the SRWL */ /* At least one thread may not use the SRWL */
@ -1358,10 +1275,8 @@ __cold int mdbx_env_set_geometry(MDBX_env *env, intptr_t size_lower,
} }
#endif /* Windows */ #endif /* Windows */
if (new_geo.now != current_geo->now || if (new_geo.now != current_geo->now || new_geo.upper != current_geo->upper) {
new_geo.upper != current_geo->upper) { rc = dxb_resize(env, current_geo->first_unallocated, new_geo.now, new_geo.upper, explicit_resize);
rc = dxb_resize(env, current_geo->first_unallocated, new_geo.now,
new_geo.upper, explicit_resize);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
goto bailout; goto bailout;
} }
@ -1370,13 +1285,10 @@ __cold int mdbx_env_set_geometry(MDBX_env *env, intptr_t size_lower,
env->txn->flags |= MDBX_TXN_DIRTY; env->txn->flags |= MDBX_TXN_DIRTY;
} else { } else {
meta.geometry = new_geo; meta.geometry = new_geo;
rc = rc = dxb_sync_locked(env, env->flags, &meta, &env->basal_txn->tw.troika);
dxb_sync_locked(env, env->flags, &meta, &env->basal_txn->tw.troika);
if (likely(rc == MDBX_SUCCESS)) { if (likely(rc == MDBX_SUCCESS)) {
env->geo_in_bytes.now = env->geo_in_bytes.now = pgno2bytes(env, new_geo.now = meta.geometry.now);
pgno2bytes(env, new_geo.now = meta.geometry.now); env->geo_in_bytes.upper = pgno2bytes(env, new_geo.upper = meta.geometry.upper);
env->geo_in_bytes.upper =
pgno2bytes(env, new_geo.upper = meta.geometry.upper);
} }
} }
} }
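
End of the mdbx_env_set_geometry() hunks. The defaulting branches visible above treat negative parameters as "keep the current value", which the hedged sketch below relies on; the 1 GiB figure and the helper name are arbitrary, not from the commit.

  #include "mdbx.h"

  /* Illustrative only: raise the upper size bound to 1 GiB, keeping the other
   * geometry parameters as-is (negative means "keep", per the code above). */
  static int raise_upper_to_1g(MDBX_env *env) {
    const intptr_t keep = -1;
    return mdbx_env_set_geometry(env, /*size_lower=*/keep, /*size_now=*/keep,
                                 /*size_upper=*/(intptr_t)1024 * 1024 * 1024,
                                 /*growth_step=*/keep, /*shrink_threshold=*/keep,
                                 /*pagesize=*/keep);
  }
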


@ -6,8 +6,7 @@
/*------------------------------------------------------------------------------ /*------------------------------------------------------------------------------
* Readers API */ * Readers API */
__cold int mdbx_reader_list(const MDBX_env *env, MDBX_reader_list_func *func, __cold int mdbx_reader_list(const MDBX_env *env, MDBX_reader_list_func *func, void *ctx) {
void *ctx) {
int rc = check_env(env, true); int rc = check_env(env, true);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
return LOG_IFERR(rc); return LOG_IFERR(rc);
@ -19,8 +18,7 @@ __cold int mdbx_reader_list(const MDBX_env *env, MDBX_reader_list_func *func,
int serial = 0; int serial = 0;
lck_t *const lck = env->lck_mmap.lck; lck_t *const lck = env->lck_mmap.lck;
if (likely(lck)) { if (likely(lck)) {
const size_t snap_nreaders = const size_t snap_nreaders = atomic_load32(&lck->rdt_length, mo_AcquireRelease);
atomic_load32(&lck->rdt_length, mo_AcquireRelease);
for (size_t i = 0; i < snap_nreaders; i++) { for (size_t i = 0; i < snap_nreaders; i++) {
const reader_slot_t *r = lck->rdt + i; const reader_slot_t *r = lck->rdt + i;
retry_reader:; retry_reader:;
@ -29,17 +27,12 @@ __cold int mdbx_reader_list(const MDBX_env *env, MDBX_reader_list_func *func,
continue; continue;
txnid_t txnid = safe64_read(&r->txnid); txnid_t txnid = safe64_read(&r->txnid);
const uint64_t tid = atomic_load64(&r->tid, mo_Relaxed); const uint64_t tid = atomic_load64(&r->tid, mo_Relaxed);
const pgno_t pages_used = const pgno_t pages_used = atomic_load32(&r->snapshot_pages_used, mo_Relaxed);
atomic_load32(&r->snapshot_pages_used, mo_Relaxed); const uint64_t reader_pages_retired = atomic_load64(&r->snapshot_pages_retired, mo_Relaxed);
const uint64_t reader_pages_retired = if (unlikely(txnid != safe64_read(&r->txnid) || pid != atomic_load32(&r->pid, mo_AcquireRelease) ||
atomic_load64(&r->snapshot_pages_retired, mo_Relaxed);
if (unlikely(txnid != safe64_read(&r->txnid) ||
pid != atomic_load32(&r->pid, mo_AcquireRelease) ||
tid != atomic_load64(&r->tid, mo_Relaxed) || tid != atomic_load64(&r->tid, mo_Relaxed) ||
pages_used != pages_used != atomic_load32(&r->snapshot_pages_used, mo_Relaxed) ||
atomic_load32(&r->snapshot_pages_used, mo_Relaxed) || reader_pages_retired != atomic_load64(&r->snapshot_pages_retired, mo_Relaxed)))
reader_pages_retired !=
atomic_load64(&r->snapshot_pages_retired, mo_Relaxed)))
goto retry_reader; goto retry_reader;
eASSERT(env, txnid > 0); eASSERT(env, txnid > 0);
@ -53,22 +46,18 @@ __cold int mdbx_reader_list(const MDBX_env *env, MDBX_reader_list_func *func,
troika_t troika = meta_tap(env); troika_t troika = meta_tap(env);
retry_header:; retry_header:;
const meta_ptr_t head = meta_recent(env, &troika); const meta_ptr_t head = meta_recent(env, &troika);
const uint64_t head_pages_retired = const uint64_t head_pages_retired = unaligned_peek_u64_volatile(4, head.ptr_v->pages_retired);
unaligned_peek_u64_volatile(4, head.ptr_v->pages_retired);
if (unlikely(meta_should_retry(env, &troika) || if (unlikely(meta_should_retry(env, &troika) ||
head_pages_retired != unaligned_peek_u64_volatile( head_pages_retired != unaligned_peek_u64_volatile(4, head.ptr_v->pages_retired)))
4, head.ptr_v->pages_retired)))
goto retry_header; goto retry_header;
lag = (head.txnid - txnid) / xMDBX_TXNID_STEP; lag = (head.txnid - txnid) / xMDBX_TXNID_STEP;
bytes_used = pgno2bytes(env, pages_used); bytes_used = pgno2bytes(env, pages_used);
bytes_retained = (head_pages_retired > reader_pages_retired) bytes_retained = (head_pages_retired > reader_pages_retired)
? pgno2bytes(env, (pgno_t)(head_pages_retired - ? pgno2bytes(env, (pgno_t)(head_pages_retired - reader_pages_retired))
reader_pages_retired))
: 0; : 0;
} }
rc = func(ctx, ++serial, (unsigned)i, pid, (mdbx_tid_t)((intptr_t)tid), rc = func(ctx, ++serial, (unsigned)i, pid, (mdbx_tid_t)((intptr_t)tid), txnid, lag, bytes_used, bytes_retained);
txnid, lag, bytes_used, bytes_retained);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
break; break;
} }
@ -93,8 +82,7 @@ int mdbx_txn_lock(MDBX_env *env, bool dont_wait) {
if (unlikely(env->flags & MDBX_RDONLY)) if (unlikely(env->flags & MDBX_RDONLY))
return LOG_IFERR(MDBX_EACCESS); return LOG_IFERR(MDBX_EACCESS);
if (unlikely(env->basal_txn->owner || if (unlikely(env->basal_txn->owner || (env->basal_txn->flags & MDBX_TXN_FINISHED) == 0))
(env->basal_txn->flags & MDBX_TXN_FINISHED) == 0))
return LOG_IFERR(MDBX_BUSY); return LOG_IFERR(MDBX_BUSY);
return LOG_IFERR(lck_txn_lock(env, dont_wait)); return LOG_IFERR(lck_txn_lock(env, dont_wait));
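
End of the readers-API hunks. The sketch below dumps the reader table; the callback parameter list is an assumption inferred from the func(ctx, ++serial, i, pid, tid, txnid, lag, bytes_used, bytes_retained) call visible above, and the helper names are invented.

  #include <inttypes.h>
  #include <stdio.h>
  #include "mdbx.h"

  /* Illustrative only: print one line per registered reader slot. */
  static int dump_reader(void *ctx, int num, int slot, mdbx_pid_t pid, mdbx_tid_t thread, uint64_t txnid,
                         uint64_t lag, size_t bytes_used, size_t bytes_retained) {
    (void)ctx;
    (void)thread;
    printf("#%d slot %d pid %u txnid %" PRIu64 " lag %" PRIu64 " used %zu retained %zu\n", num, slot, (unsigned)pid,
           txnid, lag, bytes_used, bytes_retained);
    return MDBX_SUCCESS; /* a non-success return stops the iteration, as the break above shows */
  }

  static int dump_readers(const MDBX_env *env) { return mdbx_reader_list(env, dump_reader, NULL); }
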


@ -9,16 +9,14 @@ static inline double key2double(const int64_t key) {
double f; double f;
} casting; } casting;
casting.u = (key < 0) ? key + UINT64_C(0x8000000000000000) casting.u = (key < 0) ? key + UINT64_C(0x8000000000000000) : UINT64_C(0xffffFFFFffffFFFF) - key;
: UINT64_C(0xffffFFFFffffFFFF) - key;
return casting.f; return casting.f;
} }
static inline uint64_t double2key(const double *const ptr) { static inline uint64_t double2key(const double *const ptr) {
STATIC_ASSERT(sizeof(double) == sizeof(int64_t)); STATIC_ASSERT(sizeof(double) == sizeof(int64_t));
const int64_t i = *(const int64_t *)ptr; const int64_t i = *(const int64_t *)ptr;
const uint64_t u = (i < 0) ? UINT64_C(0xffffFFFFffffFFFF) - i const uint64_t u = (i < 0) ? UINT64_C(0xffffFFFFffffFFFF) - i : i + UINT64_C(0x8000000000000000);
: i + UINT64_C(0x8000000000000000);
if (ASSERT_ENABLED()) { if (ASSERT_ENABLED()) {
const double f = key2double(u); const double f = key2double(u);
assert(memcmp(&f, ptr, sizeof(double)) == 0); assert(memcmp(&f, ptr, sizeof(double)) == 0);
@ -32,16 +30,14 @@ static inline float key2float(const int32_t key) {
float f; float f;
} casting; } casting;
casting.u = casting.u = (key < 0) ? key + UINT32_C(0x80000000) : UINT32_C(0xffffFFFF) - key;
(key < 0) ? key + UINT32_C(0x80000000) : UINT32_C(0xffffFFFF) - key;
return casting.f; return casting.f;
} }
static inline uint32_t float2key(const float *const ptr) { static inline uint32_t float2key(const float *const ptr) {
STATIC_ASSERT(sizeof(float) == sizeof(int32_t)); STATIC_ASSERT(sizeof(float) == sizeof(int32_t));
const int32_t i = *(const int32_t *)ptr; const int32_t i = *(const int32_t *)ptr;
const uint32_t u = const uint32_t u = (i < 0) ? UINT32_C(0xffffFFFF) - i : i + UINT32_C(0x80000000);
(i < 0) ? UINT32_C(0xffffFFFF) - i : i + UINT32_C(0x80000000);
if (ASSERT_ENABLED()) { if (ASSERT_ENABLED()) {
const float f = key2float(u); const float f = key2float(u);
assert(memcmp(&f, ptr, sizeof(float)) == 0); assert(memcmp(&f, ptr, sizeof(float)) == 0);
@ -49,21 +45,13 @@ static inline uint32_t float2key(const float *const ptr) {
return u; return u;
} }
uint64_t mdbx_key_from_double(const double ieee754_64bit) { uint64_t mdbx_key_from_double(const double ieee754_64bit) { return double2key(&ieee754_64bit); }
return double2key(&ieee754_64bit);
}
uint64_t mdbx_key_from_ptrdouble(const double *const ieee754_64bit) { uint64_t mdbx_key_from_ptrdouble(const double *const ieee754_64bit) { return double2key(ieee754_64bit); }
return double2key(ieee754_64bit);
}
uint32_t mdbx_key_from_float(const float ieee754_32bit) { uint32_t mdbx_key_from_float(const float ieee754_32bit) { return float2key(&ieee754_32bit); }
return float2key(&ieee754_32bit);
}
uint32_t mdbx_key_from_ptrfloat(const float *const ieee754_32bit) { uint32_t mdbx_key_from_ptrfloat(const float *const ieee754_32bit) { return float2key(ieee754_32bit); }
return float2key(ieee754_32bit);
}
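
The wrappers above are now one-liners. The point of the bias/complement trick inside double2key()/float2key() is order preservation: the resulting unsigned keys compare the same way as the source IEEE-754 values (NaNs aside). A hedged, standalone check of that property (illustration only, not from the commit):

  #include <assert.h>
  #include "mdbx.h"

  /* Illustrative only: for non-NaN doubles, a < b implies key(a) < key(b). */
  static void check_key_order(double a, double b) {
    if (a < b)
      assert(mdbx_key_from_double(a) < mdbx_key_from_double(b));
  }
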
#define IEEE754_DOUBLE_MANTISSA_SIZE 52 #define IEEE754_DOUBLE_MANTISSA_SIZE 52
#define IEEE754_DOUBLE_EXPONENTA_BIAS 0x3FF #define IEEE754_DOUBLE_EXPONENTA_BIAS 0x3FF
@ -78,8 +66,7 @@ static inline int clz64(uint64_t value) {
return __builtin_clz(value); return __builtin_clz(value);
if (sizeof(value) == sizeof(long)) if (sizeof(value) == sizeof(long))
return __builtin_clzl(value); return __builtin_clzl(value);
#if (defined(__SIZEOF_LONG_LONG__) && __SIZEOF_LONG_LONG__ == 8) || \ #if (defined(__SIZEOF_LONG_LONG__) && __SIZEOF_LONG_LONG__ == 8) || __has_builtin(__builtin_clzll)
__has_builtin(__builtin_clzll)
return __builtin_clzll(value); return __builtin_clzll(value);
#endif /* have(long long) && long long == uint64_t */ #endif /* have(long long) && long long == uint64_t */
#endif /* GNU C */ #endif /* GNU C */
@ -105,11 +92,10 @@ static inline int clz64(uint64_t value) {
value |= value >> 8; value |= value >> 8;
value |= value >> 16; value |= value >> 16;
value |= value >> 32; value |= value >> 32;
static const uint8_t debruijn_clz64[64] = { static const uint8_t debruijn_clz64[64] = {63, 16, 62, 7, 15, 36, 61, 3, 6, 14, 22, 26, 35, 47, 60, 2,
63, 16, 62, 7, 15, 36, 61, 3, 6, 14, 22, 26, 35, 47, 60, 2, 9, 5, 28, 11, 13, 21, 42, 19, 25, 31, 34, 40, 46, 52, 59, 1,
9, 5, 28, 11, 13, 21, 42, 19, 25, 31, 34, 40, 46, 52, 59, 1, 17, 8, 37, 4, 23, 27, 48, 10, 29, 12, 43, 20, 32, 41, 53, 18,
17, 8, 37, 4, 23, 27, 48, 10, 29, 12, 43, 20, 32, 41, 53, 18, 38, 24, 49, 30, 44, 33, 54, 39, 50, 45, 55, 51, 56, 57, 58, 0};
38, 24, 49, 30, 44, 33, 54, 39, 50, 45, 55, 51, 56, 57, 58, 0};
return debruijn_clz64[value * UINT64_C(0x03F79D71B4CB0A89) >> 58]; return debruijn_clz64[value * UINT64_C(0x03F79D71B4CB0A89) >> 58];
} }
@ -134,17 +120,12 @@ uint64_t mdbx_key_from_jsonInteger(const int64_t json_integer) {
mantissa = round_mantissa(u64, --shift); mantissa = round_mantissa(u64, --shift);
} }
assert(mantissa >= IEEE754_DOUBLE_IMPLICIT_LEAD && assert(mantissa >= IEEE754_DOUBLE_IMPLICIT_LEAD && mantissa <= IEEE754_DOUBLE_MANTISSA_AMAX);
mantissa <= IEEE754_DOUBLE_MANTISSA_AMAX); const uint64_t exponent = (uint64_t)IEEE754_DOUBLE_EXPONENTA_BIAS + IEEE754_DOUBLE_MANTISSA_SIZE - shift;
const uint64_t exponent = (uint64_t)IEEE754_DOUBLE_EXPONENTA_BIAS +
IEEE754_DOUBLE_MANTISSA_SIZE - shift;
assert(exponent > 0 && exponent <= IEEE754_DOUBLE_EXPONENTA_MAX); assert(exponent > 0 && exponent <= IEEE754_DOUBLE_EXPONENTA_MAX);
const uint64_t key = bias + (exponent << IEEE754_DOUBLE_MANTISSA_SIZE) + const uint64_t key = bias + (exponent << IEEE754_DOUBLE_MANTISSA_SIZE) + (mantissa - IEEE754_DOUBLE_IMPLICIT_LEAD);
(mantissa - IEEE754_DOUBLE_IMPLICIT_LEAD); #if !defined(_MSC_VER) || defined(_DEBUG) /* Workaround for MSVC error LNK2019: unresolved external \
#if !defined(_MSC_VER) || \ symbol __except1 referenced in function __ftol3_except */
defined( \
_DEBUG) /* Workaround for MSVC error LNK2019: unresolved external \
symbol __except1 referenced in function __ftol3_except */
assert(key == mdbx_key_from_double((double)json_integer)); assert(key == mdbx_key_from_double((double)json_integer));
#endif /* Workaround for MSVC */ #endif /* Workaround for MSVC */
return key; return key;
@ -160,17 +141,13 @@ uint64_t mdbx_key_from_jsonInteger(const int64_t json_integer) {
mantissa = round_mantissa(u64, --shift); mantissa = round_mantissa(u64, --shift);
} }
assert(mantissa >= IEEE754_DOUBLE_IMPLICIT_LEAD && assert(mantissa >= IEEE754_DOUBLE_IMPLICIT_LEAD && mantissa <= IEEE754_DOUBLE_MANTISSA_AMAX);
mantissa <= IEEE754_DOUBLE_MANTISSA_AMAX); const uint64_t exponent = (uint64_t)IEEE754_DOUBLE_EXPONENTA_BIAS + IEEE754_DOUBLE_MANTISSA_SIZE - shift;
const uint64_t exponent = (uint64_t)IEEE754_DOUBLE_EXPONENTA_BIAS +
IEEE754_DOUBLE_MANTISSA_SIZE - shift;
assert(exponent > 0 && exponent <= IEEE754_DOUBLE_EXPONENTA_MAX); assert(exponent > 0 && exponent <= IEEE754_DOUBLE_EXPONENTA_MAX);
const uint64_t key = bias - 1 - (exponent << IEEE754_DOUBLE_MANTISSA_SIZE) - const uint64_t key =
(mantissa - IEEE754_DOUBLE_IMPLICIT_LEAD); bias - 1 - (exponent << IEEE754_DOUBLE_MANTISSA_SIZE) - (mantissa - IEEE754_DOUBLE_IMPLICIT_LEAD);
#if !defined(_MSC_VER) || \ #if !defined(_MSC_VER) || defined(_DEBUG) /* Workaround for MSVC error LNK2019: unresolved external \
defined( \ symbol __except1 referenced in function __ftol3_except */
_DEBUG) /* Workaround for MSVC error LNK2019: unresolved external \
symbol __except1 referenced in function __ftol3_except */
assert(key == mdbx_key_from_double((double)json_integer)); assert(key == mdbx_key_from_double((double)json_integer));
#endif /* Workaround for MSVC */ #endif /* Workaround for MSVC */
return key; return key;
@ -185,21 +162,17 @@ int64_t mdbx_jsonInteger_from_key(const MDBX_val v) {
const uint64_t bias = UINT64_C(0x8000000000000000); const uint64_t bias = UINT64_C(0x8000000000000000);
const uint64_t covalent = (key > bias) ? key - bias : bias - key - 1; const uint64_t covalent = (key > bias) ? key - bias : bias - key - 1;
const int shift = IEEE754_DOUBLE_EXPONENTA_BIAS + 63 - const int shift = IEEE754_DOUBLE_EXPONENTA_BIAS + 63 -
(IEEE754_DOUBLE_EXPONENTA_MAX & (IEEE754_DOUBLE_EXPONENTA_MAX & (int)(covalent >> IEEE754_DOUBLE_MANTISSA_SIZE));
(int)(covalent >> IEEE754_DOUBLE_MANTISSA_SIZE));
if (unlikely(shift < 1)) if (unlikely(shift < 1))
return (key < bias) ? INT64_MIN : INT64_MAX; return (key < bias) ? INT64_MIN : INT64_MAX;
if (unlikely(shift > 63)) if (unlikely(shift > 63))
return 0; return 0;
const uint64_t unscaled = ((covalent & IEEE754_DOUBLE_MANTISSA_MASK) const uint64_t unscaled = ((covalent & IEEE754_DOUBLE_MANTISSA_MASK) << (63 - IEEE754_DOUBLE_MANTISSA_SIZE)) + bias;
<< (63 - IEEE754_DOUBLE_MANTISSA_SIZE)) +
bias;
const int64_t absolute = unscaled >> shift; const int64_t absolute = unscaled >> shift;
const int64_t value = (key < bias) ? -absolute : absolute; const int64_t value = (key < bias) ? -absolute : absolute;
assert(key == mdbx_key_from_jsonInteger(value) || assert(key == mdbx_key_from_jsonInteger(value) ||
(mdbx_key_from_jsonInteger(value - 1) < key && (mdbx_key_from_jsonInteger(value - 1) < key && key < mdbx_key_from_jsonInteger(value + 1)));
key < mdbx_key_from_jsonInteger(value + 1)));
return value; return value;
} }
@ -220,6 +193,5 @@ int32_t mdbx_int32_from_key(const MDBX_val v) {
int64_t mdbx_int64_from_key(const MDBX_val v) { int64_t mdbx_int64_from_key(const MDBX_val v) {
assert(v.iov_len == 8); assert(v.iov_len == 8);
return (int64_t)(unaligned_peek_u64(2, v.iov_base) - return (int64_t)(unaligned_peek_u64(2, v.iov_base) - UINT64_C(0x8000000000000000));
UINT64_C(0x8000000000000000));
} }
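
End of the key-conversion hunks. A hedged round-trip sketch for the jsonInteger pair; exact recovery is assumed only while the value fits the 53-bit double mantissa, in line with the rounding path and asserts above. The helper name is invented.

  #include <assert.h>
  #include <stdint.h>
  #include "mdbx.h"

  /* Illustrative only: forward/backward conversion of a JSON-style integer key. */
  static void json_key_roundtrip(int64_t value) {
    uint64_t key = mdbx_key_from_jsonInteger(value);
    MDBX_val v = {&key, sizeof(key)};
    int64_t back = mdbx_jsonInteger_from_key(v);
    if (value > -(INT64_C(1) << 53) && value < (INT64_C(1) << 53))
      assert(back == value); /* assumed exact within the double-precision range */
  }
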


@ -16,9 +16,7 @@ int mdbx_txn_straggler(const MDBX_txn *txn, int *percent)
MDBX_env *env = txn->env; MDBX_env *env = txn->env;
if (unlikely((txn->flags & MDBX_TXN_RDONLY) == 0)) { if (unlikely((txn->flags & MDBX_TXN_RDONLY) == 0)) {
if (percent) if (percent)
*percent = (int)((txn->geo.first_unallocated * UINT64_C(100) + *percent = (int)((txn->geo.first_unallocated * UINT64_C(100) + txn->geo.end_pgno / 2) / txn->geo.end_pgno);
txn->geo.end_pgno / 2) /
txn->geo.end_pgno);
return 0; return 0;
} }
@ -28,9 +26,7 @@ int mdbx_txn_straggler(const MDBX_txn *txn, int *percent)
const meta_ptr_t head = meta_recent(env, &troika); const meta_ptr_t head = meta_recent(env, &troika);
if (percent) { if (percent) {
const pgno_t maxpg = head.ptr_v->geometry.now; const pgno_t maxpg = head.ptr_v->geometry.now;
*percent = (int)((head.ptr_v->geometry.first_unallocated * UINT64_C(100) + *percent = (int)((head.ptr_v->geometry.first_unallocated * UINT64_C(100) + maxpg / 2) / maxpg);
maxpg / 2) /
maxpg);
} }
lag = (head.txnid - txn->txnid) / xMDBX_TXNID_STEP; lag = (head.txnid - txn->txnid) / xMDBX_TXNID_STEP;
} while (unlikely(meta_should_retry(env, &troika))); } while (unlikely(meta_should_retry(env, &troika)));
@ -38,8 +34,7 @@ int mdbx_txn_straggler(const MDBX_txn *txn, int *percent)
return (lag > INT_MAX) ? INT_MAX : (int)lag; return (lag > INT_MAX) ? INT_MAX : (int)lag;
} }
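
The percent computation above rounds (first_unallocated * 100 + maxpg / 2) / maxpg, i.e. the share of the currently allocated space in use. A hedged monitoring sketch; the helper name is invented and the negative-on-failure return convention is assumed from the API documentation rather than shown in these hunks.

  #include <stdio.h>
  #include "mdbx.h"

  /* Illustrative only: report reader lag and approximate space usage. */
  static void report_straggler(const MDBX_txn *ro_txn) {
    int percent = 0;
    const int lag = mdbx_txn_straggler(ro_txn, &percent);
    if (lag < 0)
      fprintf(stderr, "mdbx_txn_straggler: %s\n", mdbx_strerror(lag));
    else
      printf("lagging by %d transaction(s), ~%d%% of mapped space in use\n", lag, percent);
  }
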
__cold int mdbx_dbi_dupsort_depthmask(const MDBX_txn *txn, MDBX_dbi dbi, __cold int mdbx_dbi_dupsort_depthmask(const MDBX_txn *txn, MDBX_dbi dbi, uint32_t *mask) {
uint32_t *mask) {
if (unlikely(!mask)) if (unlikely(!mask))
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
@ -58,8 +53,7 @@ __cold int mdbx_dbi_dupsort_depthmask(const MDBX_txn *txn, MDBX_dbi dbi,
MDBX_val key, data; MDBX_val key, data;
rc = outer_first(&cx.outer, &key, &data); rc = outer_first(&cx.outer, &key, &data);
while (rc == MDBX_SUCCESS) { while (rc == MDBX_SUCCESS) {
const node_t *node = const node_t *node = page_node(cx.outer.pg[cx.outer.top], cx.outer.ki[cx.outer.top]);
page_node(cx.outer.pg[cx.outer.top], cx.outer.ki[cx.outer.top]);
const tree_t *db = node_data(node); const tree_t *db = node_data(node);
const unsigned flags = node_flags(node); const unsigned flags = node_flags(node);
switch (flags) { switch (flags) {
@ -77,8 +71,7 @@ __cold int mdbx_dbi_dupsort_depthmask(const MDBX_txn *txn, MDBX_dbi dbi,
*mask |= 1 << UNALIGNED_PEEK_16(db, tree_t, height); *mask |= 1 << UNALIGNED_PEEK_16(db, tree_t, height);
break; break;
default: default:
ERROR("%s/%d: %s %u", "MDBX_CORRUPTED", MDBX_CORRUPTED, ERROR("%s/%d: %s %u", "MDBX_CORRUPTED", MDBX_CORRUPTED, "invalid node-size", flags);
"invalid node-size", flags);
return LOG_IFERR(MDBX_CORRUPTED); return LOG_IFERR(MDBX_CORRUPTED);
} }
rc = outer_next(&cx.outer, &key, &data, MDBX_NEXT_NODUP); rc = outer_next(&cx.outer, &key, &data, MDBX_NEXT_NODUP);
@ -101,8 +94,7 @@ int mdbx_canary_get(const MDBX_txn *txn, MDBX_canary *canary) {
return MDBX_SUCCESS; return MDBX_SUCCESS;
} }
int mdbx_get(const MDBX_txn *txn, MDBX_dbi dbi, const MDBX_val *key, int mdbx_get(const MDBX_txn *txn, MDBX_dbi dbi, const MDBX_val *key, MDBX_val *data) {
MDBX_val *data) {
DKBUF_DEBUG; DKBUF_DEBUG;
DEBUG("===> get db %u key [%s]", dbi, DKEY_DEBUG(key)); DEBUG("===> get db %u key [%s]", dbi, DKEY_DEBUG(key));
@ -121,8 +113,7 @@ int mdbx_get(const MDBX_txn *txn, MDBX_dbi dbi, const MDBX_val *key,
return LOG_IFERR(cursor_seek(&cx.outer, (MDBX_val *)key, data, MDBX_SET).err); return LOG_IFERR(cursor_seek(&cx.outer, (MDBX_val *)key, data, MDBX_SET).err);
} }
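
A hedged lookup sketch built on the mdbx_get() signature above; the helper name and the string-key convention are illustrative only.

  #include <stdio.h>
  #include <string.h>
  #include "mdbx.h"

  /* Illustrative only: point lookup; MDBX_NOTFOUND is the normal "absent" result. */
  static int lookup(const MDBX_txn *txn, MDBX_dbi dbi, const char *name) {
    MDBX_val key = {(void *)name, strlen(name)};
    MDBX_val data;
    const int rc = mdbx_get(txn, dbi, &key, &data);
    if (rc == MDBX_SUCCESS)
      printf("%s -> %zu bytes\n", name, data.iov_len);
    else if (rc != MDBX_NOTFOUND)
      fprintf(stderr, "mdbx_get: %s\n", mdbx_strerror(rc));
    return rc;
  }
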
int mdbx_get_equal_or_great(const MDBX_txn *txn, MDBX_dbi dbi, MDBX_val *key, int mdbx_get_equal_or_great(const MDBX_txn *txn, MDBX_dbi dbi, MDBX_val *key, MDBX_val *data) {
MDBX_val *data) {
int rc = check_txn(txn, MDBX_TXN_BLOCKED); int rc = check_txn(txn, MDBX_TXN_BLOCKED);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
return LOG_IFERR(rc); return LOG_IFERR(rc);
@ -141,8 +132,7 @@ int mdbx_get_equal_or_great(const MDBX_txn *txn, MDBX_dbi dbi, MDBX_val *key,
return LOG_IFERR(cursor_ops(&cx.outer, key, data, MDBX_SET_LOWERBOUND)); return LOG_IFERR(cursor_ops(&cx.outer, key, data, MDBX_SET_LOWERBOUND));
} }
int mdbx_get_ex(const MDBX_txn *txn, MDBX_dbi dbi, MDBX_val *key, int mdbx_get_ex(const MDBX_txn *txn, MDBX_dbi dbi, MDBX_val *key, MDBX_val *data, size_t *values_count) {
MDBX_val *data, size_t *values_count) {
DKBUF_DEBUG; DKBUF_DEBUG;
DEBUG("===> get db %u key [%s]", dbi, DKEY_DEBUG(key)); DEBUG("===> get db %u key [%s]", dbi, DKEY_DEBUG(key));
@ -169,8 +159,7 @@ int mdbx_get_ex(const MDBX_txn *txn, MDBX_dbi dbi, MDBX_val *key,
*values_count = 1; *values_count = 1;
if (inner_pointed(&cx.outer)) if (inner_pointed(&cx.outer))
*values_count = *values_count =
(sizeof(*values_count) >= sizeof(cx.inner.nested_tree.items) || (sizeof(*values_count) >= sizeof(cx.inner.nested_tree.items) || cx.inner.nested_tree.items <= PTRDIFF_MAX)
cx.inner.nested_tree.items <= PTRDIFF_MAX)
? (size_t)cx.inner.nested_tree.items ? (size_t)cx.inner.nested_tree.items
: PTRDIFF_MAX; : PTRDIFF_MAX;
} }
@ -185,8 +174,7 @@ int mdbx_canary_put(MDBX_txn *txn, const MDBX_canary *canary) {
return LOG_IFERR(rc); return LOG_IFERR(rc);
if (likely(canary)) { if (likely(canary)) {
if (txn->canary.x == canary->x && txn->canary.y == canary->y && if (txn->canary.x == canary->x && txn->canary.y == canary->y && txn->canary.z == canary->z)
txn->canary.z == canary->z)
return MDBX_SUCCESS; return MDBX_SUCCESS;
txn->canary.x = canary->x; txn->canary.x = canary->x;
txn->canary.y = canary->y; txn->canary.y = canary->y;
@ -236,17 +224,14 @@ int mdbx_is_dirty(const MDBX_txn *txn, const void *ptr) {
* not to the beginning of the data. */ * not to the beginning of the data. */
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
} }
return ((txn->flags & MDBX_TXN_RDONLY) || !is_modifable(txn, page)) return ((txn->flags & MDBX_TXN_RDONLY) || !is_modifable(txn, page)) ? MDBX_RESULT_FALSE : MDBX_RESULT_TRUE;
? MDBX_RESULT_FALSE
: MDBX_RESULT_TRUE;
} }
if ((size_t)offset < env->dxb_mmap.limit) { if ((size_t)offset < env->dxb_mmap.limit) {
/* The pointer addresses something within the mmap, but beyond the /* The pointer addresses something within the mmap, but beyond the
* boundary of the allocated pages. This can happen if mdbx_is_dirty() * boundary of the allocated pages. This can happen if mdbx_is_dirty()
* is called after an operation during which a dirty page was * is called after an operation during which a dirty page was
* returned to the unallocated space. */ * returned to the unallocated space. */
return (txn->flags & MDBX_TXN_RDONLY) ? LOG_IFERR(MDBX_EINVAL) return (txn->flags & MDBX_TXN_RDONLY) ? LOG_IFERR(MDBX_EINVAL) : MDBX_RESULT_TRUE;
: MDBX_RESULT_TRUE;
} }
} }
@ -256,13 +241,10 @@ int mdbx_is_dirty(const MDBX_txn *txn, const void *ptr) {
* *
* For the MDBX_WRITE_MAP mode the page is definitely "not dirty", * For the MDBX_WRITE_MAP mode the page is definitely "not dirty",
* while for modes without MDBX_WRITE_MAP it is definitely "not clean". */ * while for modes without MDBX_WRITE_MAP it is definitely "not clean". */
return (txn->flags & (MDBX_WRITEMAP | MDBX_TXN_RDONLY)) return (txn->flags & (MDBX_WRITEMAP | MDBX_TXN_RDONLY)) ? LOG_IFERR(MDBX_EINVAL) : MDBX_RESULT_TRUE;
? LOG_IFERR(MDBX_EINVAL)
: MDBX_RESULT_TRUE;
} }
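
End of the mdbx_is_dirty() hunk; the translated comments above spell out its intent. One common use, sketched under that reading (an assumption, not from the commit): decide whether data returned by a read must be copied before further writes in the same write transaction.

  #include <stdbool.h>
  #include "mdbx.h"

  /* Illustrative only: true when the value lives on a transaction-private
   * (dirty/mutable) page and may be invalidated by subsequent updates. */
  static bool value_needs_copy(const MDBX_txn *txn, const MDBX_val *data) {
    return mdbx_is_dirty(txn, data->iov_base) == MDBX_RESULT_TRUE;
  }
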
int mdbx_del(MDBX_txn *txn, MDBX_dbi dbi, const MDBX_val *key, int mdbx_del(MDBX_txn *txn, MDBX_dbi dbi, const MDBX_val *key, const MDBX_val *data) {
const MDBX_val *data) {
int rc = check_txn_rw(txn, MDBX_TXN_BLOCKED); int rc = check_txn_rw(txn, MDBX_TXN_BLOCKED);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
return LOG_IFERR(rc); return LOG_IFERR(rc);
@ -274,8 +256,7 @@ int mdbx_del(MDBX_txn *txn, MDBX_dbi dbi, const MDBX_val *key,
return LOG_IFERR(MDBX_BAD_DBI); return LOG_IFERR(MDBX_BAD_DBI);
if (unlikely(txn->flags & (MDBX_TXN_RDONLY | MDBX_TXN_BLOCKED))) if (unlikely(txn->flags & (MDBX_TXN_RDONLY | MDBX_TXN_BLOCKED)))
return LOG_IFERR((txn->flags & MDBX_TXN_RDONLY) ? MDBX_EACCESS return LOG_IFERR((txn->flags & MDBX_TXN_RDONLY) ? MDBX_EACCESS : MDBX_BAD_TXN);
: MDBX_BAD_TXN);
cursor_couple_t cx; cursor_couple_t cx;
rc = cursor_init(&cx.outer, txn, dbi); rc = cursor_init(&cx.outer, txn, dbi);
@ -302,8 +283,7 @@ int mdbx_del(MDBX_txn *txn, MDBX_dbi dbi, const MDBX_val *key,
return LOG_IFERR(rc); return LOG_IFERR(rc);
} }
int mdbx_put(MDBX_txn *txn, MDBX_dbi dbi, const MDBX_val *key, MDBX_val *data, int mdbx_put(MDBX_txn *txn, MDBX_dbi dbi, const MDBX_val *key, MDBX_val *data, MDBX_put_flags_t flags) {
MDBX_put_flags_t flags) {
int rc = check_txn_rw(txn, MDBX_TXN_BLOCKED); int rc = check_txn_rw(txn, MDBX_TXN_BLOCKED);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
return LOG_IFERR(rc); return LOG_IFERR(rc);
@ -314,14 +294,12 @@ int mdbx_put(MDBX_txn *txn, MDBX_dbi dbi, const MDBX_val *key, MDBX_val *data,
if (unlikely(dbi <= FREE_DBI)) if (unlikely(dbi <= FREE_DBI))
return LOG_IFERR(MDBX_BAD_DBI); return LOG_IFERR(MDBX_BAD_DBI);
if (unlikely(flags & ~(MDBX_NOOVERWRITE | MDBX_NODUPDATA | MDBX_ALLDUPS | if (unlikely(flags & ~(MDBX_NOOVERWRITE | MDBX_NODUPDATA | MDBX_ALLDUPS | MDBX_ALLDUPS | MDBX_RESERVE | MDBX_APPEND |
MDBX_ALLDUPS | MDBX_RESERVE | MDBX_APPEND |
MDBX_APPENDDUP | MDBX_CURRENT | MDBX_MULTIPLE))) MDBX_APPENDDUP | MDBX_CURRENT | MDBX_MULTIPLE)))
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
if (unlikely(txn->flags & (MDBX_TXN_RDONLY | MDBX_TXN_BLOCKED))) if (unlikely(txn->flags & (MDBX_TXN_RDONLY | MDBX_TXN_BLOCKED)))
return LOG_IFERR((txn->flags & MDBX_TXN_RDONLY) ? MDBX_EACCESS return LOG_IFERR((txn->flags & MDBX_TXN_RDONLY) ? MDBX_EACCESS : MDBX_BAD_TXN);
: MDBX_BAD_TXN);
cursor_couple_t cx; cursor_couple_t cx;
rc = cursor_init(&cx.outer, txn, dbi); rc = cursor_init(&cx.outer, txn, dbi);
@ -333,14 +311,11 @@ int mdbx_put(MDBX_txn *txn, MDBX_dbi dbi, const MDBX_val *key, MDBX_val *data,
/* LY: support for update (explicit overwrite) */ /* LY: support for update (explicit overwrite) */
if (flags & MDBX_CURRENT) { if (flags & MDBX_CURRENT) {
rc = cursor_seek(&cx.outer, (MDBX_val *)key, nullptr, MDBX_SET).err; rc = cursor_seek(&cx.outer, (MDBX_val *)key, nullptr, MDBX_SET).err;
if (likely(rc == MDBX_SUCCESS) && (txn->dbs[dbi].flags & MDBX_DUPSORT) && if (likely(rc == MDBX_SUCCESS) && (txn->dbs[dbi].flags & MDBX_DUPSORT) && (flags & MDBX_ALLDUPS) == 0) {
(flags & MDBX_ALLDUPS) == 0) {
/* LY: allows update (explicit overwrite) only for unique keys */ /* LY: allows update (explicit overwrite) only for unique keys */
node_t *node = node_t *node = page_node(cx.outer.pg[cx.outer.top], cx.outer.ki[cx.outer.top]);
page_node(cx.outer.pg[cx.outer.top], cx.outer.ki[cx.outer.top]);
if (node_flags(node) & N_DUP) { if (node_flags(node) & N_DUP) {
tASSERT(txn, inner_pointed(&cx.outer) && tASSERT(txn, inner_pointed(&cx.outer) && cx.outer.subcur->nested_tree.items > 1);
cx.outer.subcur->nested_tree.items > 1);
rc = MDBX_EMULTIVAL; rc = MDBX_EMULTIVAL;
if ((flags & MDBX_NOOVERWRITE) == 0) { if ((flags & MDBX_NOOVERWRITE) == 0) {
flags -= MDBX_CURRENT; flags -= MDBX_CURRENT;
@ -383,10 +358,8 @@ int mdbx_put(MDBX_txn *txn, MDBX_dbi dbi, const MDBX_val *key, MDBX_val *data,
* - obtaining the dirty status of a page by address (to know about MUTABLE/WRITEABLE). * - obtaining the dirty status of a page by address (to know about MUTABLE/WRITEABLE).
*/ */
int mdbx_replace_ex(MDBX_txn *txn, MDBX_dbi dbi, const MDBX_val *key, int mdbx_replace_ex(MDBX_txn *txn, MDBX_dbi dbi, const MDBX_val *key, MDBX_val *new_data, MDBX_val *old_data,
MDBX_val *new_data, MDBX_val *old_data, MDBX_put_flags_t flags, MDBX_preserve_func preserver, void *preserver_context) {
MDBX_put_flags_t flags, MDBX_preserve_func preserver,
void *preserver_context) {
int rc = check_txn_rw(txn, MDBX_TXN_BLOCKED); int rc = check_txn_rw(txn, MDBX_TXN_BLOCKED);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
return LOG_IFERR(rc); return LOG_IFERR(rc);
@ -397,16 +370,14 @@ int mdbx_replace_ex(MDBX_txn *txn, MDBX_dbi dbi, const MDBX_val *key,
if (unlikely(old_data->iov_base == nullptr && old_data->iov_len)) if (unlikely(old_data->iov_base == nullptr && old_data->iov_len))
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
if (unlikely(new_data == nullptr && if (unlikely(new_data == nullptr && (flags & (MDBX_CURRENT | MDBX_RESERVE)) != MDBX_CURRENT))
(flags & (MDBX_CURRENT | MDBX_RESERVE)) != MDBX_CURRENT))
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
if (unlikely(dbi <= FREE_DBI)) if (unlikely(dbi <= FREE_DBI))
return LOG_IFERR(MDBX_BAD_DBI); return LOG_IFERR(MDBX_BAD_DBI);
if (unlikely(flags & if (unlikely(flags & ~(MDBX_NOOVERWRITE | MDBX_NODUPDATA | MDBX_ALLDUPS | MDBX_RESERVE | MDBX_APPEND |
~(MDBX_NOOVERWRITE | MDBX_NODUPDATA | MDBX_ALLDUPS | MDBX_APPENDDUP | MDBX_CURRENT)))
MDBX_RESERVE | MDBX_APPEND | MDBX_APPENDDUP | MDBX_CURRENT)))
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
cursor_couple_t cx; cursor_couple_t cx;
@ -452,8 +423,7 @@ int mdbx_replace_ex(MDBX_txn *txn, MDBX_dbi dbi, const MDBX_val *key,
/* disallow update/delete for multi-values */ /* disallow update/delete for multi-values */
node_t *node = page_node(page, cx.outer.ki[cx.outer.top]); node_t *node = page_node(page, cx.outer.ki[cx.outer.top]);
if (node_flags(node) & N_DUP) { if (node_flags(node) & N_DUP) {
tASSERT(txn, inner_pointed(&cx.outer) && tASSERT(txn, inner_pointed(&cx.outer) && cx.outer.subcur->nested_tree.items > 1);
cx.outer.subcur->nested_tree.items > 1);
if (cx.outer.subcur->nested_tree.items > 1) { if (cx.outer.subcur->nested_tree.items > 1) {
rc = MDBX_EMULTIVAL; rc = MDBX_EMULTIVAL;
goto bailout; goto bailout;
@ -472,8 +442,7 @@ int mdbx_replace_ex(MDBX_txn *txn, MDBX_dbi dbi, const MDBX_val *key,
*old_data = *new_data; *old_data = *new_data;
goto bailout; goto bailout;
} }
rc = preserver ? preserver(preserver_context, old_data, rc = preserver ? preserver(preserver_context, old_data, present_data.iov_base, present_data.iov_len)
present_data.iov_base, present_data.iov_len)
: MDBX_SUCCESS; : MDBX_SUCCESS;
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
goto bailout; goto bailout;
@ -494,8 +463,7 @@ bailout:
return LOG_IFERR(rc); return LOG_IFERR(rc);
} }
static int default_value_preserver(void *context, MDBX_val *target, static int default_value_preserver(void *context, MDBX_val *target, const void *src, size_t bytes) {
const void *src, size_t bytes) {
(void)context; (void)context;
if (unlikely(target->iov_len < bytes)) { if (unlikely(target->iov_len < bytes)) {
target->iov_base = nullptr; target->iov_base = nullptr;
@ -506,9 +474,7 @@ static int default_value_preserver(void *context, MDBX_val *target,
return MDBX_SUCCESS; return MDBX_SUCCESS;
} }
int mdbx_replace(MDBX_txn *txn, MDBX_dbi dbi, const MDBX_val *key, int mdbx_replace(MDBX_txn *txn, MDBX_dbi dbi, const MDBX_val *key, MDBX_val *new_data, MDBX_val *old_data,
MDBX_val *new_data, MDBX_val *old_data,
MDBX_put_flags_t flags) { MDBX_put_flags_t flags) {
return mdbx_replace_ex(txn, dbi, key, new_data, old_data, flags, return mdbx_replace_ex(txn, dbi, key, new_data, old_data, flags, default_value_preserver, nullptr);
default_value_preserver, nullptr);
} }
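
End of this file's hunks. A hedged sketch of mdbx_replace() with the default preserver shown above: the caller supplies a buffer for the previous value; what exactly happens when that buffer is too small is only hinted at by the truncated default_value_preserver body, so treat the comment below as an assumption. Names and flags choice are illustrative.

  #include <stddef.h>
  #include "mdbx.h"

  /* Illustrative only: overwrite a value and capture the previous one into a
   * caller-provided buffer (old_data is assumed to be updated to the old length). */
  static int swap_value(MDBX_txn *txn, MDBX_dbi dbi, const MDBX_val *key, MDBX_val *new_data, void *old_buf,
                        size_t old_cap) {
    MDBX_val old_data = {old_buf, old_cap};
    return mdbx_replace(txn, dbi, key, new_data, &old_data, MDBX_UPSERT);
  }
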


@ -8,43 +8,36 @@
#ifndef __cplusplus #ifndef __cplusplus
#ifdef MDBX_HAVE_C11ATOMICS #ifdef MDBX_HAVE_C11ATOMICS
#define osal_memory_fence(order, write) \ #define osal_memory_fence(order, write) atomic_thread_fence((write) ? mo_c11_store(order) : mo_c11_load(order))
atomic_thread_fence((write) ? mo_c11_store(order) : mo_c11_load(order))
#else /* MDBX_HAVE_C11ATOMICS */ #else /* MDBX_HAVE_C11ATOMICS */
#define osal_memory_fence(order, write) \ #define osal_memory_fence(order, write) \
do { \ do { \
osal_compiler_barrier(); \ osal_compiler_barrier(); \
if (write && order > (MDBX_CPU_WRITEBACK_INCOHERENT ? mo_Relaxed \ if (write && order > (MDBX_CPU_WRITEBACK_INCOHERENT ? mo_Relaxed : mo_AcquireRelease)) \
: mo_AcquireRelease)) \ osal_memory_barrier(); \
osal_memory_barrier(); \
} while (0) } while (0)
#endif /* MDBX_HAVE_C11ATOMICS */ #endif /* MDBX_HAVE_C11ATOMICS */
#if defined(MDBX_HAVE_C11ATOMICS) && defined(__LCC__) #if defined(MDBX_HAVE_C11ATOMICS) && defined(__LCC__)
#define atomic_store32(p, value, order) \ #define atomic_store32(p, value, order) \
({ \ ({ \
const uint32_t value_to_store = (value); \ const uint32_t value_to_store = (value); \
atomic_store_explicit(MDBX_c11a_rw(uint32_t, p), value_to_store, \ atomic_store_explicit(MDBX_c11a_rw(uint32_t, p), value_to_store, mo_c11_store(order)); \
mo_c11_store(order)); \ value_to_store; \
value_to_store; \
}) })
#define atomic_load32(p, order) \ #define atomic_load32(p, order) atomic_load_explicit(MDBX_c11a_ro(uint32_t, p), mo_c11_load(order))
atomic_load_explicit(MDBX_c11a_ro(uint32_t, p), mo_c11_load(order)) #define atomic_store64(p, value, order) \
#define atomic_store64(p, value, order) \ ({ \
({ \ const uint64_t value_to_store = (value); \
const uint64_t value_to_store = (value); \ atomic_store_explicit(MDBX_c11a_rw(uint64_t, p), value_to_store, mo_c11_store(order)); \
atomic_store_explicit(MDBX_c11a_rw(uint64_t, p), value_to_store, \ value_to_store; \
mo_c11_store(order)); \
value_to_store; \
}) })
#define atomic_load64(p, order) \ #define atomic_load64(p, order) atomic_load_explicit(MDBX_c11a_ro(uint64_t, p), mo_c11_load(order))
atomic_load_explicit(MDBX_c11a_ro(uint64_t, p), mo_c11_load(order))
#endif /* LCC && MDBX_HAVE_C11ATOMICS */ #endif /* LCC && MDBX_HAVE_C11ATOMICS */
#ifndef atomic_store32 #ifndef atomic_store32
MDBX_MAYBE_UNUSED static __always_inline uint32_t MDBX_MAYBE_UNUSED static __always_inline uint32_t atomic_store32(mdbx_atomic_uint32_t *p, const uint32_t value,
atomic_store32(mdbx_atomic_uint32_t *p, const uint32_t value, enum mdbx_memory_order order) {
enum mdbx_memory_order order) {
STATIC_ASSERT(sizeof(mdbx_atomic_uint32_t) == 4); STATIC_ASSERT(sizeof(mdbx_atomic_uint32_t) == 4);
#ifdef MDBX_HAVE_C11ATOMICS #ifdef MDBX_HAVE_C11ATOMICS
assert(atomic_is_lock_free(MDBX_c11a_rw(uint32_t, p))); assert(atomic_is_lock_free(MDBX_c11a_rw(uint32_t, p)));
@ -60,8 +53,8 @@ atomic_store32(mdbx_atomic_uint32_t *p, const uint32_t value,
#endif /* atomic_store32 */ #endif /* atomic_store32 */
#ifndef atomic_load32 #ifndef atomic_load32
MDBX_MAYBE_UNUSED static __always_inline uint32_t atomic_load32( MDBX_MAYBE_UNUSED static __always_inline uint32_t atomic_load32(const volatile mdbx_atomic_uint32_t *p,
const volatile mdbx_atomic_uint32_t *p, enum mdbx_memory_order order) { enum mdbx_memory_order order) {
STATIC_ASSERT(sizeof(mdbx_atomic_uint32_t) == 4); STATIC_ASSERT(sizeof(mdbx_atomic_uint32_t) == 4);
#ifdef MDBX_HAVE_C11ATOMICS #ifdef MDBX_HAVE_C11ATOMICS
assert(atomic_is_lock_free(MDBX_c11a_ro(uint32_t, p))); assert(atomic_is_lock_free(MDBX_c11a_ro(uint32_t, p)));
@ -90,9 +83,8 @@ MDBX_MAYBE_UNUSED static __always_inline uint32_t atomic_load32(
#endif /* xMDBX_TXNID_STEP */ #endif /* xMDBX_TXNID_STEP */
#ifndef atomic_store64 #ifndef atomic_store64
MDBX_MAYBE_UNUSED static __always_inline uint64_t MDBX_MAYBE_UNUSED static __always_inline uint64_t atomic_store64(mdbx_atomic_uint64_t *p, const uint64_t value,
atomic_store64(mdbx_atomic_uint64_t *p, const uint64_t value, enum mdbx_memory_order order) {
enum mdbx_memory_order order) {
STATIC_ASSERT(sizeof(mdbx_atomic_uint64_t) == 8); STATIC_ASSERT(sizeof(mdbx_atomic_uint64_t) == 8);
#if MDBX_64BIT_ATOMIC #if MDBX_64BIT_ATOMIC
#if __GNUC_PREREQ(11, 0) #if __GNUC_PREREQ(11, 0)
@ -124,8 +116,7 @@ MDBX_MAYBE_UNUSED static
__always_inline __always_inline
#endif /* MDBX_64BIT_ATOMIC */ #endif /* MDBX_64BIT_ATOMIC */
uint64_t uint64_t
atomic_load64(const volatile mdbx_atomic_uint64_t *p, atomic_load64(const volatile mdbx_atomic_uint64_t *p, enum mdbx_memory_order order) {
enum mdbx_memory_order order) {
STATIC_ASSERT(sizeof(mdbx_atomic_uint64_t) == 8); STATIC_ASSERT(sizeof(mdbx_atomic_uint64_t) == 8);
#if MDBX_64BIT_ATOMIC #if MDBX_64BIT_ATOMIC
#ifdef MDBX_HAVE_C11ATOMICS #ifdef MDBX_HAVE_C11ATOMICS
@ -142,15 +133,13 @@ MDBX_MAYBE_UNUSED static
osal_compiler_barrier(); osal_compiler_barrier();
uint64_t value = (uint64_t)atomic_load32(&p->high, order) << 32; uint64_t value = (uint64_t)atomic_load32(&p->high, order) << 32;
jitter4testing(true); jitter4testing(true);
value |= atomic_load32(&p->low, (order == mo_Relaxed) ? mo_Relaxed value |= atomic_load32(&p->low, (order == mo_Relaxed) ? mo_Relaxed : mo_AcquireRelease);
: mo_AcquireRelease);
jitter4testing(true); jitter4testing(true);
for (;;) { for (;;) {
osal_compiler_barrier(); osal_compiler_barrier();
uint64_t again = (uint64_t)atomic_load32(&p->high, order) << 32; uint64_t again = (uint64_t)atomic_load32(&p->high, order) << 32;
jitter4testing(true); jitter4testing(true);
again |= atomic_load32(&p->low, (order == mo_Relaxed) ? mo_Relaxed again |= atomic_load32(&p->low, (order == mo_Relaxed) ? mo_Relaxed : mo_AcquireRelease);
: mo_AcquireRelease);
jitter4testing(true); jitter4testing(true);
if (likely(value == again)) if (likely(value == again))
return value; return value;
@ -171,19 +160,16 @@ MDBX_MAYBE_UNUSED static __always_inline void atomic_yield(void) {
#else #else
__asm__ __volatile__("hint @pause"); __asm__ __volatile__("hint @pause");
#endif #endif
#elif defined(__aarch64__) || (defined(__ARM_ARCH) && __ARM_ARCH > 6) || \ #elif defined(__aarch64__) || (defined(__ARM_ARCH) && __ARM_ARCH > 6) || defined(__ARM_ARCH_6K__)
defined(__ARM_ARCH_6K__)
#ifdef __CC_ARM #ifdef __CC_ARM
__yield(); __yield();
#else #else
__asm__ __volatile__("yield"); __asm__ __volatile__("yield");
#endif #endif
#elif (defined(__mips64) || defined(__mips64__)) && defined(__mips_isa_rev) && \ #elif (defined(__mips64) || defined(__mips64__)) && defined(__mips_isa_rev) && __mips_isa_rev >= 2
__mips_isa_rev >= 2
__asm__ __volatile__("pause"); __asm__ __volatile__("pause");
#elif defined(__mips) || defined(__mips__) || defined(__mips64) || \ #elif defined(__mips) || defined(__mips__) || defined(__mips64) || defined(__mips64__) || defined(_M_MRX000) || \
defined(__mips64__) || defined(_M_MRX000) || defined(_MIPS_) || \ defined(_MIPS_) || defined(__MWERKS__) || defined(__sgi)
defined(__MWERKS__) || defined(__sgi)
__asm__ __volatile__(".word 0x00000140"); __asm__ __volatile__(".word 0x00000140");
#elif defined(__linux__) || defined(__gnu_linux__) || defined(_UNIX03_SOURCE) #elif defined(__linux__) || defined(__gnu_linux__) || defined(_UNIX03_SOURCE)
sched_yield(); sched_yield();
@ -193,8 +179,7 @@ MDBX_MAYBE_UNUSED static __always_inline void atomic_yield(void) {
} }
#if MDBX_64BIT_CAS #if MDBX_64BIT_CAS
MDBX_MAYBE_UNUSED static __always_inline bool MDBX_MAYBE_UNUSED static __always_inline bool atomic_cas64(mdbx_atomic_uint64_t *p, uint64_t c, uint64_t v) {
atomic_cas64(mdbx_atomic_uint64_t *p, uint64_t c, uint64_t v) {
#ifdef MDBX_HAVE_C11ATOMICS #ifdef MDBX_HAVE_C11ATOMICS
STATIC_ASSERT(sizeof(long long) >= sizeof(uint64_t)); STATIC_ASSERT(sizeof(long long) >= sizeof(uint64_t));
assert(atomic_is_lock_free(MDBX_c11a_rw(uint64_t, p))); assert(atomic_is_lock_free(MDBX_c11a_rw(uint64_t, p)));
@ -202,8 +187,7 @@ atomic_cas64(mdbx_atomic_uint64_t *p, uint64_t c, uint64_t v) {
#elif defined(__GNUC__) || defined(__clang__) #elif defined(__GNUC__) || defined(__clang__)
return __sync_bool_compare_and_swap(&p->weak, c, v); return __sync_bool_compare_and_swap(&p->weak, c, v);
#elif defined(_MSC_VER) #elif defined(_MSC_VER)
return c == (uint64_t)_InterlockedCompareExchange64( return c == (uint64_t)_InterlockedCompareExchange64((volatile __int64 *)&p->weak, v, c);
(volatile __int64 *)&p->weak, v, c);
#elif defined(__APPLE__) #elif defined(__APPLE__)
return OSAtomicCompareAndSwap64Barrier(c, v, &p->weak); return OSAtomicCompareAndSwap64Barrier(c, v, &p->weak);
#else #else
@ -212,8 +196,7 @@ atomic_cas64(mdbx_atomic_uint64_t *p, uint64_t c, uint64_t v) {
} }
#endif /* MDBX_64BIT_CAS */ #endif /* MDBX_64BIT_CAS */
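
Under MDBX_HAVE_C11ATOMICS the wrappers above reduce to plain <stdatomic.h> calls with explicit memory orders. A standalone C11 sketch of that pattern (plain standard C, deliberately not using the internal mdbx_atomic_* types, so the names below are not libmdbx code):

  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stdint.h>

  static _Atomic uint64_t counter;

  /* Relaxed increment plus acquire read-back, the shape used by the
   * atomic_add32()/atomic_load64() helpers in their C11 branches. */
  static uint64_t bump_and_read(void) {
    atomic_fetch_add_explicit(&counter, 1, memory_order_relaxed);
    return atomic_load_explicit(&counter, memory_order_acquire);
  }

  /* Strong compare-and-swap, analogous to atomic_cas64() above. */
  static bool cas_counter(uint64_t expected, uint64_t desired) {
    return atomic_compare_exchange_strong(&counter, &expected, desired);
  }
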
MDBX_MAYBE_UNUSED static __always_inline bool MDBX_MAYBE_UNUSED static __always_inline bool atomic_cas32(mdbx_atomic_uint32_t *p, uint32_t c, uint32_t v) {
atomic_cas32(mdbx_atomic_uint32_t *p, uint32_t c, uint32_t v) {
#ifdef MDBX_HAVE_C11ATOMICS #ifdef MDBX_HAVE_C11ATOMICS
STATIC_ASSERT(sizeof(int) >= sizeof(uint32_t)); STATIC_ASSERT(sizeof(int) >= sizeof(uint32_t));
assert(atomic_is_lock_free(MDBX_c11a_rw(uint32_t, p))); assert(atomic_is_lock_free(MDBX_c11a_rw(uint32_t, p)));
@ -222,8 +205,7 @@ atomic_cas32(mdbx_atomic_uint32_t *p, uint32_t c, uint32_t v) {
return __sync_bool_compare_and_swap(&p->weak, c, v); return __sync_bool_compare_and_swap(&p->weak, c, v);
#elif defined(_MSC_VER) #elif defined(_MSC_VER)
STATIC_ASSERT(sizeof(volatile long) == sizeof(volatile uint32_t)); STATIC_ASSERT(sizeof(volatile long) == sizeof(volatile uint32_t));
return c == return c == (uint32_t)_InterlockedCompareExchange((volatile long *)&p->weak, v, c);
(uint32_t)_InterlockedCompareExchange((volatile long *)&p->weak, v, c);
#elif defined(__APPLE__) #elif defined(__APPLE__)
return OSAtomicCompareAndSwap32Barrier(c, v, &p->weak); return OSAtomicCompareAndSwap32Barrier(c, v, &p->weak);
#else #else
@ -231,8 +213,7 @@ atomic_cas32(mdbx_atomic_uint32_t *p, uint32_t c, uint32_t v) {
#endif #endif
} }
MDBX_MAYBE_UNUSED static __always_inline uint32_t MDBX_MAYBE_UNUSED static __always_inline uint32_t atomic_add32(mdbx_atomic_uint32_t *p, uint32_t v) {
atomic_add32(mdbx_atomic_uint32_t *p, uint32_t v) {
#ifdef MDBX_HAVE_C11ATOMICS #ifdef MDBX_HAVE_C11ATOMICS
STATIC_ASSERT(sizeof(int) >= sizeof(uint32_t)); STATIC_ASSERT(sizeof(int) >= sizeof(uint32_t));
assert(atomic_is_lock_free(MDBX_c11a_rw(uint32_t, p))); assert(atomic_is_lock_free(MDBX_c11a_rw(uint32_t, p)));
@ -251,8 +232,7 @@ atomic_add32(mdbx_atomic_uint32_t *p, uint32_t v) {
#define atomic_sub32(p, v) atomic_add32(p, 0 - (v)) #define atomic_sub32(p, v) atomic_add32(p, 0 - (v))
MDBX_MAYBE_UNUSED static __always_inline uint64_t MDBX_MAYBE_UNUSED static __always_inline uint64_t safe64_txnid_next(uint64_t txnid) {
safe64_txnid_next(uint64_t txnid) {
txnid += xMDBX_TXNID_STEP; txnid += xMDBX_TXNID_STEP;
#if !MDBX_64BIT_CAS #if !MDBX_64BIT_CAS
/* avoid overflow of low-part in safe64_reset() */ /* avoid overflow of low-part in safe64_reset() */
@ -262,8 +242,7 @@ safe64_txnid_next(uint64_t txnid) {
} }
/* Atomically make target value >= SAFE64_INVALID_THRESHOLD */ /* Atomically make target value >= SAFE64_INVALID_THRESHOLD */
MDBX_MAYBE_UNUSED static __always_inline void MDBX_MAYBE_UNUSED static __always_inline void safe64_reset(mdbx_atomic_uint64_t *p, bool single_writer) {
safe64_reset(mdbx_atomic_uint64_t *p, bool single_writer) {
if (single_writer) { if (single_writer) {
#if MDBX_64BIT_ATOMIC && MDBX_WORDBITS >= 64 #if MDBX_64BIT_ATOMIC && MDBX_WORDBITS >= 64
atomic_store64(p, UINT64_MAX, mo_AcquireRelease); atomic_store64(p, UINT64_MAX, mo_AcquireRelease);
@ -290,8 +269,7 @@ safe64_reset(mdbx_atomic_uint64_t *p, bool single_writer) {
jitter4testing(true); jitter4testing(true);
} }
MDBX_MAYBE_UNUSED static __always_inline bool MDBX_MAYBE_UNUSED static __always_inline bool safe64_reset_compare(mdbx_atomic_uint64_t *p, uint64_t compare) {
safe64_reset_compare(mdbx_atomic_uint64_t *p, uint64_t compare) {
  /* LY: This function is used to reset `txnid` from the hsr-handler in case
   * of asynchronous cancellation of a read transaction. Therefore,
   * there may be a collision between the cleanup performed here and
@ -307,8 +285,7 @@ safe64_reset_compare(mdbx_atomic_uint64_t *p, uint64_t compare) {
bool rc = false; bool rc = false;
if (likely(atomic_load32(&p->low, mo_AcquireRelease) == (uint32_t)compare && if (likely(atomic_load32(&p->low, mo_AcquireRelease) == (uint32_t)compare &&
atomic_cas32(&p->high, (uint32_t)(compare >> 32), UINT32_MAX))) { atomic_cas32(&p->high, (uint32_t)(compare >> 32), UINT32_MAX))) {
if (unlikely(atomic_load32(&p->low, mo_AcquireRelease) != if (unlikely(atomic_load32(&p->low, mo_AcquireRelease) != (uint32_t)compare))
(uint32_t)compare))
atomic_cas32(&p->high, UINT32_MAX, (uint32_t)(compare >> 32)); atomic_cas32(&p->high, UINT32_MAX, (uint32_t)(compare >> 32));
else else
rc = true; rc = true;
@ -318,8 +295,7 @@ safe64_reset_compare(mdbx_atomic_uint64_t *p, uint64_t compare) {
return rc; return rc;
} }
MDBX_MAYBE_UNUSED static __always_inline void MDBX_MAYBE_UNUSED static __always_inline void safe64_write(mdbx_atomic_uint64_t *p, const uint64_t v) {
safe64_write(mdbx_atomic_uint64_t *p, const uint64_t v) {
assert(p->weak >= SAFE64_INVALID_THRESHOLD); assert(p->weak >= SAFE64_INVALID_THRESHOLD);
#if MDBX_64BIT_ATOMIC && MDBX_64BIT_CAS #if MDBX_64BIT_ATOMIC && MDBX_64BIT_CAS
atomic_store64(p, v, mo_AcquireRelease); atomic_store64(p, v, mo_AcquireRelease);
@ -336,8 +312,7 @@ safe64_write(mdbx_atomic_uint64_t *p, const uint64_t v) {
jitter4testing(true); jitter4testing(true);
} }
MDBX_MAYBE_UNUSED static __always_inline uint64_t MDBX_MAYBE_UNUSED static __always_inline uint64_t safe64_read(const mdbx_atomic_uint64_t *p) {
safe64_read(const mdbx_atomic_uint64_t *p) {
jitter4testing(true); jitter4testing(true);
uint64_t v; uint64_t v;
do do
@ -366,8 +341,7 @@ MDBX_MAYBE_UNUSED static __always_inline bool
#endif /* unused for now */ #endif /* unused for now */
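The safe64_* helpers above keep a 64-bit txnid usable on targets without 64-bit atomics by splitting it into two 32-bit halves and parking the value at or above SAFE64_INVALID_THRESHOLD while it is being changed; readers retry until they observe a stable, valid value. Below is a simplified, self-contained sketch of that scheme, not the library's actual code: names are prefixed sketch_, the threshold value is illustrative, and the real implementation additionally coordinates with CAS-based reset paths.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_INVALID_THRESHOLD UINT64_C(0xffffFFFF00000000)

typedef struct {
  _Atomic uint32_t low;
  _Atomic uint32_t high;
} sketch_safe64;

/* writer: first poison the high half so concurrent readers see an "invalid"
 * value, then publish the new halves low-then-high */
static void sketch_safe64_write(sketch_safe64 *p, uint64_t v) {
  atomic_store_explicit(&p->high, UINT32_MAX, memory_order_release);
  atomic_store_explicit(&p->low, (uint32_t)v, memory_order_release);
  atomic_store_explicit(&p->high, (uint32_t)(v >> 32), memory_order_release);
}

/* reader: retry until the combined value is below the invalid threshold
 * and the high half did not change while the low half was being read */
static uint64_t sketch_safe64_read(sketch_safe64 *p) {
  for (;;) {
    uint32_t hi = atomic_load_explicit(&p->high, memory_order_acquire);
    uint32_t lo = atomic_load_explicit(&p->low, memory_order_acquire);
    uint64_t v = ((uint64_t)hi << 32) | lo;
    if (v < SKETCH_INVALID_THRESHOLD &&
        hi == atomic_load_explicit(&p->high, memory_order_acquire))
      return v;
  }
}

int main(void) {
  sketch_safe64 s = {0, 0};
  sketch_safe64_write(&s, 42);
  printf("%llu\n", (unsigned long long)sketch_safe64_read(&s));
  return 0;
}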
/* non-atomic write with safety for reading a half-updated value */ /* non-atomic write with safety for reading a half-updated value */
MDBX_MAYBE_UNUSED static __always_inline void MDBX_MAYBE_UNUSED static __always_inline void safe64_update(mdbx_atomic_uint64_t *p, const uint64_t v) {
safe64_update(mdbx_atomic_uint64_t *p, const uint64_t v) {
#if MDBX_64BIT_ATOMIC #if MDBX_64BIT_ATOMIC
atomic_store64(p, v, mo_Relaxed); atomic_store64(p, v, mo_Relaxed);
#else #else

@ -16,21 +16,19 @@
#if defined(__cplusplus) && !defined(__STDC_NO_ATOMICS__) && __has_include(<cstdatomic>) #if defined(__cplusplus) && !defined(__STDC_NO_ATOMICS__) && __has_include(<cstdatomic>)
#include <cstdatomic> #include <cstdatomic>
#define MDBX_HAVE_C11ATOMICS #define MDBX_HAVE_C11ATOMICS
#elif !defined(__cplusplus) && \ #elif !defined(__cplusplus) && (__STDC_VERSION__ >= 201112L || __has_extension(c_atomic)) && \
(__STDC_VERSION__ >= 201112L || __has_extension(c_atomic)) && \ !defined(__STDC_NO_ATOMICS__) && \
!defined(__STDC_NO_ATOMICS__) && \ (__GNUC_PREREQ(4, 9) || __CLANG_PREREQ(3, 8) || !(defined(__GNUC__) || defined(__clang__)))
(__GNUC_PREREQ(4, 9) || __CLANG_PREREQ(3, 8) || \
!(defined(__GNUC__) || defined(__clang__)))
#include <stdatomic.h> #include <stdatomic.h>
#define MDBX_HAVE_C11ATOMICS #define MDBX_HAVE_C11ATOMICS
#elif defined(__GNUC__) || defined(__clang__) #elif defined(__GNUC__) || defined(__clang__)
#elif defined(_MSC_VER) #elif defined(_MSC_VER)
#pragma warning(disable : 4163) /* 'xyz': not available as an intrinsic */ #pragma warning(disable : 4163) /* 'xyz': not available as an intrinsic */
#pragma warning(disable : 4133) /* 'function': incompatible types - from \ #pragma warning(disable : 4133) /* 'function': incompatible types - from \
'size_t' to 'LONGLONG' */ 'size_t' to 'LONGLONG' */
#pragma warning(disable : 4244) /* 'return': conversion from 'LONGLONG' to \ #pragma warning(disable : 4244) /* 'return': conversion from 'LONGLONG' to \
'std::size_t', possible loss of data */ 'std::size_t', possible loss of data */
#pragma warning(disable : 4267) /* 'function': conversion from 'size_t' to \ #pragma warning(disable : 4267) /* 'function': conversion from 'size_t' to \
'long', possible loss of data */ 'long', possible loss of data */
#pragma intrinsic(_InterlockedExchangeAdd, _InterlockedCompareExchange) #pragma intrinsic(_InterlockedExchangeAdd, _InterlockedCompareExchange)
#pragma intrinsic(_InterlockedExchangeAdd64, _InterlockedCompareExchange64) #pragma intrinsic(_InterlockedExchangeAdd64, _InterlockedCompareExchange64)
@ -85,13 +83,13 @@ typedef union {
#define MDBX_c11a_rw(type, ptr) (&(ptr)->c11a) #define MDBX_c11a_rw(type, ptr) (&(ptr)->c11a)
#endif /* Crutches for C11 atomic compiler's bugs */ #endif /* Crutches for C11 atomic compiler's bugs */
#define mo_c11_store(fence) \ #define mo_c11_store(fence) \
(((fence) == mo_Relaxed) ? memory_order_relaxed \ (((fence) == mo_Relaxed) ? memory_order_relaxed \
: ((fence) == mo_AcquireRelease) ? memory_order_release \ : ((fence) == mo_AcquireRelease) ? memory_order_release \
: memory_order_seq_cst) : memory_order_seq_cst)
#define mo_c11_load(fence) \ #define mo_c11_load(fence) \
(((fence) == mo_Relaxed) ? memory_order_relaxed \ (((fence) == mo_Relaxed) ? memory_order_relaxed \
: ((fence) == mo_AcquireRelease) ? memory_order_acquire \ : ((fence) == mo_AcquireRelease) ? memory_order_acquire \
: memory_order_seq_cst) : memory_order_seq_cst)
#endif /* MDBX_HAVE_C11ATOMICS */ #endif /* MDBX_HAVE_C11ATOMICS */
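The mo_c11_store()/mo_c11_load() macros above translate the library's fence enum into C11 memory orders asymmetrically: under mo_AcquireRelease a store becomes memory_order_release while a load becomes memory_order_acquire, and anything stronger falls back to seq_cst. A tiny stand-alone illustration of why the pair differs (release on the producer side pairs with acquire on the consumer side); the flag and payload names are invented for the example:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static int payload;        /* plain data published via the flag */
static _Atomic bool ready; /* publication flag */

static void producer(void) {
  payload = 42; /* ordinary store */
  atomic_store_explicit(&ready, true, memory_order_release); /* store side of mo_AcquireRelease */
}

static bool consumer(int *out) {
  if (atomic_load_explicit(&ready, memory_order_acquire)) {  /* load side of mo_AcquireRelease */
    *out = payload; /* guaranteed to observe 42 once the flag is seen */
    return true;
  }
  return false;
}

int main(void) {
  int v = 0;
  producer();
  if (consumer(&v))
    printf("%d\n", v);
  return 0;
}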

@ -8,29 +8,23 @@ struct audit_ctx {
uint8_t *const done_bitmap; uint8_t *const done_bitmap;
}; };
static int audit_dbi(void *ctx, const MDBX_txn *txn, const MDBX_val *name, static int audit_dbi(void *ctx, const MDBX_txn *txn, const MDBX_val *name, MDBX_db_flags_t flags,
MDBX_db_flags_t flags, const struct MDBX_stat *stat, const struct MDBX_stat *stat, MDBX_dbi dbi) {
MDBX_dbi dbi) {
struct audit_ctx *audit_ctx = ctx; struct audit_ctx *audit_ctx = ctx;
(void)name; (void)name;
(void)txn; (void)txn;
(void)flags; (void)flags;
audit_ctx->used += (size_t)stat->ms_branch_pages + audit_ctx->used += (size_t)stat->ms_branch_pages + (size_t)stat->ms_leaf_pages + (size_t)stat->ms_overflow_pages;
(size_t)stat->ms_leaf_pages +
(size_t)stat->ms_overflow_pages;
if (dbi) if (dbi)
audit_ctx->done_bitmap[dbi / CHAR_BIT] |= 1 << dbi % CHAR_BIT; audit_ctx->done_bitmap[dbi / CHAR_BIT] |= 1 << dbi % CHAR_BIT;
return MDBX_SUCCESS; return MDBX_SUCCESS;
} }
static size_t audit_db_used(const tree_t *db) { static size_t audit_db_used(const tree_t *db) {
return db ? (size_t)db->branch_pages + (size_t)db->leaf_pages + return db ? (size_t)db->branch_pages + (size_t)db->leaf_pages + (size_t)db->large_pages : 0;
(size_t)db->large_pages
: 0;
} }
__cold static int audit_ex_locked(MDBX_txn *txn, size_t retired_stored, __cold static int audit_ex_locked(MDBX_txn *txn, size_t retired_stored, bool dont_filter_gc) {
bool dont_filter_gc) {
const MDBX_env *const env = txn->env; const MDBX_env *const env = txn->env;
size_t pending = 0; size_t pending = 0;
if ((txn->flags & MDBX_TXN_RDONLY) == 0) if ((txn->flags & MDBX_TXN_RDONLY) == 0)
@ -48,8 +42,7 @@ __cold static int audit_ex_locked(MDBX_txn *txn, size_t retired_stored,
while (rc == MDBX_SUCCESS) { while (rc == MDBX_SUCCESS) {
if (!dont_filter_gc) { if (!dont_filter_gc) {
if (unlikely(key.iov_len != sizeof(txnid_t))) { if (unlikely(key.iov_len != sizeof(txnid_t))) {
ERROR("%s/%d: %s %u", "MDBX_CORRUPTED", MDBX_CORRUPTED, ERROR("%s/%d: %s %u", "MDBX_CORRUPTED", MDBX_CORRUPTED, "invalid GC-key size", (unsigned)key.iov_len);
"invalid GC-key size", (unsigned)key.iov_len);
return MDBX_CORRUPTED; return MDBX_CORRUPTED;
} }
txnid_t id = unaligned_peek_u64(4, key.iov_base); txnid_t id = unaligned_peek_u64(4, key.iov_base);
@ -68,18 +61,16 @@ __cold static int audit_ex_locked(MDBX_txn *txn, size_t retired_stored,
const size_t done_bitmap_size = (txn->n_dbi + CHAR_BIT - 1) / CHAR_BIT; const size_t done_bitmap_size = (txn->n_dbi + CHAR_BIT - 1) / CHAR_BIT;
if (txn->parent) { if (txn->parent) {
tASSERT(txn, txn->n_dbi == txn->parent->n_dbi && tASSERT(txn, txn->n_dbi == txn->parent->n_dbi && txn->n_dbi == txn->env->txn->n_dbi);
txn->n_dbi == txn->env->txn->n_dbi);
#if MDBX_ENABLE_DBI_SPARSE #if MDBX_ENABLE_DBI_SPARSE
tASSERT(txn, txn->dbi_sparse == txn->parent->dbi_sparse && tASSERT(txn, txn->dbi_sparse == txn->parent->dbi_sparse && txn->dbi_sparse == txn->env->txn->dbi_sparse);
txn->dbi_sparse == txn->env->txn->dbi_sparse);
#endif /* MDBX_ENABLE_DBI_SPARSE */ #endif /* MDBX_ENABLE_DBI_SPARSE */
} }
struct audit_ctx ctx = {0, alloca(done_bitmap_size)}; struct audit_ctx ctx = {0, alloca(done_bitmap_size)};
memset(ctx.done_bitmap, 0, done_bitmap_size); memset(ctx.done_bitmap, 0, done_bitmap_size);
ctx.used = NUM_METAS + audit_db_used(dbi_dig(txn, FREE_DBI, nullptr)) + ctx.used =
audit_db_used(dbi_dig(txn, MAIN_DBI, nullptr)); NUM_METAS + audit_db_used(dbi_dig(txn, FREE_DBI, nullptr)) + audit_db_used(dbi_dig(txn, MAIN_DBI, nullptr));
rc = mdbx_enumerate_tables(txn, audit_dbi, &ctx); rc = mdbx_enumerate_tables(txn, audit_dbi, &ctx);
tASSERT(txn, rc == MDBX_SUCCESS); tASSERT(txn, rc == MDBX_SUCCESS);
@ -91,11 +82,9 @@ __cold static int audit_ex_locked(MDBX_txn *txn, size_t retired_stored,
if (db) if (db)
ctx.used += audit_db_used(db); ctx.used += audit_db_used(db);
else if (dbi_state(txn, dbi)) else if (dbi_state(txn, dbi))
WARNING("audit %s@%" PRIaTXN WARNING("audit %s@%" PRIaTXN ": unable account dbi %zd / \"%*s\", state 0x%02x", txn->parent ? "nested-" : "",
": unable account dbi %zd / \"%*s\", state 0x%02x", txn->txnid, dbi, (int)env->kvs[dbi].name.iov_len, (const char *)env->kvs[dbi].name.iov_base,
txn->parent ? "nested-" : "", txn->txnid, dbi, dbi_state(txn, dbi));
(int)env->kvs[dbi].name.iov_len,
(const char *)env->kvs[dbi].name.iov_base, dbi_state(txn, dbi));
} }
if (pending + gc + ctx.used == txn->geo.first_unallocated) if (pending + gc + ctx.used == txn->geo.first_unallocated)
@ -104,15 +93,12 @@ __cold static int audit_ex_locked(MDBX_txn *txn, size_t retired_stored,
  if ((txn->flags & MDBX_TXN_RDONLY) == 0)
    ERROR("audit @%" PRIaTXN ": %zu(pending) = %zu(loose) + "
          "%zu(reclaimed) + %zu(retired-pending) - %zu(retired-stored)",
          txn->txnid, pending, txn->tw.loose_count, MDBX_PNL_GETSIZE(txn->tw.relist),
          txn->tw.retired_pages ? MDBX_PNL_GETSIZE(txn->tw.retired_pages) : 0, retired_stored);
  ERROR("audit @%" PRIaTXN ": %zu(pending) + %zu"
        "(gc) + %zu(count) = %zu(total) <> %zu"
        "(allocated)",
        txn->txnid, pending, gc, ctx.used, pending + gc + ctx.used, (size_t)txn->geo.first_unallocated);
return MDBX_PROBLEM; return MDBX_PROBLEM;
} }
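audit_dbi() above tallies branch, leaf and overflow pages per table and ticks off each visited dbi handle in a bitmap (done_bitmap[dbi / CHAR_BIT] |= 1 << dbi % CHAR_BIT), so audit_ex_locked() can later tell which handles the enumeration never reached. A stand-alone sketch of that bookkeeping pattern; all names and the table count are invented for the example:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define SKETCH_MAX_DBI 32

static unsigned char done_bitmap[(SKETCH_MAX_DBI + CHAR_BIT - 1) / CHAR_BIT];

static void mark_done(unsigned dbi) {
  done_bitmap[dbi / CHAR_BIT] |= (unsigned char)(1u << (dbi % CHAR_BIT));
}

static bool is_done(unsigned dbi) {
  return (done_bitmap[dbi / CHAR_BIT] >> (dbi % CHAR_BIT)) & 1u;
}

int main(void) {
  memset(done_bitmap, 0, sizeof(done_bitmap));
  mark_done(3);
  mark_done(17);
  for (unsigned dbi = 0; dbi < SKETCH_MAX_DBI; ++dbi)
    if (!is_done(dbi))
      printf("dbi %u was not visited\n", dbi);
  return 0;
}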

src/chk.c: 1007 changed lines (file diff suppressed because it is too large).
@ -74,27 +74,22 @@ __cold bool pv2pages_verify(void) {
/*----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*/
MDBX_NOTHROW_PURE_FUNCTION size_t bytes_align2os_bytes(const MDBX_env *env, MDBX_NOTHROW_PURE_FUNCTION size_t bytes_align2os_bytes(const MDBX_env *env, size_t bytes) {
size_t bytes) { return ceil_powerof2(bytes, (env->ps > globals.sys_pagesize) ? env->ps : globals.sys_pagesize);
return ceil_powerof2(
bytes, (env->ps > globals.sys_pagesize) ? env->ps : globals.sys_pagesize);
} }
MDBX_NOTHROW_PURE_FUNCTION size_t pgno_align2os_bytes(const MDBX_env *env, MDBX_NOTHROW_PURE_FUNCTION size_t pgno_align2os_bytes(const MDBX_env *env, size_t pgno) {
size_t pgno) {
return ceil_powerof2(pgno2bytes(env, pgno), globals.sys_pagesize); return ceil_powerof2(pgno2bytes(env, pgno), globals.sys_pagesize);
} }
MDBX_NOTHROW_PURE_FUNCTION pgno_t pgno_align2os_pgno(const MDBX_env *env, MDBX_NOTHROW_PURE_FUNCTION pgno_t pgno_align2os_pgno(const MDBX_env *env, size_t pgno) {
size_t pgno) {
return bytes2pgno(env, pgno_align2os_bytes(env, pgno)); return bytes2pgno(env, pgno_align2os_bytes(env, pgno));
} }
/*----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*/
MDBX_NOTHROW_PURE_FUNCTION static __always_inline int MDBX_NOTHROW_PURE_FUNCTION static __always_inline int cmp_int_inline(const size_t expected_alignment, const MDBX_val *a,
cmp_int_inline(const size_t expected_alignment, const MDBX_val *a, const MDBX_val *b) {
const MDBX_val *b) {
if (likely(a->iov_len == b->iov_len)) { if (likely(a->iov_len == b->iov_len)) {
if (sizeof(size_t) > 7 && likely(a->iov_len == 8)) if (sizeof(size_t) > 7 && likely(a->iov_len == 8))
return CMP2INT(unaligned_peek_u64(expected_alignment, a->iov_base), return CMP2INT(unaligned_peek_u64(expected_alignment, a->iov_base),
@ -106,35 +101,31 @@ cmp_int_inline(const size_t expected_alignment, const MDBX_val *a,
return CMP2INT(unaligned_peek_u64(expected_alignment, a->iov_base), return CMP2INT(unaligned_peek_u64(expected_alignment, a->iov_base),
unaligned_peek_u64(expected_alignment, b->iov_base)); unaligned_peek_u64(expected_alignment, b->iov_base));
} }
ERROR("mismatch and/or invalid size %p.%zu/%p.%zu for INTEGERKEY/INTEGERDUP", ERROR("mismatch and/or invalid size %p.%zu/%p.%zu for INTEGERKEY/INTEGERDUP", a->iov_base, a->iov_len, b->iov_base,
a->iov_base, a->iov_len, b->iov_base, b->iov_len); b->iov_len);
return 0; return 0;
} }
MDBX_NOTHROW_PURE_FUNCTION __hot int cmp_int_unaligned(const MDBX_val *a, MDBX_NOTHROW_PURE_FUNCTION __hot int cmp_int_unaligned(const MDBX_val *a, const MDBX_val *b) {
const MDBX_val *b) {
return cmp_int_inline(1, a, b); return cmp_int_inline(1, a, b);
} }
#ifndef cmp_int_align2 #ifndef cmp_int_align2
/* Compare two items pointing at 2-byte aligned unsigned int's. */ /* Compare two items pointing at 2-byte aligned unsigned int's. */
MDBX_NOTHROW_PURE_FUNCTION __hot int cmp_int_align2(const MDBX_val *a, MDBX_NOTHROW_PURE_FUNCTION __hot int cmp_int_align2(const MDBX_val *a, const MDBX_val *b) {
const MDBX_val *b) {
return cmp_int_inline(2, a, b); return cmp_int_inline(2, a, b);
} }
#endif /* cmp_int_align2 */ #endif /* cmp_int_align2 */
#ifndef cmp_int_align4 #ifndef cmp_int_align4
/* Compare two items pointing at 4-byte aligned unsigned int's. */ /* Compare two items pointing at 4-byte aligned unsigned int's. */
MDBX_NOTHROW_PURE_FUNCTION __hot int cmp_int_align4(const MDBX_val *a, MDBX_NOTHROW_PURE_FUNCTION __hot int cmp_int_align4(const MDBX_val *a, const MDBX_val *b) {
const MDBX_val *b) {
return cmp_int_inline(4, a, b); return cmp_int_inline(4, a, b);
} }
#endif /* cmp_int_align4 */ #endif /* cmp_int_align4 */
/* Compare two items lexically */ /* Compare two items lexically */
MDBX_NOTHROW_PURE_FUNCTION __hot int cmp_lexical(const MDBX_val *a, MDBX_NOTHROW_PURE_FUNCTION __hot int cmp_lexical(const MDBX_val *a, const MDBX_val *b) {
const MDBX_val *b) {
if (a->iov_len == b->iov_len) if (a->iov_len == b->iov_len)
return a->iov_len ? memcmp(a->iov_base, b->iov_base, a->iov_len) : 0; return a->iov_len ? memcmp(a->iov_base, b->iov_base, a->iov_len) : 0;
@ -144,8 +135,7 @@ MDBX_NOTHROW_PURE_FUNCTION __hot int cmp_lexical(const MDBX_val *a,
return likely(diff_data) ? diff_data : diff_len; return likely(diff_data) ? diff_data : diff_len;
} }
MDBX_NOTHROW_PURE_FUNCTION static __always_inline unsigned MDBX_NOTHROW_PURE_FUNCTION static __always_inline unsigned tail3le(const uint8_t *p, size_t l) {
tail3le(const uint8_t *p, size_t l) {
STATIC_ASSERT(sizeof(unsigned) > 2); STATIC_ASSERT(sizeof(unsigned) > 2);
// 1: 0 0 0 // 1: 0 0 0
// 2: 0 1 1 // 2: 0 1 1
@ -154,8 +144,7 @@ tail3le(const uint8_t *p, size_t l) {
} }
/* Compare two items in reverse byte order */ /* Compare two items in reverse byte order */
MDBX_NOTHROW_PURE_FUNCTION __hot int cmp_reverse(const MDBX_val *a, MDBX_NOTHROW_PURE_FUNCTION __hot int cmp_reverse(const MDBX_val *a, const MDBX_val *b) {
const MDBX_val *b) {
size_t left = (a->iov_len < b->iov_len) ? a->iov_len : b->iov_len; size_t left = (a->iov_len < b->iov_len) ? a->iov_len : b->iov_len;
if (likely(left)) { if (likely(left)) {
const uint8_t *pa = ptr_disp(a->iov_base, a->iov_len); const uint8_t *pa = ptr_disp(a->iov_base, a->iov_len);
@ -209,25 +198,19 @@ MDBX_NOTHROW_PURE_FUNCTION __hot int cmp_reverse(const MDBX_val *a,
} }
/* Fast non-lexically comparator */ /* Fast non-lexically comparator */
MDBX_NOTHROW_PURE_FUNCTION __hot int cmp_lenfast(const MDBX_val *a, MDBX_NOTHROW_PURE_FUNCTION __hot int cmp_lenfast(const MDBX_val *a, const MDBX_val *b) {
const MDBX_val *b) {
int diff = CMP2INT(a->iov_len, b->iov_len); int diff = CMP2INT(a->iov_len, b->iov_len);
return (likely(diff) || a->iov_len == 0) return (likely(diff) || a->iov_len == 0) ? diff : memcmp(a->iov_base, b->iov_base, a->iov_len);
? diff
: memcmp(a->iov_base, b->iov_base, a->iov_len);
} }
MDBX_NOTHROW_PURE_FUNCTION __hot bool MDBX_NOTHROW_PURE_FUNCTION __hot bool eq_fast_slowpath(const uint8_t *a, const uint8_t *b, size_t l) {
eq_fast_slowpath(const uint8_t *a, const uint8_t *b, size_t l) {
if (likely(l > 3)) { if (likely(l > 3)) {
if (MDBX_UNALIGNED_OK >= 4 && likely(l < 9)) if (MDBX_UNALIGNED_OK >= 4 && likely(l < 9))
return ((unaligned_peek_u32(1, a) - unaligned_peek_u32(1, b)) | return ((unaligned_peek_u32(1, a) - unaligned_peek_u32(1, b)) |
(unaligned_peek_u32(1, a + l - 4) - (unaligned_peek_u32(1, a + l - 4) - unaligned_peek_u32(1, b + l - 4))) == 0;
unaligned_peek_u32(1, b + l - 4))) == 0;
if (MDBX_UNALIGNED_OK >= 8 && sizeof(size_t) > 7 && likely(l < 17)) if (MDBX_UNALIGNED_OK >= 8 && sizeof(size_t) > 7 && likely(l < 17))
return ((unaligned_peek_u64(1, a) - unaligned_peek_u64(1, b)) | return ((unaligned_peek_u64(1, a) - unaligned_peek_u64(1, b)) |
(unaligned_peek_u64(1, a + l - 8) - (unaligned_peek_u64(1, a + l - 8) - unaligned_peek_u64(1, b + l - 8))) == 0;
unaligned_peek_u64(1, b + l - 8))) == 0;
return memcmp(a, b, l) == 0; return memcmp(a, b, l) == 0;
} }
if (likely(l)) if (likely(l))
@ -235,31 +218,21 @@ eq_fast_slowpath(const uint8_t *a, const uint8_t *b, size_t l) {
return true; return true;
} }
int cmp_equal_or_greater(const MDBX_val *a, const MDBX_val *b) { int cmp_equal_or_greater(const MDBX_val *a, const MDBX_val *b) { return eq_fast(a, b) ? 0 : 1; }
return eq_fast(a, b) ? 0 : 1;
}
int cmp_equal_or_wrong(const MDBX_val *a, const MDBX_val *b) { int cmp_equal_or_wrong(const MDBX_val *a, const MDBX_val *b) { return eq_fast(a, b) ? 0 : -1; }
return eq_fast(a, b) ? 0 : -1;
}
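eq_fast_slowpath() above compares short keys without a byte loop: for lengths 4..8 it loads the first and the last 4 bytes (the two windows may overlap) and requires both differences to be zero, and it does the same with 8-byte windows for lengths up to 16. Below is a self-contained sketch of the overlapping-window trick using memcpy for the unaligned loads and xor instead of subtraction, which is equivalent for an equality test; the function names are invented, and the real code gates this path on MDBX_UNALIGNED_OK and uses unaligned_peek_*:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t load32(const void *p) { uint32_t v; memcpy(&v, p, 4); return v; }

/* equality of two buffers of identical length l, 4 <= l <= 8 */
static bool eq_4to8(const uint8_t *a, const uint8_t *b, size_t l) {
  /* first 4 bytes and last 4 bytes cover the whole range, possibly overlapping */
  return ((load32(a) ^ load32(b)) | (load32(a + l - 4) ^ load32(b + l - 4))) == 0;
}

int main(void) {
  printf("%d\n", eq_4to8((const uint8_t *)"abcdef", (const uint8_t *)"abcdef", 6)); /* 1 */
  printf("%d\n", eq_4to8((const uint8_t *)"abcdef", (const uint8_t *)"abcdeX", 6)); /* 0 */
  return 0;
}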
/*----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*/
__cold void update_mlcnt(const MDBX_env *env, __cold void update_mlcnt(const MDBX_env *env, const pgno_t new_aligned_mlocked_pgno, const bool lock_not_release) {
const pgno_t new_aligned_mlocked_pgno,
const bool lock_not_release) {
for (;;) { for (;;) {
const pgno_t mlock_pgno_before = const pgno_t mlock_pgno_before = atomic_load32(&env->mlocked_pgno, mo_AcquireRelease);
atomic_load32(&env->mlocked_pgno, mo_AcquireRelease); eASSERT(env, pgno_align2os_pgno(env, mlock_pgno_before) == mlock_pgno_before);
eASSERT(env, eASSERT(env, pgno_align2os_pgno(env, new_aligned_mlocked_pgno) == new_aligned_mlocked_pgno);
pgno_align2os_pgno(env, mlock_pgno_before) == mlock_pgno_before);
eASSERT(env, pgno_align2os_pgno(env, new_aligned_mlocked_pgno) ==
new_aligned_mlocked_pgno);
if (lock_not_release ? (mlock_pgno_before >= new_aligned_mlocked_pgno) if (lock_not_release ? (mlock_pgno_before >= new_aligned_mlocked_pgno)
: (mlock_pgno_before <= new_aligned_mlocked_pgno)) : (mlock_pgno_before <= new_aligned_mlocked_pgno))
break; break;
if (likely(atomic_cas32(&((MDBX_env *)env)->mlocked_pgno, mlock_pgno_before, if (likely(atomic_cas32(&((MDBX_env *)env)->mlocked_pgno, mlock_pgno_before, new_aligned_mlocked_pgno)))
new_aligned_mlocked_pgno)))
for (;;) { for (;;) {
mdbx_atomic_uint32_t *const mlcnt = env->lck->mlcnt; mdbx_atomic_uint32_t *const mlcnt = env->lck->mlcnt;
const int32_t snap_locked = atomic_load32(mlcnt + 0, mo_Relaxed); const int32_t snap_locked = atomic_load32(mlcnt + 0, mo_Relaxed);
@ -269,52 +242,39 @@ __cold void update_mlcnt(const MDBX_env *env,
if (unlikely(!atomic_cas32(mlcnt + 0, snap_locked, snap_locked + 1))) if (unlikely(!atomic_cas32(mlcnt + 0, snap_locked, snap_locked + 1)))
continue; continue;
} }
if (new_aligned_mlocked_pgno == 0 && if (new_aligned_mlocked_pgno == 0 && (snap_locked - snap_unlocked) > 0) {
(snap_locked - snap_unlocked) > 0) {
eASSERT(env, !lock_not_release); eASSERT(env, !lock_not_release);
if (unlikely( if (unlikely(!atomic_cas32(mlcnt + 1, snap_unlocked, snap_unlocked + 1)))
!atomic_cas32(mlcnt + 1, snap_unlocked, snap_unlocked + 1)))
continue; continue;
} }
NOTICE("%s-pages %u..%u, mlocked-process(es) %u -> %u", NOTICE("%s-pages %u..%u, mlocked-process(es) %u -> %u", lock_not_release ? "lock" : "unlock",
lock_not_release ? "lock" : "unlock",
lock_not_release ? mlock_pgno_before : new_aligned_mlocked_pgno, lock_not_release ? mlock_pgno_before : new_aligned_mlocked_pgno,
lock_not_release ? new_aligned_mlocked_pgno : mlock_pgno_before, lock_not_release ? new_aligned_mlocked_pgno : mlock_pgno_before, snap_locked - snap_unlocked,
snap_locked - snap_unlocked, atomic_load32(mlcnt + 0, mo_Relaxed) - atomic_load32(mlcnt + 1, mo_Relaxed));
atomic_load32(mlcnt + 0, mo_Relaxed) -
atomic_load32(mlcnt + 1, mo_Relaxed));
return; return;
} }
} }
} }
__cold void munlock_after(const MDBX_env *env, const pgno_t aligned_pgno, __cold void munlock_after(const MDBX_env *env, const pgno_t aligned_pgno, const size_t end_bytes) {
const size_t end_bytes) {
if (atomic_load32(&env->mlocked_pgno, mo_AcquireRelease) > aligned_pgno) { if (atomic_load32(&env->mlocked_pgno, mo_AcquireRelease) > aligned_pgno) {
int err = MDBX_ENOSYS; int err = MDBX_ENOSYS;
const size_t munlock_begin = pgno2bytes(env, aligned_pgno); const size_t munlock_begin = pgno2bytes(env, aligned_pgno);
const size_t munlock_size = end_bytes - munlock_begin; const size_t munlock_size = end_bytes - munlock_begin;
eASSERT(env, end_bytes % globals.sys_pagesize == 0 && eASSERT(env, end_bytes % globals.sys_pagesize == 0 && munlock_begin % globals.sys_pagesize == 0 &&
munlock_begin % globals.sys_pagesize == 0 &&
munlock_size % globals.sys_pagesize == 0); munlock_size % globals.sys_pagesize == 0);
#if defined(_WIN32) || defined(_WIN64) #if defined(_WIN32) || defined(_WIN64)
err = err = VirtualUnlock(ptr_disp(env->dxb_mmap.base, munlock_begin), munlock_size) ? MDBX_SUCCESS : (int)GetLastError();
VirtualUnlock(ptr_disp(env->dxb_mmap.base, munlock_begin), munlock_size)
? MDBX_SUCCESS
: (int)GetLastError();
if (err == ERROR_NOT_LOCKED) if (err == ERROR_NOT_LOCKED)
err = MDBX_SUCCESS; err = MDBX_SUCCESS;
#elif defined(_POSIX_MEMLOCK_RANGE) #elif defined(_POSIX_MEMLOCK_RANGE)
err = munlock(ptr_disp(env->dxb_mmap.base, munlock_begin), munlock_size) err = munlock(ptr_disp(env->dxb_mmap.base, munlock_begin), munlock_size) ? errno : MDBX_SUCCESS;
? errno
: MDBX_SUCCESS;
#endif #endif
if (likely(err == MDBX_SUCCESS)) if (likely(err == MDBX_SUCCESS))
update_mlcnt(env, aligned_pgno, false); update_mlcnt(env, aligned_pgno, false);
else { else {
#if defined(_WIN32) || defined(_WIN64) #if defined(_WIN32) || defined(_WIN64)
WARNING("VirtualUnlock(%zu, %zu) error %d", munlock_begin, munlock_size, WARNING("VirtualUnlock(%zu, %zu) error %d", munlock_begin, munlock_size, err);
err);
#else #else
WARNING("munlock(%zu, %zu) error %d", munlock_begin, munlock_size, err); WARNING("munlock(%zu, %zu) error %d", munlock_begin, munlock_size, err);
#endif #endif
@ -332,13 +292,11 @@ uint32_t combine_durability_flags(const uint32_t a, const uint32_t b) {
uint32_t r = a | b; uint32_t r = a | b;
/* avoid false MDBX_UTTERLY_NOSYNC */ /* avoid false MDBX_UTTERLY_NOSYNC */
if (F_ISSET(r, MDBX_UTTERLY_NOSYNC) && !F_ISSET(a, MDBX_UTTERLY_NOSYNC) && if (F_ISSET(r, MDBX_UTTERLY_NOSYNC) && !F_ISSET(a, MDBX_UTTERLY_NOSYNC) && !F_ISSET(b, MDBX_UTTERLY_NOSYNC))
!F_ISSET(b, MDBX_UTTERLY_NOSYNC))
r = (r - MDBX_UTTERLY_NOSYNC) | MDBX_SAFE_NOSYNC; r = (r - MDBX_UTTERLY_NOSYNC) | MDBX_SAFE_NOSYNC;
/* convert DEPRECATED_MAPASYNC to MDBX_SAFE_NOSYNC */ /* convert DEPRECATED_MAPASYNC to MDBX_SAFE_NOSYNC */
if ((r & (MDBX_WRITEMAP | DEPRECATED_MAPASYNC)) == if ((r & (MDBX_WRITEMAP | DEPRECATED_MAPASYNC)) == (MDBX_WRITEMAP | DEPRECATED_MAPASYNC) &&
(MDBX_WRITEMAP | DEPRECATED_MAPASYNC) &&
!F_ISSET(r, MDBX_UTTERLY_NOSYNC)) !F_ISSET(r, MDBX_UTTERLY_NOSYNC))
r = (r - DEPRECATED_MAPASYNC) | MDBX_SAFE_NOSYNC; r = (r - DEPRECATED_MAPASYNC) | MDBX_SAFE_NOSYNC;
@ -346,8 +304,6 @@ uint32_t combine_durability_flags(const uint32_t a, const uint32_t b) {
if (r & (MDBX_SAFE_NOSYNC | MDBX_UTTERLY_NOSYNC)) if (r & (MDBX_SAFE_NOSYNC | MDBX_UTTERLY_NOSYNC))
r |= MDBX_NOMETASYNC; r |= MDBX_NOMETASYNC;
assert(!(F_ISSET(r, MDBX_UTTERLY_NOSYNC) && assert(!(F_ISSET(r, MDBX_UTTERLY_NOSYNC) && !F_ISSET(a, MDBX_UTTERLY_NOSYNC) && !F_ISSET(b, MDBX_UTTERLY_NOSYNC)));
!F_ISSET(a, MDBX_UTTERLY_NOSYNC) &&
!F_ISSET(b, MDBX_UTTERLY_NOSYNC)));
return r; return r;
} }
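combine_durability_flags() above merges the sync-related flags of two processes so that the weaker guarantee wins, while making sure a plain OR cannot assemble MDBX_UTTERLY_NOSYNC out of halves that neither side actually set; that is why F_ISSET checks all bits of the flag rather than any of them. A reduced sketch of that pitfall with invented flag values laid out so that the "utterly" pattern is a superset of the "safe" one, as in the check above:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* invented flag values for illustration only */
#define SK_SAFE_NOSYNC    0x01u
#define SK_UTTERLY_NOSYNC (0x02u | SK_SAFE_NOSYNC)

#define F_ISSET(w, f) (((w) & (f)) == (f))

static uint32_t combine(uint32_t a, uint32_t b) {
  uint32_t r = a | b;
  /* a|b can assemble the full UTTERLY pattern from halves; downgrade it back */
  if (F_ISSET(r, SK_UTTERLY_NOSYNC) && !F_ISSET(a, SK_UTTERLY_NOSYNC) && !F_ISSET(b, SK_UTTERLY_NOSYNC))
    r = (r - SK_UTTERLY_NOSYNC) | SK_SAFE_NOSYNC;
  return r;
}

int main(void) {
  /* one side has SAFE, the other has only the non-SAFE half of UTTERLY */
  uint32_t r = combine(SK_SAFE_NOSYNC, 0x02u);
  assert(!F_ISSET(r, SK_UTTERLY_NOSYNC) && F_ISSET(r, SK_SAFE_NOSYNC));
  printf("0x%x\n", r);
  return 0;
}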

@ -51,19 +51,15 @@ MDBX_MAYBE_UNUSED MDBX_INTERNAL bool pv2pages_verify(void);
#define PAGESPACE(pagesize) ((pagesize) - PAGEHDRSZ) #define PAGESPACE(pagesize) ((pagesize) - PAGEHDRSZ)
#define BRANCH_NODE_MAX(pagesize) \ #define BRANCH_NODE_MAX(pagesize) \
(EVEN_FLOOR((PAGESPACE(pagesize) - sizeof(indx_t) - NODESIZE) / (3 - 1) - \ (EVEN_FLOOR((PAGESPACE(pagesize) - sizeof(indx_t) - NODESIZE) / (3 - 1) - sizeof(indx_t)))
sizeof(indx_t)))
#define LEAF_NODE_MAX(pagesize) \ #define LEAF_NODE_MAX(pagesize) (EVEN_FLOOR(PAGESPACE(pagesize) / 2) - sizeof(indx_t))
(EVEN_FLOOR(PAGESPACE(pagesize) / 2) - sizeof(indx_t))
#define MAX_GC1OVPAGE(pagesize) (PAGESPACE(pagesize) / sizeof(pgno_t) - 1) #define MAX_GC1OVPAGE(pagesize) (PAGESPACE(pagesize) / sizeof(pgno_t) - 1)
MDBX_NOTHROW_CONST_FUNCTION static inline size_t MDBX_NOTHROW_CONST_FUNCTION static inline size_t keysize_max(size_t pagesize, MDBX_db_flags_t flags) {
keysize_max(size_t pagesize, MDBX_db_flags_t flags) { assert(pagesize >= MDBX_MIN_PAGESIZE && pagesize <= MDBX_MAX_PAGESIZE && is_powerof2(pagesize));
assert(pagesize >= MDBX_MIN_PAGESIZE && pagesize <= MDBX_MAX_PAGESIZE &&
is_powerof2(pagesize));
STATIC_ASSERT(BRANCH_NODE_MAX(MDBX_MIN_PAGESIZE) - NODESIZE >= 8); STATIC_ASSERT(BRANCH_NODE_MAX(MDBX_MIN_PAGESIZE) - NODESIZE >= 8);
if (flags & MDBX_INTEGERKEY) if (flags & MDBX_INTEGERKEY)
return 8 /* sizeof(uint64_t) */; return 8 /* sizeof(uint64_t) */;
@ -72,18 +68,14 @@ keysize_max(size_t pagesize, MDBX_db_flags_t flags) {
STATIC_ASSERT(LEAF_NODE_MAX(MDBX_MIN_PAGESIZE) - NODESIZE - STATIC_ASSERT(LEAF_NODE_MAX(MDBX_MIN_PAGESIZE) - NODESIZE -
/* sizeof(uint64) as a key */ 8 > /* sizeof(uint64) as a key */ 8 >
sizeof(tree_t)); sizeof(tree_t));
if (flags & if (flags & (MDBX_DUPSORT | MDBX_DUPFIXED | MDBX_REVERSEDUP | MDBX_INTEGERDUP)) {
(MDBX_DUPSORT | MDBX_DUPFIXED | MDBX_REVERSEDUP | MDBX_INTEGERDUP)) { const intptr_t max_dupsort_leaf_key = LEAF_NODE_MAX(pagesize) - NODESIZE - sizeof(tree_t);
const intptr_t max_dupsort_leaf_key = return (max_branch_key < max_dupsort_leaf_key) ? max_branch_key : max_dupsort_leaf_key;
LEAF_NODE_MAX(pagesize) - NODESIZE - sizeof(tree_t);
return (max_branch_key < max_dupsort_leaf_key) ? max_branch_key
: max_dupsort_leaf_key;
} }
return max_branch_key; return max_branch_key;
} }
MDBX_NOTHROW_CONST_FUNCTION static inline size_t MDBX_NOTHROW_CONST_FUNCTION static inline size_t env_keysize_max(const MDBX_env *env, MDBX_db_flags_t flags) {
env_keysize_max(const MDBX_env *env, MDBX_db_flags_t flags) {
size_t size_max; size_t size_max;
if (flags & MDBX_INTEGERKEY) if (flags & MDBX_INTEGERKEY)
size_max = 8 /* sizeof(uint64_t) */; size_max = 8 /* sizeof(uint64_t) */;
@ -92,12 +84,9 @@ env_keysize_max(const MDBX_env *env, MDBX_db_flags_t flags) {
STATIC_ASSERT(LEAF_NODE_MAX(MDBX_MIN_PAGESIZE) - NODESIZE - STATIC_ASSERT(LEAF_NODE_MAX(MDBX_MIN_PAGESIZE) - NODESIZE -
/* sizeof(uint64) as a key */ 8 > /* sizeof(uint64) as a key */ 8 >
sizeof(tree_t)); sizeof(tree_t));
if (flags & if (flags & (MDBX_DUPSORT | MDBX_DUPFIXED | MDBX_REVERSEDUP | MDBX_INTEGERDUP)) {
(MDBX_DUPSORT | MDBX_DUPFIXED | MDBX_REVERSEDUP | MDBX_INTEGERDUP)) { const intptr_t max_dupsort_leaf_key = env->leaf_nodemax - NODESIZE - sizeof(tree_t);
const intptr_t max_dupsort_leaf_key = size_max = (max_branch_key < max_dupsort_leaf_key) ? max_branch_key : max_dupsort_leaf_key;
env->leaf_nodemax - NODESIZE - sizeof(tree_t);
size_max = (max_branch_key < max_dupsort_leaf_key) ? max_branch_key
: max_dupsort_leaf_key;
} else } else
size_max = max_branch_key; size_max = max_branch_key;
} }
@ -105,13 +94,11 @@ env_keysize_max(const MDBX_env *env, MDBX_db_flags_t flags) {
return size_max; return size_max;
} }
MDBX_NOTHROW_CONST_FUNCTION static inline size_t MDBX_NOTHROW_CONST_FUNCTION static inline size_t keysize_min(MDBX_db_flags_t flags) {
keysize_min(MDBX_db_flags_t flags) {
return (flags & MDBX_INTEGERKEY) ? 4 /* sizeof(uint32_t) */ : 0; return (flags & MDBX_INTEGERKEY) ? 4 /* sizeof(uint32_t) */ : 0;
} }
MDBX_NOTHROW_CONST_FUNCTION static inline size_t MDBX_NOTHROW_CONST_FUNCTION static inline size_t valsize_min(MDBX_db_flags_t flags) {
valsize_min(MDBX_db_flags_t flags) {
if (flags & MDBX_INTEGERDUP) if (flags & MDBX_INTEGERDUP)
return 4 /* sizeof(uint32_t) */; return 4 /* sizeof(uint32_t) */;
else if (flags & MDBX_DUPFIXED) else if (flags & MDBX_DUPFIXED)
@ -120,10 +107,8 @@ valsize_min(MDBX_db_flags_t flags) {
return 0; return 0;
} }
MDBX_NOTHROW_CONST_FUNCTION static inline size_t MDBX_NOTHROW_CONST_FUNCTION static inline size_t valsize_max(size_t pagesize, MDBX_db_flags_t flags) {
valsize_max(size_t pagesize, MDBX_db_flags_t flags) { assert(pagesize >= MDBX_MIN_PAGESIZE && pagesize <= MDBX_MAX_PAGESIZE && is_powerof2(pagesize));
assert(pagesize >= MDBX_MIN_PAGESIZE && pagesize <= MDBX_MAX_PAGESIZE &&
is_powerof2(pagesize));
if (flags & MDBX_INTEGERDUP) if (flags & MDBX_INTEGERDUP)
return 8 /* sizeof(uint64_t) */; return 8 /* sizeof(uint64_t) */;
@ -136,13 +121,11 @@ valsize_max(size_t pagesize, MDBX_db_flags_t flags) {
const size_t hard_pages = hard >> page_ln2; const size_t hard_pages = hard >> page_ln2;
STATIC_ASSERT(PAGELIST_LIMIT <= MAX_PAGENO); STATIC_ASSERT(PAGELIST_LIMIT <= MAX_PAGENO);
const size_t pages_limit = PAGELIST_LIMIT / 4; const size_t pages_limit = PAGELIST_LIMIT / 4;
const size_t limit = const size_t limit = (hard_pages < pages_limit) ? hard : (pages_limit << page_ln2);
(hard_pages < pages_limit) ? hard : (pages_limit << page_ln2);
return (limit < MAX_MAPSIZE / 2) ? limit : MAX_MAPSIZE / 2; return (limit < MAX_MAPSIZE / 2) ? limit : MAX_MAPSIZE / 2;
} }
MDBX_NOTHROW_CONST_FUNCTION static inline size_t MDBX_NOTHROW_CONST_FUNCTION static inline size_t env_valsize_max(const MDBX_env *env, MDBX_db_flags_t flags) {
env_valsize_max(const MDBX_env *env, MDBX_db_flags_t flags) {
size_t size_max; size_t size_max;
if (flags & MDBX_INTEGERDUP) if (flags & MDBX_INTEGERDUP)
size_max = 8 /* sizeof(uint64_t) */; size_max = 8 /* sizeof(uint64_t) */;
@ -153,8 +136,7 @@ env_valsize_max(const MDBX_env *env, MDBX_db_flags_t flags) {
const size_t hard_pages = hard >> env->ps2ln; const size_t hard_pages = hard >> env->ps2ln;
STATIC_ASSERT(PAGELIST_LIMIT <= MAX_PAGENO); STATIC_ASSERT(PAGELIST_LIMIT <= MAX_PAGENO);
const size_t pages_limit = PAGELIST_LIMIT / 4; const size_t pages_limit = PAGELIST_LIMIT / 4;
const size_t limit = const size_t limit = (hard_pages < pages_limit) ? hard : (pages_limit << env->ps2ln);
(hard_pages < pages_limit) ? hard : (pages_limit << env->ps2ln);
size_max = (limit < MAX_MAPSIZE / 2) ? limit : MAX_MAPSIZE / 2; size_max = (limit < MAX_MAPSIZE / 2) ? limit : MAX_MAPSIZE / 2;
} }
eASSERT(env, size_max == valsize_max(env->ps, flags)); eASSERT(env, size_max == valsize_max(env->ps, flags));
@ -163,8 +145,8 @@ env_valsize_max(const MDBX_env *env, MDBX_db_flags_t flags) {
/*----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*/
MDBX_NOTHROW_PURE_FUNCTION static inline size_t MDBX_NOTHROW_PURE_FUNCTION static inline size_t leaf_size(const MDBX_env *env, const MDBX_val *key,
leaf_size(const MDBX_env *env, const MDBX_val *key, const MDBX_val *data) { const MDBX_val *data) {
size_t node_bytes = node_size(key, data); size_t node_bytes = node_size(key, data);
if (node_bytes > env->leaf_nodemax) if (node_bytes > env->leaf_nodemax)
/* put on large/overflow page */ /* put on large/overflow page */
@ -173,35 +155,30 @@ leaf_size(const MDBX_env *env, const MDBX_val *key, const MDBX_val *data) {
return node_bytes + sizeof(indx_t); return node_bytes + sizeof(indx_t);
} }
MDBX_NOTHROW_PURE_FUNCTION static inline size_t MDBX_NOTHROW_PURE_FUNCTION static inline size_t branch_size(const MDBX_env *env, const MDBX_val *key) {
branch_size(const MDBX_env *env, const MDBX_val *key) {
/* Size of a node in a branch page with a given key. /* Size of a node in a branch page with a given key.
* This is just the node header plus the key, there is no data. */ * This is just the node header plus the key, there is no data. */
size_t node_bytes = node_size(key, nullptr); size_t node_bytes = node_size(key, nullptr);
if (unlikely(node_bytes > env->branch_nodemax)) { if (unlikely(node_bytes > env->branch_nodemax)) {
/* put on large/overflow page, not implemented */ /* put on large/overflow page, not implemented */
mdbx_panic("node_size(key) %zu > %u branch_nodemax", node_bytes, mdbx_panic("node_size(key) %zu > %u branch_nodemax", node_bytes, env->branch_nodemax);
env->branch_nodemax);
node_bytes = node_size(key, nullptr) + sizeof(pgno_t); node_bytes = node_size(key, nullptr) + sizeof(pgno_t);
} }
return node_bytes + sizeof(indx_t); return node_bytes + sizeof(indx_t);
} }
MDBX_NOTHROW_CONST_FUNCTION static inline uint16_t MDBX_NOTHROW_CONST_FUNCTION static inline uint16_t flags_db2sub(uint16_t db_flags) {
flags_db2sub(uint16_t db_flags) {
uint16_t sub_flags = db_flags & MDBX_DUPFIXED; uint16_t sub_flags = db_flags & MDBX_DUPFIXED;
/* MDBX_INTEGERDUP => MDBX_INTEGERKEY */ /* MDBX_INTEGERDUP => MDBX_INTEGERKEY */
#define SHIFT_INTEGERDUP_TO_INTEGERKEY 2 #define SHIFT_INTEGERDUP_TO_INTEGERKEY 2
STATIC_ASSERT((MDBX_INTEGERDUP >> SHIFT_INTEGERDUP_TO_INTEGERKEY) == STATIC_ASSERT((MDBX_INTEGERDUP >> SHIFT_INTEGERDUP_TO_INTEGERKEY) == MDBX_INTEGERKEY);
MDBX_INTEGERKEY);
sub_flags |= (db_flags & MDBX_INTEGERDUP) >> SHIFT_INTEGERDUP_TO_INTEGERKEY; sub_flags |= (db_flags & MDBX_INTEGERDUP) >> SHIFT_INTEGERDUP_TO_INTEGERKEY;
/* MDBX_REVERSEDUP => MDBX_REVERSEKEY */ /* MDBX_REVERSEDUP => MDBX_REVERSEKEY */
#define SHIFT_REVERSEDUP_TO_REVERSEKEY 5 #define SHIFT_REVERSEDUP_TO_REVERSEKEY 5
STATIC_ASSERT((MDBX_REVERSEDUP >> SHIFT_REVERSEDUP_TO_REVERSEKEY) == STATIC_ASSERT((MDBX_REVERSEDUP >> SHIFT_REVERSEDUP_TO_REVERSEKEY) == MDBX_REVERSEKEY);
MDBX_REVERSEKEY);
sub_flags |= (db_flags & MDBX_REVERSEDUP) >> SHIFT_REVERSEDUP_TO_REVERSEKEY; sub_flags |= (db_flags & MDBX_REVERSEDUP) >> SHIFT_REVERSEDUP_TO_REVERSEKEY;
return sub_flags; return sub_flags;
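flags_db2sub() above derives the flags of the hidden nested (dupsort) tree from the table flags with two masked shifts instead of branches; the STATIC_ASSERTs pin that MDBX_INTEGERDUP >> 2 lands exactly on MDBX_INTEGERKEY and MDBX_REVERSEDUP >> 5 on MDBX_REVERSEKEY. A tiny sketch of that mapping with invented bit values chosen to satisfy the same shift relation:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* invented layout: the *_DUP bit sits exactly SK_SHIFT positions above its *_KEY twin */
#define SK_INTEGERKEY 0x08u
#define SK_INTEGERDUP 0x20u /* == SK_INTEGERKEY << SK_SHIFT */
#define SK_SHIFT 2

static uint16_t dup_flags_to_key_flags(uint16_t db_flags) {
  /* branch-free: select the DUP bit and slide it onto the KEY position */
  return (uint16_t)((db_flags & SK_INTEGERDUP) >> SK_SHIFT);
}

int main(void) {
  assert((SK_INTEGERDUP >> SK_SHIFT) == SK_INTEGERKEY); /* same check as the STATIC_ASSERT */
  printf("0x%x\n", dup_flags_to_key_flags(SK_INTEGERDUP | 0x100u)); /* prints 0x8 */
  return 0;
}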
@ -219,41 +196,33 @@ static inline bool check_table_flags(unsigned flags) {
case MDBX_DUPSORT | MDBX_DUPFIXED | MDBX_INTEGERDUP: case MDBX_DUPSORT | MDBX_DUPFIXED | MDBX_INTEGERDUP:
case MDBX_DUPSORT | MDBX_DUPFIXED | MDBX_INTEGERDUP | MDBX_REVERSEDUP: case MDBX_DUPSORT | MDBX_DUPFIXED | MDBX_INTEGERDUP | MDBX_REVERSEDUP:
case MDBX_DB_DEFAULTS: case MDBX_DB_DEFAULTS:
return (flags & (MDBX_REVERSEKEY | MDBX_INTEGERKEY)) != return (flags & (MDBX_REVERSEKEY | MDBX_INTEGERKEY)) != (MDBX_REVERSEKEY | MDBX_INTEGERKEY);
(MDBX_REVERSEKEY | MDBX_INTEGERKEY);
} }
} }
/*----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*/
MDBX_NOTHROW_PURE_FUNCTION static inline size_t pgno2bytes(const MDBX_env *env, MDBX_NOTHROW_PURE_FUNCTION static inline size_t pgno2bytes(const MDBX_env *env, size_t pgno) {
size_t pgno) {
eASSERT(env, (1u << env->ps2ln) == env->ps); eASSERT(env, (1u << env->ps2ln) == env->ps);
return ((size_t)pgno) << env->ps2ln; return ((size_t)pgno) << env->ps2ln;
} }
MDBX_NOTHROW_PURE_FUNCTION static inline page_t *pgno2page(const MDBX_env *env, MDBX_NOTHROW_PURE_FUNCTION static inline page_t *pgno2page(const MDBX_env *env, size_t pgno) {
size_t pgno) {
return ptr_disp(env->dxb_mmap.base, pgno2bytes(env, pgno)); return ptr_disp(env->dxb_mmap.base, pgno2bytes(env, pgno));
} }
MDBX_NOTHROW_PURE_FUNCTION static inline pgno_t bytes2pgno(const MDBX_env *env, MDBX_NOTHROW_PURE_FUNCTION static inline pgno_t bytes2pgno(const MDBX_env *env, size_t bytes) {
size_t bytes) {
eASSERT(env, (env->ps >> env->ps2ln) == 1); eASSERT(env, (env->ps >> env->ps2ln) == 1);
return (pgno_t)(bytes >> env->ps2ln); return (pgno_t)(bytes >> env->ps2ln);
} }
MDBX_NOTHROW_PURE_FUNCTION MDBX_INTERNAL size_t MDBX_NOTHROW_PURE_FUNCTION MDBX_INTERNAL size_t bytes_align2os_bytes(const MDBX_env *env, size_t bytes);
bytes_align2os_bytes(const MDBX_env *env, size_t bytes);
MDBX_NOTHROW_PURE_FUNCTION MDBX_INTERNAL size_t MDBX_NOTHROW_PURE_FUNCTION MDBX_INTERNAL size_t pgno_align2os_bytes(const MDBX_env *env, size_t pgno);
pgno_align2os_bytes(const MDBX_env *env, size_t pgno);
MDBX_NOTHROW_PURE_FUNCTION MDBX_INTERNAL pgno_t MDBX_NOTHROW_PURE_FUNCTION MDBX_INTERNAL pgno_t pgno_align2os_pgno(const MDBX_env *env, size_t pgno);
pgno_align2os_pgno(const MDBX_env *env, size_t pgno);
MDBX_NOTHROW_PURE_FUNCTION static inline pgno_t MDBX_NOTHROW_PURE_FUNCTION static inline pgno_t largechunk_npages(const MDBX_env *env, size_t bytes) {
largechunk_npages(const MDBX_env *env, size_t bytes) {
return bytes2pgno(env, PAGEHDRSZ - 1 + bytes) + 1; return bytes2pgno(env, PAGEHDRSZ - 1 + bytes) + 1;
} }
@ -264,69 +233,53 @@ MDBX_NOTHROW_PURE_FUNCTION static inline MDBX_val get_key(const node_t *node) {
return key; return key;
} }
static inline void get_key_optional(const node_t *node, static inline void get_key_optional(const node_t *node, MDBX_val *keyptr /* __may_null */) {
MDBX_val *keyptr /* __may_null */) {
if (keyptr) if (keyptr)
*keyptr = get_key(node); *keyptr = get_key(node);
} }
MDBX_NOTHROW_PURE_FUNCTION static inline void *page_data(const page_t *mp) { MDBX_NOTHROW_PURE_FUNCTION static inline void *page_data(const page_t *mp) { return ptr_disp(mp, PAGEHDRSZ); }
return ptr_disp(mp, PAGEHDRSZ);
}
MDBX_NOTHROW_PURE_FUNCTION static inline const page_t * MDBX_NOTHROW_PURE_FUNCTION static inline const page_t *data_page(const void *data) {
data_page(const void *data) {
return container_of(data, page_t, entries); return container_of(data, page_t, entries);
} }
MDBX_NOTHROW_PURE_FUNCTION static inline meta_t *page_meta(page_t *mp) { MDBX_NOTHROW_PURE_FUNCTION static inline meta_t *page_meta(page_t *mp) { return (meta_t *)page_data(mp); }
return (meta_t *)page_data(mp);
}
MDBX_NOTHROW_PURE_FUNCTION static inline size_t page_numkeys(const page_t *mp) { MDBX_NOTHROW_PURE_FUNCTION static inline size_t page_numkeys(const page_t *mp) { return mp->lower >> 1; }
return mp->lower >> 1;
}
MDBX_NOTHROW_PURE_FUNCTION static inline size_t page_room(const page_t *mp) { MDBX_NOTHROW_PURE_FUNCTION static inline size_t page_room(const page_t *mp) { return mp->upper - mp->lower; }
return mp->upper - mp->lower;
}
MDBX_NOTHROW_PURE_FUNCTION static inline size_t MDBX_NOTHROW_PURE_FUNCTION static inline size_t page_space(const MDBX_env *env) {
page_space(const MDBX_env *env) {
STATIC_ASSERT(PAGEHDRSZ % 2 == 0); STATIC_ASSERT(PAGEHDRSZ % 2 == 0);
return env->ps - PAGEHDRSZ; return env->ps - PAGEHDRSZ;
} }
MDBX_NOTHROW_PURE_FUNCTION static inline size_t page_used(const MDBX_env *env, MDBX_NOTHROW_PURE_FUNCTION static inline size_t page_used(const MDBX_env *env, const page_t *mp) {
const page_t *mp) {
return page_space(env) - page_room(mp); return page_space(env) - page_room(mp);
} }
/* The space used in the page, as a percentage multiplied by 10 (tenths of a percent). */
MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline unsigned MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline unsigned page_fill_percentum_x10(const MDBX_env *env,
page_fill_percentum_x10(const MDBX_env *env, const page_t *mp) { const page_t *mp) {
const size_t space = page_space(env); const size_t space = page_space(env);
return (unsigned)((page_used(env, mp) * 1000 + space / 2) / space); return (unsigned)((page_used(env, mp) * 1000 + space / 2) / space);
} }
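page_fill_percentum_x10() above reports page utilization in tenths of a percent: multiplying by 1000 and adding space/2 before the division yields rounding to nearest instead of truncation. A minimal demonstration of just that arithmetic; the numbers are only an example:

#include <stddef.h>
#include <stdio.h>

/* rounded-to-nearest "per-mille" (tenths of a percent) of used/space */
static unsigned fill_x10(size_t used, size_t space) {
  return (unsigned)((used * 1000 + space / 2) / space);
}

int main(void) {
  /* 2730 of 4076 bytes used -> 66.98% -> reported as 670, i.e. 67.0% */
  printf("%u\n", fill_x10(2730, 4076));
  return 0;
}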
MDBX_NOTHROW_PURE_FUNCTION static inline node_t *page_node(const page_t *mp, MDBX_NOTHROW_PURE_FUNCTION static inline node_t *page_node(const page_t *mp, size_t i) {
size_t i) {
assert(page_type_compat(mp) == P_LEAF || page_type(mp) == P_BRANCH); assert(page_type_compat(mp) == P_LEAF || page_type(mp) == P_BRANCH);
assert(page_numkeys(mp) > i); assert(page_numkeys(mp) > i);
assert(mp->entries[i] % 2 == 0); assert(mp->entries[i] % 2 == 0);
return ptr_disp(mp, mp->entries[i] + PAGEHDRSZ); return ptr_disp(mp, mp->entries[i] + PAGEHDRSZ);
} }
MDBX_NOTHROW_PURE_FUNCTION static inline void * MDBX_NOTHROW_PURE_FUNCTION static inline void *page_dupfix_ptr(const page_t *mp, size_t i, size_t keysize) {
page_dupfix_ptr(const page_t *mp, size_t i, size_t keysize) { assert(page_type_compat(mp) == (P_LEAF | P_DUPFIX) && i == (indx_t)i && mp->dupfix_ksize == keysize);
assert(page_type_compat(mp) == (P_LEAF | P_DUPFIX) && i == (indx_t)i &&
mp->dupfix_ksize == keysize);
(void)keysize; (void)keysize;
return ptr_disp(mp, PAGEHDRSZ + mp->dupfix_ksize * (indx_t)i); return ptr_disp(mp, PAGEHDRSZ + mp->dupfix_ksize * (indx_t)i);
} }
MDBX_NOTHROW_PURE_FUNCTION static inline MDBX_val MDBX_NOTHROW_PURE_FUNCTION static inline MDBX_val page_dupfix_key(const page_t *mp, size_t i, size_t keysize) {
page_dupfix_key(const page_t *mp, size_t i, size_t keysize) {
MDBX_val r; MDBX_val r;
r.iov_base = page_dupfix_ptr(mp, i, keysize); r.iov_base = page_dupfix_ptr(mp, i, keysize);
r.iov_len = mp->dupfix_ksize; r.iov_len = mp->dupfix_ksize;
@ -335,11 +288,9 @@ page_dupfix_key(const page_t *mp, size_t i, size_t keysize) {
/*----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*/
MDBX_NOTHROW_PURE_FUNCTION MDBX_INTERNAL int MDBX_NOTHROW_PURE_FUNCTION MDBX_INTERNAL int cmp_int_unaligned(const MDBX_val *a, const MDBX_val *b);
cmp_int_unaligned(const MDBX_val *a, const MDBX_val *b);
#if MDBX_UNALIGNED_OK < 2 || \ #if MDBX_UNALIGNED_OK < 2 || (MDBX_DEBUG || MDBX_FORCE_ASSERTIONS || !defined(NDEBUG))
(MDBX_DEBUG || MDBX_FORCE_ASSERTIONS || !defined(NDEBUG))
MDBX_NOTHROW_PURE_FUNCTION MDBX_INTERNAL int MDBX_NOTHROW_PURE_FUNCTION MDBX_INTERNAL int
/* Compare two items pointing at 2-byte aligned unsigned int's. */ /* Compare two items pointing at 2-byte aligned unsigned int's. */
cmp_int_align2(const MDBX_val *a, const MDBX_val *b); cmp_int_align2(const MDBX_val *a, const MDBX_val *b);
@ -347,8 +298,7 @@ cmp_int_align2(const MDBX_val *a, const MDBX_val *b);
#define cmp_int_align2 cmp_int_unaligned #define cmp_int_align2 cmp_int_unaligned
#endif /* !MDBX_UNALIGNED_OK || debug */ #endif /* !MDBX_UNALIGNED_OK || debug */
#if MDBX_UNALIGNED_OK < 4 || \ #if MDBX_UNALIGNED_OK < 4 || (MDBX_DEBUG || MDBX_FORCE_ASSERTIONS || !defined(NDEBUG))
(MDBX_DEBUG || MDBX_FORCE_ASSERTIONS || !defined(NDEBUG))
MDBX_NOTHROW_PURE_FUNCTION MDBX_INTERNAL int MDBX_NOTHROW_PURE_FUNCTION MDBX_INTERNAL int
/* Compare two items pointing at 4-byte aligned unsigned int's. */ /* Compare two items pointing at 4-byte aligned unsigned int's. */
cmp_int_align4(const MDBX_val *a, const MDBX_val *b); cmp_int_align4(const MDBX_val *a, const MDBX_val *b);
@ -357,50 +307,38 @@ cmp_int_align4(const MDBX_val *a, const MDBX_val *b);
#endif /* !MDBX_UNALIGNED_OK || debug */ #endif /* !MDBX_UNALIGNED_OK || debug */
/* Compare two items lexically */ /* Compare two items lexically */
MDBX_NOTHROW_PURE_FUNCTION MDBX_INTERNAL int cmp_lexical(const MDBX_val *a, MDBX_NOTHROW_PURE_FUNCTION MDBX_INTERNAL int cmp_lexical(const MDBX_val *a, const MDBX_val *b);
const MDBX_val *b);
/* Compare two items in reverse byte order */ /* Compare two items in reverse byte order */
MDBX_NOTHROW_PURE_FUNCTION MDBX_INTERNAL int cmp_reverse(const MDBX_val *a, MDBX_NOTHROW_PURE_FUNCTION MDBX_INTERNAL int cmp_reverse(const MDBX_val *a, const MDBX_val *b);
const MDBX_val *b);
/* Fast non-lexically comparator */ /* Fast non-lexically comparator */
MDBX_NOTHROW_PURE_FUNCTION MDBX_INTERNAL int cmp_lenfast(const MDBX_val *a, MDBX_NOTHROW_PURE_FUNCTION MDBX_INTERNAL int cmp_lenfast(const MDBX_val *a, const MDBX_val *b);
const MDBX_val *b);
MDBX_NOTHROW_PURE_FUNCTION MDBX_INTERNAL bool MDBX_NOTHROW_PURE_FUNCTION MDBX_INTERNAL bool eq_fast_slowpath(const uint8_t *a, const uint8_t *b, size_t l);
eq_fast_slowpath(const uint8_t *a, const uint8_t *b, size_t l);
MDBX_NOTHROW_PURE_FUNCTION static inline bool eq_fast(const MDBX_val *a, MDBX_NOTHROW_PURE_FUNCTION static inline bool eq_fast(const MDBX_val *a, const MDBX_val *b) {
const MDBX_val *b) { return unlikely(a->iov_len == b->iov_len) && eq_fast_slowpath(a->iov_base, b->iov_base, a->iov_len);
return unlikely(a->iov_len == b->iov_len) &&
eq_fast_slowpath(a->iov_base, b->iov_base, a->iov_len);
} }
MDBX_NOTHROW_PURE_FUNCTION MDBX_INTERNAL int MDBX_NOTHROW_PURE_FUNCTION MDBX_INTERNAL int cmp_equal_or_greater(const MDBX_val *a, const MDBX_val *b);
cmp_equal_or_greater(const MDBX_val *a, const MDBX_val *b);
MDBX_NOTHROW_PURE_FUNCTION MDBX_INTERNAL int MDBX_NOTHROW_PURE_FUNCTION MDBX_INTERNAL int cmp_equal_or_wrong(const MDBX_val *a, const MDBX_val *b);
cmp_equal_or_wrong(const MDBX_val *a, const MDBX_val *b);
static inline MDBX_cmp_func *builtin_keycmp(MDBX_db_flags_t flags) { static inline MDBX_cmp_func *builtin_keycmp(MDBX_db_flags_t flags) {
return (flags & MDBX_REVERSEKEY) ? cmp_reverse return (flags & MDBX_REVERSEKEY) ? cmp_reverse : (flags & MDBX_INTEGERKEY) ? cmp_int_align2 : cmp_lexical;
: (flags & MDBX_INTEGERKEY) ? cmp_int_align2
: cmp_lexical;
} }
static inline MDBX_cmp_func *builtin_datacmp(MDBX_db_flags_t flags) { static inline MDBX_cmp_func *builtin_datacmp(MDBX_db_flags_t flags) {
return !(flags & MDBX_DUPSORT) return !(flags & MDBX_DUPSORT)
? cmp_lenfast ? cmp_lenfast
: ((flags & MDBX_INTEGERDUP) : ((flags & MDBX_INTEGERDUP) ? cmp_int_unaligned
? cmp_int_unaligned : ((flags & MDBX_REVERSEDUP) ? cmp_reverse : cmp_lexical));
: ((flags & MDBX_REVERSEDUP) ? cmp_reverse : cmp_lexical));
} }
/*----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*/
MDBX_INTERNAL uint32_t combine_durability_flags(const uint32_t a, MDBX_INTERNAL uint32_t combine_durability_flags(const uint32_t a, const uint32_t b);
const uint32_t b);
MDBX_CONST_FUNCTION static inline lck_t *lckless_stub(const MDBX_env *env) { MDBX_CONST_FUNCTION static inline lck_t *lckless_stub(const MDBX_env *env) {
uintptr_t stub = (uintptr_t)&env->lckless_placeholder; uintptr_t stub = (uintptr_t)&env->lckless_placeholder;
@ -477,12 +415,10 @@ static inline int check_txn(const MDBX_txn *txn, int bad_bits) {
} }
tASSERT(txn, (txn->flags & MDBX_TXN_FINISHED) || tASSERT(txn, (txn->flags & MDBX_TXN_FINISHED) ||
(txn->flags & MDBX_NOSTICKYTHREADS) == (txn->flags & MDBX_NOSTICKYTHREADS) == (txn->env->flags & MDBX_NOSTICKYTHREADS));
(txn->env->flags & MDBX_NOSTICKYTHREADS));
#if MDBX_TXN_CHECKOWNER #if MDBX_TXN_CHECKOWNER
STATIC_ASSERT((long)MDBX_NOSTICKYTHREADS > (long)MDBX_TXN_FINISHED); STATIC_ASSERT((long)MDBX_NOSTICKYTHREADS > (long)MDBX_TXN_FINISHED);
if ((txn->flags & (MDBX_NOSTICKYTHREADS | MDBX_TXN_FINISHED)) < if ((txn->flags & (MDBX_NOSTICKYTHREADS | MDBX_TXN_FINISHED)) < MDBX_TXN_FINISHED &&
MDBX_TXN_FINISHED &&
unlikely(txn->owner != osal_thread_self())) unlikely(txn->owner != osal_thread_self()))
return txn->owner ? MDBX_THREAD_MISMATCH : MDBX_BAD_TXN; return txn->owner ? MDBX_THREAD_MISMATCH : MDBX_BAD_TXN;
#endif /* MDBX_TXN_CHECKOWNER */ #endif /* MDBX_TXN_CHECKOWNER */
@ -508,12 +444,10 @@ static inline int check_txn_rw(const MDBX_txn *txn, int bad_bits) {
MDBX_INTERNAL void mincore_clean_cache(const MDBX_env *const env); MDBX_INTERNAL void mincore_clean_cache(const MDBX_env *const env);
MDBX_INTERNAL void update_mlcnt(const MDBX_env *env, MDBX_INTERNAL void update_mlcnt(const MDBX_env *env, const pgno_t new_aligned_mlocked_pgno,
const pgno_t new_aligned_mlocked_pgno,
const bool lock_not_release); const bool lock_not_release);
MDBX_INTERNAL void munlock_after(const MDBX_env *env, const pgno_t aligned_pgno, MDBX_INTERNAL void munlock_after(const MDBX_env *env, const pgno_t aligned_pgno, const size_t end_bytes);
const size_t end_bytes);
MDBX_INTERNAL void munlock_all(const MDBX_env *env); MDBX_INTERNAL void munlock_all(const MDBX_env *env);
@ -527,15 +461,13 @@ MDBX_INTERNAL void munlock_all(const MDBX_env *env);
#define osal_flush_incoherent_cpu_writeback() osal_compiler_barrier() #define osal_flush_incoherent_cpu_writeback() osal_compiler_barrier()
#endif /* MDBX_CPU_WRITEBACK_INCOHERENT */ #endif /* MDBX_CPU_WRITEBACK_INCOHERENT */
MDBX_MAYBE_UNUSED static inline void MDBX_MAYBE_UNUSED static inline void osal_flush_incoherent_mmap(const void *addr, size_t nbytes,
osal_flush_incoherent_mmap(const void *addr, size_t nbytes, const intptr_t pagesize) {
const intptr_t pagesize) {
#ifndef MDBX_MMAP_INCOHERENT_FILE_WRITE #ifndef MDBX_MMAP_INCOHERENT_FILE_WRITE
#error "The MDBX_MMAP_INCOHERENT_FILE_WRITE must be defined before" #error "The MDBX_MMAP_INCOHERENT_FILE_WRITE must be defined before"
#elif MDBX_MMAP_INCOHERENT_FILE_WRITE #elif MDBX_MMAP_INCOHERENT_FILE_WRITE
char *const begin = (char *)(-pagesize & (intptr_t)addr); char *const begin = (char *)(-pagesize & (intptr_t)addr);
char *const end = char *const end = (char *)(-pagesize & (intptr_t)((char *)addr + nbytes + pagesize - 1));
(char *)(-pagesize & (intptr_t)((char *)addr + nbytes + pagesize - 1));
int err = msync(begin, end - begin, MS_SYNC | MS_INVALIDATE) ? errno : 0; int err = msync(begin, end - begin, MS_SYNC | MS_INVALIDATE) ? errno : 0;
eASSERT(nullptr, err == 0); eASSERT(nullptr, err == 0);
(void)err; (void)err;
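osal_flush_incoherent_mmap() above widens the [addr, addr+nbytes) range to whole system pages before calling msync(): with a power-of-two pagesize, -pagesize is a mask of the high bits, so "-pagesize & addr" rounds down to a page boundary, and the end is rounded up the same way after adding pagesize-1. A stand-alone sketch of only the alignment arithmetic, with no msync involved and invented function names:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* round an address range out to whole pages; pagesize must be a power of two */
static void page_align_range(uintptr_t addr, size_t nbytes, uintptr_t pagesize,
                             uintptr_t *begin, uintptr_t *end) {
  *begin = addr & ~(pagesize - 1); /* same result as (-pagesize & addr) */
  *end = (addr + nbytes + pagesize - 1) & ~(pagesize - 1);
}

int main(void) {
  uintptr_t b, e;
  page_align_range(0x12345, 10, 4096, &b, &e);
  printf("0x%lx .. 0x%lx\n", (unsigned long)b, (unsigned long)e); /* 0x12000 .. 0x13000 */
  return 0;
}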

@ -4,8 +4,7 @@
#include "internals.h" #include "internals.h"
/* check against https://libmdbx.dqdkfa.ru/dead-github/issues/269 */ /* check against https://libmdbx.dqdkfa.ru/dead-github/issues/269 */
static bool coherency_check(const MDBX_env *env, const txnid_t txnid, static bool coherency_check(const MDBX_env *env, const txnid_t txnid, const volatile tree_t *trees,
const volatile tree_t *trees,
const volatile meta_t *meta, bool report) { const volatile meta_t *meta, bool report) {
const txnid_t freedb_mod_txnid = trees[FREE_DBI].mod_txnid; const txnid_t freedb_mod_txnid = trees[FREE_DBI].mod_txnid;
const txnid_t maindb_mod_txnid = trees[MAIN_DBI].mod_txnid; const txnid_t maindb_mod_txnid = trees[MAIN_DBI].mod_txnid;
@ -13,67 +12,42 @@ static bool coherency_check(const MDBX_env *env, const txnid_t txnid,
  const pgno_t freedb_root_pgno = trees[FREE_DBI].root;
  const page_t *freedb_root =
      (env->dxb_mmap.base && freedb_root_pgno < last_pgno) ? pgno2page(env, freedb_root_pgno) : nullptr;
  const pgno_t maindb_root_pgno = trees[MAIN_DBI].root;
  const page_t *maindb_root =
      (env->dxb_mmap.base && maindb_root_pgno < last_pgno) ? pgno2page(env, maindb_root_pgno) : nullptr;
  const uint64_t magic_and_version = unaligned_peek_u64_volatile(4, &meta->magic_and_version);
  bool ok = true;
if (freedb_root_pgno != P_INVALID && if (freedb_root_pgno != P_INVALID && unlikely(freedb_root_pgno >= last_pgno)) {
unlikely(freedb_root_pgno >= last_pgno)) {
if (report) if (report)
WARNING( WARNING("catch invalid %s-db root %" PRIaPGNO " for meta_txnid %" PRIaTXN " %s", "free", freedb_root_pgno, txnid,
"catch invalid %s-db root %" PRIaPGNO " for meta_txnid %" PRIaTXN (env->stuck_meta < 0) ? "(workaround for incoherent flaw of unified page/buffer cache)"
" %s", : "(wagering meta)");
"free", freedb_root_pgno, txnid,
(env->stuck_meta < 0)
? "(workaround for incoherent flaw of unified page/buffer cache)"
: "(wagering meta)");
ok = false; ok = false;
} }
if (maindb_root_pgno != P_INVALID && if (maindb_root_pgno != P_INVALID && unlikely(maindb_root_pgno >= last_pgno)) {
unlikely(maindb_root_pgno >= last_pgno)) {
if (report) if (report)
WARNING( WARNING("catch invalid %s-db root %" PRIaPGNO " for meta_txnid %" PRIaTXN " %s", "main", maindb_root_pgno, txnid,
"catch invalid %s-db root %" PRIaPGNO " for meta_txnid %" PRIaTXN (env->stuck_meta < 0) ? "(workaround for incoherent flaw of unified page/buffer cache)"
" %s", : "(wagering meta)");
"main", maindb_root_pgno, txnid,
(env->stuck_meta < 0)
? "(workaround for incoherent flaw of unified page/buffer cache)"
: "(wagering meta)");
ok = false; ok = false;
} }
if (unlikely(txnid < freedb_mod_txnid || if (unlikely(txnid < freedb_mod_txnid ||
(!freedb_mod_txnid && freedb_root && (!freedb_mod_txnid && freedb_root && likely(magic_and_version == MDBX_DATA_MAGIC)))) {
likely(magic_and_version == MDBX_DATA_MAGIC)))) {
if (report) if (report)
WARNING( WARNING(
"catch invalid %s-db.mod_txnid %" PRIaTXN " for meta_txnid %" PRIaTXN "catch invalid %s-db.mod_txnid %" PRIaTXN " for meta_txnid %" PRIaTXN " %s", "free", freedb_mod_txnid, txnid,
" %s", (env->stuck_meta < 0) ? "(workaround for incoherent flaw of unified page/buffer cache)" : "(wagering meta)");
"free", freedb_mod_txnid, txnid,
(env->stuck_meta < 0)
? "(workaround for incoherent flaw of unified page/buffer cache)"
: "(wagering meta)");
ok = false; ok = false;
} }
if (unlikely(txnid < maindb_mod_txnid || if (unlikely(txnid < maindb_mod_txnid ||
(!maindb_mod_txnid && maindb_root && (!maindb_mod_txnid && maindb_root && likely(magic_and_version == MDBX_DATA_MAGIC)))) {
likely(magic_and_version == MDBX_DATA_MAGIC)))) {
if (report) if (report)
WARNING( WARNING(
"catch invalid %s-db.mod_txnid %" PRIaTXN " for meta_txnid %" PRIaTXN "catch invalid %s-db.mod_txnid %" PRIaTXN " for meta_txnid %" PRIaTXN " %s", "main", maindb_mod_txnid, txnid,
" %s", (env->stuck_meta < 0) ? "(workaround for incoherent flaw of unified page/buffer cache)" : "(wagering meta)");
"main", maindb_mod_txnid, txnid,
(env->stuck_meta < 0)
? "(workaround for incoherent flaw of unified page/buffer cache)"
: "(wagering meta)");
ok = false; ok = false;
} }
@ -81,15 +55,13 @@ static bool coherency_check(const MDBX_env *env, const txnid_t txnid,
* within the current mapping. Otherwise SIGSEGV is possible until the coherency_check_head()
* call is moved after dxb_resize() inside txn_renew(). */
if (likely(freedb_root && freedb_mod_txnid && if (likely(freedb_root && freedb_mod_txnid &&
(size_t)ptr_dist(env->dxb_mmap.base, freedb_root) < (size_t)ptr_dist(env->dxb_mmap.base, freedb_root) < env->dxb_mmap.limit)) {
env->dxb_mmap.limit)) {
VALGRIND_MAKE_MEM_DEFINED(freedb_root, sizeof(freedb_root->txnid)); VALGRIND_MAKE_MEM_DEFINED(freedb_root, sizeof(freedb_root->txnid));
MDBX_ASAN_UNPOISON_MEMORY_REGION(freedb_root, sizeof(freedb_root->txnid)); MDBX_ASAN_UNPOISON_MEMORY_REGION(freedb_root, sizeof(freedb_root->txnid));
const txnid_t root_txnid = freedb_root->txnid; const txnid_t root_txnid = freedb_root->txnid;
if (unlikely(root_txnid != freedb_mod_txnid)) { if (unlikely(root_txnid != freedb_mod_txnid)) {
if (report) if (report)
WARNING("catch invalid root_page %" PRIaPGNO " mod_txnid %" PRIaTXN WARNING("catch invalid root_page %" PRIaPGNO " mod_txnid %" PRIaTXN " for %s-db.mod_txnid %" PRIaTXN " %s",
" for %s-db.mod_txnid %" PRIaTXN " %s",
freedb_root_pgno, root_txnid, "free", freedb_mod_txnid, freedb_root_pgno, root_txnid, "free", freedb_mod_txnid,
(env->stuck_meta < 0) ? "(workaround for incoherent flaw of " (env->stuck_meta < 0) ? "(workaround for incoherent flaw of "
"unified page/buffer cache)" "unified page/buffer cache)"
@ -98,15 +70,13 @@ static bool coherency_check(const MDBX_env *env, const txnid_t txnid,
} }
} }
if (likely(maindb_root && maindb_mod_txnid && if (likely(maindb_root && maindb_mod_txnid &&
(size_t)ptr_dist(env->dxb_mmap.base, maindb_root) < (size_t)ptr_dist(env->dxb_mmap.base, maindb_root) < env->dxb_mmap.limit)) {
env->dxb_mmap.limit)) {
VALGRIND_MAKE_MEM_DEFINED(maindb_root, sizeof(maindb_root->txnid)); VALGRIND_MAKE_MEM_DEFINED(maindb_root, sizeof(maindb_root->txnid));
MDBX_ASAN_UNPOISON_MEMORY_REGION(maindb_root, sizeof(maindb_root->txnid)); MDBX_ASAN_UNPOISON_MEMORY_REGION(maindb_root, sizeof(maindb_root->txnid));
const txnid_t root_txnid = maindb_root->txnid; const txnid_t root_txnid = maindb_root->txnid;
if (unlikely(root_txnid != maindb_mod_txnid)) { if (unlikely(root_txnid != maindb_mod_txnid)) {
if (report) if (report)
WARNING("catch invalid root_page %" PRIaPGNO " mod_txnid %" PRIaTXN WARNING("catch invalid root_page %" PRIaPGNO " mod_txnid %" PRIaTXN " for %s-db.mod_txnid %" PRIaTXN " %s",
" for %s-db.mod_txnid %" PRIaTXN " %s",
maindb_root_pgno, root_txnid, "main", maindb_mod_txnid, maindb_root_pgno, root_txnid, "main", maindb_mod_txnid,
(env->stuck_meta < 0) ? "(workaround for incoherent flaw of " (env->stuck_meta < 0) ? "(workaround for incoherent flaw of "
"unified page/buffer cache)" "unified page/buffer cache)"
@ -116,24 +86,19 @@ static bool coherency_check(const MDBX_env *env, const txnid_t txnid,
} }
if (unlikely(!ok) && report) if (unlikely(!ok) && report)
env->lck->pgops.incoherence.weak = env->lck->pgops.incoherence.weak =
(env->lck->pgops.incoherence.weak >= INT32_MAX) (env->lck->pgops.incoherence.weak >= INT32_MAX) ? INT32_MAX : env->lck->pgops.incoherence.weak + 1;
? INT32_MAX
: env->lck->pgops.incoherence.weak + 1;
return ok; return ok;
} }
__cold int coherency_timeout(uint64_t *timestamp, intptr_t pgno, __cold int coherency_timeout(uint64_t *timestamp, intptr_t pgno, const MDBX_env *env) {
const MDBX_env *env) {
if (likely(timestamp && *timestamp == 0)) if (likely(timestamp && *timestamp == 0))
*timestamp = osal_monotime(); *timestamp = osal_monotime();
else if (unlikely(!timestamp || osal_monotime() - *timestamp > else if (unlikely(!timestamp || osal_monotime() - *timestamp > osal_16dot16_to_monotime(65536 / 10))) {
osal_16dot16_to_monotime(65536 / 10))) {
if (pgno >= 0 && pgno != env->stuck_meta) if (pgno >= 0 && pgno != env->stuck_meta)
ERROR("bailout waiting for %" PRIuSIZE " page arrival %s", pgno, ERROR("bailout waiting for %" PRIuSIZE " page arrival %s", pgno,
"(workaround for incoherent flaw of unified page/buffer cache)"); "(workaround for incoherent flaw of unified page/buffer cache)");
else if (env->stuck_meta < 0) else if (env->stuck_meta < 0)
ERROR("bailout waiting for valid snapshot (%s)", ERROR("bailout waiting for valid snapshot (%s)", "workaround for incoherent flaw of unified page/buffer cache");
"workaround for incoherent flaw of unified page/buffer cache");
return MDBX_PROBLEM; return MDBX_PROBLEM;
} }
@ -152,28 +117,23 @@ __cold int coherency_timeout(uint64_t *timestamp, intptr_t pgno,
/* check with timeout as the workaround /* check with timeout as the workaround
* for https://libmdbx.dqdkfa.ru/dead-github/issues/269 */ * for https://libmdbx.dqdkfa.ru/dead-github/issues/269 */
__hot int coherency_fetch_head(MDBX_txn *txn, const meta_ptr_t head, __hot int coherency_fetch_head(MDBX_txn *txn, const meta_ptr_t head, uint64_t *timestamp) {
uint64_t *timestamp) {
/* Copy the DB info and flags */ /* Copy the DB info and flags */
txn->txnid = head.txnid; txn->txnid = head.txnid;
txn->geo = head.ptr_c->geometry; txn->geo = head.ptr_c->geometry;
memcpy(txn->dbs, &head.ptr_c->trees, sizeof(head.ptr_c->trees)); memcpy(txn->dbs, &head.ptr_c->trees, sizeof(head.ptr_c->trees));
STATIC_ASSERT(sizeof(head.ptr_c->trees) == CORE_DBS * sizeof(tree_t)); STATIC_ASSERT(sizeof(head.ptr_c->trees) == CORE_DBS * sizeof(tree_t));
VALGRIND_MAKE_MEM_UNDEFINED(txn->dbs + CORE_DBS, VALGRIND_MAKE_MEM_UNDEFINED(txn->dbs + CORE_DBS, txn->env->max_dbi - CORE_DBS);
txn->env->max_dbi - CORE_DBS);
txn->canary = head.ptr_c->canary; txn->canary = head.ptr_c->canary;
if (unlikely(!coherency_check(txn->env, head.txnid, txn->dbs, head.ptr_v, if (unlikely(!coherency_check(txn->env, head.txnid, txn->dbs, head.ptr_v, *timestamp == 0) ||
*timestamp == 0) ||
txn->txnid != meta_txnid(head.ptr_v))) txn->txnid != meta_txnid(head.ptr_v)))
return coherency_timeout(timestamp, -1, txn->env); return coherency_timeout(timestamp, -1, txn->env);
if (unlikely(txn->dbs[FREE_DBI].flags != MDBX_INTEGERKEY)) { if (unlikely(txn->dbs[FREE_DBI].flags != MDBX_INTEGERKEY)) {
if ((txn->dbs[FREE_DBI].flags & DB_PERSISTENT_FLAGS) != MDBX_INTEGERKEY || if ((txn->dbs[FREE_DBI].flags & DB_PERSISTENT_FLAGS) != MDBX_INTEGERKEY ||
unaligned_peek_u64(4, &head.ptr_c->magic_and_version) == unaligned_peek_u64(4, &head.ptr_c->magic_and_version) == MDBX_DATA_MAGIC) {
MDBX_DATA_MAGIC) { ERROR("unexpected/invalid db-flags 0x%x for %s", txn->dbs[FREE_DBI].flags, "GC/FreeDB");
ERROR("unexpected/invalid db-flags 0x%x for %s", txn->dbs[FREE_DBI].flags,
"GC/FreeDB");
return MDBX_INCOMPATIBLE; return MDBX_INCOMPATIBLE;
} }
txn->dbs[FREE_DBI].flags &= DB_PERSISTENT_FLAGS; txn->dbs[FREE_DBI].flags &= DB_PERSISTENT_FLAGS;
@ -183,23 +143,19 @@ __hot int coherency_fetch_head(MDBX_txn *txn, const meta_ptr_t head,
return MDBX_SUCCESS; return MDBX_SUCCESS;
} }
int coherency_check_written(const MDBX_env *env, const txnid_t txnid, int coherency_check_written(const MDBX_env *env, const txnid_t txnid, const volatile meta_t *meta, const intptr_t pgno,
const volatile meta_t *meta, const intptr_t pgno,
uint64_t *timestamp) { uint64_t *timestamp) {
const bool report = !(timestamp && *timestamp); const bool report = !(timestamp && *timestamp);
const txnid_t head_txnid = meta_txnid(meta); const txnid_t head_txnid = meta_txnid(meta);
if (likely(head_txnid >= MIN_TXNID && head_txnid >= txnid)) { if (likely(head_txnid >= MIN_TXNID && head_txnid >= txnid)) {
if (likely( if (likely(coherency_check(env, head_txnid, &meta->trees.gc, meta, report))) {
coherency_check(env, head_txnid, &meta->trees.gc, meta, report))) {
eASSERT(env, meta->trees.gc.flags == MDBX_INTEGERKEY); eASSERT(env, meta->trees.gc.flags == MDBX_INTEGERKEY);
eASSERT(env, check_table_flags(meta->trees.main.flags)); eASSERT(env, check_table_flags(meta->trees.main.flags));
return MDBX_SUCCESS; return MDBX_SUCCESS;
} }
} else if (report) { } else if (report) {
env->lck->pgops.incoherence.weak = env->lck->pgops.incoherence.weak =
(env->lck->pgops.incoherence.weak >= INT32_MAX) (env->lck->pgops.incoherence.weak >= INT32_MAX) ? INT32_MAX : env->lck->pgops.incoherence.weak + 1;
? INT32_MAX
: env->lck->pgops.incoherence.weak + 1;
WARNING("catch %s txnid %" PRIaTXN " for meta_%" PRIaPGNO " %s", WARNING("catch %s txnid %" PRIaTXN " for meta_%" PRIaPGNO " %s",
(head_txnid < MIN_TXNID) ? "invalid" : "unexpected", head_txnid, (head_txnid < MIN_TXNID) ? "invalid" : "unexpected", head_txnid,
bytes2pgno(env, ptr_dist(meta, env->dxb_mmap.base)), bytes2pgno(env, ptr_dist(meta, env->dxb_mmap.base)),
@ -208,9 +164,7 @@ int coherency_check_written(const MDBX_env *env, const txnid_t txnid,
return coherency_timeout(timestamp, pgno, env); return coherency_timeout(timestamp, pgno, env);
} }
bool coherency_check_meta(const MDBX_env *env, const volatile meta_t *meta, bool coherency_check_meta(const MDBX_env *env, const volatile meta_t *meta, bool report) {
bool report) {
uint64_t timestamp = 0; uint64_t timestamp = 0;
return coherency_check_written(env, 0, meta, -1, return coherency_check_written(env, 0, meta, -1, report ? &timestamp : nullptr) == MDBX_SUCCESS;
report ? &timestamp : nullptr) == MDBX_SUCCESS;
} }
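
The coherency_timeout() helper above implements a bounded retry: the first call stamps a monotonic start time, and later calls bail out with MDBX_PROBLEM once roughly 0.1 second (osal_16dot16_to_monotime(65536 / 10)) has elapsed. Below is a stripped-down sketch of the same pattern; it is illustrative only, not part of this commit, and uses POSIX clock_gettime() instead of libmdbx's osal layer.

/* Illustrative sketch (not libmdbx code): bounded retry against a monotonic deadline. */
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static uint64_t monotime_ns(void) {
  struct timespec ts;
  clock_gettime(CLOCK_MONOTONIC, &ts);
  return (uint64_t)ts.tv_sec * UINT64_C(1000000000) + (uint64_t)ts.tv_nsec;
}

/* First call (with *start == 0) stamps the start and allows a retry;
 * subsequent calls allow retries only until ~0.1 s has elapsed. */
static bool retry_within_budget(uint64_t *start) {
  if (*start == 0) {
    *start = monotime_ns();
    return true;
  }
  return monotime_ns() - *start <= UINT64_C(100000000); /* ~0.1 second */
}
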


@ -14,8 +14,7 @@ __cold size_t mdbx_default_pagesize(void) {
__cold intptr_t mdbx_limits_dbsize_min(intptr_t pagesize) { __cold intptr_t mdbx_limits_dbsize_min(intptr_t pagesize) {
if (pagesize < 1) if (pagesize < 1)
pagesize = (intptr_t)mdbx_default_pagesize(); pagesize = (intptr_t)mdbx_default_pagesize();
else if (unlikely(pagesize < (intptr_t)MDBX_MIN_PAGESIZE || else if (unlikely(pagesize < (intptr_t)MDBX_MIN_PAGESIZE || pagesize > (intptr_t)MDBX_MAX_PAGESIZE ||
pagesize > (intptr_t)MDBX_MAX_PAGESIZE ||
!is_powerof2((size_t)pagesize))) !is_powerof2((size_t)pagesize)))
return -1; return -1;
@ -25,8 +24,7 @@ __cold intptr_t mdbx_limits_dbsize_min(intptr_t pagesize) {
__cold intptr_t mdbx_limits_dbsize_max(intptr_t pagesize) { __cold intptr_t mdbx_limits_dbsize_max(intptr_t pagesize) {
if (pagesize < 1) if (pagesize < 1)
pagesize = (intptr_t)mdbx_default_pagesize(); pagesize = (intptr_t)mdbx_default_pagesize();
else if (unlikely(pagesize < (intptr_t)MDBX_MIN_PAGESIZE || else if (unlikely(pagesize < (intptr_t)MDBX_MIN_PAGESIZE || pagesize > (intptr_t)MDBX_MAX_PAGESIZE ||
pagesize > (intptr_t)MDBX_MAX_PAGESIZE ||
!is_powerof2((size_t)pagesize))) !is_powerof2((size_t)pagesize)))
return -1; return -1;
@ -38,112 +36,90 @@ __cold intptr_t mdbx_limits_dbsize_max(intptr_t pagesize) {
__cold intptr_t mdbx_limits_txnsize_max(intptr_t pagesize) { __cold intptr_t mdbx_limits_txnsize_max(intptr_t pagesize) {
if (pagesize < 1) if (pagesize < 1)
pagesize = (intptr_t)mdbx_default_pagesize(); pagesize = (intptr_t)mdbx_default_pagesize();
else if (unlikely(pagesize < (intptr_t)MDBX_MIN_PAGESIZE || else if (unlikely(pagesize < (intptr_t)MDBX_MIN_PAGESIZE || pagesize > (intptr_t)MDBX_MAX_PAGESIZE ||
pagesize > (intptr_t)MDBX_MAX_PAGESIZE ||
!is_powerof2((size_t)pagesize))) !is_powerof2((size_t)pagesize)))
return -1; return -1;
STATIC_ASSERT(MAX_MAPSIZE < INTPTR_MAX); STATIC_ASSERT(MAX_MAPSIZE < INTPTR_MAX);
const uint64_t pgl_limit = const uint64_t pgl_limit = pagesize * (uint64_t)(PAGELIST_LIMIT / MDBX_GOLD_RATIO_DBL);
pagesize * (uint64_t)(PAGELIST_LIMIT / MDBX_GOLD_RATIO_DBL);
const uint64_t map_limit = (uint64_t)(MAX_MAPSIZE / MDBX_GOLD_RATIO_DBL); const uint64_t map_limit = (uint64_t)(MAX_MAPSIZE / MDBX_GOLD_RATIO_DBL);
return (pgl_limit < map_limit) ? (intptr_t)pgl_limit : (intptr_t)map_limit; return (pgl_limit < map_limit) ? (intptr_t)pgl_limit : (intptr_t)map_limit;
} }
__cold intptr_t mdbx_limits_keysize_max(intptr_t pagesize, __cold intptr_t mdbx_limits_keysize_max(intptr_t pagesize, MDBX_db_flags_t flags) {
MDBX_db_flags_t flags) {
if (pagesize < 1) if (pagesize < 1)
pagesize = (intptr_t)mdbx_default_pagesize(); pagesize = (intptr_t)mdbx_default_pagesize();
if (unlikely(pagesize < (intptr_t)MDBX_MIN_PAGESIZE || if (unlikely(pagesize < (intptr_t)MDBX_MIN_PAGESIZE || pagesize > (intptr_t)MDBX_MAX_PAGESIZE ||
pagesize > (intptr_t)MDBX_MAX_PAGESIZE ||
!is_powerof2((size_t)pagesize))) !is_powerof2((size_t)pagesize)))
return -1; return -1;
return keysize_max(pagesize, flags); return keysize_max(pagesize, flags);
} }
__cold int mdbx_env_get_maxkeysize_ex(const MDBX_env *env, __cold int mdbx_env_get_maxkeysize_ex(const MDBX_env *env, MDBX_db_flags_t flags) {
MDBX_db_flags_t flags) {
if (unlikely(!env || env->signature.weak != env_signature)) if (unlikely(!env || env->signature.weak != env_signature))
return -1; return -1;
return (int)mdbx_limits_keysize_max((intptr_t)env->ps, flags); return (int)mdbx_limits_keysize_max((intptr_t)env->ps, flags);
} }
__cold int mdbx_env_get_maxkeysize(const MDBX_env *env) { __cold int mdbx_env_get_maxkeysize(const MDBX_env *env) { return mdbx_env_get_maxkeysize_ex(env, MDBX_DUPSORT); }
return mdbx_env_get_maxkeysize_ex(env, MDBX_DUPSORT);
}
__cold intptr_t mdbx_limits_keysize_min(MDBX_db_flags_t flags) { __cold intptr_t mdbx_limits_keysize_min(MDBX_db_flags_t flags) { return keysize_min(flags); }
return keysize_min(flags);
}
__cold intptr_t mdbx_limits_valsize_max(intptr_t pagesize, __cold intptr_t mdbx_limits_valsize_max(intptr_t pagesize, MDBX_db_flags_t flags) {
MDBX_db_flags_t flags) {
if (pagesize < 1) if (pagesize < 1)
pagesize = (intptr_t)mdbx_default_pagesize(); pagesize = (intptr_t)mdbx_default_pagesize();
if (unlikely(pagesize < (intptr_t)MDBX_MIN_PAGESIZE || if (unlikely(pagesize < (intptr_t)MDBX_MIN_PAGESIZE || pagesize > (intptr_t)MDBX_MAX_PAGESIZE ||
pagesize > (intptr_t)MDBX_MAX_PAGESIZE ||
!is_powerof2((size_t)pagesize))) !is_powerof2((size_t)pagesize)))
return -1; return -1;
return valsize_max(pagesize, flags); return valsize_max(pagesize, flags);
} }
__cold int mdbx_env_get_maxvalsize_ex(const MDBX_env *env, __cold int mdbx_env_get_maxvalsize_ex(const MDBX_env *env, MDBX_db_flags_t flags) {
MDBX_db_flags_t flags) {
if (unlikely(!env || env->signature.weak != env_signature)) if (unlikely(!env || env->signature.weak != env_signature))
return -1; return -1;
return (int)mdbx_limits_valsize_max((intptr_t)env->ps, flags); return (int)mdbx_limits_valsize_max((intptr_t)env->ps, flags);
} }
__cold intptr_t mdbx_limits_valsize_min(MDBX_db_flags_t flags) { __cold intptr_t mdbx_limits_valsize_min(MDBX_db_flags_t flags) { return valsize_min(flags); }
return valsize_min(flags);
}
__cold intptr_t mdbx_limits_pairsize4page_max(intptr_t pagesize, __cold intptr_t mdbx_limits_pairsize4page_max(intptr_t pagesize, MDBX_db_flags_t flags) {
MDBX_db_flags_t flags) {
if (pagesize < 1) if (pagesize < 1)
pagesize = (intptr_t)mdbx_default_pagesize(); pagesize = (intptr_t)mdbx_default_pagesize();
if (unlikely(pagesize < (intptr_t)MDBX_MIN_PAGESIZE || if (unlikely(pagesize < (intptr_t)MDBX_MIN_PAGESIZE || pagesize > (intptr_t)MDBX_MAX_PAGESIZE ||
pagesize > (intptr_t)MDBX_MAX_PAGESIZE ||
!is_powerof2((size_t)pagesize))) !is_powerof2((size_t)pagesize)))
return -1; return -1;
if (flags & if (flags & (MDBX_DUPSORT | MDBX_DUPFIXED | MDBX_INTEGERDUP | MDBX_REVERSEDUP))
(MDBX_DUPSORT | MDBX_DUPFIXED | MDBX_INTEGERDUP | MDBX_REVERSEDUP))
return BRANCH_NODE_MAX(pagesize) - NODESIZE; return BRANCH_NODE_MAX(pagesize) - NODESIZE;
return LEAF_NODE_MAX(pagesize) - NODESIZE; return LEAF_NODE_MAX(pagesize) - NODESIZE;
} }
__cold int mdbx_env_get_pairsize4page_max(const MDBX_env *env, __cold int mdbx_env_get_pairsize4page_max(const MDBX_env *env, MDBX_db_flags_t flags) {
MDBX_db_flags_t flags) {
if (unlikely(!env || env->signature.weak != env_signature)) if (unlikely(!env || env->signature.weak != env_signature))
return -1; return -1;
return (int)mdbx_limits_pairsize4page_max((intptr_t)env->ps, flags); return (int)mdbx_limits_pairsize4page_max((intptr_t)env->ps, flags);
} }
__cold intptr_t mdbx_limits_valsize4page_max(intptr_t pagesize, __cold intptr_t mdbx_limits_valsize4page_max(intptr_t pagesize, MDBX_db_flags_t flags) {
MDBX_db_flags_t flags) {
if (pagesize < 1) if (pagesize < 1)
pagesize = (intptr_t)mdbx_default_pagesize(); pagesize = (intptr_t)mdbx_default_pagesize();
if (unlikely(pagesize < (intptr_t)MDBX_MIN_PAGESIZE || if (unlikely(pagesize < (intptr_t)MDBX_MIN_PAGESIZE || pagesize > (intptr_t)MDBX_MAX_PAGESIZE ||
pagesize > (intptr_t)MDBX_MAX_PAGESIZE ||
!is_powerof2((size_t)pagesize))) !is_powerof2((size_t)pagesize)))
return -1; return -1;
if (flags & if (flags & (MDBX_DUPSORT | MDBX_DUPFIXED | MDBX_INTEGERDUP | MDBX_REVERSEDUP))
(MDBX_DUPSORT | MDBX_DUPFIXED | MDBX_INTEGERDUP | MDBX_REVERSEDUP))
return valsize_max(pagesize, flags); return valsize_max(pagesize, flags);
return PAGESPACE(pagesize); return PAGESPACE(pagesize);
} }
__cold int mdbx_env_get_valsize4page_max(const MDBX_env *env, __cold int mdbx_env_get_valsize4page_max(const MDBX_env *env, MDBX_db_flags_t flags) {
MDBX_db_flags_t flags) {
if (unlikely(!env || env->signature.weak != env_signature)) if (unlikely(!env || env->signature.weak != env_signature))
return -1; return -1;
@ -152,17 +128,14 @@ __cold int mdbx_env_get_valsize4page_max(const MDBX_env *env,
/*----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*/
__cold static void stat_add(const tree_t *db, MDBX_stat *const st, __cold static void stat_add(const tree_t *db, MDBX_stat *const st, const size_t bytes) {
const size_t bytes) {
st->ms_depth += db->height; st->ms_depth += db->height;
st->ms_branch_pages += db->branch_pages; st->ms_branch_pages += db->branch_pages;
st->ms_leaf_pages += db->leaf_pages; st->ms_leaf_pages += db->leaf_pages;
st->ms_overflow_pages += db->large_pages; st->ms_overflow_pages += db->large_pages;
st->ms_entries += db->items; st->ms_entries += db->items;
if (likely(bytes >= if (likely(bytes >= offsetof(MDBX_stat, ms_mod_txnid) + sizeof(st->ms_mod_txnid)))
offsetof(MDBX_stat, ms_mod_txnid) + sizeof(st->ms_mod_txnid))) st->ms_mod_txnid = (st->ms_mod_txnid > db->mod_txnid) ? st->ms_mod_txnid : db->mod_txnid;
st->ms_mod_txnid =
(st->ms_mod_txnid > db->mod_txnid) ? st->ms_mod_txnid : db->mod_txnid;
} }
__cold static int stat_acc(const MDBX_txn *txn, MDBX_stat *st, size_t bytes) { __cold static int stat_acc(const MDBX_txn *txn, MDBX_stat *st, size_t bytes) {
@ -179,15 +152,13 @@ __cold static int stat_acc(const MDBX_txn *txn, MDBX_stat *st, size_t bytes) {
const MDBX_env *const env = txn->env; const MDBX_env *const env = txn->env;
st->ms_psize = env->ps; st->ms_psize = env->ps;
TXN_FOREACH_DBI_FROM( TXN_FOREACH_DBI_FROM(txn, dbi,
txn, dbi, /* assuming GC is internal and not subject for accounting */ MAIN_DBI) {
/* assuming GC is internal and not subject for accounting */ MAIN_DBI) {
if ((txn->dbi_state[dbi] & (DBI_VALID | DBI_STALE)) == DBI_VALID) if ((txn->dbi_state[dbi] & (DBI_VALID | DBI_STALE)) == DBI_VALID)
stat_add(txn->dbs + dbi, st, bytes); stat_add(txn->dbs + dbi, st, bytes);
} }
if (!(txn->dbs[MAIN_DBI].flags & MDBX_DUPSORT) && if (!(txn->dbs[MAIN_DBI].flags & MDBX_DUPSORT) && txn->dbs[MAIN_DBI].items /* TODO: use `md_subs` field */) {
txn->dbs[MAIN_DBI].items /* TODO: use `md_subs` field */) {
/* scan and account not opened named tables */ /* scan and account not opened named tables */
err = tree_search(&cx.outer, nullptr, Z_FIRST); err = tree_search(&cx.outer, nullptr, Z_FIRST);
@ -198,8 +169,7 @@ __cold static int stat_acc(const MDBX_txn *txn, MDBX_stat *st, size_t bytes) {
if (node_flags(node) != N_TREE) if (node_flags(node) != N_TREE)
continue; continue;
if (unlikely(node_ds(node) != sizeof(tree_t))) { if (unlikely(node_ds(node) != sizeof(tree_t))) {
ERROR("%s/%d: %s %zu", "MDBX_CORRUPTED", MDBX_CORRUPTED, ERROR("%s/%d: %s %zu", "MDBX_CORRUPTED", MDBX_CORRUPTED, "invalid table node size", node_ds(node));
"invalid table node size", node_ds(node));
return MDBX_CORRUPTED; return MDBX_CORRUPTED;
} }
@ -228,8 +198,7 @@ __cold static int stat_acc(const MDBX_txn *txn, MDBX_stat *st, size_t bytes) {
return MDBX_SUCCESS; return MDBX_SUCCESS;
} }
__cold int mdbx_env_stat_ex(const MDBX_env *env, const MDBX_txn *txn, __cold int mdbx_env_stat_ex(const MDBX_env *env, const MDBX_txn *txn, MDBX_stat *dest, size_t bytes) {
MDBX_stat *dest, size_t bytes) {
if (unlikely(!dest)) if (unlikely(!dest))
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
const size_t size_before_modtxnid = offsetof(MDBX_stat, ms_mod_txnid); const size_t size_before_modtxnid = offsetof(MDBX_stat, ms_mod_txnid);
@ -265,18 +234,15 @@ __cold int mdbx_env_stat_ex(const MDBX_env *env, const MDBX_txn *txn,
/*----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*/
static size_t estimate_rss(size_t database_bytes) { static size_t estimate_rss(size_t database_bytes) {
return database_bytes + database_bytes / 64 + return database_bytes + database_bytes / 64 + (512 + MDBX_WORDBITS * 16) * MEGABYTE;
(512 + MDBX_WORDBITS * 16) * MEGABYTE;
} }
__cold int mdbx_env_warmup(const MDBX_env *env, const MDBX_txn *txn, __cold int mdbx_env_warmup(const MDBX_env *env, const MDBX_txn *txn, MDBX_warmup_flags_t flags,
MDBX_warmup_flags_t flags,
unsigned timeout_seconds_16dot16) { unsigned timeout_seconds_16dot16) {
if (unlikely(env == nullptr && txn == nullptr)) if (unlikely(env == nullptr && txn == nullptr))
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
if (unlikely(flags > if (unlikely(flags > (MDBX_warmup_force | MDBX_warmup_oomsafe | MDBX_warmup_lock | MDBX_warmup_touchlimit |
(MDBX_warmup_force | MDBX_warmup_oomsafe | MDBX_warmup_lock | MDBX_warmup_release)))
MDBX_warmup_touchlimit | MDBX_warmup_release)))
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
if (txn) { if (txn) {
@ -294,10 +260,9 @@ __cold int mdbx_env_warmup(const MDBX_env *env, const MDBX_txn *txn,
env = txn->env; env = txn->env;
} }
const uint64_t timeout_monotime = const uint64_t timeout_monotime = (timeout_seconds_16dot16 && (flags & MDBX_warmup_force))
(timeout_seconds_16dot16 && (flags & MDBX_warmup_force)) ? osal_monotime() + osal_16dot16_to_monotime(timeout_seconds_16dot16)
? osal_monotime() + osal_16dot16_to_monotime(timeout_seconds_16dot16) : 0;
: 0;
if (flags & MDBX_warmup_release) if (flags & MDBX_warmup_release)
munlock_all(env); munlock_all(env);
@ -317,18 +282,14 @@ __cold int mdbx_env_warmup(const MDBX_env *env, const MDBX_txn *txn,
const size_t estimated_rss = estimate_rss(used_range); const size_t estimated_rss = estimate_rss(used_range);
#if defined(_WIN32) || defined(_WIN64) #if defined(_WIN32) || defined(_WIN64)
SIZE_T current_ws_lower, current_ws_upper; SIZE_T current_ws_lower, current_ws_upper;
if (GetProcessWorkingSetSize(GetCurrentProcess(), &current_ws_lower, if (GetProcessWorkingSetSize(GetCurrentProcess(), &current_ws_lower, &current_ws_upper) &&
&current_ws_upper) &&
current_ws_lower < estimated_rss) { current_ws_lower < estimated_rss) {
const SIZE_T ws_lower = estimated_rss; const SIZE_T ws_lower = estimated_rss;
const SIZE_T ws_upper = const SIZE_T ws_upper =
(MDBX_WORDBITS == 32 && ws_lower > MEGABYTE * 2048) (MDBX_WORDBITS == 32 && ws_lower > MEGABYTE * 2048) ? ws_lower : ws_lower + MDBX_WORDBITS * MEGABYTE * 32;
? ws_lower
: ws_lower + MDBX_WORDBITS * MEGABYTE * 32;
if (!SetProcessWorkingSetSize(GetCurrentProcess(), ws_lower, ws_upper)) { if (!SetProcessWorkingSetSize(GetCurrentProcess(), ws_lower, ws_upper)) {
rc = (int)GetLastError(); rc = (int)GetLastError();
WARNING("SetProcessWorkingSetSize(%zu, %zu) error %d", ws_lower, WARNING("SetProcessWorkingSetSize(%zu, %zu) error %d", ws_lower, ws_upper, rc);
ws_upper, rc);
} }
} }
#endif /* Windows */ #endif /* Windows */
@ -340,23 +301,21 @@ __cold int mdbx_env_warmup(const MDBX_env *env, const MDBX_txn *txn,
rss.rlim_max = estimated_rss; rss.rlim_max = estimated_rss;
if (setrlimit(RLIMIT_RSS, &rss)) { if (setrlimit(RLIMIT_RSS, &rss)) {
rc = errno; rc = errno;
WARNING("setrlimit(%s, {%zu, %zu}) error %d", "RLIMIT_RSS", WARNING("setrlimit(%s, {%zu, %zu}) error %d", "RLIMIT_RSS", (size_t)rss.rlim_cur, (size_t)rss.rlim_max, rc);
(size_t)rss.rlim_cur, (size_t)rss.rlim_max, rc);
} }
} }
#endif /* RLIMIT_RSS */ #endif /* RLIMIT_RSS */
#ifdef RLIMIT_MEMLOCK #ifdef RLIMIT_MEMLOCK
if (flags & MDBX_warmup_lock) { if (flags & MDBX_warmup_lock) {
struct rlimit memlock; struct rlimit memlock;
if (getrlimit(RLIMIT_MEMLOCK, &memlock) == 0 && if (getrlimit(RLIMIT_MEMLOCK, &memlock) == 0 && memlock.rlim_cur < estimated_rss) {
memlock.rlim_cur < estimated_rss) {
memlock.rlim_cur = estimated_rss; memlock.rlim_cur = estimated_rss;
if (memlock.rlim_max < estimated_rss) if (memlock.rlim_max < estimated_rss)
memlock.rlim_max = estimated_rss; memlock.rlim_max = estimated_rss;
if (setrlimit(RLIMIT_MEMLOCK, &memlock)) { if (setrlimit(RLIMIT_MEMLOCK, &memlock)) {
rc = errno; rc = errno;
WARNING("setrlimit(%s, {%zu, %zu}) error %d", "RLIMIT_MEMLOCK", WARNING("setrlimit(%s, {%zu, %zu}) error %d", "RLIMIT_MEMLOCK", (size_t)memlock.rlim_cur,
(size_t)memlock.rlim_cur, (size_t)memlock.rlim_max, rc); (size_t)memlock.rlim_max, rc);
} }
} }
} }
@ -364,12 +323,10 @@ __cold int mdbx_env_warmup(const MDBX_env *env, const MDBX_txn *txn,
(void)estimated_rss; (void)estimated_rss;
} }
#if defined(MLOCK_ONFAULT) && \ #if defined(MLOCK_ONFAULT) && \
((defined(_GNU_SOURCE) && __GLIBC_PREREQ(2, 27)) || \ ((defined(_GNU_SOURCE) && __GLIBC_PREREQ(2, 27)) || (defined(__ANDROID_API__) && __ANDROID_API__ >= 30)) && \
(defined(__ANDROID_API__) && __ANDROID_API__ >= 30)) && \
(defined(__linux__) || defined(__gnu_linux__)) (defined(__linux__) || defined(__gnu_linux__))
if ((flags & MDBX_warmup_lock) != 0 && if ((flags & MDBX_warmup_lock) != 0 && globals.linux_kernel_version >= 0x04040000 &&
globals.linux_kernel_version >= 0x04040000 &&
atomic_load32(&env->mlocked_pgno, mo_AcquireRelease) < mlock_pgno) { atomic_load32(&env->mlocked_pgno, mo_AcquireRelease) < mlock_pgno) {
if (mlock2(env->dxb_mmap.base, used_range, MLOCK_ONFAULT)) { if (mlock2(env->dxb_mmap.base, used_range, MLOCK_ONFAULT)) {
rc = errno; rc = errno;
@ -388,8 +345,7 @@ __cold int mdbx_env_warmup(const MDBX_env *env, const MDBX_txn *txn,
if (err != MDBX_SUCCESS && rc == MDBX_SUCCESS) if (err != MDBX_SUCCESS && rc == MDBX_SUCCESS)
rc = err; rc = err;
if ((flags & MDBX_warmup_force) != 0 && if ((flags & MDBX_warmup_force) != 0 && (rc == MDBX_SUCCESS || rc == MDBX_ENOSYS)) {
(rc == MDBX_SUCCESS || rc == MDBX_ENOSYS)) {
const volatile uint8_t *ptr = env->dxb_mmap.base; const volatile uint8_t *ptr = env->dxb_mmap.base;
size_t offset = 0, unused = 42; size_t offset = 0, unused = 42;
#if !(defined(_WIN32) || defined(_WIN64)) #if !(defined(_WIN32) || defined(_WIN64))
@ -440,8 +396,7 @@ __cold int mdbx_env_warmup(const MDBX_env *env, const MDBX_txn *txn,
(void)unused; (void)unused;
} }
if ((flags & MDBX_warmup_lock) != 0 && if ((flags & MDBX_warmup_lock) != 0 && (rc == MDBX_SUCCESS || rc == MDBX_ENOSYS) &&
(rc == MDBX_SUCCESS || rc == MDBX_ENOSYS) &&
atomic_load32(&env->mlocked_pgno, mo_AcquireRelease) < mlock_pgno) { atomic_load32(&env->mlocked_pgno, mo_AcquireRelease) < mlock_pgno) {
#if defined(_WIN32) || defined(_WIN64) #if defined(_WIN32) || defined(_WIN64)
if (VirtualLock(env->dxb_mmap.base, used_range)) { if (VirtualLock(env->dxb_mmap.base, used_range)) {
@ -481,14 +436,12 @@ __cold int mdbx_env_get_fd(const MDBX_env *env, mdbx_filehandle_t *arg) {
return MDBX_SUCCESS; return MDBX_SUCCESS;
} }
__cold int mdbx_env_set_flags(MDBX_env *env, MDBX_env_flags_t flags, __cold int mdbx_env_set_flags(MDBX_env *env, MDBX_env_flags_t flags, bool onoff) {
bool onoff) {
int rc = check_env(env, false); int rc = check_env(env, false);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
return LOG_IFERR(rc); return LOG_IFERR(rc);
if (unlikely(flags & ((env->flags & ENV_ACTIVE) ? ~ENV_CHANGEABLE_FLAGS if (unlikely(flags & ((env->flags & ENV_ACTIVE) ? ~ENV_CHANGEABLE_FLAGS : ~ENV_USABLE_FLAGS)))
: ~ENV_USABLE_FLAGS)))
return LOG_IFERR(MDBX_EPERM); return LOG_IFERR(MDBX_EPERM);
if (unlikely(env->flags & MDBX_RDONLY)) if (unlikely(env->flags & MDBX_RDONLY))
@ -536,9 +489,7 @@ __cold int mdbx_env_set_userctx(MDBX_env *env, void *ctx) {
return MDBX_SUCCESS; return MDBX_SUCCESS;
} }
__cold void *mdbx_env_get_userctx(const MDBX_env *env) { __cold void *mdbx_env_get_userctx(const MDBX_env *env) { return env ? env->userctx : nullptr; }
return env ? env->userctx : nullptr;
}
__cold int mdbx_env_set_assert(MDBX_env *env, MDBX_assert_func *func) { __cold int mdbx_env_set_assert(MDBX_env *env, MDBX_assert_func *func) {
int rc = check_env(env, false); int rc = check_env(env, false);
@ -564,8 +515,7 @@ __cold int mdbx_env_set_hsr(MDBX_env *env, MDBX_hsr_func *hsr) {
} }
__cold MDBX_hsr_func *mdbx_env_get_hsr(const MDBX_env *env) { __cold MDBX_hsr_func *mdbx_env_get_hsr(const MDBX_env *env) {
return likely(env && env->signature.weak == env_signature) ? env->hsr_callback return likely(env && env->signature.weak == env_signature) ? env->hsr_callback : nullptr;
: nullptr;
} }
#if defined(_WIN32) || defined(_WIN64) #if defined(_WIN32) || defined(_WIN64)
@ -595,13 +545,10 @@ __cold int mdbx_env_get_path(const MDBX_env *env, const char **arg) {
*arg = nullptr; *arg = nullptr;
DWORD flags = /* WC_ERR_INVALID_CHARS */ 0x80; DWORD flags = /* WC_ERR_INVALID_CHARS */ 0x80;
size_t mb_len = size_t mb_len =
WideCharToMultiByte(CP_THREAD_ACP, flags, env->pathname.specified, -1, WideCharToMultiByte(CP_THREAD_ACP, flags, env->pathname.specified, -1, nullptr, 0, nullptr, nullptr);
nullptr, 0, nullptr, nullptr);
rc = mb_len ? MDBX_SUCCESS : (int)GetLastError(); rc = mb_len ? MDBX_SUCCESS : (int)GetLastError();
if (rc == ERROR_INVALID_FLAGS) { if (rc == ERROR_INVALID_FLAGS) {
mb_len = mb_len = WideCharToMultiByte(CP_THREAD_ACP, flags = 0, env->pathname.specified, -1, nullptr, 0, nullptr, nullptr);
WideCharToMultiByte(CP_THREAD_ACP, flags = 0, env->pathname.specified,
-1, nullptr, 0, nullptr, nullptr);
rc = mb_len ? MDBX_SUCCESS : (int)GetLastError(); rc = mb_len ? MDBX_SUCCESS : (int)GetLastError();
} }
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
@ -610,16 +557,14 @@ __cold int mdbx_env_get_path(const MDBX_env *env, const char **arg) {
char *const mb_pathname = osal_malloc(mb_len); char *const mb_pathname = osal_malloc(mb_len);
if (!mb_pathname) if (!mb_pathname)
return LOG_IFERR(MDBX_ENOMEM); return LOG_IFERR(MDBX_ENOMEM);
if (mb_len != (size_t)WideCharToMultiByte( if (mb_len != (size_t)WideCharToMultiByte(CP_THREAD_ACP, flags, env->pathname.specified, -1, mb_pathname,
CP_THREAD_ACP, flags, env->pathname.specified, -1, (int)mb_len, nullptr, nullptr)) {
mb_pathname, (int)mb_len, nullptr, nullptr)) {
rc = (int)GetLastError(); rc = (int)GetLastError();
osal_free(mb_pathname); osal_free(mb_pathname);
return LOG_IFERR(rc); return LOG_IFERR(rc);
} }
if (env->pathname_char || if (env->pathname_char ||
InterlockedCompareExchangePointer((PVOID volatile *)&env->pathname_char, InterlockedCompareExchangePointer((PVOID volatile *)&env->pathname_char, mb_pathname, nullptr))
mb_pathname, nullptr))
osal_free(mb_pathname); osal_free(mb_pathname);
} }
*arg = env->pathname_char; *arg = env->pathname_char;
@ -634,41 +579,29 @@ __cold int mdbx_env_get_path(const MDBX_env *env, const char **arg) {
#ifndef LIBMDBX_NO_EXPORTS_LEGACY_API #ifndef LIBMDBX_NO_EXPORTS_LEGACY_API
LIBMDBX_API int mdbx_txn_begin(MDBX_env *env, MDBX_txn *parent, LIBMDBX_API int mdbx_txn_begin(MDBX_env *env, MDBX_txn *parent, MDBX_txn_flags_t flags, MDBX_txn **ret) {
MDBX_txn_flags_t flags, MDBX_txn **ret) {
return __inline_mdbx_txn_begin(env, parent, flags, ret); return __inline_mdbx_txn_begin(env, parent, flags, ret);
} }
LIBMDBX_API int mdbx_txn_commit(MDBX_txn *txn) { LIBMDBX_API int mdbx_txn_commit(MDBX_txn *txn) { return __inline_mdbx_txn_commit(txn); }
return __inline_mdbx_txn_commit(txn);
}
LIBMDBX_API __cold int mdbx_env_stat(const MDBX_env *env, MDBX_stat *stat, LIBMDBX_API __cold int mdbx_env_stat(const MDBX_env *env, MDBX_stat *stat, size_t bytes) {
size_t bytes) {
return __inline_mdbx_env_stat(env, stat, bytes); return __inline_mdbx_env_stat(env, stat, bytes);
} }
LIBMDBX_API __cold int mdbx_env_info(const MDBX_env *env, MDBX_envinfo *info, LIBMDBX_API __cold int mdbx_env_info(const MDBX_env *env, MDBX_envinfo *info, size_t bytes) {
size_t bytes) {
return __inline_mdbx_env_info(env, info, bytes); return __inline_mdbx_env_info(env, info, bytes);
} }
LIBMDBX_API int mdbx_dbi_flags(const MDBX_txn *txn, MDBX_dbi dbi, LIBMDBX_API int mdbx_dbi_flags(const MDBX_txn *txn, MDBX_dbi dbi, unsigned *flags) {
unsigned *flags) {
return __inline_mdbx_dbi_flags(txn, dbi, flags); return __inline_mdbx_dbi_flags(txn, dbi, flags);
} }
LIBMDBX_API __cold int mdbx_env_sync(MDBX_env *env) { LIBMDBX_API __cold int mdbx_env_sync(MDBX_env *env) { return __inline_mdbx_env_sync(env); }
return __inline_mdbx_env_sync(env);
}
LIBMDBX_API __cold int mdbx_env_sync_poll(MDBX_env *env) { LIBMDBX_API __cold int mdbx_env_sync_poll(MDBX_env *env) { return __inline_mdbx_env_sync_poll(env); }
return __inline_mdbx_env_sync_poll(env);
}
LIBMDBX_API __cold int mdbx_env_close(MDBX_env *env) { LIBMDBX_API __cold int mdbx_env_close(MDBX_env *env) { return __inline_mdbx_env_close(env); }
return __inline_mdbx_env_close(env);
}
LIBMDBX_API __cold int mdbx_env_set_mapsize(MDBX_env *env, size_t size) { LIBMDBX_API __cold int mdbx_env_set_mapsize(MDBX_env *env, size_t size) {
return __inline_mdbx_env_set_mapsize(env, size); return __inline_mdbx_env_set_mapsize(env, size);
@ -682,13 +615,11 @@ LIBMDBX_API __cold int mdbx_env_get_maxdbs(const MDBX_env *env, MDBX_dbi *dbs) {
return __inline_mdbx_env_get_maxdbs(env, dbs); return __inline_mdbx_env_get_maxdbs(env, dbs);
} }
LIBMDBX_API __cold int mdbx_env_set_maxreaders(MDBX_env *env, LIBMDBX_API __cold int mdbx_env_set_maxreaders(MDBX_env *env, unsigned readers) {
unsigned readers) {
return __inline_mdbx_env_set_maxreaders(env, readers); return __inline_mdbx_env_set_maxreaders(env, readers);
} }
LIBMDBX_API __cold int mdbx_env_get_maxreaders(const MDBX_env *env, LIBMDBX_API __cold int mdbx_env_get_maxreaders(const MDBX_env *env, unsigned *readers) {
unsigned *readers) {
return __inline_mdbx_env_get_maxreaders(env, readers); return __inline_mdbx_env_get_maxreaders(env, readers);
} }
@ -696,35 +627,24 @@ LIBMDBX_API __cold int mdbx_env_set_syncbytes(MDBX_env *env, size_t threshold) {
return __inline_mdbx_env_set_syncbytes(env, threshold); return __inline_mdbx_env_set_syncbytes(env, threshold);
} }
LIBMDBX_API __cold int mdbx_env_get_syncbytes(const MDBX_env *env, LIBMDBX_API __cold int mdbx_env_get_syncbytes(const MDBX_env *env, size_t *threshold) {
size_t *threshold) {
return __inline_mdbx_env_get_syncbytes(env, threshold); return __inline_mdbx_env_get_syncbytes(env, threshold);
} }
LIBMDBX_API __cold int mdbx_env_set_syncperiod(MDBX_env *env, LIBMDBX_API __cold int mdbx_env_set_syncperiod(MDBX_env *env, unsigned seconds_16dot16) {
unsigned seconds_16dot16) {
return __inline_mdbx_env_set_syncperiod(env, seconds_16dot16); return __inline_mdbx_env_set_syncperiod(env, seconds_16dot16);
} }
LIBMDBX_API __cold int mdbx_env_get_syncperiod(const MDBX_env *env, LIBMDBX_API __cold int mdbx_env_get_syncperiod(const MDBX_env *env, unsigned *seconds_16dot16) {
unsigned *seconds_16dot16) {
return __inline_mdbx_env_get_syncperiod(env, seconds_16dot16); return __inline_mdbx_env_get_syncperiod(env, seconds_16dot16);
} }
LIBMDBX_API __cold uint64_t mdbx_key_from_int64(const int64_t i64) { LIBMDBX_API __cold uint64_t mdbx_key_from_int64(const int64_t i64) { return __inline_mdbx_key_from_int64(i64); }
return __inline_mdbx_key_from_int64(i64);
}
LIBMDBX_API __cold uint32_t mdbx_key_from_int32(const int32_t i32) { LIBMDBX_API __cold uint32_t mdbx_key_from_int32(const int32_t i32) { return __inline_mdbx_key_from_int32(i32); }
return __inline_mdbx_key_from_int32(i32);
}
LIBMDBX_API __cold intptr_t mdbx_limits_pgsize_min(void) { LIBMDBX_API __cold intptr_t mdbx_limits_pgsize_min(void) { return __inline_mdbx_limits_pgsize_min(); }
return __inline_mdbx_limits_pgsize_min();
}
LIBMDBX_API __cold intptr_t mdbx_limits_pgsize_max(void) { LIBMDBX_API __cold intptr_t mdbx_limits_pgsize_max(void) { return __inline_mdbx_limits_pgsize_max(); }
return __inline_mdbx_limits_pgsize_max();
}
#endif /* LIBMDBX_NO_EXPORTS_LEGACY_API */ #endif /* LIBMDBX_NO_EXPORTS_LEGACY_API */
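
The mdbx_limits_*() helpers and legacy wrappers above are only re-wrapped by the new clang-format settings; their signatures and behaviour are unchanged. For orientation, a minimal usage sketch (illustrative only, assuming just the public mdbx.h header; a negative pagesize selects the default, as the guards above show):

/* Illustrative only: query pagesize-dependent limits via the public API. */
#include "mdbx.h"
#include <inttypes.h>
#include <stdio.h>

int main(void) {
  const intptr_t max_key = mdbx_limits_keysize_max(-1, MDBX_DB_DEFAULTS);
  const intptr_t max_dupsort_val = mdbx_limits_valsize_max(-1, MDBX_DUPSORT);
  const intptr_t max_inpage_pair = mdbx_limits_pairsize4page_max(-1, MDBX_DB_DEFAULTS);
  printf("max key %" PRIdPTR ", max dupsort value %" PRIdPTR ", max in-page pair %" PRIdPTR "\n",
         max_key, max_dupsort_val, max_inpage_pair);
  return 0;
}
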


@ -93,8 +93,7 @@ __cold static int compacting_toggle_write_buffers(ctx_t *ctx) {
return ctx->error; return ctx->error;
} }
static int compacting_put_bytes(ctx_t *ctx, const void *src, size_t bytes, static int compacting_put_bytes(ctx_t *ctx, const void *src, size_t bytes, pgno_t pgno, pgno_t npages) {
pgno_t pgno, pgno_t npages) {
assert(pgno == 0 || bytes > PAGEHDRSZ); assert(pgno == 0 || bytes > PAGEHDRSZ);
while (bytes > 0) { while (bytes > 0) {
const size_t side = ctx->head & 1; const size_t side = ctx->head & 1;
@ -130,17 +129,14 @@ static int compacting_put_bytes(ctx_t *ctx, const void *src, size_t bytes,
return MDBX_SUCCESS; return MDBX_SUCCESS;
} }
static int compacting_put_page(ctx_t *ctx, const page_t *mp, static int compacting_put_page(ctx_t *ctx, const page_t *mp, const size_t head_bytes, const size_t tail_bytes,
const size_t head_bytes, const size_t tail_bytes,
const pgno_t npages) { const pgno_t npages) {
if (tail_bytes) { if (tail_bytes) {
assert(head_bytes + tail_bytes <= ctx->env->ps); assert(head_bytes + tail_bytes <= ctx->env->ps);
assert(npages == 1 && assert(npages == 1 && (page_type(mp) == P_BRANCH || page_type(mp) == P_LEAF));
(page_type(mp) == P_BRANCH || page_type(mp) == P_LEAF));
} else { } else {
assert(head_bytes <= pgno2bytes(ctx->env, npages)); assert(head_bytes <= pgno2bytes(ctx->env, npages));
assert((npages == 1 && page_type(mp) == (P_LEAF | P_DUPFIX)) || assert((npages == 1 && page_type(mp) == (P_LEAF | P_DUPFIX)) || page_type(mp) == P_LARGE);
page_type(mp) == P_LARGE);
} }
const pgno_t pgno = ctx->first_unallocated; const pgno_t pgno = ctx->first_unallocated;
@ -148,18 +144,13 @@ static int compacting_put_page(ctx_t *ctx, const page_t *mp,
int err = compacting_put_bytes(ctx, mp, head_bytes, pgno, npages); int err = compacting_put_bytes(ctx, mp, head_bytes, pgno, npages);
if (unlikely(err != MDBX_SUCCESS)) if (unlikely(err != MDBX_SUCCESS))
return err; return err;
err = compacting_put_bytes( err = compacting_put_bytes(ctx, nullptr, pgno2bytes(ctx->env, npages) - (head_bytes + tail_bytes), 0, 0);
ctx, nullptr, pgno2bytes(ctx->env, npages) - (head_bytes + tail_bytes), 0,
0);
if (unlikely(err != MDBX_SUCCESS)) if (unlikely(err != MDBX_SUCCESS))
return err; return err;
return compacting_put_bytes(ctx, ptr_disp(mp, ctx->env->ps - tail_bytes), return compacting_put_bytes(ctx, ptr_disp(mp, ctx->env->ps - tail_bytes), tail_bytes, 0, 0);
tail_bytes, 0, 0);
} }
__cold static int compacting_walk(ctx_t *ctx, MDBX_cursor *mc, __cold static int compacting_walk(ctx_t *ctx, MDBX_cursor *mc, pgno_t *const parent_pgno, txnid_t parent_txnid) {
pgno_t *const parent_pgno,
txnid_t parent_txnid) {
mc->top = 0; mc->top = 0;
mc->ki[0] = 0; mc->ki[0] = 0;
int rc = page_get(mc, *parent_pgno, &mc->pg[0], parent_txnid); int rc = page_get(mc, *parent_pgno, &mc->pg[0], parent_txnid);
@ -201,22 +192,18 @@ __cold static int compacting_walk(ctx_t *ctx, MDBX_cursor *mc,
node = page_node(mp, i); node = page_node(mp, i);
} }
const pgr_t lp = const pgr_t lp = page_get_large(mc, node_largedata_pgno(node), mp->txnid);
page_get_large(mc, node_largedata_pgno(node), mp->txnid);
if (unlikely((rc = lp.err) != MDBX_SUCCESS)) if (unlikely((rc = lp.err) != MDBX_SUCCESS))
goto bailout; goto bailout;
const size_t datasize = node_ds(node); const size_t datasize = node_ds(node);
const pgno_t npages = largechunk_npages(ctx->env, datasize); const pgno_t npages = largechunk_npages(ctx->env, datasize);
poke_pgno(node_data(node), ctx->first_unallocated); poke_pgno(node_data(node), ctx->first_unallocated);
rc = compacting_put_page(ctx, lp.page, PAGEHDRSZ + datasize, 0, rc = compacting_put_page(ctx, lp.page, PAGEHDRSZ + datasize, 0, npages);
npages);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
goto bailout; goto bailout;
} else if (node_flags(node) & N_TREE) { } else if (node_flags(node) & N_TREE) {
if (!MDBX_DISABLE_VALIDATION && if (!MDBX_DISABLE_VALIDATION && unlikely(node_ds(node) != sizeof(tree_t))) {
unlikely(node_ds(node) != sizeof(tree_t))) { ERROR("%s/%d: %s %u", "MDBX_CORRUPTED", MDBX_CORRUPTED, "invalid dupsort sub-tree node size",
ERROR("%s/%d: %s %u", "MDBX_CORRUPTED", MDBX_CORRUPTED,
"invalid dupsort sub-tree node size",
(unsigned)node_ds(node)); (unsigned)node_ds(node));
rc = MDBX_CORRUPTED; rc = MDBX_CORRUPTED;
goto bailout; goto bailout;
@ -235,13 +222,11 @@ __cold static int compacting_walk(ctx_t *ctx, MDBX_cursor *mc,
rc = cursor_dupsort_setup(mc, node, mp); rc = cursor_dupsort_setup(mc, node, mp);
if (likely(rc == MDBX_SUCCESS)) { if (likely(rc == MDBX_SUCCESS)) {
nested = &mc->subcur->nested_tree; nested = &mc->subcur->nested_tree;
rc = compacting_walk(ctx, &mc->subcur->cursor, &nested->root, rc = compacting_walk(ctx, &mc->subcur->cursor, &nested->root, mp->txnid);
mp->txnid);
} }
} else { } else {
cASSERT(mc, (mc->flags & z_inner) == 0 && mc->subcur == 0); cASSERT(mc, (mc->flags & z_inner) == 0 && mc->subcur == 0);
cursor_couple_t *couple = cursor_couple_t *couple = container_of(mc, cursor_couple_t, outer);
container_of(mc, cursor_couple_t, outer);
nested = &couple->inner.nested_tree; nested = &couple->inner.nested_tree;
memcpy(nested, node_data(node), sizeof(tree_t)); memcpy(nested, node_data(node), sizeof(tree_t));
rc = compacting_walk_tree(ctx, nested); rc = compacting_walk_tree(ctx, nested);
@ -280,11 +265,9 @@ __cold static int compacting_walk(ctx_t *ctx, MDBX_cursor *mc,
const pgno_t pgno = ctx->first_unallocated; const pgno_t pgno = ctx->first_unallocated;
if (likely(!is_dupfix_leaf(mp))) { if (likely(!is_dupfix_leaf(mp))) {
rc = compacting_put_page(ctx, mp, PAGEHDRSZ + mp->lower, rc = compacting_put_page(ctx, mp, PAGEHDRSZ + mp->lower, ctx->env->ps - (PAGEHDRSZ + mp->upper), 1);
ctx->env->ps - (PAGEHDRSZ + mp->upper), 1);
} else { } else {
rc = compacting_put_page( rc = compacting_put_page(ctx, mp, PAGEHDRSZ + page_numkeys(mp) * mp->dupfix_ksize, 0, 1);
ctx, mp, PAGEHDRSZ + page_numkeys(mp) * mp->dupfix_ksize, 0, 1);
} }
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
goto bailout; goto bailout;
@ -326,19 +309,15 @@ __cold static int compacting_walk_tree(ctx_t *ctx, tree_t *tree) {
__cold static void compacting_fixup_meta(MDBX_env *env, meta_t *meta) { __cold static void compacting_fixup_meta(MDBX_env *env, meta_t *meta) {
eASSERT(env, meta->trees.gc.mod_txnid || meta->trees.gc.root == P_INVALID); eASSERT(env, meta->trees.gc.mod_txnid || meta->trees.gc.root == P_INVALID);
eASSERT(env, eASSERT(env, meta->trees.main.mod_txnid || meta->trees.main.root == P_INVALID);
meta->trees.main.mod_txnid || meta->trees.main.root == P_INVALID);
/* Calculate filesize taking in account shrink/growing thresholds */ /* Calculate filesize taking in account shrink/growing thresholds */
if (meta->geometry.first_unallocated != meta->geometry.now) { if (meta->geometry.first_unallocated != meta->geometry.now) {
meta->geometry.now = meta->geometry.first_unallocated; meta->geometry.now = meta->geometry.first_unallocated;
const size_t aligner = const size_t aligner = pv2pages(meta->geometry.grow_pv ? meta->geometry.grow_pv : meta->geometry.shrink_pv);
pv2pages(meta->geometry.grow_pv ? meta->geometry.grow_pv
: meta->geometry.shrink_pv);
if (aligner) { if (aligner) {
const pgno_t aligned = pgno_align2os_pgno( const pgno_t aligned = pgno_align2os_pgno(env, meta->geometry.first_unallocated + aligner -
env, meta->geometry.first_unallocated + aligner - meta->geometry.first_unallocated % aligner);
meta->geometry.first_unallocated % aligner);
meta->geometry.now = aligned; meta->geometry.now = aligned;
} }
} }
@ -366,13 +345,10 @@ __cold static void meta_make_sizeable(meta_t *meta) {
} }
} }
__cold static int copy_with_compacting(MDBX_env *env, MDBX_txn *txn, __cold static int copy_with_compacting(MDBX_env *env, MDBX_txn *txn, mdbx_filehandle_t fd, uint8_t *buffer,
mdbx_filehandle_t fd, uint8_t *buffer, const bool dest_is_pipe, const MDBX_copy_flags_t flags) {
const bool dest_is_pipe,
const MDBX_copy_flags_t flags) {
const size_t meta_bytes = pgno2bytes(env, NUM_METAS); const size_t meta_bytes = pgno2bytes(env, NUM_METAS);
uint8_t *const data_buffer = uint8_t *const data_buffer = buffer + ceil_powerof2(meta_bytes, globals.sys_pagesize);
buffer + ceil_powerof2(meta_bytes, globals.sys_pagesize);
meta_t *const meta = meta_init_triplet(env, buffer); meta_t *const meta = meta_init_triplet(env, buffer);
meta_set_txnid(env, meta, txn->txnid); meta_set_txnid(env, meta, txn->txnid);
@ -405,22 +381,17 @@ __cold static int copy_with_compacting(MDBX_env *env, MDBX_txn *txn,
int rc = cursor_init(&couple.outer, txn, FREE_DBI); int rc = cursor_init(&couple.outer, txn, FREE_DBI);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
return rc; return rc;
pgno_t gc_npages = txn->dbs[FREE_DBI].branch_pages + pgno_t gc_npages = txn->dbs[FREE_DBI].branch_pages + txn->dbs[FREE_DBI].leaf_pages + txn->dbs[FREE_DBI].large_pages;
txn->dbs[FREE_DBI].leaf_pages +
txn->dbs[FREE_DBI].large_pages;
MDBX_val key, data; MDBX_val key, data;
rc = outer_first(&couple.outer, &key, &data); rc = outer_first(&couple.outer, &key, &data);
while (rc == MDBX_SUCCESS) { while (rc == MDBX_SUCCESS) {
const pnl_t pnl = data.iov_base; const pnl_t pnl = data.iov_base;
if (unlikely(data.iov_len % sizeof(pgno_t) || if (unlikely(data.iov_len % sizeof(pgno_t) || data.iov_len < MDBX_PNL_SIZEOF(pnl))) {
data.iov_len < MDBX_PNL_SIZEOF(pnl))) { ERROR("%s/%d: %s %zu", "MDBX_CORRUPTED", MDBX_CORRUPTED, "invalid GC-record length", data.iov_len);
ERROR("%s/%d: %s %zu", "MDBX_CORRUPTED", MDBX_CORRUPTED,
"invalid GC-record length", data.iov_len);
return MDBX_CORRUPTED; return MDBX_CORRUPTED;
} }
if (unlikely(!pnl_check(pnl, txn->geo.first_unallocated))) { if (unlikely(!pnl_check(pnl, txn->geo.first_unallocated))) {
ERROR("%s/%d: %s", "MDBX_CORRUPTED", MDBX_CORRUPTED, ERROR("%s/%d: %s", "MDBX_CORRUPTED", MDBX_CORRUPTED, "invalid GC-record content");
"invalid GC-record content");
return MDBX_CORRUPTED; return MDBX_CORRUPTED;
} }
gc_npages += MDBX_PNL_GETSIZE(pnl); gc_npages += MDBX_PNL_GETSIZE(pnl);
@ -466,21 +437,16 @@ __cold static int copy_with_compacting(MDBX_env *env, MDBX_txn *txn,
/* toggle to flush non-empty buffers */ /* toggle to flush non-empty buffers */
compacting_toggle_write_buffers(&ctx); compacting_toggle_write_buffers(&ctx);
if (likely(rc == MDBX_SUCCESS) && if (likely(rc == MDBX_SUCCESS) && unlikely(meta->geometry.first_unallocated != ctx.first_unallocated)) {
unlikely(meta->geometry.first_unallocated != ctx.first_unallocated)) {
if (ctx.first_unallocated > meta->geometry.first_unallocated) { if (ctx.first_unallocated > meta->geometry.first_unallocated) {
ERROR("the source DB %s: post-compactification used pages %" PRIaPGNO ERROR("the source DB %s: post-compactification used pages %" PRIaPGNO " %c expected %" PRIaPGNO,
" %c expected %" PRIaPGNO, "has double-used pages or other corruption", ctx.first_unallocated, '>',
"has double-used pages or other corruption", meta->geometry.first_unallocated);
ctx.first_unallocated, '>', meta->geometry.first_unallocated);
rc = MDBX_CORRUPTED; /* corrupted DB */ rc = MDBX_CORRUPTED; /* corrupted DB */
} }
if (ctx.first_unallocated < meta->geometry.first_unallocated) { if (ctx.first_unallocated < meta->geometry.first_unallocated) {
WARNING( WARNING("the source DB %s: post-compactification used pages %" PRIaPGNO " %c expected %" PRIaPGNO,
"the source DB %s: post-compactification used pages %" PRIaPGNO "has page leak(s)", ctx.first_unallocated, '<', meta->geometry.first_unallocated);
" %c expected %" PRIaPGNO,
"has page leak(s)", ctx.first_unallocated, '<',
meta->geometry.first_unallocated);
if (dest_is_pipe) if (dest_is_pipe)
/* the root within already written meta-pages is wrong */ /* the root within already written meta-pages is wrong */
rc = MDBX_CORRUPTED; rc = MDBX_CORRUPTED;
@ -493,8 +459,7 @@ __cold static int copy_with_compacting(MDBX_env *env, MDBX_txn *txn,
eASSERT(env, (ctx.write_len[ctx.head & 1]) == 0); eASSERT(env, (ctx.write_len[ctx.head & 1]) == 0);
compacting_toggle_write_buffers(&ctx); compacting_toggle_write_buffers(&ctx);
thread_err = osal_thread_join(thread); thread_err = osal_thread_join(thread);
eASSERT(env, (ctx.tail == ctx.head && ctx.write_len[ctx.head & 1] == 0) || eASSERT(env, (ctx.tail == ctx.head && ctx.write_len[ctx.head & 1] == 0) || ctx.error);
ctx.error);
osal_condpair_destroy(&ctx.condpair); osal_condpair_destroy(&ctx.condpair);
} }
if (unlikely(thread_err != MDBX_SUCCESS)) if (unlikely(thread_err != MDBX_SUCCESS))
@ -519,9 +484,8 @@ __cold static int copy_with_compacting(MDBX_env *env, MDBX_txn *txn,
const size_t used_size = pgno2bytes(env, meta->geometry.first_unallocated); const size_t used_size = pgno2bytes(env, meta->geometry.first_unallocated);
memset(data_buffer, 0, (size_t)MDBX_ENVCOPY_WRITEBUF); memset(data_buffer, 0, (size_t)MDBX_ENVCOPY_WRITEBUF);
for (size_t offset = used_size; offset < whole_size;) { for (size_t offset = used_size; offset < whole_size;) {
const size_t chunk = ((size_t)MDBX_ENVCOPY_WRITEBUF < whole_size - offset) const size_t chunk =
? (size_t)MDBX_ENVCOPY_WRITEBUF ((size_t)MDBX_ENVCOPY_WRITEBUF < whole_size - offset) ? (size_t)MDBX_ENVCOPY_WRITEBUF : whole_size - offset;
: whole_size - offset;
int rc = osal_write(fd, data_buffer, chunk); int rc = osal_write(fd, data_buffer, chunk);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
return rc; return rc;
@ -533,9 +497,8 @@ __cold static int copy_with_compacting(MDBX_env *env, MDBX_txn *txn,
//---------------------------------------------------------------------------- //----------------------------------------------------------------------------
__cold static int copy_asis(MDBX_env *env, MDBX_txn *txn, mdbx_filehandle_t fd, __cold static int copy_asis(MDBX_env *env, MDBX_txn *txn, mdbx_filehandle_t fd, uint8_t *buffer,
uint8_t *buffer, const bool dest_is_pipe, const bool dest_is_pipe, const MDBX_copy_flags_t flags) {
const MDBX_copy_flags_t flags) {
bool should_unlock = false; bool should_unlock = false;
if ((txn->flags & MDBX_TXN_RDONLY) != 0 && (flags & MDBX_CP_RENEW_TXN) != 0) { if ((txn->flags & MDBX_TXN_RDONLY) != 0 && (flags & MDBX_CP_RENEW_TXN) != 0) {
/* Try temporarily block writers until we snapshot the meta pages */ /* Try temporarily block writers until we snapshot the meta pages */
@ -566,8 +529,7 @@ retry_snap_meta:
rc = MDBX_MVCC_RETARDED; rc = MDBX_MVCC_RETARDED;
for (size_t n = 0; n < NUM_METAS; ++n) { for (size_t n = 0; n < NUM_METAS; ++n) {
meta_t *const meta = page_meta(ptr_disp(buffer, pgno2bytes(env, n))); meta_t *const meta = page_meta(ptr_disp(buffer, pgno2bytes(env, n)));
if (troika.txnid[n] == txn->txnid && if (troika.txnid[n] == txn->txnid && ((/* is_steady */ (troika.fsm >> n) & 1) || rc != MDBX_SUCCESS)) {
((/* is_steady */ (troika.fsm >> n) & 1) || rc != MDBX_SUCCESS)) {
rc = MDBX_SUCCESS; rc = MDBX_SUCCESS;
headcopy = meta; headcopy = meta;
} else if (troika.txnid[n] > txn->txnid) } else if (troika.txnid[n] > txn->txnid)
@ -606,14 +568,12 @@ retry_snap_meta:
if (dest_is_pipe) if (dest_is_pipe)
rc = osal_write(fd, buffer, meta_bytes); rc = osal_write(fd, buffer, meta_bytes);
uint8_t *const data_buffer = uint8_t *const data_buffer = buffer + ceil_powerof2(meta_bytes, globals.sys_pagesize);
buffer + ceil_powerof2(meta_bytes, globals.sys_pagesize);
#if MDBX_USE_COPYFILERANGE #if MDBX_USE_COPYFILERANGE
static bool copyfilerange_unavailable; static bool copyfilerange_unavailable;
bool not_the_same_filesystem = false; bool not_the_same_filesystem = false;
struct statfs statfs_info; struct statfs statfs_info;
if (fstatfs(fd, &statfs_info) || if (fstatfs(fd, &statfs_info) || statfs_info.f_type == /* ECRYPTFS_SUPER_MAGIC */ 0xf15f)
statfs_info.f_type == /* ECRYPTFS_SUPER_MAGIC */ 0xf15f)
/* avoid use copyfilerange_unavailable() to ecryptfs due bugs */ /* avoid use copyfilerange_unavailable() to ecryptfs due bugs */
not_the_same_filesystem = true; not_the_same_filesystem = true;
#endif /* MDBX_USE_COPYFILERANGE */ #endif /* MDBX_USE_COPYFILERANGE */
@ -629,8 +589,7 @@ retry_snap_meta:
static bool sendfile_unavailable; static bool sendfile_unavailable;
if (dest_is_pipe && likely(!sendfile_unavailable)) { if (dest_is_pipe && likely(!sendfile_unavailable)) {
off_t in_offset = offset; off_t in_offset = offset;
const ssize_t written = const ssize_t written = sendfile(fd, env->lazy_fd, &in_offset, used_size - offset);
sendfile(fd, env->lazy_fd, &in_offset, used_size - offset);
if (likely(written > 0)) { if (likely(written > 0)) {
offset = in_offset; offset = in_offset;
if (flags & MDBX_CP_THROTTLE_MVCC) if (flags & MDBX_CP_THROTTLE_MVCC)
@ -645,11 +604,9 @@ retry_snap_meta:
#endif /* MDBX_USE_SENDFILE */ #endif /* MDBX_USE_SENDFILE */
#if MDBX_USE_COPYFILERANGE #if MDBX_USE_COPYFILERANGE
if (!dest_is_pipe && !not_the_same_filesystem && if (!dest_is_pipe && !not_the_same_filesystem && likely(!copyfilerange_unavailable)) {
likely(!copyfilerange_unavailable)) {
off_t in_offset = offset, out_offset = offset; off_t in_offset = offset, out_offset = offset;
ssize_t bytes_copied = copy_file_range( ssize_t bytes_copied = copy_file_range(env->lazy_fd, &in_offset, fd, &out_offset, used_size - offset, 0);
env->lazy_fd, &in_offset, fd, &out_offset, used_size - offset, 0);
if (likely(bytes_copied > 0)) { if (likely(bytes_copied > 0)) {
offset = in_offset; offset = in_offset;
if (flags & MDBX_CP_THROTTLE_MVCC) if (flags & MDBX_CP_THROTTLE_MVCC)
@ -672,9 +629,8 @@ retry_snap_meta:
#endif /* MDBX_USE_COPYFILERANGE */ #endif /* MDBX_USE_COPYFILERANGE */
/* fallback to portable */ /* fallback to portable */
const size_t chunk = ((size_t)MDBX_ENVCOPY_WRITEBUF < used_size - offset) const size_t chunk =
? (size_t)MDBX_ENVCOPY_WRITEBUF ((size_t)MDBX_ENVCOPY_WRITEBUF < used_size - offset) ? (size_t)MDBX_ENVCOPY_WRITEBUF : used_size - offset;
: used_size - offset;
/* copy to avoid EFAULT in case swapped-out */ /* copy to avoid EFAULT in case swapped-out */
memcpy(data_buffer, ptr_disp(env->dxb_mmap.base, offset), chunk); memcpy(data_buffer, ptr_disp(env->dxb_mmap.base, offset), chunk);
if (flags & MDBX_CP_THROTTLE_MVCC) if (flags & MDBX_CP_THROTTLE_MVCC)
@ -689,12 +645,9 @@ retry_snap_meta:
rc = osal_ftruncate(fd, whole_size); rc = osal_ftruncate(fd, whole_size);
else { else {
memset(data_buffer, 0, (size_t)MDBX_ENVCOPY_WRITEBUF); memset(data_buffer, 0, (size_t)MDBX_ENVCOPY_WRITEBUF);
for (size_t offset = used_size; for (size_t offset = used_size; rc == MDBX_SUCCESS && offset < whole_size;) {
rc == MDBX_SUCCESS && offset < whole_size;) {
const size_t chunk = const size_t chunk =
((size_t)MDBX_ENVCOPY_WRITEBUF < whole_size - offset) ((size_t)MDBX_ENVCOPY_WRITEBUF < whole_size - offset) ? (size_t)MDBX_ENVCOPY_WRITEBUF : whole_size - offset;
? (size_t)MDBX_ENVCOPY_WRITEBUF
: whole_size - offset;
rc = osal_write(fd, data_buffer, chunk); rc = osal_write(fd, data_buffer, chunk);
offset += chunk; offset += chunk;
} }
@ -706,8 +659,7 @@ retry_snap_meta:
//---------------------------------------------------------------------------- //----------------------------------------------------------------------------
__cold static int copy2fd(MDBX_txn *txn, mdbx_filehandle_t fd, __cold static int copy2fd(MDBX_txn *txn, mdbx_filehandle_t fd, MDBX_copy_flags_t flags) {
MDBX_copy_flags_t flags) {
if (unlikely(txn->flags & MDBX_TXN_DIRTY)) if (unlikely(txn->flags & MDBX_TXN_DIRTY))
return MDBX_BAD_TXN; return MDBX_BAD_TXN;
@ -734,9 +686,7 @@ __cold static int copy2fd(MDBX_txn *txn, mdbx_filehandle_t fd,
MDBX_env *const env = txn->env; MDBX_env *const env = txn->env;
const size_t buffer_size = const size_t buffer_size =
pgno_align2os_bytes(env, NUM_METAS) + pgno_align2os_bytes(env, NUM_METAS) +
ceil_powerof2(((flags & MDBX_CP_COMPACT) ceil_powerof2(((flags & MDBX_CP_COMPACT) ? 2 * (size_t)MDBX_ENVCOPY_WRITEBUF : (size_t)MDBX_ENVCOPY_WRITEBUF),
? 2 * (size_t)MDBX_ENVCOPY_WRITEBUF
: (size_t)MDBX_ENVCOPY_WRITEBUF),
globals.sys_pagesize); globals.sys_pagesize);
uint8_t *buffer = nullptr; uint8_t *buffer = nullptr;
@ -755,8 +705,7 @@ __cold static int copy2fd(MDBX_txn *txn, mdbx_filehandle_t fd,
rc = mdbx_txn_unpark(txn, false); rc = mdbx_txn_unpark(txn, false);
if (likely(rc == MDBX_SUCCESS)) { if (likely(rc == MDBX_SUCCESS)) {
memset(buffer, 0, pgno2bytes(env, NUM_METAS)); memset(buffer, 0, pgno2bytes(env, NUM_METAS));
rc = ((flags & MDBX_CP_COMPACT) ? copy_with_compacting : copy_asis)( rc = ((flags & MDBX_CP_COMPACT) ? copy_with_compacting : copy_asis)(env, txn, fd, buffer, dest_is_pipe, flags);
env, txn, fd, buffer, dest_is_pipe, flags);
if (likely(rc == MDBX_SUCCESS)) if (likely(rc == MDBX_SUCCESS))
rc = mdbx_txn_unpark(txn, false); rc = mdbx_txn_unpark(txn, false);
@ -785,8 +734,7 @@ __cold static int copy2fd(MDBX_txn *txn, mdbx_filehandle_t fd,
return rc; return rc;
} }
__cold static int copy2pathname(MDBX_txn *txn, const pathchar_t *dest_path, __cold static int copy2pathname(MDBX_txn *txn, const pathchar_t *dest_path, MDBX_copy_flags_t flags) {
MDBX_copy_flags_t flags) {
if (unlikely(!dest_path || *dest_path == '\0')) if (unlikely(!dest_path || *dest_path == '\0'))
return MDBX_EINVAL; return MDBX_EINVAL;
@ -813,7 +761,7 @@ __cold static int copy2pathname(MDBX_txn *txn, const pathchar_t *dest_path,
lock_op.l_start = 0; lock_op.l_start = 0;
lock_op.l_len = OFF_T_MAX; lock_op.l_len = OFF_T_MAX;
if (MDBX_FCNTL(newfd, MDBX_F_SETLK, &lock_op) if (MDBX_FCNTL(newfd, MDBX_F_SETLK, &lock_op)
#if (defined(__linux__) || defined(__gnu_linux__)) && defined(LOCK_EX) && \ #if (defined(__linux__) || defined(__gnu_linux__)) && defined(LOCK_EX) && \
(!defined(__ANDROID_API__) || __ANDROID_API__ >= 24) (!defined(__ANDROID_API__) || __ANDROID_API__ >= 24)
|| flock(newfd, LOCK_EX | LOCK_NB) || flock(newfd, LOCK_EX | LOCK_NB)
#endif /* Linux */ #endif /* Linux */
@ -837,8 +785,7 @@ __cold static int copy2pathname(MDBX_txn *txn, const pathchar_t *dest_path,
//---------------------------------------------------------------------------- //----------------------------------------------------------------------------
__cold int mdbx_txn_copy2fd(MDBX_txn *txn, mdbx_filehandle_t fd, __cold int mdbx_txn_copy2fd(MDBX_txn *txn, mdbx_filehandle_t fd, MDBX_copy_flags_t flags) {
MDBX_copy_flags_t flags) {
int rc = check_txn(txn, MDBX_TXN_BLOCKED); int rc = check_txn(txn, MDBX_TXN_BLOCKED);
if (likely(rc == MDBX_SUCCESS)) if (likely(rc == MDBX_SUCCESS))
rc = copy2fd(txn, fd, flags); rc = copy2fd(txn, fd, flags);
@ -847,8 +794,7 @@ __cold int mdbx_txn_copy2fd(MDBX_txn *txn, mdbx_filehandle_t fd,
return LOG_IFERR(rc); return LOG_IFERR(rc);
} }
__cold int mdbx_env_copy2fd(MDBX_env *env, mdbx_filehandle_t fd, __cold int mdbx_env_copy2fd(MDBX_env *env, mdbx_filehandle_t fd, MDBX_copy_flags_t flags) {
MDBX_copy_flags_t flags) {
if (unlikely(flags & (MDBX_CP_DISPOSE_TXN | MDBX_CP_RENEW_TXN))) if (unlikely(flags & (MDBX_CP_DISPOSE_TXN | MDBX_CP_RENEW_TXN)))
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
@ -866,8 +812,7 @@ __cold int mdbx_env_copy2fd(MDBX_env *env, mdbx_filehandle_t fd,
return LOG_IFERR(rc); return LOG_IFERR(rc);
} }
__cold int mdbx_txn_copy2pathname(MDBX_txn *txn, const char *dest_path, __cold int mdbx_txn_copy2pathname(MDBX_txn *txn, const char *dest_path, MDBX_copy_flags_t flags) {
MDBX_copy_flags_t flags) {
#if defined(_WIN32) || defined(_WIN64) #if defined(_WIN32) || defined(_WIN64)
wchar_t *dest_pathW = nullptr; wchar_t *dest_pathW = nullptr;
int rc = osal_mb2w(dest_path, &dest_pathW); int rc = osal_mb2w(dest_path, &dest_pathW);
@ -878,8 +823,7 @@ __cold int mdbx_txn_copy2pathname(MDBX_txn *txn, const char *dest_path,
return LOG_IFERR(rc); return LOG_IFERR(rc);
} }
__cold int mdbx_txn_copy2pathnameW(MDBX_txn *txn, const wchar_t *dest_path, __cold int mdbx_txn_copy2pathnameW(MDBX_txn *txn, const wchar_t *dest_path, MDBX_copy_flags_t flags) {
MDBX_copy_flags_t flags) {
#endif /* Windows */ #endif /* Windows */
int rc = check_txn(txn, MDBX_TXN_BLOCKED); int rc = check_txn(txn, MDBX_TXN_BLOCKED);
if (likely(rc == MDBX_SUCCESS)) if (likely(rc == MDBX_SUCCESS))
@ -889,8 +833,7 @@ __cold int mdbx_txn_copy2pathnameW(MDBX_txn *txn, const wchar_t *dest_path,
return LOG_IFERR(rc); return LOG_IFERR(rc);
} }
__cold int mdbx_env_copy(MDBX_env *env, const char *dest_path, __cold int mdbx_env_copy(MDBX_env *env, const char *dest_path, MDBX_copy_flags_t flags) {
MDBX_copy_flags_t flags) {
#if defined(_WIN32) || defined(_WIN64) #if defined(_WIN32) || defined(_WIN64)
wchar_t *dest_pathW = nullptr; wchar_t *dest_pathW = nullptr;
int rc = osal_mb2w(dest_path, &dest_pathW); int rc = osal_mb2w(dest_path, &dest_pathW);
@ -901,8 +844,7 @@ __cold int mdbx_env_copy(MDBX_env *env, const char *dest_path,
return LOG_IFERR(rc); return LOG_IFERR(rc);
} }
__cold int mdbx_env_copyW(MDBX_env *env, const wchar_t *dest_path, __cold int mdbx_env_copyW(MDBX_env *env, const wchar_t *dest_path, MDBX_copy_flags_t flags) {
MDBX_copy_flags_t flags) {
#endif /* Windows */ #endif /* Windows */
if (unlikely(flags & (MDBX_CP_DISPOSE_TXN | MDBX_CP_RENEW_TXN))) if (unlikely(flags & (MDBX_CP_DISPOSE_TXN | MDBX_CP_RENEW_TXN)))
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
@ -916,8 +858,7 @@ __cold int mdbx_env_copyW(MDBX_env *env, const wchar_t *dest_path,
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
return LOG_IFERR(rc); return LOG_IFERR(rc);
rc = copy2pathname(txn, dest_path, rc = copy2pathname(txn, dest_path, flags | MDBX_CP_DISPOSE_TXN | MDBX_CP_RENEW_TXN);
flags | MDBX_CP_DISPOSE_TXN | MDBX_CP_RENEW_TXN);
mdbx_txn_abort(txn); mdbx_txn_abort(txn);
return LOG_IFERR(rc); return LOG_IFERR(rc);
} }
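Not part of this commit (which only reflows the code above) — a minimal usage sketch of the public copy API whose wrappers appear in this file, assuming an already-opened MDBX_env and eliding error handling:

    #include "mdbx.h"

    /* Write a compacted snapshot of an open environment to dest_path.
     * MDBX_CP_COMPACT renumbers pages and omits unused space; adding
     * MDBX_CP_THROTTLE_MVCC would additionally throttle copying to reduce
     * MVCC/GC pressure on concurrent writers. */
    static int backup_compacted(MDBX_env *env, const char *dest_path) {
      return mdbx_env_copy(env, dest_path, MDBX_CP_COMPACT);
    }
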
View File
@ -11,14 +11,11 @@ __cold int cursor_check(const MDBX_cursor *mc) {
} else { } else {
cASSERT(mc, (mc->txn->flags & MDBX_WRITEMAP) == 0 || MDBX_AVOID_MSYNC); cASSERT(mc, (mc->txn->flags & MDBX_WRITEMAP) == 0 || MDBX_AVOID_MSYNC);
cASSERT(mc, mc->txn->tw.dirtyroom + mc->txn->tw.dirtylist->length == cASSERT(mc, mc->txn->tw.dirtyroom + mc->txn->tw.dirtylist->length ==
(mc->txn->parent ? mc->txn->parent->tw.dirtyroom (mc->txn->parent ? mc->txn->parent->tw.dirtyroom : mc->txn->env->options.dp_limit));
: mc->txn->env->options.dp_limit));
} }
cASSERT(mc, (mc->checking & z_updating) ? mc->top + 1 <= mc->tree->height cASSERT(mc, (mc->checking & z_updating) ? mc->top + 1 <= mc->tree->height : mc->top + 1 == mc->tree->height);
: mc->top + 1 == mc->tree->height); if (unlikely((mc->checking & z_updating) ? mc->top + 1 > mc->tree->height : mc->top + 1 != mc->tree->height))
if (unlikely((mc->checking & z_updating) ? mc->top + 1 > mc->tree->height
: mc->top + 1 != mc->tree->height))
return MDBX_CURSOR_FULL; return MDBX_CURSOR_FULL;
if (is_pointed(mc) && (mc->checking & z_updating) == 0) { if (is_pointed(mc) && (mc->checking & z_updating) == 0) {
@ -40,17 +37,14 @@ __cold int cursor_check(const MDBX_cursor *mc) {
page_t *mp = mc->pg[n]; page_t *mp = mc->pg[n];
const size_t nkeys = page_numkeys(mp); const size_t nkeys = page_numkeys(mp);
const bool expect_branch = (n < mc->tree->height - 1) ? true : false; const bool expect_branch = (n < mc->tree->height - 1) ? true : false;
const bool expect_nested_leaf = const bool expect_nested_leaf = (n + 1 == mc->tree->height - 1) ? true : false;
(n + 1 == mc->tree->height - 1) ? true : false;
const bool branch = is_branch(mp) ? true : false; const bool branch = is_branch(mp) ? true : false;
cASSERT(mc, branch == expect_branch); cASSERT(mc, branch == expect_branch);
if (unlikely(branch != expect_branch)) if (unlikely(branch != expect_branch))
return MDBX_CURSOR_FULL; return MDBX_CURSOR_FULL;
if ((mc->checking & z_updating) == 0) { if ((mc->checking & z_updating) == 0) {
cASSERT(mc, nkeys > mc->ki[n] || (!branch && nkeys == mc->ki[n] && cASSERT(mc, nkeys > mc->ki[n] || (!branch && nkeys == mc->ki[n] && (mc->flags & z_hollow) != 0));
(mc->flags & z_hollow) != 0)); if (unlikely(nkeys <= mc->ki[n] && !(!branch && nkeys == mc->ki[n] && (mc->flags & z_hollow) != 0)))
if (unlikely(nkeys <= mc->ki[n] && !(!branch && nkeys == mc->ki[n] &&
(mc->flags & z_hollow) != 0)))
return MDBX_CURSOR_FULL; return MDBX_CURSOR_FULL;
} else { } else {
cASSERT(mc, nkeys + 1 >= mc->ki[n]); cASSERT(mc, nkeys + 1 >= mc->ki[n]);
@ -96,8 +90,7 @@ __cold int cursor_check_updating(MDBX_cursor *mc) {
} }
bool cursor_is_tracked(const MDBX_cursor *mc) { bool cursor_is_tracked(const MDBX_cursor *mc) {
for (MDBX_cursor *scan = mc->txn->cursors[cursor_dbi(mc)]; scan; for (MDBX_cursor *scan = mc->txn->cursors[cursor_dbi(mc)]; scan; scan = scan->next)
scan = scan->next)
if (mc == ((mc->flags & z_inner) ? &scan->subcur->cursor : scan)) if (mc == ((mc->flags & z_inner) ? &scan->subcur->cursor : scan))
return true; return true;
return false; return false;
@ -121,16 +114,14 @@ static int touch_dbi(MDBX_cursor *mc) {
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
return rc; return rc;
mc->txn->dbi_state[MAIN_DBI] |= DBI_DIRTY; mc->txn->dbi_state[MAIN_DBI] |= DBI_DIRTY;
rc = tree_search(&cx.outer, &container_of(mc->clc, kvx_t, clc)->name, rc = tree_search(&cx.outer, &container_of(mc->clc, kvx_t, clc)->name, Z_MODIFY);
Z_MODIFY);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
return rc; return rc;
} }
return MDBX_SUCCESS; return MDBX_SUCCESS;
} }
__hot int cursor_touch(MDBX_cursor *const mc, const MDBX_val *key, __hot int cursor_touch(MDBX_cursor *const mc, const MDBX_val *key, const MDBX_val *data) {
const MDBX_val *data) {
cASSERT(mc, (mc->txn->flags & MDBX_TXN_RDONLY) == 0); cASSERT(mc, (mc->txn->flags & MDBX_TXN_RDONLY) == 0);
cASSERT(mc, is_pointed(mc) || mc->tree->height == 0); cASSERT(mc, is_pointed(mc) || mc->tree->height == 0);
cASSERT(mc, cursor_is_tracked(mc)); cASSERT(mc, cursor_is_tracked(mc));
@ -193,13 +184,10 @@ __hot int cursor_touch(MDBX_cursor *const mc, const MDBX_val *key,
/*----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*/
int cursor_shadow(MDBX_cursor *parent_cursor, MDBX_txn *nested_txn, int cursor_shadow(MDBX_cursor *parent_cursor, MDBX_txn *nested_txn, const size_t dbi) {
const size_t dbi) {
tASSERT(nested_txn, dbi > FREE_DBI && dbi < nested_txn->n_dbi); tASSERT(nested_txn, dbi > FREE_DBI && dbi < nested_txn->n_dbi);
const size_t size = parent_cursor->subcur const size_t size = parent_cursor->subcur ? sizeof(MDBX_cursor) + sizeof(subcur_t) : sizeof(MDBX_cursor);
? sizeof(MDBX_cursor) + sizeof(subcur_t)
: sizeof(MDBX_cursor);
for (MDBX_cursor *bk; parent_cursor; parent_cursor = bk->next) { for (MDBX_cursor *bk; parent_cursor; parent_cursor = bk->next) {
cASSERT(parent_cursor, parent_cursor != parent_cursor->next); cASSERT(parent_cursor, parent_cursor != parent_cursor->next);
bk = parent_cursor; bk = parent_cursor;
@ -235,8 +223,7 @@ int cursor_shadow(MDBX_cursor *parent_cursor, MDBX_txn *nested_txn,
void cursor_eot(MDBX_cursor *mc, const bool merge) { void cursor_eot(MDBX_cursor *mc, const bool merge) {
const unsigned stage = mc->signature; const unsigned stage = mc->signature;
MDBX_cursor *const bk = mc->backup; MDBX_cursor *const bk = mc->backup;
ENSURE(mc->txn->env, stage == cur_signature_live || ENSURE(mc->txn->env, stage == cur_signature_live || (stage == cur_signature_wait4eot && bk));
(stage == cur_signature_wait4eot && bk));
if (bk) { if (bk) {
subcur_t *mx = mc->subcur; subcur_t *mx = mc->subcur;
cASSERT(mc, mc->txn->parent != nullptr); cASSERT(mc, mc->txn->parent != nullptr);
@ -274,10 +261,8 @@ void cursor_eot(MDBX_cursor *mc, const bool merge) {
/*----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*/
static __always_inline int couple_init(cursor_couple_t *couple, static __always_inline int couple_init(cursor_couple_t *couple, const MDBX_txn *const txn, tree_t *const tree,
const MDBX_txn *const txn, kvx_t *const kvx, uint8_t *const dbi_state) {
tree_t *const tree, kvx_t *const kvx,
uint8_t *const dbi_state) {
VALGRIND_MAKE_MEM_UNDEFINED(couple, sizeof(cursor_couple_t)); VALGRIND_MAKE_MEM_UNDEFINED(couple, sizeof(cursor_couple_t));
tASSERT(txn, F_ISSET(*dbi_state, DBI_VALID | DBI_LINDO)); tASSERT(txn, F_ISSET(*dbi_state, DBI_VALID | DBI_LINDO));
@ -290,12 +275,9 @@ static __always_inline int couple_init(cursor_couple_t *couple,
couple->outer.clc = &kvx->clc; couple->outer.clc = &kvx->clc;
couple->outer.dbi_state = dbi_state; couple->outer.dbi_state = dbi_state;
couple->outer.top_and_flags = z_fresh_mark; couple->outer.top_and_flags = z_fresh_mark;
STATIC_ASSERT((int)z_branch == P_BRANCH && (int)z_leaf == P_LEAF && STATIC_ASSERT((int)z_branch == P_BRANCH && (int)z_leaf == P_LEAF && (int)z_largepage == P_LARGE &&
(int)z_largepage == P_LARGE && (int)z_dupfix == P_DUPFIX); (int)z_dupfix == P_DUPFIX);
couple->outer.checking = couple->outer.checking = (AUDIT_ENABLED() || (txn->env->flags & MDBX_VALIDATION)) ? z_pagecheck | z_leaf : z_leaf;
(AUDIT_ENABLED() || (txn->env->flags & MDBX_VALIDATION))
? z_pagecheck | z_leaf
: z_leaf;
couple->outer.subcur = nullptr; couple->outer.subcur = nullptr;
if (tree->flags & MDBX_DUPSORT) { if (tree->flags & MDBX_DUPSORT) {
@ -310,8 +292,7 @@ static __always_inline int couple_init(cursor_couple_t *couple,
mx->cursor.dbi_state = dbi_state; mx->cursor.dbi_state = dbi_state;
mx->cursor.top_and_flags = z_fresh_mark | z_inner; mx->cursor.top_and_flags = z_fresh_mark | z_inner;
STATIC_ASSERT(MDBX_DUPFIXED * 2 == P_DUPFIX); STATIC_ASSERT(MDBX_DUPFIXED * 2 == P_DUPFIX);
mx->cursor.checking = mx->cursor.checking = couple->outer.checking + ((tree->flags & MDBX_DUPFIXED) << 1);
couple->outer.checking + ((tree->flags & MDBX_DUPFIXED) << 1);
} }
if (unlikely(*dbi_state & DBI_STALE)) if (unlikely(*dbi_state & DBI_STALE))
@ -323,8 +304,7 @@ static __always_inline int couple_init(cursor_couple_t *couple,
return MDBX_SUCCESS; return MDBX_SUCCESS;
} }
__cold int cursor_init4walk(cursor_couple_t *couple, const MDBX_txn *const txn, __cold int cursor_init4walk(cursor_couple_t *couple, const MDBX_txn *const txn, tree_t *const tree, kvx_t *const kvx) {
tree_t *const tree, kvx_t *const kvx) {
return couple_init(couple, txn, tree, kvx, txn->dbi_state); return couple_init(couple, txn, tree, kvx, txn->dbi_state);
} }
@ -332,21 +312,19 @@ int cursor_init(MDBX_cursor *mc, const MDBX_txn *txn, size_t dbi) {
STATIC_ASSERT(offsetof(cursor_couple_t, outer) == 0); STATIC_ASSERT(offsetof(cursor_couple_t, outer) == 0);
int rc = dbi_check(txn, dbi); int rc = dbi_check(txn, dbi);
if (likely(rc == MDBX_SUCCESS)) if (likely(rc == MDBX_SUCCESS))
rc = couple_init(container_of(mc, cursor_couple_t, outer), txn, rc = couple_init(container_of(mc, cursor_couple_t, outer), txn, &txn->dbs[dbi], &txn->env->kvs[dbi],
&txn->dbs[dbi], &txn->env->kvs[dbi], &txn->dbi_state[dbi]); &txn->dbi_state[dbi]);
return rc; return rc;
} }
__cold static int unexpected_dupsort(MDBX_cursor *mc) { __cold static int unexpected_dupsort(MDBX_cursor *mc) {
ERROR("unexpected dupsort-page/node for non-dupsort db/cursor (dbi %zu)", ERROR("unexpected dupsort-page/node for non-dupsort db/cursor (dbi %zu)", cursor_dbi(mc));
cursor_dbi(mc));
mc->txn->flags |= MDBX_TXN_ERROR; mc->txn->flags |= MDBX_TXN_ERROR;
be_poor(mc); be_poor(mc);
return MDBX_CORRUPTED; return MDBX_CORRUPTED;
} }
int cursor_dupsort_setup(MDBX_cursor *mc, const node_t *node, int cursor_dupsort_setup(MDBX_cursor *mc, const node_t *node, const page_t *mp) {
const page_t *mp) {
cASSERT(mc, is_pointed(mc)); cASSERT(mc, is_pointed(mc));
subcur_t *mx = mc->subcur; subcur_t *mx = mc->subcur;
if (!MDBX_DISABLE_VALIDATION && unlikely(mx == nullptr)) if (!MDBX_DISABLE_VALIDATION && unlikely(mx == nullptr))
@ -359,16 +337,13 @@ int cursor_dupsort_setup(MDBX_cursor *mc, const node_t *node,
goto bailout; goto bailout;
case N_DUP | N_TREE: case N_DUP | N_TREE:
if (!MDBX_DISABLE_VALIDATION && unlikely(node_ds(node) != sizeof(tree_t))) { if (!MDBX_DISABLE_VALIDATION && unlikely(node_ds(node) != sizeof(tree_t))) {
ERROR("invalid nested-db record size (%zu, expect %zu)", node_ds(node), ERROR("invalid nested-db record size (%zu, expect %zu)", node_ds(node), sizeof(tree_t));
sizeof(tree_t));
goto bailout; goto bailout;
} }
memcpy(&mx->nested_tree, node_data(node), sizeof(tree_t)); memcpy(&mx->nested_tree, node_data(node), sizeof(tree_t));
const txnid_t pp_txnid = mp->txnid; const txnid_t pp_txnid = mp->txnid;
if (!MDBX_DISABLE_VALIDATION && if (!MDBX_DISABLE_VALIDATION && unlikely(mx->nested_tree.mod_txnid > pp_txnid)) {
unlikely(mx->nested_tree.mod_txnid > pp_txnid)) { ERROR("nested-db.mod_txnid (%" PRIaTXN ") > page-txnid (%" PRIaTXN ")", mx->nested_tree.mod_txnid, pp_txnid);
ERROR("nested-db.mod_txnid (%" PRIaTXN ") > page-txnid (%" PRIaTXN ")",
mx->nested_tree.mod_txnid, pp_txnid);
goto bailout; goto bailout;
} }
mx->cursor.top_and_flags = z_fresh_mark | z_inner; mx->cursor.top_and_flags = z_fresh_mark | z_inner;
@ -390,25 +365,21 @@ int cursor_dupsort_setup(MDBX_cursor *mc, const node_t *node,
mx->cursor.pg[0] = sp; mx->cursor.pg[0] = sp;
mx->cursor.ki[0] = 0; mx->cursor.ki[0] = 0;
mx->nested_tree.flags = flags_db2sub(mc->tree->flags); mx->nested_tree.flags = flags_db2sub(mc->tree->flags);
mx->nested_tree.dupfix_size = mx->nested_tree.dupfix_size = (mc->tree->flags & MDBX_DUPFIXED) ? sp->dupfix_ksize : 0;
(mc->tree->flags & MDBX_DUPFIXED) ? sp->dupfix_ksize : 0;
break; break;
} }
if (unlikely(mx->nested_tree.dupfix_size != mc->tree->dupfix_size)) { if (unlikely(mx->nested_tree.dupfix_size != mc->tree->dupfix_size)) {
if (!MDBX_DISABLE_VALIDATION && unlikely(mc->tree->dupfix_size != 0)) { if (!MDBX_DISABLE_VALIDATION && unlikely(mc->tree->dupfix_size != 0)) {
ERROR("cursor mismatched nested-db dupfix_size %u", ERROR("cursor mismatched nested-db dupfix_size %u", mc->tree->dupfix_size);
mc->tree->dupfix_size);
goto bailout; goto bailout;
} }
if (!MDBX_DISABLE_VALIDATION && if (!MDBX_DISABLE_VALIDATION && unlikely((mc->tree->flags & MDBX_DUPFIXED) == 0)) {
unlikely((mc->tree->flags & MDBX_DUPFIXED) == 0)) {
ERROR("mismatched nested-db flags %u", mc->tree->flags); ERROR("mismatched nested-db flags %u", mc->tree->flags);
goto bailout; goto bailout;
} }
if (!MDBX_DISABLE_VALIDATION && if (!MDBX_DISABLE_VALIDATION &&
unlikely(mx->nested_tree.dupfix_size < mc->clc->v.lmin || unlikely(mx->nested_tree.dupfix_size < mc->clc->v.lmin || mx->nested_tree.dupfix_size > mc->clc->v.lmax)) {
mx->nested_tree.dupfix_size > mc->clc->v.lmax)) {
ERROR("mismatched nested-db.dupfix_size (%u) <> min/max value-length " ERROR("mismatched nested-db.dupfix_size (%u) <> min/max value-length "
"(%zu/%zu)", "(%zu/%zu)",
mx->nested_tree.dupfix_size, mc->clc->v.lmin, mc->clc->v.lmax); mx->nested_tree.dupfix_size, mc->clc->v.lmin, mc->clc->v.lmax);
@ -418,8 +389,7 @@ int cursor_dupsort_setup(MDBX_cursor *mc, const node_t *node,
mc->clc->v.lmin = mc->clc->v.lmax = mx->nested_tree.dupfix_size; mc->clc->v.lmin = mc->clc->v.lmax = mx->nested_tree.dupfix_size;
} }
DEBUG("Sub-db dbi -%zu root page %" PRIaPGNO, cursor_dbi(&mx->cursor), DEBUG("Sub-db dbi -%zu root page %" PRIaPGNO, cursor_dbi(&mx->cursor), mx->nested_tree.root);
mx->nested_tree.root);
return MDBX_SUCCESS; return MDBX_SUCCESS;
bailout: bailout:
@ -450,14 +420,11 @@ static __always_inline int sibling(MDBX_cursor *mc, bool right) {
} }
cursor_pop(mc); cursor_pop(mc);
DEBUG("parent page is page %" PRIaPGNO ", index %u", mc->pg[mc->top]->pgno, DEBUG("parent page is page %" PRIaPGNO ", index %u", mc->pg[mc->top]->pgno, mc->ki[mc->top]);
mc->ki[mc->top]);
int err; int err;
if (right ? (mc->ki[mc->top] + (size_t)1 >= page_numkeys(mc->pg[mc->top])) if (right ? (mc->ki[mc->top] + (size_t)1 >= page_numkeys(mc->pg[mc->top])) : (mc->ki[mc->top] == 0)) {
: (mc->ki[mc->top] == 0)) { DEBUG("no more keys aside, moving to next %s sibling", right ? "right" : "left");
DEBUG("no more keys aside, moving to next %s sibling",
right ? "right" : "left");
err = right ? cursor_sibling_right(mc) : cursor_sibling_left(mc); err = right ? cursor_sibling_right(mc) : cursor_sibling_left(mc);
if (err != MDBX_SUCCESS) { if (err != MDBX_SUCCESS) {
if (likely(err == MDBX_NOTFOUND)) if (likely(err == MDBX_NOTFOUND))
@ -467,8 +434,7 @@ static __always_inline int sibling(MDBX_cursor *mc, bool right) {
} }
} else { } else {
mc->ki[mc->top] += right ? 1 : -1; mc->ki[mc->top] += right ? 1 : -1;
DEBUG("just moving to %s index key %u", right ? "right" : "left", DEBUG("just moving to %s index key %u", right ? "right" : "left", mc->ki[mc->top]);
mc->ki[mc->top]);
} }
cASSERT(mc, is_branch(mc->pg[mc->top])); cASSERT(mc, is_branch(mc->pg[mc->top]));
@ -515,10 +481,8 @@ __hot int cursor_sibling_right(MDBX_cursor *mc) {
/* Функция-шаблон: Приземляет курсор на данные в текущей позиции. /* Функция-шаблон: Приземляет курсор на данные в текущей позиции.
* В том числе, загружает данные во вложенный курсор при его наличии. */ * В том числе, загружает данные во вложенный курсор при его наличии. */
static __always_inline int cursor_bring(const bool inner, const bool tend2first, static __always_inline int cursor_bring(const bool inner, const bool tend2first, MDBX_cursor *__restrict mc,
MDBX_cursor *__restrict mc, MDBX_val *__restrict key, MDBX_val *__restrict data, bool eof) {
MDBX_val *__restrict key,
MDBX_val *__restrict data, bool eof) {
if (inner) { if (inner) {
cASSERT(mc, !data && !mc->subcur && (mc->flags & z_inner) != 0); cASSERT(mc, !data && !mc->subcur && (mc->flags & z_inner) != 0);
} else { } else {
@ -527,8 +491,7 @@ static __always_inline int cursor_bring(const bool inner, const bool tend2first,
const page_t *mp = mc->pg[mc->top]; const page_t *mp = mc->pg[mc->top];
if (!MDBX_DISABLE_VALIDATION && unlikely(!check_leaf_type(mc, mp))) { if (!MDBX_DISABLE_VALIDATION && unlikely(!check_leaf_type(mc, mp))) {
ERROR("unexpected leaf-page #%" PRIaPGNO " type 0x%x seen by cursor", ERROR("unexpected leaf-page #%" PRIaPGNO " type 0x%x seen by cursor", mp->pgno, mp->flags);
mp->pgno, mp->flags);
return MDBX_CORRUPTED; return MDBX_CORRUPTED;
} }
@ -554,8 +517,7 @@ static __always_inline int cursor_bring(const bool inner, const bool tend2first,
return err; return err;
MDBX_ANALYSIS_ASSUME(mc->subcur != nullptr); MDBX_ANALYSIS_ASSUME(mc->subcur != nullptr);
if (node_flags(node) & N_TREE) { if (node_flags(node) & N_TREE) {
err = tend2first ? inner_first(&mc->subcur->cursor, data) err = tend2first ? inner_first(&mc->subcur->cursor, data) : inner_last(&mc->subcur->cursor, data);
: inner_last(&mc->subcur->cursor, data);
if (unlikely(err != MDBX_SUCCESS)) if (unlikely(err != MDBX_SUCCESS))
return err; return err;
} else { } else {
@ -592,10 +554,8 @@ static __always_inline int cursor_bring(const bool inner, const bool tend2first,
} }
/* Функция-шаблон: Устанавливает курсор в начало или конец. */ /* Функция-шаблон: Устанавливает курсор в начало или конец. */
static __always_inline int cursor_brim(const bool inner, const bool tend2first, static __always_inline int cursor_brim(const bool inner, const bool tend2first, MDBX_cursor *__restrict mc,
MDBX_cursor *__restrict mc, MDBX_val *__restrict key, MDBX_val *__restrict data) {
MDBX_val *__restrict key,
MDBX_val *__restrict data) {
if (mc->top != 0) { if (mc->top != 0) {
int err = tree_search(mc, nullptr, tend2first ? Z_FIRST : Z_LAST); int err = tree_search(mc, nullptr, tend2first ? Z_FIRST : Z_LAST);
if (unlikely(err != MDBX_SUCCESS)) if (unlikely(err != MDBX_SUCCESS))
@ -607,13 +567,9 @@ static __always_inline int cursor_brim(const bool inner, const bool tend2first,
return cursor_bring(inner, tend2first, mc, key, data, !tend2first); return cursor_bring(inner, tend2first, mc, key, data, !tend2first);
} }
__hot int inner_first(MDBX_cursor *mc, MDBX_val *data) { __hot int inner_first(MDBX_cursor *mc, MDBX_val *data) { return cursor_brim(true, true, mc, data, nullptr); }
return cursor_brim(true, true, mc, data, nullptr);
}
__hot int inner_last(MDBX_cursor *mc, MDBX_val *data) { __hot int inner_last(MDBX_cursor *mc, MDBX_val *data) { return cursor_brim(true, false, mc, data, nullptr); }
return cursor_brim(true, false, mc, data, nullptr);
}
__hot int outer_first(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data) { __hot int outer_first(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data) {
return cursor_brim(false, true, mc, key, data); return cursor_brim(false, true, mc, key, data);
@ -627,23 +583,18 @@ __hot int outer_last(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data) {
/* Функция-шаблон: Передвигает курсор на одну позицию. /* Функция-шаблон: Передвигает курсор на одну позицию.
* При необходимости управляет вложенным курсором. */ * При необходимости управляет вложенным курсором. */
static __always_inline int cursor_step(const bool inner, const bool forward, static __always_inline int cursor_step(const bool inner, const bool forward, MDBX_cursor *__restrict mc,
MDBX_cursor *__restrict mc, MDBX_val *__restrict key, MDBX_val *__restrict data, MDBX_cursor_op op) {
MDBX_val *__restrict key,
MDBX_val *__restrict data,
MDBX_cursor_op op) {
if (forward) { if (forward) {
if (inner) if (inner)
cASSERT(mc, op == MDBX_NEXT); cASSERT(mc, op == MDBX_NEXT);
else else
cASSERT(mc, cASSERT(mc, op == MDBX_NEXT || op == MDBX_NEXT_DUP || op == MDBX_NEXT_NODUP);
op == MDBX_NEXT || op == MDBX_NEXT_DUP || op == MDBX_NEXT_NODUP);
} else { } else {
if (inner) if (inner)
cASSERT(mc, op == MDBX_PREV); cASSERT(mc, op == MDBX_PREV);
else else
cASSERT(mc, cASSERT(mc, op == MDBX_PREV || op == MDBX_PREV_DUP || op == MDBX_PREV_NODUP);
op == MDBX_PREV || op == MDBX_PREV_DUP || op == MDBX_PREV_NODUP);
} }
if (inner) { if (inner) {
cASSERT(mc, !data && !mc->subcur && (mc->flags & z_inner) != 0); cASSERT(mc, !data && !mc->subcur && (mc->flags & z_inner) != 0);
@ -668,15 +619,13 @@ static __always_inline int cursor_step(const bool inner, const bool forward,
cASSERT(mc, nkeys > 0); cASSERT(mc, nkeys > 0);
intptr_t ki = mc->ki[mc->top]; intptr_t ki = mc->ki[mc->top];
const uint8_t state = const uint8_t state = mc->flags & (z_after_delete | z_hollow | z_eof_hard | z_eof_soft);
mc->flags & (z_after_delete | z_hollow | z_eof_hard | z_eof_soft);
if (likely(state == 0)) { if (likely(state == 0)) {
cASSERT(mc, ki < nkeys); cASSERT(mc, ki < nkeys);
if (!inner && op != (forward ? MDBX_NEXT_NODUP : MDBX_PREV_NODUP)) { if (!inner && op != (forward ? MDBX_NEXT_NODUP : MDBX_PREV_NODUP)) {
int err = MDBX_NOTFOUND; int err = MDBX_NOTFOUND;
if (inner_pointed(mc)) { if (inner_pointed(mc)) {
err = forward ? inner_next(&mc->subcur->cursor, data) err = forward ? inner_next(&mc->subcur->cursor, data) : inner_prev(&mc->subcur->cursor, data);
: inner_prev(&mc->subcur->cursor, data);
if (likely(err == MDBX_SUCCESS)) { if (likely(err == MDBX_SUCCESS)) {
get_key_optional(page_node(mp, ki), key); get_key_optional(page_node(mp, ki), key);
return MDBX_SUCCESS; return MDBX_SUCCESS;
@ -715,9 +664,8 @@ static __always_inline int cursor_step(const bool inner, const bool forward,
} }
} }
DEBUG("turn-%s: top page was %" PRIaPGNO " in cursor %p, ki %zi of %zi", DEBUG("turn-%s: top page was %" PRIaPGNO " in cursor %p, ki %zi of %zi", forward ? "next" : "prev", mp->pgno,
forward ? "next" : "prev", mp->pgno, __Wpedantic_format_voidptr(mc), ki, __Wpedantic_format_voidptr(mc), ki, nkeys);
nkeys);
if (forward) { if (forward) {
if (likely(++ki < nkeys)) if (likely(++ki < nkeys))
mc->ki[mc->top] = (indx_t)ki; mc->ki[mc->top] = (indx_t)ki;
@ -727,8 +675,7 @@ static __always_inline int cursor_step(const bool inner, const bool forward,
if (unlikely(err != MDBX_SUCCESS)) if (unlikely(err != MDBX_SUCCESS))
return err; return err;
mp = mc->pg[mc->top]; mp = mc->pg[mc->top];
DEBUG("next page is %" PRIaPGNO ", key index %u", mp->pgno, DEBUG("next page is %" PRIaPGNO ", key index %u", mp->pgno, mc->ki[mc->top]);
mc->ki[mc->top]);
} }
} else { } else {
if (likely(--ki >= 0)) if (likely(--ki >= 0))
@ -739,47 +686,38 @@ static __always_inline int cursor_step(const bool inner, const bool forward,
if (unlikely(err != MDBX_SUCCESS)) if (unlikely(err != MDBX_SUCCESS))
return err; return err;
mp = mc->pg[mc->top]; mp = mc->pg[mc->top];
DEBUG("prev page is %" PRIaPGNO ", key index %u", mp->pgno, DEBUG("prev page is %" PRIaPGNO ", key index %u", mp->pgno, mc->ki[mc->top]);
mc->ki[mc->top]);
} }
} }
DEBUG("==> cursor points to page %" PRIaPGNO " with %zu keys, key index %u", DEBUG("==> cursor points to page %" PRIaPGNO " with %zu keys, key index %u", mp->pgno, page_numkeys(mp),
mp->pgno, page_numkeys(mp), mc->ki[mc->top]); mc->ki[mc->top]);
bring: bring:
return cursor_bring(inner, forward, mc, key, data, false); return cursor_bring(inner, forward, mc, key, data, false);
} }
__hot int inner_next(MDBX_cursor *mc, MDBX_val *data) { __hot int inner_next(MDBX_cursor *mc, MDBX_val *data) { return cursor_step(true, true, mc, data, nullptr, MDBX_NEXT); }
return cursor_step(true, true, mc, data, nullptr, MDBX_NEXT);
}
__hot int inner_prev(MDBX_cursor *mc, MDBX_val *data) { __hot int inner_prev(MDBX_cursor *mc, MDBX_val *data) { return cursor_step(true, false, mc, data, nullptr, MDBX_PREV); }
return cursor_step(true, false, mc, data, nullptr, MDBX_PREV);
}
__hot int outer_next(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data, __hot int outer_next(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data, MDBX_cursor_op op) {
MDBX_cursor_op op) {
return cursor_step(false, true, mc, key, data, op); return cursor_step(false, true, mc, key, data, op);
} }
__hot int outer_prev(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data, __hot int outer_prev(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data, MDBX_cursor_op op) {
MDBX_cursor_op op) {
return cursor_step(false, false, mc, key, data, op); return cursor_step(false, false, mc, key, data, op);
} }
/*----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*/
__hot int cursor_put(MDBX_cursor *mc, const MDBX_val *key, MDBX_val *data, __hot int cursor_put(MDBX_cursor *mc, const MDBX_val *key, MDBX_val *data, unsigned flags) {
unsigned flags) {
int err; int err;
DKBUF_DEBUG; DKBUF_DEBUG;
MDBX_env *const env = mc->txn->env; MDBX_env *const env = mc->txn->env;
if (LOG_ENABLED(MDBX_LOG_DEBUG) && (flags & MDBX_RESERVE)) if (LOG_ENABLED(MDBX_LOG_DEBUG) && (flags & MDBX_RESERVE))
data->iov_base = nullptr; data->iov_base = nullptr;
DEBUG("==> put db %d key [%s], size %" PRIuPTR ", data [%s] size %" PRIuPTR, DEBUG("==> put db %d key [%s], size %" PRIuPTR ", data [%s] size %" PRIuPTR, cursor_dbi_dbg(mc), DKEY_DEBUG(key),
cursor_dbi_dbg(mc), DKEY_DEBUG(key), key->iov_len, DVAL_DEBUG(data), key->iov_len, DVAL_DEBUG(data), data->iov_len);
data->iov_len);
if ((flags & MDBX_CURRENT) != 0 && (mc->flags & z_inner) == 0) { if ((flags & MDBX_CURRENT) != 0 && (mc->flags & z_inner) == 0) {
if (unlikely(flags & (MDBX_APPEND | MDBX_NOOVERWRITE))) if (unlikely(flags & (MDBX_APPEND | MDBX_NOOVERWRITE)))
@ -805,8 +743,7 @@ __hot int cursor_put(MDBX_cursor *mc, const MDBX_val *key, MDBX_val *data,
/* Если за ключом более одного значения, либо если размер данных /* Если за ключом более одного значения, либо если размер данных
* отличается, то вместо обновления требуется удаление и * отличается, то вместо обновления требуется удаление и
* последующая вставка. */ * последующая вставка. */
if (mc->subcur->nested_tree.items > 1 || if (mc->subcur->nested_tree.items > 1 || current_data.iov_len != data->iov_len) {
current_data.iov_len != data->iov_len) {
drop_current: drop_current:
err = cursor_del(mc, flags & MDBX_ALLDUPS); err = cursor_del(mc, flags & MDBX_ALLDUPS);
if (unlikely(err != MDBX_SUCCESS)) if (unlikely(err != MDBX_SUCCESS))
@ -826,8 +763,7 @@ __hot int cursor_put(MDBX_cursor *mc, const MDBX_val *key, MDBX_val *data,
goto skip_check_samedata; goto skip_check_samedata;
} }
} }
if (!(flags & MDBX_RESERVE) && if (!(flags & MDBX_RESERVE) && unlikely(cmp_lenfast(&current_data, data) == 0))
unlikely(cmp_lenfast(&current_data, data) == 0))
return MDBX_SUCCESS /* the same data, nothing to update */; return MDBX_SUCCESS /* the same data, nothing to update */;
skip_check_samedata:; skip_check_samedata:;
} }
@ -843,8 +779,7 @@ __hot int cursor_put(MDBX_cursor *mc, const MDBX_val *key, MDBX_val *data,
if ((flags & MDBX_APPEND) && mc->tree->items > 0) { if ((flags & MDBX_APPEND) && mc->tree->items > 0) {
old_data.iov_base = nullptr; old_data.iov_base = nullptr;
old_data.iov_len = 0; old_data.iov_len = 0;
rc = (mc->flags & z_inner) ? inner_last(mc, &last_key) rc = (mc->flags & z_inner) ? inner_last(mc, &last_key) : outer_last(mc, &last_key, &old_data);
: outer_last(mc, &last_key, &old_data);
if (likely(rc == MDBX_SUCCESS)) { if (likely(rc == MDBX_SUCCESS)) {
const int cmp = mc->clc->k.cmp(key, &last_key); const int cmp = mc->clc->k.cmp(key, &last_key);
if (likely(cmp > 0)) { if (likely(cmp > 0)) {
@ -875,11 +810,10 @@ __hot int cursor_put(MDBX_cursor *mc, const MDBX_val *key, MDBX_val *data,
if (unlikely(mc->flags & z_inner)) { if (unlikely(mc->flags & z_inner)) {
/* nested subtree of DUPSORT-database with the same key, /* nested subtree of DUPSORT-database with the same key,
* nothing to update */ * nothing to update */
eASSERT(env, eASSERT(env, data->iov_len == 0 && (old_data.iov_len == 0 ||
data->iov_len == 0 && (old_data.iov_len == 0 || /* olddata may not be updated in case
/* olddata may not be updated in case DUPFIX-page of dupfix-table */
DUPFIX-page of dupfix-table */ (mc->tree->flags & MDBX_DUPFIXED)));
(mc->tree->flags & MDBX_DUPFIXED)));
return MDBX_SUCCESS; return MDBX_SUCCESS;
} }
if (unlikely(flags & MDBX_ALLDUPS) && inner_pointed(mc)) { if (unlikely(flags & MDBX_ALLDUPS) && inner_pointed(mc)) {
@ -936,13 +870,11 @@ __hot int cursor_put(MDBX_cursor *mc, const MDBX_val *key, MDBX_val *data,
mc->tree->root = npr.page->pgno; mc->tree->root = npr.page->pgno;
mc->tree->height++; mc->tree->height++;
if (mc->tree->flags & MDBX_INTEGERKEY) { if (mc->tree->flags & MDBX_INTEGERKEY) {
assert(key->iov_len >= mc->clc->k.lmin && assert(key->iov_len >= mc->clc->k.lmin && key->iov_len <= mc->clc->k.lmax);
key->iov_len <= mc->clc->k.lmax);
mc->clc->k.lmin = mc->clc->k.lmax = key->iov_len; mc->clc->k.lmin = mc->clc->k.lmax = key->iov_len;
} }
if (mc->tree->flags & (MDBX_INTEGERDUP | MDBX_DUPFIXED)) { if (mc->tree->flags & (MDBX_INTEGERDUP | MDBX_DUPFIXED)) {
assert(data->iov_len >= mc->clc->v.lmin && assert(data->iov_len >= mc->clc->v.lmin && data->iov_len <= mc->clc->v.lmax);
data->iov_len <= mc->clc->v.lmax);
assert(mc->subcur != nullptr); assert(mc->subcur != nullptr);
mc->tree->dupfix_size = /* mc->subcur->nested_tree.dupfix_size = */ mc->tree->dupfix_size = /* mc->subcur->nested_tree.dupfix_size = */
(unsigned)(mc->clc->v.lmin = mc->clc->v.lmax = data->iov_len); (unsigned)(mc->clc->v.lmin = mc->clc->v.lmax = data->iov_len);
@ -966,12 +898,10 @@ __hot int cursor_put(MDBX_cursor *mc, const MDBX_val *key, MDBX_val *data,
if (insert_key) { if (insert_key) {
/* The key does not exist */ /* The key does not exist */
DEBUG("inserting key at index %i", mc->ki[mc->top]); DEBUG("inserting key at index %i", mc->ki[mc->top]);
if ((mc->tree->flags & MDBX_DUPSORT) && if ((mc->tree->flags & MDBX_DUPSORT) && node_size(key, data) > env->leaf_nodemax) {
node_size(key, data) > env->leaf_nodemax) {
/* Too big for a node, insert in sub-DB. Set up an empty /* Too big for a node, insert in sub-DB. Set up an empty
* "old sub-page" for convert_to_subtree to expand to a full page. */ * "old sub-page" for convert_to_subtree to expand to a full page. */
fp->dupfix_ksize = fp->dupfix_ksize = (mc->tree->flags & MDBX_DUPFIXED) ? (uint16_t)data->iov_len : 0;
(mc->tree->flags & MDBX_DUPFIXED) ? (uint16_t)data->iov_len : 0;
fp->lower = fp->upper = 0; fp->lower = fp->upper = 0;
old_data.iov_len = PAGEHDRSZ; old_data.iov_len = PAGEHDRSZ;
goto convert_to_subtree; goto convert_to_subtree;
@ -1022,9 +952,7 @@ __hot int cursor_put(MDBX_cursor *mc, const MDBX_val *key, MDBX_val *data,
/* Large/Overflow page overwrites need special handling */ /* Large/Overflow page overwrites need special handling */
if (unlikely(node_flags(node) & N_BIG)) { if (unlikely(node_flags(node) & N_BIG)) {
const size_t dpages = (node_size(key, data) > env->leaf_nodemax) const size_t dpages = (node_size(key, data) > env->leaf_nodemax) ? largechunk_npages(env, data->iov_len) : 0;
? largechunk_npages(env, data->iov_len)
: 0;
const pgno_t pgno = node_largedata_pgno(node); const pgno_t pgno = node_largedata_pgno(node);
pgr_t lp = page_get_large(mc, pgno, mc->pg[mc->top]->txnid); pgr_t lp = page_get_large(mc, pgno, mc->pg[mc->top]->txnid);
@ -1035,11 +963,8 @@ __hot int cursor_put(MDBX_cursor *mc, const MDBX_val *key, MDBX_val *data,
/* Is the ov page from this txn (or a parent) and big enough? */ /* Is the ov page from this txn (or a parent) and big enough? */
const size_t ovpages = lp.page->pages; const size_t ovpages = lp.page->pages;
const size_t extra_threshold = const size_t extra_threshold =
(mc->tree == &mc->txn->dbs[FREE_DBI]) (mc->tree == &mc->txn->dbs[FREE_DBI]) ? 1 : /* LY: add configurable threshold to keep reserve space */ 0;
? 1 if (!is_frozen(mc->txn, lp.page) && ovpages >= dpages && ovpages <= dpages + extra_threshold) {
: /* LY: add configurable threshold to keep reserve space */ 0;
if (!is_frozen(mc->txn, lp.page) && ovpages >= dpages &&
ovpages <= dpages + extra_threshold) {
/* yes, overwrite it. */ /* yes, overwrite it. */
if (!is_modifable(mc->txn, lp.page)) { if (!is_modifable(mc->txn, lp.page)) {
if (is_spilled(mc->txn, lp.page)) { if (is_spilled(mc->txn, lp.page)) {
@ -1052,10 +977,8 @@ __hot int cursor_put(MDBX_cursor *mc, const MDBX_val *key, MDBX_val *data,
if (unlikely(!mc->txn->parent)) { if (unlikely(!mc->txn->parent)) {
ERROR("Unexpected not frozen/modifiable/spilled but shadowed %s " ERROR("Unexpected not frozen/modifiable/spilled but shadowed %s "
"page %" PRIaPGNO " mod-txnid %" PRIaTXN "," "page %" PRIaPGNO " mod-txnid %" PRIaTXN ","
" without parent transaction, current txn %" PRIaTXN " without parent transaction, current txn %" PRIaTXN " front %" PRIaTXN,
" front %" PRIaTXN, "large/overflow", pgno, lp.page->txnid, mc->txn->txnid, mc->txn->front_txnid);
"large/overflow", pgno, lp.page->txnid, mc->txn->txnid,
mc->txn->front_txnid);
return MDBX_PROBLEM; return MDBX_PROBLEM;
} }
@ -1094,8 +1017,7 @@ __hot int cursor_put(MDBX_cursor *mc, const MDBX_val *key, MDBX_val *data,
} else { } else {
old_data.iov_len = node_ds(node); old_data.iov_len = node_ds(node);
old_data.iov_base = node_data(node); old_data.iov_base = node_data(node);
cASSERT(mc, ptr_disp(old_data.iov_base, old_data.iov_len) <= cASSERT(mc, ptr_disp(old_data.iov_base, old_data.iov_len) <= ptr_disp(mc->pg[mc->top], env->ps));
ptr_disp(mc->pg[mc->top], env->ps));
/* DB has dups? */ /* DB has dups? */
if (mc->tree->flags & MDBX_DUPSORT) { if (mc->tree->flags & MDBX_DUPSORT) {
@ -1133,8 +1055,7 @@ __hot int cursor_put(MDBX_cursor *mc, const MDBX_val *key, MDBX_val *data,
} }
/* Back up original data item */ /* Back up original data item */
memcpy(old_singledup.iov_base = fp + 1, old_data.iov_base, memcpy(old_singledup.iov_base = fp + 1, old_data.iov_base, old_singledup.iov_len = old_data.iov_len);
old_singledup.iov_len = old_data.iov_len);
/* Make sub-page header for the dup items, with dummy body */ /* Make sub-page header for the dup items, with dummy body */
fp->flags = P_LEAF | P_SUBP; fp->flags = P_LEAF | P_SUBP;
@ -1149,13 +1070,11 @@ __hot int cursor_put(MDBX_cursor *mc, const MDBX_val *key, MDBX_val *data,
* не сразу расширять созданную под-страницу. * не сразу расширять созданную под-страницу.
* Резервирование в целом сомнительно (см ниже), но может сработать * Резервирование в целом сомнительно (см ниже), но может сработать
* в плюс (а если в минус то несущественный) при коротких ключах. */ * в плюс (а если в минус то несущественный) при коротких ключах. */
xdata.iov_len += page_subleaf2_reserve( xdata.iov_len +=
env, page_room(mc->pg[mc->top]) + old_data.iov_len, page_subleaf2_reserve(env, page_room(mc->pg[mc->top]) + old_data.iov_len, xdata.iov_len, data->iov_len);
xdata.iov_len, data->iov_len);
cASSERT(mc, (xdata.iov_len & 1) == 0); cASSERT(mc, (xdata.iov_len & 1) == 0);
} else { } else {
xdata.iov_len += 2 * (sizeof(indx_t) + NODESIZE) + xdata.iov_len += 2 * (sizeof(indx_t) + NODESIZE) + (old_data.iov_len & 1) + (data->iov_len & 1);
(old_data.iov_len & 1) + (data->iov_len & 1);
} }
cASSERT(mc, (xdata.iov_len & 1) == 0); cASSERT(mc, (xdata.iov_len & 1) == 0);
fp->upper = (uint16_t)(xdata.iov_len - PAGEHDRSZ); fp->upper = (uint16_t)(xdata.iov_len - PAGEHDRSZ);
@ -1169,9 +1088,7 @@ __hot int cursor_put(MDBX_cursor *mc, const MDBX_val *key, MDBX_val *data,
fp = old_data.iov_base; fp = old_data.iov_base;
switch (flags) { switch (flags) {
default: default:
growth = is_dupfix_leaf(fp) growth = is_dupfix_leaf(fp) ? fp->dupfix_ksize : (node_size(data, nullptr) + sizeof(indx_t));
? fp->dupfix_ksize
: (node_size(data, nullptr) + sizeof(indx_t));
if (page_room(fp) >= growth) { if (page_room(fp) >= growth) {
/* На текущей под-странице есть место для добавления элемента. /* На текущей под-странице есть место для добавления элемента.
* Оптимальнее продолжить использовать эту страницу, ибо * Оптимальнее продолжить использовать эту страницу, ибо
@ -1241,9 +1158,8 @@ __hot int cursor_put(MDBX_cursor *mc, const MDBX_val *key, MDBX_val *data,
* subpage_reserve_prereq = leaf_nodemax (1000). * subpage_reserve_prereq = leaf_nodemax (1000).
*/ */
if (is_dupfix_leaf(fp)) if (is_dupfix_leaf(fp))
growth += page_subleaf2_reserve( growth += page_subleaf2_reserve(env, page_room(mc->pg[mc->top]) + old_data.iov_len, xdata.iov_len,
env, page_room(mc->pg[mc->top]) + old_data.iov_len, data->iov_len);
xdata.iov_len, data->iov_len);
else { else {
/* TODO: Если добавить возможность для пользователя задавать /* TODO: Если добавить возможность для пользователя задавать
* min/max размеров ключей/данных, то здесь разумно реализовать * min/max размеров ключей/данных, то здесь разумно реализовать
@ -1265,13 +1181,10 @@ __hot int cursor_put(MDBX_cursor *mc, const MDBX_val *key, MDBX_val *data,
} }
fp_flags = fp->flags; fp_flags = fp->flags;
if (xdata.iov_len > env->subpage_limit || if (xdata.iov_len > env->subpage_limit || node_size_len(node_ks(node), xdata.iov_len) > env->leaf_nodemax ||
node_size_len(node_ks(node), xdata.iov_len) > env->leaf_nodemax ||
(env->subpage_room_threshold && (env->subpage_room_threshold &&
page_room(mc->pg[mc->top]) + page_room(mc->pg[mc->top]) + node_size_len(node_ks(node), old_data.iov_len) <
node_size_len(node_ks(node), old_data.iov_len) < env->subpage_room_threshold + node_size_len(node_ks(node), xdata.iov_len))) {
env->subpage_room_threshold +
node_size_len(node_ks(node), xdata.iov_len))) {
/* Too big for a sub-page, convert to sub-DB */ /* Too big for a sub-page, convert to sub-DB */
convert_to_subtree: convert_to_subtree:
fp_flags &= ~P_SUBP; fp_flags &= ~P_SUBP;
@ -1310,17 +1223,13 @@ __hot int cursor_put(MDBX_cursor *mc, const MDBX_val *key, MDBX_val *data,
cASSERT(mc, fp->upper + growth < UINT16_MAX); cASSERT(mc, fp->upper + growth < UINT16_MAX);
mp->upper = fp->upper + (indx_t)growth; mp->upper = fp->upper + (indx_t)growth;
if (unlikely(fp_flags & P_DUPFIX)) { if (unlikely(fp_flags & P_DUPFIX)) {
memcpy(page_data(mp), page_data(fp), memcpy(page_data(mp), page_data(fp), page_numkeys(fp) * fp->dupfix_ksize);
page_numkeys(fp) * fp->dupfix_ksize); cASSERT(mc, (((mp->dupfix_ksize & page_numkeys(mp)) ^ mp->upper) & 1) == 0);
cASSERT(mc, (((mp->dupfix_ksize & page_numkeys(mp)) ^ mp->upper) &
1) == 0);
} else { } else {
cASSERT(mc, (mp->upper & 1) == 0); cASSERT(mc, (mp->upper & 1) == 0);
memcpy(ptr_disp(mp, mp->upper + PAGEHDRSZ), memcpy(ptr_disp(mp, mp->upper + PAGEHDRSZ), ptr_disp(fp, fp->upper + PAGEHDRSZ),
ptr_disp(fp, fp->upper + PAGEHDRSZ),
old_data.iov_len - fp->upper - PAGEHDRSZ); old_data.iov_len - fp->upper - PAGEHDRSZ);
memcpy(mp->entries, fp->entries, memcpy(mp->entries, fp->entries, page_numkeys(fp) * sizeof(mp->entries[0]));
page_numkeys(fp) * sizeof(mp->entries[0]));
for (size_t i = 0; i < page_numkeys(fp); i++) { for (size_t i = 0; i < page_numkeys(fp); i++) {
cASSERT(mc, mp->entries[i] + growth <= UINT16_MAX); cASSERT(mc, mp->entries[i] + growth <= UINT16_MAX);
mp->entries[i] += (indx_t)growth; mp->entries[i] += (indx_t)growth;
@ -1357,8 +1266,7 @@ __hot int cursor_put(MDBX_cursor *mc, const MDBX_val *key, MDBX_val *data,
cASSERT(mc, key->iov_len < UINT16_MAX); cASSERT(mc, key->iov_len < UINT16_MAX);
node_set_ks(node, key->iov_len); node_set_ks(node, key->iov_len);
memcpy(node_key(node), key->iov_base, key->iov_len); memcpy(node_key(node), key->iov_base, key->iov_len);
cASSERT(mc, ptr_disp(node_key(node), node_ds(node)) < cASSERT(mc, ptr_disp(node_key(node), node_ds(node)) < ptr_disp(mc->pg[mc->top], env->ps));
ptr_disp(mc->pg[mc->top], env->ps));
goto fix_parent; goto fix_parent;
} }
@ -1377,12 +1285,9 @@ __hot int cursor_put(MDBX_cursor *mc, const MDBX_val *key, MDBX_val *data,
insert_node:; insert_node:;
const unsigned naf = flags & NODE_ADD_FLAGS; const unsigned naf = flags & NODE_ADD_FLAGS;
size_t nsize = is_dupfix_leaf(mc->pg[mc->top]) size_t nsize = is_dupfix_leaf(mc->pg[mc->top]) ? key->iov_len : leaf_size(env, key, ref_data);
? key->iov_len
: leaf_size(env, key, ref_data);
if (page_room(mc->pg[mc->top]) < nsize) { if (page_room(mc->pg[mc->top]) < nsize) {
rc = page_split(mc, key, ref_data, P_INVALID, rc = page_split(mc, key, ref_data, P_INVALID, insert_key ? naf : naf | MDBX_SPLIT_REPLACE);
insert_key ? naf : naf | MDBX_SPLIT_REPLACE);
if (rc == MDBX_SUCCESS && AUDIT_ENABLED()) if (rc == MDBX_SUCCESS && AUDIT_ENABLED())
rc = insert_key ? cursor_check(mc) : cursor_check_updating(mc); rc = insert_key ? cursor_check(mc) : cursor_check_updating(mc);
} else { } else {
@ -1420,12 +1325,8 @@ insert_node:;
empty.iov_base = nullptr; empty.iov_base = nullptr;
node_t *node = page_node(mc->pg[mc->top], mc->ki[mc->top]); node_t *node = page_node(mc->pg[mc->top], mc->ki[mc->top]);
#define SHIFT_MDBX_NODUPDATA_TO_MDBX_NOOVERWRITE 1 #define SHIFT_MDBX_NODUPDATA_TO_MDBX_NOOVERWRITE 1
STATIC_ASSERT( STATIC_ASSERT((MDBX_NODUPDATA >> SHIFT_MDBX_NODUPDATA_TO_MDBX_NOOVERWRITE) == MDBX_NOOVERWRITE);
(MDBX_NODUPDATA >> SHIFT_MDBX_NODUPDATA_TO_MDBX_NOOVERWRITE) == unsigned inner_flags = MDBX_CURRENT | ((flags & MDBX_NODUPDATA) >> SHIFT_MDBX_NODUPDATA_TO_MDBX_NOOVERWRITE);
MDBX_NOOVERWRITE);
unsigned inner_flags =
MDBX_CURRENT | ((flags & MDBX_NODUPDATA) >>
SHIFT_MDBX_NODUPDATA_TO_MDBX_NOOVERWRITE);
if ((flags & MDBX_CURRENT) == 0) { if ((flags & MDBX_CURRENT) == 0) {
inner_flags -= MDBX_CURRENT; inner_flags -= MDBX_CURRENT;
rc = cursor_dupsort_setup(mc, node, mc->pg[mc->top]); rc = cursor_dupsort_setup(mc, node, mc->pg[mc->top]);
@ -1434,8 +1335,7 @@ insert_node:;
} }
subcur_t *const mx = mc->subcur; subcur_t *const mx = mc->subcur;
if (sub_root) { if (sub_root) {
cASSERT(mc, mx->nested_tree.height == 1 && cASSERT(mc, mx->nested_tree.height == 1 && mx->nested_tree.root == sub_root->pgno);
mx->nested_tree.root == sub_root->pgno);
mx->cursor.flags = z_inner; mx->cursor.flags = z_inner;
mx->cursor.top = 0; mx->cursor.top = 0;
mx->cursor.pg[0] = sub_root; mx->cursor.pg[0] = sub_root;
@ -1470,9 +1370,7 @@ insert_node:;
m2->subcur->cursor.top_and_flags = z_inner; m2->subcur->cursor.top_and_flags = z_inner;
m2->subcur->cursor.ki[0] = 0; m2->subcur->cursor.ki[0] = 0;
} }
DEBUG("Sub-dbi -%zu root page %" PRIaPGNO, DEBUG("Sub-dbi -%zu root page %" PRIaPGNO, cursor_dbi(&m2->subcur->cursor), m2->subcur->nested_tree.root);
cursor_dbi(&m2->subcur->cursor),
m2->subcur->nested_tree.root);
} else if (!insert_key && m2->ki[mc->top] < nkeys) } else if (!insert_key && m2->ki[mc->top] < nkeys)
cursor_inner_refresh(m2, mp, m2->ki[mc->top]); cursor_inner_refresh(m2, mp, m2->ki[mc->top]);
} }
@ -1480,10 +1378,8 @@ insert_node:;
cASSERT(mc, mc->subcur->nested_tree.items < PTRDIFF_MAX); cASSERT(mc, mc->subcur->nested_tree.items < PTRDIFF_MAX);
const size_t probe = (size_t)mc->subcur->nested_tree.items; const size_t probe = (size_t)mc->subcur->nested_tree.items;
#define SHIFT_MDBX_APPENDDUP_TO_MDBX_APPEND 1 #define SHIFT_MDBX_APPENDDUP_TO_MDBX_APPEND 1
STATIC_ASSERT((MDBX_APPENDDUP >> SHIFT_MDBX_APPENDDUP_TO_MDBX_APPEND) == STATIC_ASSERT((MDBX_APPENDDUP >> SHIFT_MDBX_APPENDDUP_TO_MDBX_APPEND) == MDBX_APPEND);
MDBX_APPEND); inner_flags |= (flags & MDBX_APPENDDUP) >> SHIFT_MDBX_APPENDDUP_TO_MDBX_APPEND;
inner_flags |=
(flags & MDBX_APPENDDUP) >> SHIFT_MDBX_APPENDDUP_TO_MDBX_APPEND;
rc = cursor_put(&mc->subcur->cursor, data, &empty, inner_flags); rc = cursor_put(&mc->subcur->cursor, data, &empty, inner_flags);
if (flags & N_TREE) { if (flags & N_TREE) {
void *db = node_data(node); void *db = node_data(node);
@ -1530,16 +1426,13 @@ insert_node:;
return rc; return rc;
} }
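Not part of this commit — an illustrative sketch of how the internal cursor_put() above is reached through the public API, with txn and dbi assumed to be valid open handles and error handling elided:

    /* Designated initializers keep the MDBX_val layout explicit. */
    MDBX_val key = {.iov_base = (void *)"greeting", .iov_len = 8};
    MDBX_val val = {.iov_base = (void *)"hello", .iov_len = 5};
    /* MDBX_UPSERT overwrites an existing value; MDBX_NOOVERWRITE would return
     * MDBX_KEYEXIST instead, and MDBX_APPEND requires keys in ascending order. */
    int rc = mdbx_put(txn, dbi, &key, &val, MDBX_UPSERT);
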
__hot int cursor_put_checklen(MDBX_cursor *mc, const MDBX_val *key, __hot int cursor_put_checklen(MDBX_cursor *mc, const MDBX_val *key, MDBX_val *data, unsigned flags) {
MDBX_val *data, unsigned flags) {
cASSERT(mc, (mc->flags & z_inner) == 0); cASSERT(mc, (mc->flags & z_inner) == 0);
if (unlikely(key->iov_len > mc->clc->k.lmax || if (unlikely(key->iov_len > mc->clc->k.lmax || key->iov_len < mc->clc->k.lmin)) {
key->iov_len < mc->clc->k.lmin)) {
cASSERT(mc, !"Invalid key-size"); cASSERT(mc, !"Invalid key-size");
return MDBX_BAD_VALSIZE; return MDBX_BAD_VALSIZE;
} }
if (unlikely(data->iov_len > mc->clc->v.lmax || if (unlikely(data->iov_len > mc->clc->v.lmax || data->iov_len < mc->clc->v.lmin)) {
data->iov_len < mc->clc->v.lmin)) {
cASSERT(mc, !"Invalid data-size"); cASSERT(mc, !"Invalid data-size");
return MDBX_BAD_VALSIZE; return MDBX_BAD_VALSIZE;
} }
@ -1611,8 +1504,7 @@ __hot int cursor_del(MDBX_cursor *mc, unsigned flags) {
page_t *mp = mc->pg[mc->top]; page_t *mp = mc->pg[mc->top];
cASSERT(mc, is_modifable(mc->txn, mp)); cASSERT(mc, is_modifable(mc->txn, mp));
if (!MDBX_DISABLE_VALIDATION && unlikely(!check_leaf_type(mc, mp))) { if (!MDBX_DISABLE_VALIDATION && unlikely(!check_leaf_type(mc, mp))) {
ERROR("unexpected leaf-page #%" PRIaPGNO " type 0x%x seen by cursor", ERROR("unexpected leaf-page #%" PRIaPGNO " type 0x%x seen by cursor", mp->pgno, mp->flags);
mp->pgno, mp->flags);
return MDBX_CORRUPTED; return MDBX_CORRUPTED;
} }
if (is_dupfix_leaf(mp)) if (is_dupfix_leaf(mp))
@ -1644,8 +1536,7 @@ __hot int cursor_del(MDBX_cursor *mc, unsigned flags) {
node = node_shrink(mp, mc->ki[mc->top], node); node = node_shrink(mp, mc->ki[mc->top], node);
mc->subcur->cursor.pg[0] = node_data(node); mc->subcur->cursor.pg[0] = node_data(node);
/* fix other sub-DB cursors pointed at sub-pages on this page */ /* fix other sub-DB cursors pointed at sub-pages on this page */
for (MDBX_cursor *m2 = mc->txn->cursors[cursor_dbi(mc)]; m2; for (MDBX_cursor *m2 = mc->txn->cursors[cursor_dbi(mc)]; m2; m2 = m2->next) {
m2 = m2->next) {
if (!is_related(mc, m2) || m2->pg[mc->top] != mp) if (!is_related(mc, m2) || m2->pg[mc->top] != mp)
continue; continue;
const node_t *inner = node; const node_t *inner = node;
@ -1664,8 +1555,7 @@ __hot int cursor_del(MDBX_cursor *mc, unsigned flags) {
} }
} }
mc->tree->items -= 1; mc->tree->items -= 1;
cASSERT(mc, mc->tree->items > 0 && mc->tree->height > 0 && cASSERT(mc, mc->tree->items > 0 && mc->tree->height > 0 && mc->tree->root != P_INVALID);
mc->tree->root != P_INVALID);
return rc; return rc;
} }
/* otherwise fall thru and delete the sub-DB */ /* otherwise fall thru and delete the sub-DB */
@ -1725,9 +1615,7 @@ del_key:
/* DB is totally empty now, just bail out. /* DB is totally empty now, just bail out.
* Other cursors adjustments were already done * Other cursors adjustments were already done
* by rebalance and aren't needed here. */ * by rebalance and aren't needed here. */
cASSERT(mc, mc->tree->items == 0 && cASSERT(mc, mc->tree->items == 0 && (mc->tree->root == P_INVALID || (is_inner(mc) && !mc->tree->root)) &&
(mc->tree->root == P_INVALID ||
(is_inner(mc) && !mc->tree->root)) &&
mc->flags < 0); mc->flags < 0);
return MDBX_SUCCESS; return MDBX_SUCCESS;
} }
@ -1736,9 +1624,7 @@ del_key:
mp = mc->pg[mc->top]; mp = mc->pg[mc->top];
cASSERT(mc, is_leaf(mc->pg[mc->top])); cASSERT(mc, is_leaf(mc->pg[mc->top]));
size_t nkeys = page_numkeys(mp); size_t nkeys = page_numkeys(mp);
cASSERT(mc, cASSERT(mc, (mc->tree->items > 0 && nkeys > 0) || ((mc->flags & z_inner) && mc->tree->items == 0 && nkeys == 0));
(mc->tree->items > 0 && nkeys > 0) ||
((mc->flags & z_inner) && mc->tree->items == 0 && nkeys == 0));
/* Adjust this and other cursors pointing to mp */ /* Adjust this and other cursors pointing to mp */
const intptr_t top = /* может быть сброшен в -1 */ mc->top; const intptr_t top = /* может быть сброшен в -1 */ mc->top;
@ -1777,8 +1663,7 @@ del_key:
* нужно установить на первый дубликат. */ * нужно установить на первый дубликат. */
if (is_pointed(&m3->subcur->cursor)) { if (is_pointed(&m3->subcur->cursor)) {
if ((node_flags(node) & N_TREE) == 0) { if ((node_flags(node) & N_TREE) == 0) {
cASSERT(m3, m3->subcur->cursor.top == 0 && cASSERT(m3, m3->subcur->cursor.top == 0 && m3->subcur->nested_tree.height == 1);
m3->subcur->nested_tree.height == 1);
m3->subcur->cursor.pg[0] = node_data(node); m3->subcur->cursor.pg[0] = node_data(node);
} }
} else { } else {
@ -1808,14 +1693,12 @@ fail:
/*----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*/
__hot csr_t cursor_seek(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data, __hot csr_t cursor_seek(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data, MDBX_cursor_op op) {
MDBX_cursor_op op) {
DKBUF_DEBUG; DKBUF_DEBUG;
csr_t ret; csr_t ret;
ret.exact = false; ret.exact = false;
if (unlikely(key->iov_len < mc->clc->k.lmin || if (unlikely(key->iov_len < mc->clc->k.lmin || key->iov_len > mc->clc->k.lmax)) {
key->iov_len > mc->clc->k.lmax)) {
cASSERT(mc, !"Invalid key-size"); cASSERT(mc, !"Invalid key-size");
ret.err = MDBX_BAD_VALSIZE; ret.err = MDBX_BAD_VALSIZE;
return ret; return ret;
@ -1848,9 +1731,8 @@ __hot csr_t cursor_seek(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data,
const size_t nkeys = page_numkeys(mp); const size_t nkeys = page_numkeys(mp);
if (unlikely(nkeys == 0)) { if (unlikely(nkeys == 0)) {
/* при создании первой листовой страницы */ /* при создании первой листовой страницы */
cASSERT(mc, mc->top == 0 && mc->tree->height == 1 && cASSERT(mc, mc->top == 0 && mc->tree->height == 1 && mc->tree->branch_pages == 0 && mc->tree->leaf_pages == 1 &&
mc->tree->branch_pages == 0 && mc->ki[0] == 0);
mc->tree->leaf_pages == 1 && mc->ki[0] == 0);
/* Логически верно, но нет смысла, ибо это мимолетная/временная /* Логически верно, но нет смысла, ибо это мимолетная/временная
* ситуация до добавления элемента выше по стеку вызовов: * ситуация до добавления элемента выше по стеку вызовов:
mc->flags |= z_eof_soft | z_hollow; */ mc->flags |= z_eof_soft | z_hollow; */
@ -1901,8 +1783,7 @@ __hot csr_t cursor_seek(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data,
* первой/последний и соответственно такое сравнение было выше. */ * первой/последний и соответственно такое сравнение было выше. */
if (mc->ki[mc->top] > 0 && mc->ki[mc->top] < nkeys - 1) { if (mc->ki[mc->top] > 0 && mc->ki[mc->top] < nkeys - 1) {
if (is_dupfix_leaf(mp)) { if (is_dupfix_leaf(mp)) {
nodekey.iov_base = nodekey.iov_base = page_dupfix_ptr(mp, mc->ki[mc->top], nodekey.iov_len);
page_dupfix_ptr(mp, mc->ki[mc->top], nodekey.iov_len);
} else { } else {
node = page_node(mp, mc->ki[mc->top]); node = page_node(mp, mc->ki[mc->top]);
nodekey = get_key(node); nodekey = get_key(node);
@ -1928,8 +1809,7 @@ __hot csr_t cursor_seek(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data,
mc->ki[mc->top] = (indx_t)nkeys; mc->ki[mc->top] = (indx_t)nkeys;
if (op < MDBX_SET_RANGE) { if (op < MDBX_SET_RANGE) {
target_not_found: target_not_found:
cASSERT(mc, op == MDBX_SET || op == MDBX_SET_KEY || cASSERT(mc, op == MDBX_SET || op == MDBX_SET_KEY || op == MDBX_GET_BOTH || op == MDBX_GET_BOTH_RANGE);
op == MDBX_GET_BOTH || op == MDBX_GET_BOTH_RANGE);
/* Операция предполагает поиск конкретного ключа, который не найден. /* Операция предполагает поиск конкретного ключа, который не найден.
* Поэтому переводим курсор в неустановленное состояние, но без сброса * Поэтому переводим курсор в неустановленное состояние, но без сброса
* top, что позволяет работать fastpath при последующем поиске по дереву * top, что позволяет работать fastpath при последующем поиске по дереву
@ -1991,8 +1871,7 @@ got_node:
cASSERT(mc, is_pointed(mc) && !inner_pointed(mc)); cASSERT(mc, is_pointed(mc) && !inner_pointed(mc));
cASSERT(mc, mc->ki[mc->top] < page_numkeys(mc->pg[mc->top])); cASSERT(mc, mc->ki[mc->top] < page_numkeys(mc->pg[mc->top]));
if (!MDBX_DISABLE_VALIDATION && unlikely(!check_leaf_type(mc, mp))) { if (!MDBX_DISABLE_VALIDATION && unlikely(!check_leaf_type(mc, mp))) {
ERROR("unexpected leaf-page #%" PRIaPGNO " type 0x%x seen by cursor", ERROR("unexpected leaf-page #%" PRIaPGNO " type 0x%x seen by cursor", mp->pgno, mp->flags);
mp->pgno, mp->flags);
ret.err = MDBX_CORRUPTED; ret.err = MDBX_CORRUPTED;
return ret; return ret;
} }
@ -2037,8 +1916,7 @@ got_node:
} }
} else if (likely(data)) { } else if (likely(data)) {
if (op <= MDBX_GET_BOTH_RANGE) { if (op <= MDBX_GET_BOTH_RANGE) {
if (unlikely(data->iov_len < mc->clc->v.lmin || data->iov_len > mc->clc->v.lmax)) {
cASSERT(mc, !"Invalid data-size"); cASSERT(mc, !"Invalid data-size");
ret.err = MDBX_BAD_VALSIZE; ret.err = MDBX_BAD_VALSIZE;
return ret; return ret;
@ -2049,13 +1927,11 @@ got_node:
if (aligned_data.iov_len == 8) { if (aligned_data.iov_len == 8) {
if (unlikely(7 & (uintptr_t)aligned_data.iov_base)) if (unlikely(7 & (uintptr_t)aligned_data.iov_base))
/* copy instead of returning an error, to avoid breaking compatibility */
aligned_data.iov_base = bcopy_8(&aligned_databytes, aligned_data.iov_base);
} else if (aligned_data.iov_len == 4) {
if (unlikely(3 & (uintptr_t)aligned_data.iov_base))
/* copy instead of returning an error, to avoid breaking compatibility */
aligned_data.iov_base = bcopy_4(&aligned_databytes, aligned_data.iov_base);
} else { } else {
cASSERT(mc, !"data-size is invalid for MDBX_INTEGERDUP"); cASSERT(mc, !"data-size is invalid for MDBX_INTEGERDUP");
ret.err = MDBX_BAD_VALSIZE; ret.err = MDBX_BAD_VALSIZE;
@ -2089,18 +1965,15 @@ got_node:
if (op >= MDBX_SET_KEY) if (op >= MDBX_SET_KEY)
get_key_optional(node, key); get_key_optional(node, key);
DEBUG("==> cursor placed on key [%s], data [%s]", DKEY_DEBUG(key), DVAL_DEBUG(data));
ret.err = MDBX_SUCCESS; ret.err = MDBX_SUCCESS;
be_filled(mc); be_filled(mc);
return ret; return ret;
} }
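
The branch above quietly tolerates misaligned MDBX_INTEGERDUP values: instead of rejecting an unaligned 4- or 8-byte integer, it copies it into a local aligned buffer before the comparison. The stand-alone sketch below shows the same fix-up; the function name, the `scratch` parameter and the use of plain memcpy are assumptions for illustration (bcopy_8/bcopy_4 are taken to be thin copy helpers).

#include <stdint.h>
#include <string.h>

/* Sketch only: align_integer_value() and `scratch` are invented names. */
static const void *align_integer_value(const void *p, size_t len, uint64_t *scratch) {
  if (len == 8 && (7 & (uintptr_t)p) != 0) {
    memcpy(scratch, p, 8); /* copy instead of failing, to stay compatible */
    return scratch;
  }
  if (len == 4 && (3 & (uintptr_t)p) != 0) {
    memcpy(scratch, p, 4); /* reuse the aligned scratch buffer for the 4-byte case */
    return scratch;
  }
  return p; /* already suitably aligned, use the caller's bytes directly */
}
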
__hot int cursor_ops(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data, const MDBX_cursor_op op) {
if (op != MDBX_GET_CURRENT)
DEBUG(">> cursor %p(0x%x), ops %u, key %p, value %p", __Wpedantic_format_voidptr(mc), mc->flags, op,
__Wpedantic_format_voidptr(key), __Wpedantic_format_voidptr(data));
int rc; int rc;
@ -2163,8 +2036,7 @@ __hot int cursor_ops(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data,
cASSERT(mc, is_filled(mc)); cASSERT(mc, is_filled(mc));
else if (rc == MDBX_NOTFOUND && mc->tree->items) { else if (rc == MDBX_NOTFOUND && mc->tree->items) {
cASSERT(mc, is_pointed(mc)); cASSERT(mc, is_pointed(mc));
cASSERT(mc, op == MDBX_SET_RANGE || op == MDBX_GET_BOTH_RANGE || is_hollow(mc));
cASSERT(mc, op == MDBX_GET_BOTH_RANGE || inner_hollow(mc)); cASSERT(mc, op == MDBX_GET_BOTH_RANGE || inner_hollow(mc));
} else } else
cASSERT(mc, is_poor(mc) && !is_filled(mc)); cASSERT(mc, is_poor(mc) && !is_filled(mc));
@ -2271,8 +2143,7 @@ __hot int cursor_ops(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data,
if ((node_flags(node) & N_DUP) == 0) if ((node_flags(node) & N_DUP) == 0)
return node_read(mc, node, data, mc->pg[mc->top]); return node_read(mc, node, data, mc->pg[mc->top]);
else if (MDBX_DISABLE_VALIDATION || likely(mc->subcur)) else if (MDBX_DISABLE_VALIDATION || likely(mc->subcur))
return ((op == MDBX_FIRST_DUP) ? inner_first : inner_last)(&mc->subcur->cursor, data);
else else
return unexpected_dupsort(mc); return unexpected_dupsort(mc);
} }
@ -2338,8 +2209,7 @@ __hot int cursor_ops(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data,
rc = outer_prev(mc, key, data, MDBX_PREV_NODUP); rc = outer_prev(mc, key, data, MDBX_PREV_NODUP);
else if (op == MDBX_TO_KEY_GREATER_THAN) else if (op == MDBX_TO_KEY_GREATER_THAN)
rc = outer_next(mc, key, data, MDBX_NEXT_NODUP); rc = outer_next(mc, key, data, MDBX_NEXT_NODUP);
} else if (op < MDBX_TO_KEY_EQUAL && (rc == MDBX_NOTFOUND || rc == MDBX_SUCCESS))
rc = outer_prev(mc, key, data, MDBX_PREV_NODUP); rc = outer_prev(mc, key, data, MDBX_PREV_NODUP);
else if (op == MDBX_TO_KEY_EQUAL && rc == MDBX_SUCCESS) else if (op == MDBX_TO_KEY_EQUAL && rc == MDBX_SUCCESS)
rc = MDBX_NOTFOUND; rc = MDBX_NOTFOUND;
@ -2371,8 +2241,7 @@ __hot int cursor_ops(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data,
rc = inner_prev(mx, data); rc = inner_prev(mx, data);
else if (op == MDBX_TO_EXACT_KEY_VALUE_GREATER_THAN) else if (op == MDBX_TO_EXACT_KEY_VALUE_GREATER_THAN)
rc = inner_next(mx, data); rc = inner_next(mx, data);
} else if (op < MDBX_TO_EXACT_KEY_VALUE_EQUAL && (rc == MDBX_NOTFOUND || rc == MDBX_SUCCESS))
rc = inner_prev(mx, data); rc = inner_prev(mx, data);
else if (op == MDBX_TO_EXACT_KEY_VALUE_EQUAL && rc == MDBX_SUCCESS) else if (op == MDBX_TO_EXACT_KEY_VALUE_EQUAL && rc == MDBX_SUCCESS)
rc = MDBX_NOTFOUND; rc = MDBX_NOTFOUND;
@ -2425,8 +2294,7 @@ __hot int cursor_ops(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data,
rc = outer_prev(mc, key, data, MDBX_PREV); rc = outer_prev(mc, key, data, MDBX_PREV);
else if (op == MDBX_TO_PAIR_GREATER_THAN) else if (op == MDBX_TO_PAIR_GREATER_THAN)
rc = outer_next(mc, key, data, MDBX_NEXT); rc = outer_next(mc, key, data, MDBX_NEXT);
} else if (op < MDBX_TO_PAIR_EQUAL && (rc == MDBX_NOTFOUND || rc == MDBX_SUCCESS))
rc = outer_prev(mc, key, data, MDBX_PREV); rc = outer_prev(mc, key, data, MDBX_PREV);
else if (op == MDBX_TO_PAIR_EQUAL && rc == MDBX_SUCCESS) else if (op == MDBX_TO_PAIR_EQUAL && rc == MDBX_SUCCESS)
rc = MDBX_NOTFOUND; rc = MDBX_NOTFOUND;
@ -2458,8 +2326,7 @@ __hot int cursor_ops(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data,
break; break;
} }
} }
} else if (op < MDBX_TO_PAIR_EQUAL && (rc == MDBX_NOTFOUND || rc == MDBX_SUCCESS))
rc = outer_prev(mc, key, data, MDBX_PREV_NODUP); rc = outer_prev(mc, key, data, MDBX_PREV_NODUP);
else if (op == MDBX_TO_PAIR_EQUAL && rc == MDBX_SUCCESS) else if (op == MDBX_TO_PAIR_EQUAL && rc == MDBX_SUCCESS)
rc = MDBX_NOTFOUND; rc = MDBX_NOTFOUND;


@ -125,13 +125,11 @@ enum cursor_state {
z_fresh_mark = z_poor_mark | z_fresh z_fresh_mark = z_poor_mark | z_fresh
}; };
MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline bool is_inner(const MDBX_cursor *mc) {
return (mc->flags & z_inner) != 0; return (mc->flags & z_inner) != 0;
} }
MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline bool is_poor(const MDBX_cursor *mc) {
const bool r = mc->top < 0; const bool r = mc->top < 0;
cASSERT(mc, r == (mc->top_and_flags < 0)); cASSERT(mc, r == (mc->top_and_flags < 0));
if (r && mc->subcur) if (r && mc->subcur)
@ -139,8 +137,7 @@ is_poor(const MDBX_cursor *mc) {
return r; return r;
} }
MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline bool is_pointed(const MDBX_cursor *mc) {
const bool r = mc->top >= 0; const bool r = mc->top >= 0;
cASSERT(mc, r == (mc->top_and_flags >= 0)); cASSERT(mc, r == (mc->top_and_flags >= 0));
if (!r && mc->subcur) if (!r && mc->subcur)
@ -148,49 +145,41 @@ is_pointed(const MDBX_cursor *mc) {
return r; return r;
} }
MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline bool is_hollow(const MDBX_cursor *mc) {
const bool r = mc->flags < 0; const bool r = mc->flags < 0;
if (!r) { if (!r) {
cASSERT(mc, mc->top >= 0); cASSERT(mc, mc->top >= 0);
cASSERT(mc, (mc->flags & z_eof_hard) || mc->ki[mc->top] < page_numkeys(mc->pg[mc->top]));
} else if (mc->subcur) } else if (mc->subcur)
cASSERT(mc, is_poor(&mc->subcur->cursor)); cASSERT(mc, is_poor(&mc->subcur->cursor));
return r; return r;
} }
MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline bool is_eof(const MDBX_cursor *mc) {
const bool r = z_eof_soft <= (uint8_t)mc->flags; const bool r = z_eof_soft <= (uint8_t)mc->flags;
return r; return r;
} }
MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline bool is_filled(const MDBX_cursor *mc) {
const bool r = z_eof_hard > (uint8_t)mc->flags; const bool r = z_eof_hard > (uint8_t)mc->flags;
return r; return r;
} }
MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline bool inner_filled(const MDBX_cursor *mc) {
return mc->subcur && is_filled(&mc->subcur->cursor); return mc->subcur && is_filled(&mc->subcur->cursor);
} }
MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline bool inner_pointed(const MDBX_cursor *mc) {
return mc->subcur && is_pointed(&mc->subcur->cursor); return mc->subcur && is_pointed(&mc->subcur->cursor);
} }
MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline bool inner_hollow(const MDBX_cursor *mc) {
return !mc->subcur || is_hollow(&mc->subcur->cursor); return !mc->subcur || is_hollow(&mc->subcur->cursor);
} }
MDBX_MAYBE_UNUSED static inline void inner_gone(MDBX_cursor *mc) { MDBX_MAYBE_UNUSED static inline void inner_gone(MDBX_cursor *mc) {
if (mc->subcur) { if (mc->subcur) {
TRACE("reset inner cursor %p", __Wpedantic_format_voidptr(&mc->subcur->cursor));
mc->subcur->nested_tree.root = 0; mc->subcur->nested_tree.root = 0;
mc->subcur->cursor.top_and_flags = z_inner | z_poor_mark; mc->subcur->cursor.top_and_flags = z_inner | z_poor_mark;
} }
@ -218,8 +207,7 @@ MDBX_MAYBE_UNUSED static inline void be_filled(MDBX_cursor *mc) {
cASSERT(mc, inner == is_inner(mc)); cASSERT(mc, inner == is_inner(mc));
} }
MDBX_MAYBE_UNUSED static inline bool is_related(const MDBX_cursor *base, const MDBX_cursor *scan) {
cASSERT(base, base->top >= 0); cASSERT(base, base->top >= 0);
return base->top <= scan->top && base != scan; return base->top <= scan->top && base != scan;
} }
@ -238,36 +226,30 @@ enum cursor_checking {
MDBX_INTERNAL int __must_check_result cursor_check(const MDBX_cursor *mc); MDBX_INTERNAL int __must_check_result cursor_check(const MDBX_cursor *mc);
MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline size_t cursor_dbi(const MDBX_cursor *mc) {
cASSERT(mc, mc->txn && mc->txn->signature == txn_signature); cASSERT(mc, mc->txn && mc->txn->signature == txn_signature);
size_t dbi = mc->dbi_state - mc->txn->dbi_state; size_t dbi = mc->dbi_state - mc->txn->dbi_state;
cASSERT(mc, dbi < mc->txn->env->n_dbi); cASSERT(mc, dbi < mc->txn->env->n_dbi);
return dbi; return dbi;
} }
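
cursor_dbi() above recovers the table handle number purely from pointer arithmetic: mc->dbi_state points into the transaction's dbi_state[] array, so the index is just the pointer difference. A minimal sketch of the same trick with invented toy types:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* toy_txn_t and toy_cursor_dbi() are invented; only the pointer arithmetic matters. */
typedef struct toy_txn {
  uint8_t dbi_state[32]; /* one state byte per table handle */
} toy_txn_t;

static size_t toy_cursor_dbi(const toy_txn_t *txn, const uint8_t *state_slot) {
  const size_t dbi = (size_t)(state_slot - txn->dbi_state); /* index = pointer difference */
  assert(dbi < sizeof(txn->dbi_state));
  return dbi;
}
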
MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline bool cursor_dbi_changed(const MDBX_cursor *mc) {
return dbi_changed(mc->txn, cursor_dbi(mc)); return dbi_changed(mc->txn, cursor_dbi(mc));
} }
MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline uint8_t *cursor_dbi_state(const MDBX_cursor *mc) {
return mc->dbi_state; return mc->dbi_state;
} }
MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline bool cursor_is_gc(const MDBX_cursor *mc) {
return mc->dbi_state == mc->txn->dbi_state + FREE_DBI; return mc->dbi_state == mc->txn->dbi_state + FREE_DBI;
} }
MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline bool cursor_is_main(const MDBX_cursor *mc) {
return mc->dbi_state == mc->txn->dbi_state + MAIN_DBI; return mc->dbi_state == mc->txn->dbi_state + MAIN_DBI;
} }
MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline bool cursor_is_core(const MDBX_cursor *mc) {
return mc->dbi_state < mc->txn->dbi_state + CORE_DBS; return mc->dbi_state < mc->txn->dbi_state + CORE_DBS;
} }
@ -277,10 +259,8 @@ MDBX_MAYBE_UNUSED static inline int cursor_dbi_dbg(const MDBX_cursor *mc) {
return (mc->flags & z_inner) ? -dbi : dbi; return (mc->flags & z_inner) ? -dbi : dbi;
} }
MDBX_MAYBE_UNUSED static inline int __must_check_result cursor_push(MDBX_cursor *mc, page_t *mp, indx_t ki) {
TRACE("pushing page %" PRIaPGNO " on db %d cursor %p", mp->pgno, cursor_dbi_dbg(mc), __Wpedantic_format_voidptr(mc));
if (unlikely(mc->top >= CURSOR_STACK_SIZE - 1)) { if (unlikely(mc->top >= CURSOR_STACK_SIZE - 1)) {
be_poor(mc); be_poor(mc);
mc->txn->flags |= MDBX_TXN_ERROR; mc->txn->flags |= MDBX_TXN_ERROR;
@ -293,43 +273,32 @@ cursor_push(MDBX_cursor *mc, page_t *mp, indx_t ki) {
} }
MDBX_MAYBE_UNUSED static inline void cursor_pop(MDBX_cursor *mc) { MDBX_MAYBE_UNUSED static inline void cursor_pop(MDBX_cursor *mc) {
TRACE("popped page %" PRIaPGNO " off db %d cursor %p", mc->pg[mc->top]->pgno, cursor_dbi_dbg(mc),
__Wpedantic_format_voidptr(mc));
cASSERT(mc, mc->top >= 0); cASSERT(mc, mc->top >= 0);
mc->top -= 1; mc->top -= 1;
} }
MDBX_NOTHROW_PURE_FUNCTION static inline bool check_leaf_type(const MDBX_cursor *mc, const page_t *mp) {
return (((page_type(mp) ^ mc->checking) & (z_branch | z_leaf | z_largepage | z_dupfix)) == 0);
} }
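
check_leaf_type() compares only the page-kind bits the cursor cares about: XOR leaves exactly the differing bits set, and the mask discards the irrelevant ones. A tiny self-contained illustration of the same idiom; the flag values below are made up for the example, not libmdbx's real page-type bits.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical page-kind bits, for illustration only. */
enum { KIND_BRANCH = 1u << 0, KIND_LEAF = 1u << 1, KIND_LARGE = 1u << 2, KIND_DUPFIX = 1u << 3 };

/* True iff page and cursor agree on every bit selected by `mask`. */
static bool kinds_match(uint32_t page_flags, uint32_t expected, uint32_t mask) {
  return ((page_flags ^ expected) & mask) == 0;
}
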
MDBX_INTERNAL void cursor_eot(MDBX_cursor *mc, const bool merge); MDBX_INTERNAL void cursor_eot(MDBX_cursor *mc, const bool merge);
MDBX_INTERNAL int cursor_shadow(MDBX_cursor *parent_cursor, MDBX_txn *nested_txn, const size_t dbi);
MDBX_INTERNAL MDBX_cursor *cursor_cpstk(const MDBX_cursor *csrc, MDBX_cursor *cdst);
MDBX_INTERNAL int __must_check_result cursor_ops(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data,
const MDBX_cursor_op op);
MDBX_INTERNAL int __must_check_result cursor_put_checklen(MDBX_cursor *mc, const MDBX_val *key, MDBX_val *data,
unsigned flags);
MDBX_INTERNAL int __must_check_result cursor_put(MDBX_cursor *mc, const MDBX_val *key, MDBX_val *data, unsigned flags);
MDBX_INTERNAL int __must_check_result cursor_check_updating(MDBX_cursor *mc);
MDBX_INTERNAL int __must_check_result cursor_del(MDBX_cursor *mc, unsigned flags);
MDBX_INTERNAL int __must_check_result cursor_sibling_left(MDBX_cursor *mc); MDBX_INTERNAL int __must_check_result cursor_sibling_left(MDBX_cursor *mc);
MDBX_INTERNAL int __must_check_result cursor_sibling_right(MDBX_cursor *mc); MDBX_INTERNAL int __must_check_result cursor_sibling_right(MDBX_cursor *mc);
@ -339,56 +308,37 @@ typedef struct cursor_set_result {
bool exact; bool exact;
} csr_t; } csr_t;
MDBX_INTERNAL csr_t cursor_seek(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data, MDBX_cursor_op op);
MDBX_INTERNAL int __must_check_result inner_first(MDBX_cursor *__restrict mc, MDBX_val *__restrict data);
MDBX_INTERNAL int __must_check_result inner_last(MDBX_cursor *__restrict mc, MDBX_val *__restrict data);
MDBX_INTERNAL int __must_check_result outer_first(MDBX_cursor *__restrict mc, MDBX_val *__restrict key,
MDBX_val *__restrict data);
MDBX_INTERNAL int __must_check_result outer_last(MDBX_cursor *__restrict mc, MDBX_val *__restrict key,
MDBX_val *__restrict data);
MDBX_INTERNAL int __must_check_result inner_next(MDBX_cursor *__restrict mc, MDBX_val *__restrict data);
MDBX_INTERNAL int __must_check_result inner_prev(MDBX_cursor *__restrict mc, MDBX_val *__restrict data);
MDBX_INTERNAL int __must_check_result outer_next(MDBX_cursor *__restrict mc, MDBX_val *__restrict key,
MDBX_val *__restrict data, MDBX_cursor_op op);
MDBX_INTERNAL int __must_check_result outer_prev(MDBX_cursor *__restrict mc, MDBX_val *__restrict key,
MDBX_val *__restrict data, MDBX_cursor_op op);
MDBX_INTERNAL int cursor_init4walk(cursor_couple_t *couple, const MDBX_txn *const txn, tree_t *const tree,
kvx_t *const kvx);
MDBX_INTERNAL int __must_check_result cursor_init(MDBX_cursor *mc, const MDBX_txn *txn, size_t dbi);
MDBX_INTERNAL int __must_check_result cursor_dupsort_setup(MDBX_cursor *mc, const node_t *node, const page_t *mp);
MDBX_INTERNAL int __must_check_result cursor_touch(MDBX_cursor *const mc, const MDBX_val *key, const MDBX_val *data);
/*----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*/
/* Update sub-page pointer, if any, in mc->subcur. /* Update sub-page pointer, if any, in mc->subcur.
* Needed when the node which contains the sub-page may have moved. * Needed when the node which contains the sub-page may have moved.
* Called with mp = mc->pg[mc->top], ki = mc->ki[mc->top]. */ * Called with mp = mc->pg[mc->top], ki = mc->ki[mc->top]. */
MDBX_MAYBE_UNUSED static inline void cursor_inner_refresh(const MDBX_cursor *mc, const page_t *mp, unsigned ki) {
cASSERT(mc, is_leaf(mp)); cASSERT(mc, is_leaf(mp));
const node_t *node = page_node(mp, ki); const node_t *node = page_node(mp, ki);
if ((node_flags(node) & (N_DUP | N_TREE)) == N_DUP) if ((node_flags(node) & (N_DUP | N_TREE)) == N_DUP)

src/dbi.c

@ -8,16 +8,14 @@ size_t dbi_bitmap_ctz_fallback(const MDBX_txn *txn, intptr_t bmi) {
tASSERT(txn, bmi > 0); tASSERT(txn, bmi > 0);
bmi &= -bmi; bmi &= -bmi;
if (sizeof(txn->dbi_sparse[0]) > 4) { if (sizeof(txn->dbi_sparse[0]) > 4) {
static const uint8_t debruijn_ctz64[64] = {0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28,
62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11,
63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10,
51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12};
return debruijn_ctz64[(UINT64_C(0x022FDD63CC95386D) * (uint64_t)bmi) >> 58];
} else {
static const uint8_t debruijn_ctz32[32] = {0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9};
return debruijn_ctz32[(UINT32_C(0x077CB531) * (uint32_t)bmi) >> 27];
} }
} }
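
The fallback above is the classic de Bruijn count-trailing-zeros: isolate the lowest set bit, multiply by a de Bruijn constant so the bit position lands in the top bits, then use those bits to index a small lookup table. A self-contained 32-bit version of the same technique, using the same constant and table as shown:

#include <assert.h>
#include <stdint.h>

static unsigned ctz32_debruijn(uint32_t v) {
  static const uint8_t table[32] = {0,  1,  28, 2,  29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4,  8,
                                    31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6,  11, 5,  10, 9};
  v &= 0u - v; /* isolate the lowest set bit (v must be non-zero) */
  return table[(UINT32_C(0x077CB531) * v) >> 27];
}

int main(void) {
  assert(ctz32_debruijn(0x8u) == 3);
  assert(ctz32_debruijn(12u) == 2);
  assert(ctz32_debruijn(0x80000000u) == 31);
  return 0;
}
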
@ -45,8 +43,7 @@ __noinline int dbi_import(MDBX_txn *txn, const size_t dbi) {
const size_t bitmap_indx = dbi / bitmap_chunk; const size_t bitmap_indx = dbi / bitmap_chunk;
const size_t bitmap_mask = (size_t)1 << dbi % bitmap_chunk; const size_t bitmap_mask = (size_t)1 << dbi % bitmap_chunk;
if (dbi >= txn->n_dbi) { if (dbi >= txn->n_dbi) {
for (size_t i = (txn->n_dbi + bitmap_chunk - 1) / bitmap_chunk; bitmap_indx >= i; ++i)
txn->dbi_sparse[i] = 0; txn->dbi_sparse[i] = 0;
eASSERT(env, (txn->dbi_sparse[bitmap_indx] & bitmap_mask) == 0); eASSERT(env, (txn->dbi_sparse[bitmap_indx] & bitmap_mask) == 0);
MDBX_txn *scan = txn; MDBX_txn *scan = txn;
@ -92,8 +89,7 @@ __noinline int dbi_import(MDBX_txn *txn, const size_t dbi) {
int rc = dbi_check(parent, dbi); int rc = dbi_check(parent, dbi);
/* copy the table state, clearing the new-flags. */
eASSERT(env, txn->dbi_seqs == parent->dbi_seqs);
txn->dbi_state[dbi] = parent->dbi_state[dbi] & ~(DBI_FRESH | DBI_CREAT | DBI_DIRTY);
if (likely(rc == MDBX_SUCCESS)) { if (likely(rc == MDBX_SUCCESS)) {
txn->dbs[dbi] = parent->dbs[dbi]; txn->dbs[dbi] = parent->dbs[dbi];
if (parent->cursors[dbi]) { if (parent->cursors[dbi]) {
@ -111,8 +107,7 @@ __noinline int dbi_import(MDBX_txn *txn, const size_t dbi) {
txn->dbi_state[dbi] = DBI_LINDO; txn->dbi_state[dbi] = DBI_LINDO;
} else { } else {
eASSERT(env, txn->dbi_seqs[dbi] != env->dbi_seqs[dbi].weak); eASSERT(env, txn->dbi_seqs[dbi] != env->dbi_seqs[dbi].weak);
if (unlikely((txn->dbi_state[dbi] & (DBI_VALID | DBI_OLDEN)) || txn->cursors[dbi])) {
/* the handle has already been used in the transaction, but was closed or re-opened,
 * or there are dangling cursors on an explicit handle re-open */
eASSERT(env, (txn->dbi_state[dbi] & DBI_STALE) == 0); eASSERT(env, (txn->dbi_state[dbi] & DBI_STALE) == 0);
@ -137,8 +132,7 @@ __noinline int dbi_import(MDBX_txn *txn, const size_t dbi) {
return MDBX_BAD_DBI; return MDBX_BAD_DBI;
} }
static int defer_and_release(MDBX_env *const env, defer_free_item_t *const chain) {
size_t length = 0; size_t length = 0;
defer_free_item_t *obsolete_chain = nullptr; defer_free_item_t *obsolete_chain = nullptr;
#if MDBX_ENABLE_DBI_LOCKFREE #if MDBX_ENABLE_DBI_LOCKFREE
@ -232,8 +226,7 @@ int dbi_update(MDBX_txn *txn, int keep) {
while ((env->dbs_flags[i - 1] & DB_VALID) == 0) { while ((env->dbs_flags[i - 1] & DB_VALID) == 0) {
--i; --i;
eASSERT(env, i >= CORE_DBS); eASSERT(env, i >= CORE_DBS);
eASSERT(env, !env->dbs_flags[i] && !env->kvs[i].name.iov_len && !env->kvs[i].name.iov_base);
} }
env->n_dbi = (unsigned)i; env->n_dbi = (unsigned)i;
defer_and_release(env, defer_chain); defer_and_release(env, defer_chain);
@ -241,21 +234,17 @@ int dbi_update(MDBX_txn *txn, int keep) {
return MDBX_SUCCESS; return MDBX_SUCCESS;
} }
int dbi_bind(MDBX_txn *txn, const size_t dbi, unsigned user_flags, MDBX_cmp_func *keycmp, MDBX_cmp_func *datacmp) {
const MDBX_env *const env = txn->env; const MDBX_env *const env = txn->env;
eASSERT(env, dbi < txn->n_dbi && dbi < env->n_dbi); eASSERT(env, dbi < txn->n_dbi && dbi < env->n_dbi);
eASSERT(env, dbi_state(txn, dbi) & DBI_LINDO); eASSERT(env, dbi_state(txn, dbi) & DBI_LINDO);
eASSERT(env, env->dbs_flags[dbi] != DB_POISON); eASSERT(env, env->dbs_flags[dbi] != DB_POISON);
if ((env->dbs_flags[dbi] & DB_VALID) == 0) { if ((env->dbs_flags[dbi] & DB_VALID) == 0) {
eASSERT(env, !env->kvs[dbi].clc.k.cmp && !env->kvs[dbi].clc.v.cmp && !env->kvs[dbi].name.iov_len &&
!env->kvs[dbi].name.iov_base && !env->kvs[dbi].clc.k.lmax && !env->kvs[dbi].clc.k.lmin &&
!env->kvs[dbi].clc.v.lmax && !env->kvs[dbi].clc.v.lmin);
} else { } else {
eASSERT(env, !(txn->dbi_state[dbi] & DBI_VALID) || (txn->dbs[dbi].flags | DB_VALID) == env->dbs_flags[dbi]);
eASSERT(env, env->kvs[dbi].name.iov_base || dbi < CORE_DBS); eASSERT(env, env->kvs[dbi].name.iov_base || dbi < CORE_DBS);
} }
@ -271,8 +260,7 @@ int dbi_bind(MDBX_txn *txn, const size_t dbi, unsigned user_flags,
* 4) user_flags differ, but the table is empty and the MDBX_CREATE flag is set
*    = assume the user is re-creating the table;
*/
if ((user_flags & ~MDBX_CREATE) != (unsigned)(env->dbs_flags[dbi] & DB_PERSISTENT_FLAGS)) {
/* flags are differs, check other conditions */ /* flags are differs, check other conditions */
if ((!user_flags && (!keycmp || keycmp == env->kvs[dbi].clc.k.cmp) && if ((!user_flags && (!keycmp || keycmp == env->kvs[dbi].clc.k.cmp) &&
(!datacmp || datacmp == env->kvs[dbi].clc.v.cmp)) || (!datacmp || datacmp == env->kvs[dbi].clc.v.cmp)) ||
@ -287,11 +275,8 @@ int dbi_bind(MDBX_txn *txn, const size_t dbi, unsigned user_flags,
if (unlikely(err == MDBX_SUCCESS)) if (unlikely(err == MDBX_SUCCESS))
return err; return err;
} }
eASSERT(env, ((env->dbs_flags[dbi] ^ txn->dbs[dbi].flags) & DB_PERSISTENT_FLAGS) == 0);
eASSERT(env, (txn->dbi_state[dbi] & (DBI_LINDO | DBI_VALID | DBI_STALE)) == (DBI_LINDO | DBI_VALID));
if (unlikely(txn->dbs[dbi].leaf_pages)) if (unlikely(txn->dbs[dbi].leaf_pages))
return /* FIXME: return extended info */ MDBX_INCOMPATIBLE; return /* FIXME: return extended info */ MDBX_INCOMPATIBLE;
@ -299,13 +284,11 @@ int dbi_bind(MDBX_txn *txn, const size_t dbi, unsigned user_flags,
if (unlikely(txn->cursors[dbi])) if (unlikely(txn->cursors[dbi]))
return MDBX_DANGLING_DBI; return MDBX_DANGLING_DBI;
env->dbs_flags[dbi] = DB_POISON; env->dbs_flags[dbi] = DB_POISON;
atomic_store32(&env->dbi_seqs[dbi], dbi_seq_next(env, dbi), atomic_store32(&env->dbi_seqs[dbi], dbi_seq_next(env, dbi), mo_AcquireRelease);
mo_AcquireRelease);
const uint32_t seq = dbi_seq_next(env, dbi); const uint32_t seq = dbi_seq_next(env, dbi);
const uint16_t db_flags = user_flags & DB_PERSISTENT_FLAGS; const uint16_t db_flags = user_flags & DB_PERSISTENT_FLAGS;
eASSERT(env, txn->dbs[dbi].height == 0 && txn->dbs[dbi].items == 0 && eASSERT(env, txn->dbs[dbi].height == 0 && txn->dbs[dbi].items == 0 && txn->dbs[dbi].root == P_INVALID);
txn->dbs[dbi].root == P_INVALID);
env->kvs[dbi].clc.k.cmp = keycmp ? keycmp : builtin_keycmp(user_flags); env->kvs[dbi].clc.k.cmp = keycmp ? keycmp : builtin_keycmp(user_flags);
env->kvs[dbi].clc.v.cmp = datacmp ? datacmp : builtin_datacmp(user_flags); env->kvs[dbi].clc.v.cmp = datacmp ? datacmp : builtin_datacmp(user_flags);
txn->dbs[dbi].flags = db_flags; txn->dbs[dbi].flags = db_flags;
@ -325,8 +308,7 @@ int dbi_bind(MDBX_txn *txn, const size_t dbi, unsigned user_flags,
} }
if (!keycmp) if (!keycmp)
keycmp = (env->dbs_flags[dbi] & DB_VALID) ? env->kvs[dbi].clc.k.cmp keycmp = (env->dbs_flags[dbi] & DB_VALID) ? env->kvs[dbi].clc.k.cmp : builtin_keycmp(user_flags);
: builtin_keycmp(user_flags);
if (env->kvs[dbi].clc.k.cmp != keycmp) { if (env->kvs[dbi].clc.k.cmp != keycmp) {
if (env->dbs_flags[dbi] & DB_VALID) if (env->dbs_flags[dbi] & DB_VALID)
return MDBX_EINVAL; return MDBX_EINVAL;
@ -334,8 +316,7 @@ int dbi_bind(MDBX_txn *txn, const size_t dbi, unsigned user_flags,
} }
if (!datacmp) if (!datacmp)
datacmp = (env->dbs_flags[dbi] & DB_VALID) ? env->kvs[dbi].clc.v.cmp datacmp = (env->dbs_flags[dbi] & DB_VALID) ? env->kvs[dbi].clc.v.cmp : builtin_datacmp(user_flags);
: builtin_datacmp(user_flags);
if (env->kvs[dbi].clc.v.cmp != datacmp) { if (env->kvs[dbi].clc.v.cmp != datacmp) {
if (env->dbs_flags[dbi] & DB_VALID) if (env->dbs_flags[dbi] & DB_VALID)
return MDBX_EINVAL; return MDBX_EINVAL;
@ -346,19 +327,15 @@ int dbi_bind(MDBX_txn *txn, const size_t dbi, unsigned user_flags,
} }
static inline size_t dbi_namelen(const MDBX_val name) { static inline size_t dbi_namelen(const MDBX_val name) {
return (name.iov_len > sizeof(defer_free_item_t)) ? name.iov_len : sizeof(defer_free_item_t);
} }
static int dbi_open_locked(MDBX_txn *txn, unsigned user_flags, MDBX_dbi *dbi, MDBX_cmp_func *keycmp,
MDBX_cmp_func *datacmp, MDBX_val name) {
MDBX_env *const env = txn->env;
/* Cannot mix named table(s) with DUPSORT flags */
tASSERT(txn, (txn->dbi_state[MAIN_DBI] & (DBI_LINDO | DBI_VALID | DBI_STALE)) == (DBI_LINDO | DBI_VALID));
if (unlikely(txn->dbs[MAIN_DBI].flags & MDBX_DUPSORT)) { if (unlikely(txn->dbs[MAIN_DBI].flags & MDBX_DUPSORT)) {
if (unlikely((user_flags & MDBX_CREATE) == 0)) if (unlikely((user_flags & MDBX_CREATE) == 0))
return MDBX_NOTFOUND; return MDBX_NOTFOUND;
@ -367,18 +344,15 @@ static int dbi_open_locked(MDBX_txn *txn, unsigned user_flags, MDBX_dbi *dbi,
return MDBX_INCOMPATIBLE; return MDBX_INCOMPATIBLE;
/* Re-create MainDB when it is empty. */
tASSERT(txn,
txn->dbs[MAIN_DBI].height == 0 && txn->dbs[MAIN_DBI].items == 0 && txn->dbs[MAIN_DBI].root == P_INVALID);
if (unlikely(txn->cursors[MAIN_DBI])) if (unlikely(txn->cursors[MAIN_DBI]))
return MDBX_DANGLING_DBI; return MDBX_DANGLING_DBI;
env->dbs_flags[MAIN_DBI] = DB_POISON; env->dbs_flags[MAIN_DBI] = DB_POISON;
atomic_store32(&env->dbi_seqs[MAIN_DBI], dbi_seq_next(env, MAIN_DBI), mo_AcquireRelease);
const uint32_t seq = dbi_seq_next(env, MAIN_DBI);
const uint16_t main_flags = txn->dbs[MAIN_DBI].flags & (MDBX_REVERSEKEY | MDBX_INTEGERKEY);
env->kvs[MAIN_DBI].clc.k.cmp = builtin_keycmp(main_flags); env->kvs[MAIN_DBI].clc.k.cmp = builtin_keycmp(main_flags);
env->kvs[MAIN_DBI].clc.v.cmp = builtin_datacmp(main_flags); env->kvs[MAIN_DBI].clc.v.cmp = builtin_datacmp(main_flags);
txn->dbs[MAIN_DBI].flags = main_flags; txn->dbs[MAIN_DBI].flags = main_flags;
@ -391,8 +365,7 @@ static int dbi_open_locked(MDBX_txn *txn, unsigned user_flags, MDBX_dbi *dbi,
return err; return err;
} }
env->dbs_flags[MAIN_DBI] = main_flags | DB_VALID; env->dbs_flags[MAIN_DBI] = main_flags | DB_VALID;
txn->dbi_seqs[MAIN_DBI] = txn->dbi_seqs[MAIN_DBI] = atomic_store32(&env->dbi_seqs[MAIN_DBI], seq, mo_AcquireRelease);
atomic_store32(&env->dbi_seqs[MAIN_DBI], seq, mo_AcquireRelease);
txn->dbi_state[MAIN_DBI] |= DBI_DIRTY; txn->dbi_state[MAIN_DBI] |= DBI_DIRTY;
txn->flags |= MDBX_TXN_DIRTY; txn->flags |= MDBX_TXN_DIRTY;
} }
@ -410,8 +383,7 @@ static int dbi_open_locked(MDBX_txn *txn, unsigned user_flags, MDBX_dbi *dbi,
if (!env->kvs[MAIN_DBI].clc.k.cmp(&name, &env->kvs[scan].name)) { if (!env->kvs[MAIN_DBI].clc.k.cmp(&name, &env->kvs[scan].name)) {
slot = scan; slot = scan;
int err = dbi_check(txn, slot); int err = dbi_check(txn, slot);
if (err == MDBX_BAD_DBI && txn->dbi_state[slot] == (DBI_OLDEN | DBI_LINDO)) {
/* the handle was used and became invalid,
 * but is now being explicitly re-opened in this transaction */
eASSERT(env, !txn->cursors[slot]); eASSERT(env, !txn->cursors[slot]);
@ -433,12 +405,10 @@ static int dbi_open_locked(MDBX_txn *txn, unsigned user_flags, MDBX_dbi *dbi,
return MDBX_DBS_FULL; return MDBX_DBS_FULL;
if (env->n_dbi == slot) if (env->n_dbi == slot)
eASSERT(env, !env->dbs_flags[slot] && !env->kvs[slot].name.iov_len && !env->kvs[slot].name.iov_base);
env->dbs_flags[slot] = DB_POISON;
atomic_store32(&env->dbi_seqs[slot], dbi_seq_next(env, slot), mo_AcquireRelease);
memset(&env->kvs[slot], 0, sizeof(env->kvs[slot])); memset(&env->kvs[slot], 0, sizeof(env->kvs[slot]));
if (env->n_dbi == slot) if (env->n_dbi == slot)
env->n_dbi = (unsigned)slot + 1; env->n_dbi = (unsigned)slot + 1;
@ -461,13 +431,11 @@ static int dbi_open_locked(MDBX_txn *txn, unsigned user_flags, MDBX_dbi *dbi,
return rc; return rc;
} else { } else {
/* make sure this is actually a table */ /* make sure this is actually a table */
node_t *node = node_t *node = page_node(cx.outer.pg[cx.outer.top], cx.outer.ki[cx.outer.top]);
page_node(cx.outer.pg[cx.outer.top], cx.outer.ki[cx.outer.top]);
if (unlikely((node_flags(node) & (N_DUP | N_TREE)) != N_TREE)) if (unlikely((node_flags(node) & (N_DUP | N_TREE)) != N_TREE))
return MDBX_INCOMPATIBLE; return MDBX_INCOMPATIBLE;
if (!MDBX_DISABLE_VALIDATION && unlikely(body.iov_len != sizeof(tree_t))) { if (!MDBX_DISABLE_VALIDATION && unlikely(body.iov_len != sizeof(tree_t))) {
ERROR("%s/%d: %s %zu", "MDBX_CORRUPTED", MDBX_CORRUPTED, "invalid table node size", body.iov_len);
return MDBX_CORRUPTED; return MDBX_CORRUPTED;
} }
memcpy(&txn->dbs[slot], body.iov_base, sizeof(tree_t)); memcpy(&txn->dbs[slot], body.iov_base, sizeof(tree_t));
@ -490,8 +458,7 @@ static int dbi_open_locked(MDBX_txn *txn, unsigned user_flags, MDBX_dbi *dbi,
txn->dbs[slot].flags = user_flags & DB_PERSISTENT_FLAGS; txn->dbs[slot].flags = user_flags & DB_PERSISTENT_FLAGS;
cx.outer.next = txn->cursors[MAIN_DBI]; cx.outer.next = txn->cursors[MAIN_DBI];
txn->cursors[MAIN_DBI] = &cx.outer; txn->cursors[MAIN_DBI] = &cx.outer;
rc = rc = cursor_put_checklen(&cx.outer, &name, &body, N_TREE | MDBX_NOOVERWRITE);
cursor_put_checklen(&cx.outer, &name, &body, N_TREE | MDBX_NOOVERWRITE);
txn->cursors[MAIN_DBI] = cx.outer.next; txn->cursors[MAIN_DBI] = cx.outer.next;
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
goto bailout; goto bailout;
@ -503,9 +470,8 @@ static int dbi_open_locked(MDBX_txn *txn, unsigned user_flags, MDBX_dbi *dbi,
/* Got info, register DBI in this txn */ /* Got info, register DBI in this txn */
const uint32_t seq = dbi_seq_next(env, slot); const uint32_t seq = dbi_seq_next(env, slot);
eASSERT(env, env->dbs_flags[slot] == DB_POISON && !txn->cursors[slot] &&
(txn->dbi_state[slot] & (DBI_LINDO | DBI_VALID)) == DBI_LINDO);
txn->dbi_state[slot] = dbi_state; txn->dbi_state[slot] = dbi_state;
memcpy(&txn->dbs[slot], body.iov_base, sizeof(txn->dbs[slot])); memcpy(&txn->dbs[slot], body.iov_base, sizeof(txn->dbs[slot]));
env->dbs_flags[slot] = txn->dbs[slot].flags; env->dbs_flags[slot] = txn->dbs[slot].flags;
@ -515,8 +481,7 @@ static int dbi_open_locked(MDBX_txn *txn, unsigned user_flags, MDBX_dbi *dbi,
env->kvs[slot].name = name; env->kvs[slot].name = name;
env->dbs_flags[slot] = txn->dbs[slot].flags | DB_VALID; env->dbs_flags[slot] = txn->dbs[slot].flags | DB_VALID;
txn->dbi_seqs[slot] = txn->dbi_seqs[slot] = atomic_store32(&env->dbi_seqs[slot], seq, mo_AcquireRelease);
atomic_store32(&env->dbi_seqs[slot], seq, mo_AcquireRelease);
done: done:
*dbi = (MDBX_dbi)slot; *dbi = (MDBX_dbi)slot;
@ -525,8 +490,7 @@ done:
return MDBX_SUCCESS; return MDBX_SUCCESS;
bailout: bailout:
eASSERT(env, !txn->cursors[slot] && !env->kvs[slot].name.iov_len && eASSERT(env, !txn->cursors[slot] && !env->kvs[slot].name.iov_len && !env->kvs[slot].name.iov_base);
!env->kvs[slot].name.iov_base);
txn->dbi_state[slot] &= DBI_LINDO | DBI_OLDEN; txn->dbi_state[slot] &= DBI_LINDO | DBI_OLDEN;
env->dbs_flags[slot] = 0; env->dbs_flags[slot] = 0;
osal_free(clone); osal_free(clone);
@ -535,14 +499,13 @@ bailout:
return rc; return rc;
} }
int dbi_open(MDBX_txn *txn, const MDBX_val *const name, unsigned user_flags, MDBX_dbi *dbi, MDBX_cmp_func *keycmp,
MDBX_cmp_func *datacmp) {
if (unlikely(!dbi)) if (unlikely(!dbi))
return MDBX_EINVAL; return MDBX_EINVAL;
*dbi = 0; *dbi = 0;
if (user_flags != MDBX_ACCEDE && unlikely(!check_table_flags(user_flags & ~MDBX_CREATE)))
return MDBX_EINVAL; return MDBX_EINVAL;
int rc = check_txn(txn, MDBX_TXN_BLOCKED); int rc = check_txn(txn, MDBX_TXN_BLOCKED);
@ -567,8 +530,7 @@ int dbi_open(MDBX_txn *txn, const MDBX_val *const name, unsigned user_flags,
} }
if (unlikely(name == MDBX_CHK_META || name->iov_base == MDBX_CHK_META)) if (unlikely(name == MDBX_CHK_META || name->iov_base == MDBX_CHK_META))
return MDBX_EINVAL; return MDBX_EINVAL;
if (unlikely(name->iov_len > if (unlikely(name->iov_len > txn->env->leaf_nodemax - NODESIZE - sizeof(tree_t)))
txn->env->leaf_nodemax - NODESIZE - sizeof(tree_t)))
return MDBX_EINVAL; return MDBX_EINVAL;
#if MDBX_ENABLE_DBI_LOCKFREE #if MDBX_ENABLE_DBI_LOCKFREE
@ -582,31 +544,24 @@ int dbi_open(MDBX_txn *txn, const MDBX_val *const name, unsigned user_flags,
continue; continue;
} }
const uint32_t snap_seq = atomic_load32(&env->dbi_seqs[i], mo_AcquireRelease);
const uint16_t snap_flags = env->dbs_flags[i];
const MDBX_val snap_name = env->kvs[i].name;
if (user_flags != MDBX_ACCEDE &&
(((user_flags ^ snap_flags) & DB_PERSISTENT_FLAGS) || (keycmp && keycmp != env->kvs[i].clc.k.cmp) ||
(datacmp && datacmp != env->kvs[i].clc.v.cmp)))
continue;
const uint32_t main_seq = atomic_load32(&env->dbi_seqs[MAIN_DBI], mo_AcquireRelease);
MDBX_cmp_func *const snap_cmp = env->kvs[MAIN_DBI].clc.k.cmp;
if (unlikely(!(snap_flags & DB_VALID) || !snap_name.iov_base || !snap_name.iov_len || !snap_cmp))
continue;
const bool name_match = snap_cmp(&snap_name, name) == 0;
osal_flush_incoherent_cpu_writeback();
if (unlikely(snap_seq != atomic_load32(&env->dbi_seqs[i], mo_AcquireRelease) ||
main_seq != atomic_load32(&env->dbi_seqs[MAIN_DBI], mo_AcquireRelease) ||
snap_flags != env->dbs_flags[i] || snap_name.iov_base != env->kvs[i].name.iov_base ||
snap_name.iov_len != env->kvs[i].name.iov_len))
goto retry;
if (name_match) { if (name_match) {
rc = dbi_check(txn, i); rc = dbi_check(txn, i);
@ -634,18 +589,15 @@ int dbi_open(MDBX_txn *txn, const MDBX_val *const name, unsigned user_flags,
rc = osal_fastmutex_acquire(&txn->env->dbi_lock); rc = osal_fastmutex_acquire(&txn->env->dbi_lock);
if (likely(rc == MDBX_SUCCESS)) { if (likely(rc == MDBX_SUCCESS)) {
rc = dbi_open_locked(txn, user_flags, dbi, keycmp, datacmp, *name); rc = dbi_open_locked(txn, user_flags, dbi, keycmp, datacmp, *name);
ENSURE(txn->env, osal_fastmutex_release(&txn->env->dbi_lock) == MDBX_SUCCESS);
} }
return rc; return rc;
} }
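
The MDBX_ENABLE_DBI_LOCKFREE fast path above is an optimistic, seqlock-style read: it samples the per-slot and MAIN_DBI sequence numbers, copies the slot's name and flags, then re-reads the sequences and jumps to `retry` (ultimately falling back to taking dbi_lock) if anything moved underneath it. Below is a simplified sketch of that pattern using C11 atomics and invented types; the real code uses its own atomics, sequence helpers and cache-coherence primitives.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Invented type: a record guarded by a sequence counter that writers bump. */
typedef struct shared_rec {
  _Atomic uint32_t seq;
  char name[64];
  unsigned flags;
} shared_rec_t;

/* Optimistic snapshot: succeed only if the sequence did not change while copying. */
static bool snapshot_rec(shared_rec_t *rec, char out_name[64], unsigned *out_flags) {
  for (int attempt = 0; attempt < 8; ++attempt) {
    const uint32_t before = atomic_load_explicit(&rec->seq, memory_order_acquire);
    memcpy(out_name, rec->name, sizeof(rec->name));
    *out_flags = rec->flags;
    const uint32_t after = atomic_load_explicit(&rec->seq, memory_order_acquire);
    if (before == after)
      return true; /* no writer raced us between the two reads */
  }
  return false; /* persistent contention: the caller should take the lock instead */
}
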
static int dbi_open_cstr(MDBX_txn *txn, const char *name_cstr, MDBX_db_flags_t flags, MDBX_dbi *dbi,
MDBX_cmp_func *keycmp, MDBX_cmp_func *datacmp) {
MDBX_val thunk, *name; MDBX_val thunk, *name;
if (name_cstr == MDBX_CHK_MAIN || name_cstr == MDBX_CHK_GC || if (name_cstr == MDBX_CHK_MAIN || name_cstr == MDBX_CHK_GC || name_cstr == MDBX_CHK_META)
name_cstr == MDBX_CHK_META)
name = (void *)name_cstr; name = (void *)name_cstr;
else { else {
thunk.iov_len = strlen(name_cstr); thunk.iov_len = strlen(name_cstr);
@ -660,8 +612,7 @@ struct dbi_rename_result {
int err; int err;
}; };
__cold static struct dbi_rename_result dbi_rename_locked(MDBX_txn *txn, MDBX_dbi dbi, MDBX_val new_name) {
struct dbi_rename_result pair; struct dbi_rename_result pair;
pair.defer = nullptr; pair.defer = nullptr;
pair.err = dbi_check(txn, dbi); pair.err = dbi_check(txn, dbi);
@ -670,8 +621,7 @@ dbi_rename_locked(MDBX_txn *txn, MDBX_dbi dbi, MDBX_val new_name) {
MDBX_env *const env = txn->env; MDBX_env *const env = txn->env;
MDBX_val old_name = env->kvs[dbi].name; MDBX_val old_name = env->kvs[dbi].name;
if (env->kvs[MAIN_DBI].clc.k.cmp(&new_name, &old_name) == 0 && if (env->kvs[MAIN_DBI].clc.k.cmp(&new_name, &old_name) == 0 && MDBX_DEBUG == 0)
MDBX_DEBUG == 0)
return pair; return pair;
cursor_couple_t cx; cursor_couple_t cx;
@ -695,8 +645,7 @@ dbi_rename_locked(MDBX_txn *txn, MDBX_dbi dbi, MDBX_val new_name) {
txn->cursors[MAIN_DBI] = &cx.outer; txn->cursors[MAIN_DBI] = &cx.outer;
MDBX_val data = {&txn->dbs[dbi], sizeof(tree_t)}; MDBX_val data = {&txn->dbs[dbi], sizeof(tree_t)};
pair.err = cursor_put_checklen(&cx.outer, &new_name, &data, pair.err = cursor_put_checklen(&cx.outer, &new_name, &data, N_TREE | MDBX_NOOVERWRITE);
N_TREE | MDBX_NOOVERWRITE);
if (likely(pair.err == MDBX_SUCCESS)) { if (likely(pair.err == MDBX_SUCCESS)) {
pair.err = cursor_seek(&cx.outer, &old_name, nullptr, MDBX_SET).err; pair.err = cursor_seek(&cx.outer, &old_name, nullptr, MDBX_SET).err;
if (likely(pair.err == MDBX_SUCCESS)) if (likely(pair.err == MDBX_SUCCESS))
@ -732,8 +681,7 @@ static defer_free_item_t *dbi_close_locked(MDBX_env *env, MDBX_dbi dbi) {
do { do {
--i; --i;
eASSERT(env, i >= CORE_DBS); eASSERT(env, i >= CORE_DBS);
eASSERT(env, !env->dbs_flags[i] && !env->kvs[i].name.iov_len && eASSERT(env, !env->dbs_flags[i] && !env->kvs[i].name.iov_len && !env->kvs[i].name.iov_base);
!env->kvs[i].name.iov_base);
} while (i > CORE_DBS && !env->kvs[i - 1].name.iov_base); } while (i > CORE_DBS && !env->kvs[i - 1].name.iov_base);
env->n_dbi = (unsigned)i; env->n_dbi = (unsigned)i;
} }
@ -745,25 +693,21 @@ static defer_free_item_t *dbi_close_locked(MDBX_env *env, MDBX_dbi dbi) {
/*----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*/
/* API */ /* API */
int mdbx_dbi_open(MDBX_txn *txn, const char *name, MDBX_db_flags_t flags, int mdbx_dbi_open(MDBX_txn *txn, const char *name, MDBX_db_flags_t flags, MDBX_dbi *dbi) {
MDBX_dbi *dbi) {
return LOG_IFERR(dbi_open_cstr(txn, name, flags, dbi, nullptr, nullptr)); return LOG_IFERR(dbi_open_cstr(txn, name, flags, dbi, nullptr, nullptr));
} }
int mdbx_dbi_open2(MDBX_txn *txn, const MDBX_val *name, MDBX_db_flags_t flags, int mdbx_dbi_open2(MDBX_txn *txn, const MDBX_val *name, MDBX_db_flags_t flags, MDBX_dbi *dbi) {
MDBX_dbi *dbi) {
return LOG_IFERR(dbi_open(txn, name, flags, dbi, nullptr, nullptr)); return LOG_IFERR(dbi_open(txn, name, flags, dbi, nullptr, nullptr));
} }
int mdbx_dbi_open_ex(MDBX_txn *txn, const char *name, MDBX_db_flags_t flags, MDBX_dbi *dbi, MDBX_cmp_func *keycmp,
MDBX_cmp_func *datacmp) {
return LOG_IFERR(dbi_open_cstr(txn, name, flags, dbi, keycmp, datacmp)); return LOG_IFERR(dbi_open_cstr(txn, name, flags, dbi, keycmp, datacmp));
} }
int mdbx_dbi_open_ex2(MDBX_txn *txn, const MDBX_val *name, MDBX_db_flags_t flags, MDBX_dbi *dbi, MDBX_cmp_func *keycmp,
MDBX_cmp_func *datacmp) {
return LOG_IFERR(dbi_open(txn, name, flags, dbi, keycmp, datacmp)); return LOG_IFERR(dbi_open(txn, name, flags, dbi, keycmp, datacmp));
} }
@ -780,8 +724,7 @@ __cold int mdbx_drop(MDBX_txn *txn, MDBX_dbi dbi, bool del) {
if (txn->dbs[dbi].height) { if (txn->dbs[dbi].height) {
cx.outer.next = txn->cursors[dbi]; cx.outer.next = txn->cursors[dbi];
txn->cursors[dbi] = &cx.outer; txn->cursors[dbi] = &cx.outer;
rc = tree_drop(&cx.outer, rc = tree_drop(&cx.outer, dbi == MAIN_DBI || (cx.outer.tree->flags & MDBX_DUPSORT));
dbi == MAIN_DBI || (cx.outer.tree->flags & MDBX_DUPSORT));
txn->cursors[dbi] = cx.outer.next; txn->cursors[dbi] = cx.outer.next;
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
return LOG_IFERR(rc); return LOG_IFERR(rc);
@ -832,8 +775,7 @@ __cold int mdbx_drop(MDBX_txn *txn, MDBX_dbi dbi, bool del) {
__cold int mdbx_dbi_rename(MDBX_txn *txn, MDBX_dbi dbi, const char *name_cstr) { __cold int mdbx_dbi_rename(MDBX_txn *txn, MDBX_dbi dbi, const char *name_cstr) {
MDBX_val thunk, *name; MDBX_val thunk, *name;
if (name_cstr == MDBX_CHK_MAIN || name_cstr == MDBX_CHK_GC || if (name_cstr == MDBX_CHK_MAIN || name_cstr == MDBX_CHK_GC || name_cstr == MDBX_CHK_META)
name_cstr == MDBX_CHK_META)
name = (void *)name_cstr; name = (void *)name_cstr;
else { else {
thunk.iov_len = strlen(name_cstr); thunk.iov_len = strlen(name_cstr);
@ -860,8 +802,7 @@ int mdbx_dbi_close(MDBX_env *env, MDBX_dbi dbi) {
rc = osal_fastmutex_acquire(&env->dbi_lock); rc = osal_fastmutex_acquire(&env->dbi_lock);
if (likely(rc == MDBX_SUCCESS && dbi < env->n_dbi)) { if (likely(rc == MDBX_SUCCESS && dbi < env->n_dbi)) {
retry: retry:
if (env->basal_txn && (env->dbs_flags[dbi] & DB_VALID) && (env->basal_txn->flags & MDBX_TXN_FINISHED) == 0) {
/* LY: Dangerous code, since env->txn may be changed by another thread.
 * Unfortunately there is no reliable solution here, and a crash is possible
 * on incorrect use of the API (calling mdbx_dbi_close concurrently with completion of
@ -884,8 +825,7 @@ int mdbx_dbi_close(MDBX_env *env, MDBX_dbi dbi) {
* the transaction, and therefore this path is potentially more dangerous. */
const MDBX_txn *const hazard = env->txn; const MDBX_txn *const hazard = env->txn;
osal_compiler_barrier(); osal_compiler_barrier();
if ((dbi_state(env->basal_txn, dbi) & (DBI_LINDO | DBI_DIRTY | DBI_CREAT)) > DBI_LINDO) {
bailout_dirty_dbi: bailout_dirty_dbi:
osal_fastmutex_release(&env->dbi_lock); osal_fastmutex_release(&env->dbi_lock);
return LOG_IFERR(MDBX_DANGLING_DBI); return LOG_IFERR(MDBX_DANGLING_DBI);
@ -893,11 +833,9 @@ int mdbx_dbi_close(MDBX_env *env, MDBX_dbi dbi) {
osal_memory_barrier(); osal_memory_barrier();
if (unlikely(hazard != env->txn)) if (unlikely(hazard != env->txn))
goto retry; goto retry;
if (hazard != env->basal_txn && hazard && (hazard->flags & MDBX_TXN_FINISHED) == 0 &&
hazard->signature == txn_signature &&
(dbi_state(hazard, dbi) & (DBI_LINDO | DBI_DIRTY | DBI_CREAT)) > DBI_LINDO)
goto bailout_dirty_dbi; goto bailout_dirty_dbi;
osal_compiler_barrier(); osal_compiler_barrier();
if (unlikely(hazard != env->txn)) if (unlikely(hazard != env->txn))
@ -908,8 +846,7 @@ int mdbx_dbi_close(MDBX_env *env, MDBX_dbi dbi) {
return LOG_IFERR(rc); return LOG_IFERR(rc);
} }
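
The hazard dance above reads env->txn, inspects the state it points to, and only trusts the answer after confirming that env->txn has not changed in the meantime, retrying otherwise; the original relies on compiler and memory barriers rather than C11 atomics. The toy sketch below shows the same re-read-and-retry idea; the types, the atomics and the "dirtiness" check are invented for illustration.

#include <stdatomic.h>
#include <stdbool.h>

/* Toy types; the real code checks dbi_state() bits and uses its own barriers. */
typedef struct toy_txn {
  int dirty;
} toy_txn_t;

typedef struct toy_env {
  _Atomic(toy_txn_t *) txn; /* may be swapped by another thread at any moment */
} toy_env_t;

static bool safe_to_close(toy_env_t *env) {
  for (;;) {
    toy_txn_t *const hazard = atomic_load_explicit(&env->txn, memory_order_acquire);
    const bool dirty = hazard && hazard->dirty; /* may observe a stale snapshot */
    if (hazard != atomic_load_explicit(&env->txn, memory_order_acquire))
      continue; /* env->txn changed underneath us: redo the whole check */
    return !dirty;
  }
}
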
int mdbx_dbi_flags_ex(const MDBX_txn *txn, MDBX_dbi dbi, unsigned *flags, unsigned *state) {
if (unlikely(!flags || !state)) if (unlikely(!flags || !state))
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
@ -928,21 +865,17 @@ int mdbx_dbi_flags_ex(const MDBX_txn *txn, MDBX_dbi dbi, unsigned *flags,
} }
*flags = txn->dbs[dbi].flags & DB_PERSISTENT_FLAGS; *flags = txn->dbs[dbi].flags & DB_PERSISTENT_FLAGS;
*state = txn->dbi_state[dbi] & (DBI_FRESH | DBI_CREAT | DBI_DIRTY | DBI_STALE);
return MDBX_SUCCESS; return MDBX_SUCCESS;
} }
__cold int mdbx_dbi_rename2(MDBX_txn *txn, MDBX_dbi dbi, const MDBX_val *new_name) {
int rc = check_txn_rw(txn, MDBX_TXN_BLOCKED); int rc = check_txn_rw(txn, MDBX_TXN_BLOCKED);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
return LOG_IFERR(rc); return LOG_IFERR(rc);
if (unlikely(new_name == MDBX_CHK_MAIN || new_name->iov_base == MDBX_CHK_MAIN || new_name == MDBX_CHK_GC ||
new_name->iov_base == MDBX_CHK_GC || new_name == MDBX_CHK_META || new_name->iov_base == MDBX_CHK_META))
return LOG_IFERR(MDBX_EINVAL);
if (unlikely(dbi < CORE_DBS)) if (unlikely(dbi < CORE_DBS))
@ -968,13 +901,11 @@ static void stat_get(const tree_t *db, MDBX_stat *st, size_t bytes) {
st->ms_leaf_pages = db->leaf_pages; st->ms_leaf_pages = db->leaf_pages;
st->ms_overflow_pages = db->large_pages; st->ms_overflow_pages = db->large_pages;
st->ms_entries = db->items; st->ms_entries = db->items;
if (likely(bytes >= offsetof(MDBX_stat, ms_mod_txnid) + sizeof(st->ms_mod_txnid)))
st->ms_mod_txnid = db->mod_txnid; st->ms_mod_txnid = db->mod_txnid;
} }
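
stat_get() fills only as much of the caller's MDBX_stat as the caller-declared size allows, so older callers that pass a smaller struct never have the trailing ms_mod_txnid field written. A minimal sketch of this size-gated fill; the struct below is a stand-in, not the real MDBX_stat layout.

#include <stddef.h>
#include <stdint.h>

/* Stand-in struct: the real MDBX_stat has more fields and different names. */
typedef struct toy_stat {
  uint32_t depth;
  uint64_t entries;
  uint64_t mod_txnid; /* field appended in a later ABI revision */
} toy_stat_t;

static void toy_stat_get(uint32_t depth, uint64_t entries, uint64_t mod_txnid, toy_stat_t *st, size_t bytes) {
  st->depth = depth; /* fields from the original ABI are always filled */
  st->entries = entries;
  /* Older callers pass a smaller `bytes`; touch the newer field only when it fits. */
  if (bytes >= offsetof(toy_stat_t, mod_txnid) + sizeof(st->mod_txnid))
    st->mod_txnid = mod_txnid;
}
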
__cold int mdbx_dbi_stat(const MDBX_txn *txn, MDBX_dbi dbi, MDBX_stat *dest, size_t bytes) {
if (unlikely(!dest)) if (unlikely(!dest))
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
@ -1012,8 +943,7 @@ bailout:
return LOG_IFERR(rc); return LOG_IFERR(rc);
} }
__cold const tree_t *dbi_dig(const MDBX_txn *txn, const size_t dbi, tree_t *fallback) {
const MDBX_txn *dig = txn; const MDBX_txn *dig = txn;
do { do {
tASSERT(txn, txn->n_dbi == dig->n_dbi); tASSERT(txn, txn->n_dbi == dig->n_dbi);
@ -1036,8 +966,7 @@ __cold const tree_t *dbi_dig(const MDBX_txn *txn, const size_t dbi,
return fallback; return fallback;
} }
__cold int mdbx_enumerate_tables(const MDBX_txn *txn, MDBX_table_enum_func *func, void *ctx) {
if (unlikely(!func)) if (unlikely(!func))
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
@ -1054,13 +983,12 @@ __cold int mdbx_enumerate_tables(const MDBX_txn *txn,
txn->cursors[MAIN_DBI] = &cx.outer; txn->cursors[MAIN_DBI] = &cx.outer;
for (rc = outer_first(&cx.outer, nullptr, nullptr); rc == MDBX_SUCCESS; for (rc = outer_first(&cx.outer, nullptr, nullptr); rc == MDBX_SUCCESS;
rc = outer_next(&cx.outer, nullptr, nullptr, MDBX_NEXT_NODUP)) { rc = outer_next(&cx.outer, nullptr, nullptr, MDBX_NEXT_NODUP)) {
node_t *node = node_t *node = page_node(cx.outer.pg[cx.outer.top], cx.outer.ki[cx.outer.top]);
page_node(cx.outer.pg[cx.outer.top], cx.outer.ki[cx.outer.top]);
if (node_flags(node) != N_TREE) if (node_flags(node) != N_TREE)
continue; continue;
if (unlikely(node_ds(node) != sizeof(tree_t))) { if (unlikely(node_ds(node) != sizeof(tree_t))) {
ERROR("%s/%d: %s %u", "MDBX_CORRUPTED", MDBX_CORRUPTED, "invalid dupsort sub-tree node size",
(unsigned)node_ds(node));
rc = MDBX_CORRUPTED; rc = MDBX_CORRUPTED;
break; break;
} }


@@ -7,8 +8,8 @@
#if MDBX_ENABLE_DBI_SPARSE

MDBX_NOTHROW_CONST_FUNCTION MDBX_MAYBE_UNUSED MDBX_INTERNAL size_t dbi_bitmap_ctz_fallback(const MDBX_txn *txn,
                                                                                            intptr_t bmi);

static inline size_t dbi_bitmap_ctz(const MDBX_txn *txn, intptr_t bmi) {
  tASSERT(txn, bmi > 0);

@@ -18,8 +18,7 @@ static inline size_t dbi_bitmap_ctz(const MDBX_txn *txn, intptr_t bmi) {
    return __builtin_ctz((int)bmi);
  if (sizeof(txn->dbi_sparse[0]) == sizeof(long))
    return __builtin_ctzl((long)bmi);
#if (defined(__SIZEOF_LONG_LONG__) && __SIZEOF_LONG_LONG__ == 8) || __has_builtin(__builtin_ctzll)
  return __builtin_ctzll(bmi);
#endif /* have(long long) && long long == uint64_t */
#endif /* GNU C */

@@ -46,27 +45,27 @@ static inline size_t dbi_bitmap_ctz(const MDBX_txn *txn, intptr_t bmi) {
/* LY: The macro is deliberately built around a single loop, so that the break
 * statement remains usable inside its body */
#define TXN_FOREACH_DBI_FROM(TXN, I, FROM) \
  for (size_t bitmap_chunk = CHAR_BIT * sizeof(TXN->dbi_sparse[0]), bitmap_item = TXN->dbi_sparse[0] >> FROM, \
              I = FROM; \
       I < TXN->n_dbi; ++I) \
    if (bitmap_item == 0) { \
      I = (I - 1) | (bitmap_chunk - 1); \
      bitmap_item = TXN->dbi_sparse[(1 + I) / bitmap_chunk]; \
      if (!bitmap_item) \
        I += bitmap_chunk; \
      continue; \
    } else if ((bitmap_item & 1) == 0) { \
      size_t bitmap_skip = dbi_bitmap_ctz(txn, bitmap_item); \
      bitmap_item >>= bitmap_skip; \
      I += bitmap_skip - 1; \
      continue; \
    } else if (bitmap_item >>= 1, TXN->dbi_state[I])
#else
#define TXN_FOREACH_DBI_FROM(TXN, I, SKIP) \
  for (size_t I = SKIP; I < TXN->n_dbi; ++I) \
    if (TXN->dbi_state[I])
#endif /* MDBX_ENABLE_DBI_SPARSE */
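
/* Illustration (not part of the commit): a minimal, self-contained sketch of the bitmap-skip idea behind
 * TXN_FOREACH_DBI_FROM above — visit only the set bits of a sparse bitmap while staying inside a single loop,
 * so that a `break` in the body still leaves the whole iteration. The helper name, the fixed 64-bit chunk and
 * the sample data are assumptions made only for this example. */
#include <stdint.h>
#include <stdio.h>

static void visit_set_bits(const uint64_t *bitmap, size_t nbits) {
  for (size_t i = 0; i < nbits; ++i) {
    const uint64_t chunk = bitmap[i / 64];
    if (chunk == 0) {
      i |= 63; /* an empty 64-bit chunk is skipped in one step */
      continue;
    }
    if ((chunk >> (i % 64)) & 1)
      printf("slot %zu is in use\n", i); /* a `break` here would leave the single loop directly */
  }
}

int main(void) {
  const uint64_t bitmap[2] = {0x05, 0x01}; /* slots 0, 2 and 64 are "in use" */
  visit_set_bits(bitmap, 128);
  return 0;
}
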
@@ -80,24 +79,19 @@ struct dbi_snap_result {
  uint32_t sequence;
  unsigned flags;
};

MDBX_INTERNAL struct dbi_snap_result dbi_snap(const MDBX_env *env, const size_t dbi);

MDBX_INTERNAL int dbi_update(MDBX_txn *txn, int keep);

static inline uint8_t dbi_state(const MDBX_txn *txn, const size_t dbi) {
  STATIC_ASSERT((int)DBI_DIRTY == MDBX_DBI_DIRTY && (int)DBI_STALE == MDBX_DBI_STALE &&
                (int)DBI_FRESH == MDBX_DBI_FRESH && (int)DBI_CREAT == MDBX_DBI_CREAT);

#if MDBX_ENABLE_DBI_SPARSE
  const size_t bitmap_chunk = CHAR_BIT * sizeof(txn->dbi_sparse[0]);
  const size_t bitmap_indx = dbi / bitmap_chunk;
  const size_t bitmap_mask = (size_t)1 << dbi % bitmap_chunk;
  return likely(dbi < txn->n_dbi && (txn->dbi_sparse[bitmap_indx] & bitmap_mask) != 0) ? txn->dbi_state[dbi] : 0;
#else
  return likely(dbi < txn->n_dbi) ? txn->dbi_state[dbi] : 0;
#endif /* MDBX_ENABLE_DBI_SPARSE */

@@ -106,8 +100,7 @@ static inline uint8_t dbi_state(const MDBX_txn *txn, const size_t dbi) {
static inline bool dbi_changed(const MDBX_txn *txn, const size_t dbi) {
  const MDBX_env *const env = txn->env;
  eASSERT(env, dbi_state(txn, dbi) & DBI_LINDO);
  const uint32_t snap_seq = atomic_load32(&env->dbi_seqs[dbi], mo_AcquireRelease);
  return snap_seq != txn->dbi_seqs[dbi];
}

@@ -125,12 +118,10 @@ static inline uint32_t dbi_seq_next(const MDBX_env *const env, size_t dbi) {
  return v ? v : 1;
}

MDBX_INTERNAL int dbi_open(MDBX_txn *txn, const MDBX_val *const name, unsigned user_flags, MDBX_dbi *dbi,
                           MDBX_cmp_func *keycmp, MDBX_cmp_func *datacmp);

MDBX_INTERNAL int dbi_bind(MDBX_txn *txn, const size_t dbi, unsigned user_flags, MDBX_cmp_func *keycmp,
                           MDBX_cmp_func *datacmp);

MDBX_INTERNAL const tree_t *dbi_dig(const MDBX_txn *txn, const size_t dbi, tree_t *fallback);


@@ -9,28 +9,22 @@
#pragma push_macro("eASSERT")

#undef TRACE
#define TRACE(fmt, ...) debug_log(MDBX_LOG_TRACE, __func__, __LINE__, fmt "\n", __VA_ARGS__)

#undef DEBUG
#define DEBUG(fmt, ...) debug_log(MDBX_LOG_DEBUG, __func__, __LINE__, fmt "\n", __VA_ARGS__)

#undef VERBOSE
#define VERBOSE(fmt, ...) debug_log(MDBX_LOG_VERBOSE, __func__, __LINE__, fmt "\n", __VA_ARGS__)

#undef NOTICE
#define NOTICE(fmt, ...) debug_log(MDBX_LOG_NOTICE, __func__, __LINE__, fmt "\n", __VA_ARGS__)

#undef WARNING
#define WARNING(fmt, ...) debug_log(MDBX_LOG_WARN, __func__, __LINE__, fmt "\n", __VA_ARGS__)

#undef ERROR
#define ERROR(fmt, ...) debug_log(MDBX_LOG_ERROR, __func__, __LINE__, fmt "\n", __VA_ARGS__)

#undef eASSERT
#define eASSERT(env, expr) ENSURE(env, expr)

src/dpl.c

@@ -9,12 +9,10 @@ static inline size_t dpl_size2bytes(ptrdiff_t size) {
  size += size;
#endif /* MDBX_DPL_PREALLOC_FOR_RADIXSORT */
  STATIC_ASSERT(MDBX_ASSUME_MALLOC_OVERHEAD + sizeof(dpl_t) +
                    (PAGELIST_LIMIT * (MDBX_DPL_PREALLOC_FOR_RADIXSORT + 1)) * sizeof(dp_t) +
                    MDBX_PNL_GRANULATE * sizeof(void *) * 2 <
                SIZE_MAX / 4 * 3);
  size_t bytes = ceil_powerof2(MDBX_ASSUME_MALLOC_OVERHEAD + sizeof(dpl_t) + size * sizeof(dp_t),
                               MDBX_PNL_GRANULATE * sizeof(void *) * 2) -
                 MDBX_ASSUME_MALLOC_OVERHEAD;
  return bytes;
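
/* Illustration (not part of the commit): dpl_size2bytes() above rounds the requested dirty-page-list allocation
 * up to a granule boundary and then subtracts the assumed malloc bookkeeping overhead, so the real heap chunk
 * keeps a round size. The constants below are invented for the example and are not the values libmdbx uses. */
#include <stdio.h>

static size_t ceil_to_granule(size_t value, size_t granule /* must be a power of two */) {
  return (value + granule - 1) & ~(granule - 1);
}

int main(void) {
  const size_t malloc_overhead = 32; /* assumed allocator bookkeeping per chunk */
  const size_t header = 64, items = 1000, item_size = 16;
  const size_t raw = malloc_overhead + header + items * item_size;   /* 16096 */
  const size_t bytes = ceil_to_granule(raw, 4096) - malloc_overhead; /* 16384 - 32 = 16352 */
  printf("%zu items -> request %zu bytes from malloc (raw need %zu)\n", items, bytes, raw);
  return 0;
}
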
@@ -22,8 +20,7 @@ static inline size_t dpl_size2bytes(ptrdiff_t size) {

static inline size_t dpl_bytes2size(const ptrdiff_t bytes) {
  size_t size = (bytes - sizeof(dpl_t)) / sizeof(dp_t);
  assert(size > CURSOR_STACK_SIZE && size <= PAGELIST_LIMIT + MDBX_PNL_GRANULATE);
#if MDBX_DPL_PREALLOC_FOR_RADIXSORT
  size >>= 1;
#endif /* MDBX_DPL_PREALLOC_FOR_RADIXSORT */

@@ -41,8 +38,7 @@ dpl_t *dpl_reserve(MDBX_txn *txn, size_t size) {
  tASSERT(txn, (txn->flags & MDBX_TXN_RDONLY) == 0);
  tASSERT(txn, (txn->flags & MDBX_WRITEMAP) == 0 || MDBX_AVOID_MSYNC);

  size_t bytes = dpl_size2bytes((size < PAGELIST_LIMIT) ? size : PAGELIST_LIMIT);
  dpl_t *const dl = osal_realloc(txn->tw.dirtylist, bytes);
  if (likely(dl)) {
#if __GLIBC_PREREQ(2, 12) || defined(__FreeBSD__) || defined(malloc_usable_size)

@@ -59,16 +55,13 @@ int dpl_alloc(MDBX_txn *txn) {
  tASSERT(txn, (txn->flags & MDBX_TXN_RDONLY) == 0);
  tASSERT(txn, (txn->flags & MDBX_WRITEMAP) == 0 || MDBX_AVOID_MSYNC);

  const size_t wanna = (txn->env->options.dp_initial < txn->geo.upper) ? txn->env->options.dp_initial : txn->geo.upper;
#if MDBX_FORCE_ASSERTIONS || MDBX_DEBUG
  if (txn->tw.dirtylist)
    /* zero it out so that the assert inside dpl_reserve() does not fire */
    txn->tw.dirtylist->sorted = txn->tw.dirtylist->length = 0;
#endif /* assertions enabled */
  if (unlikely(!txn->tw.dirtylist || txn->tw.dirtylist->detent < wanna || txn->tw.dirtylist->detent > wanna + wanna) &&
      unlikely(!dpl_reserve(txn, wanna)))
    return MDBX_ENOMEM;

@@ -77,8 +70,7 @@ int dpl_alloc(MDBX_txn *txn) {
}

#define MDBX_DPL_EXTRACT_KEY(ptr) ((ptr)->pgno)
RADIXSORT_IMPL(dp, dp_t, MDBX_DPL_EXTRACT_KEY, MDBX_DPL_PREALLOC_FOR_RADIXSORT, 1)

#define DP_SORT_CMP(first, last) ((first).pgno < (last).pgno)
SORT_IMPL(dp_sort, false, dp_t, DP_SORT_CMP)

@@ -90,16 +82,13 @@ __hot __noinline dpl_t *dpl_sort_slowpath(const MDBX_txn *txn) {
  dpl_t *dl = txn->tw.dirtylist;
  assert(dl->items[0].pgno == 0 && dl->items[dl->length + 1].pgno == P_INVALID);
  const size_t unsorted = dl->length - dl->sorted;
  if (likely(unsorted < MDBX_RADIXSORT_THRESHOLD) || unlikely(!dp_radixsort(dl->items + 1, dl->length))) {
    if (dl->sorted > unsorted / 4 + 4 &&
        (MDBX_DPL_PREALLOC_FOR_RADIXSORT || dl->length + unsorted < dl->detent + dpl_gap_mergesort)) {
      dp_t *const sorted_begin = dl->items + 1;
      dp_t *const sorted_end = sorted_begin + dl->sorted;
      dp_t *const end =
          dl->items + (MDBX_DPL_PREALLOC_FOR_RADIXSORT ? dl->length + dl->length + 1 : dl->detent + dpl_reserve_gap);
      dp_t *const tmp = end - unsorted;
      assert(dl->items + dl->length + 1 < tmp);
      /* copy unsorted to the end of allocated space and sort it */

@@ -120,19 +109,16 @@ __hot __noinline dpl_t *dpl_sort_slowpath(const MDBX_txn *txn) {
#endif
      } while (likely(--w > l));
      assert(r == tmp - 1);
      assert(dl->items[0].pgno == 0 && dl->items[dl->length + 1].pgno == P_INVALID);
      if (ASSERT_ENABLED())
        for (size_t i = 0; i <= dl->length; ++i)
          assert(dl->items[i].pgno < dl->items[i + 1].pgno);
    } else {
      dp_sort(dl->items + 1, dl->items + dl->length + 1);
      assert(dl->items[0].pgno == 0 && dl->items[dl->length + 1].pgno == P_INVALID);
    }
  } else {
    assert(dl->items[0].pgno == 0 && dl->items[dl->length + 1].pgno == P_INVALID);
  }
  dl->sorted = dl->length;
  return dl;

@@ -143,8 +129,7 @@ __hot __noinline dpl_t *dpl_sort_slowpath(const MDBX_txn *txn) {

#define DP_SEARCH_CMP(dp, id) ((dp).pgno < (id))
SEARCH_IMPL(dp_bsearch, dp_t, pgno_t, DP_SEARCH_CMP)

__hot __noinline MDBX_INTERNAL size_t dpl_search(const MDBX_txn *txn, pgno_t pgno) {
  tASSERT(txn, (txn->flags & MDBX_TXN_RDONLY) == 0);
  tASSERT(txn, (txn->flags & MDBX_WRITEMAP) == 0 || MDBX_AVOID_MSYNC);

@@ -166,10 +151,10 @@ __hot __noinline MDBX_INTERNAL size_t dpl_search(const MDBX_txn *txn,
    /* whole sorted cases */
    break;

#define LINEAR_SEARCH_CASE(N) \
  case N: \
    if (dl->items[dl->length - N + 1].pgno == pgno) \
      return dl->length - N + 1; \
    __fallthrough

    /* use linear scan until the threshold */
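
/* Illustration (not part of the commit): dpl_search() above keeps the tail of the dirty list unsorted and scans
 * those few recent entries linearly before falling back to a binary search of the sorted prefix. The sketch
 * below shows the same shape with plain arrays; all names and the data are invented for the example. */
#include <stdio.h>
#include <stdlib.h>

static int cmp_uint(const void *a, const void *b) {
  const unsigned x = *(const unsigned *)a, y = *(const unsigned *)b;
  return (x > y) - (x < y);
}

/* items[0..sorted) is ascending, items[sorted..length) is the recently appended, unsorted tail */
static size_t find(const unsigned *items, size_t sorted, size_t length, unsigned key) {
  for (size_t i = length; i > sorted; --i) /* linear scan of the unsorted tail first */
    if (items[i - 1] == key)
      return i - 1;
  const unsigned *hit = bsearch(&key, items, sorted, sizeof(unsigned), cmp_uint);
  return hit ? (size_t)(hit - items) : length; /* `length` means "not found" */
}

int main(void) {
  const unsigned items[] = {2, 5, 9, 14, 42, 7, 33}; /* sorted prefix of 5, unsorted tail of 2 */
  printf("33 -> index %zu\n", find(items, 5, 7, 33));
  printf("9  -> index %zu\n", find(items, 5, 7, 9));
  return 0;
}
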
@@ -193,8 +178,7 @@ const page_t *debug_dpl_find(const MDBX_txn *txn, const pgno_t pgno) {
  const dpl_t *dl = txn->tw.dirtylist;
  if (dl) {
    tASSERT(txn, (txn->flags & MDBX_WRITEMAP) == 0 || MDBX_AVOID_MSYNC);
    assert(dl->items[0].pgno == 0 && dl->items[dl->length + 1].pgno == P_INVALID);
    for (size_t i = dl->length; i > dl->sorted; --i)
      if (dl->items[i].pgno == pgno)
        return dl->items[i].ptr;

@@ -220,13 +204,11 @@ void dpl_remove_ex(const MDBX_txn *txn, size_t i, size_t npages) {
  dl->pages_including_loose -= npages;
  dl->sorted -= dl->sorted >= i;
  dl->length -= 1;
  memmove(dl->items + i, dl->items + i + 1, (dl->length - i + 2) * sizeof(dl->items[0]));
  assert(dl->items[0].pgno == 0 && dl->items[dl->length + 1].pgno == P_INVALID);
}

int __must_check_result dpl_append(MDBX_txn *txn, pgno_t pgno, page_t *page, size_t npages) {
  tASSERT(txn, (txn->flags & MDBX_TXN_RDONLY) == 0);
  tASSERT(txn, (txn->flags & MDBX_WRITEMAP) == 0 || MDBX_AVOID_MSYNC);
  const dp_t dp = {page, pgno, (pgno_t)npages};

@@ -237,8 +219,7 @@ int __must_check_result dpl_append(MDBX_txn *txn, pgno_t pgno, page_t *page,
  dpl_t *dl = txn->tw.dirtylist;
  tASSERT(txn, dl->length <= PAGELIST_LIMIT + MDBX_PNL_GRANULATE);
  tASSERT(txn, dl->items[0].pgno == 0 && dl->items[dl->length + 1].pgno == P_INVALID);
  if (AUDIT_ENABLED()) {
    for (size_t i = dl->length; i > 0; --i) {
      assert(dl->items[i].pgno != dp.pgno);

@@ -254,9 +235,7 @@ int __must_check_result dpl_append(MDBX_txn *txn, pgno_t pgno, page_t *page,
      ERROR("DPL is full (PAGELIST_LIMIT %zu)", PAGELIST_LIMIT);
      return MDBX_TXN_FULL;
    }
    const size_t size = (dl->detent < MDBX_PNL_INITIAL * 42) ? dl->detent + dl->detent : dl->detent + dl->detent / 2;
    dl = dpl_reserve(txn, size);
    if (unlikely(!dl))
      return MDBX_ENOMEM;

@@ -288,10 +267,7 @@ int __must_check_result dpl_append(MDBX_txn *txn, pgno_t pgno, page_t *page,
  const ptrdiff_t pivot = (ptrdiff_t)dl->length - dpl_insertion_threshold;
#if MDBX_HAVE_CMOV
  const pgno_t pivot_pgno =
      dl->items[(dl->length < dpl_insertion_threshold) ? 0 : dl->length - dpl_insertion_threshold].pgno;
#endif /* MDBX_HAVE_CMOV */

  /* copy the stub beyond the end */

@@ -310,9 +286,7 @@ int __must_check_result dpl_append(MDBX_txn *txn, pgno_t pgno, page_t *page,
    while (i >= dl->items + dl->sorted) {
#if !defined(__GNUC__) /* trying to avoid a memmove() call */
      i[1] = *i;
#elif MDBX_WORDBITS == 64 && (defined(__SIZEOF_INT128__) || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128))
      STATIC_ASSERT(sizeof(dp) == sizeof(__uint128_t));
      ((__uint128_t *)i)[1] = *(volatile __uint128_t *)i;
#else

@@ -347,9 +321,8 @@ __cold bool dpl_check(MDBX_txn *txn) {
  tASSERT(txn, (txn->flags & MDBX_WRITEMAP) == 0 || MDBX_AVOID_MSYNC);
  assert(dl->items[0].pgno == 0 && dl->items[dl->length + 1].pgno == P_INVALID);
  tASSERT(txn,
          txn->tw.dirtyroom + dl->length == (txn->parent ? txn->parent->tw.dirtyroom : txn->env->options.dp_limit));

  if (!AUDIT_ENABLED())
    return true;

@@ -389,16 +362,12 @@ __cold bool dpl_check(MDBX_txn *txn) {
      return false;
    }

    const size_t rpa = pnl_search(txn->tw.relist, dp->pgno, txn->geo.first_unallocated);
    tASSERT(txn, rpa > MDBX_PNL_GETSIZE(txn->tw.relist) || txn->tw.relist[rpa] != dp->pgno);
    if (rpa <= MDBX_PNL_GETSIZE(txn->tw.relist) && unlikely(txn->tw.relist[rpa] == dp->pgno))
      return false;
    if (num > 1) {
      const size_t rpb = pnl_search(txn->tw.relist, dp->pgno + num - 1, txn->geo.first_unallocated);
      tASSERT(txn, rpa == rpb);
      if (unlikely(rpa != rpb))
        return false;

@@ -432,8 +401,7 @@ __noinline void dpl_lru_reduce(MDBX_txn *txn) {
    txn->tw.dirtylru >>= 1;
    dpl_t *dl = txn->tw.dirtylist;
    for (size_t i = 1; i <= dl->length; ++i) {
      size_t *const ptr = ptr_disp(dl->items[i].ptr, -(ptrdiff_t)sizeof(size_t));
      *ptr >>= 1;
    }
    txn = txn->parent;

@@ -444,8 +412,7 @@ void dpl_sift(MDBX_txn *const txn, pnl_t pl, const bool spilled) {
  tASSERT(txn, (txn->flags & MDBX_TXN_RDONLY) == 0);
  tASSERT(txn, (txn->flags & MDBX_WRITEMAP) == 0 || MDBX_AVOID_MSYNC);
  if (MDBX_PNL_GETSIZE(pl) && txn->tw.dirtylist->length) {
    tASSERT(txn, pnl_check_allocated(pl, (size_t)txn->geo.first_unallocated << spilled));

    dpl_t *dl = dpl_sort(txn);

    /* Scanning in ascend order */

@@ -501,8 +468,7 @@ void dpl_sift(MDBX_txn *const txn, pnl_t pl, const bool spilled) {
    dl->sorted = dpl_setlen(dl, w - 1);
    txn->tw.dirtyroom += r - w;
    tASSERT(txn, txn->tw.dirtyroom + txn->tw.dirtylist->length ==
                     (txn->parent ? txn->parent->tw.dirtyroom : txn->env->options.dp_limit));
    return;
  }
}


@@ -49,38 +49,32 @@ static inline dpl_t *dpl_sort(const MDBX_txn *txn) {
  dpl_t *dl = txn->tw.dirtylist;
  tASSERT(txn, dl->length <= PAGELIST_LIMIT);
  tASSERT(txn, dl->sorted <= dl->length);
  tASSERT(txn, dl->items[0].pgno == 0 && dl->items[dl->length + 1].pgno == P_INVALID);
  return likely(dl->sorted == dl->length) ? dl : dpl_sort_slowpath(txn);
}

MDBX_INTERNAL __noinline size_t dpl_search(const MDBX_txn *txn, pgno_t pgno);

MDBX_MAYBE_UNUSED MDBX_INTERNAL const page_t *debug_dpl_find(const MDBX_txn *txn, const pgno_t pgno);

MDBX_NOTHROW_PURE_FUNCTION static inline unsigned dpl_npages(const dpl_t *dl, size_t i) {
  assert(0 <= (intptr_t)i && i <= dl->length);
  unsigned n = dl->items[i].npages;
  assert(n == (is_largepage(dl->items[i].ptr) ? dl->items[i].ptr->pages : 1));
  return n;
}

MDBX_NOTHROW_PURE_FUNCTION static inline pgno_t dpl_endpgno(const dpl_t *dl, size_t i) {
  return dpl_npages(dl, i) + dl->items[i].pgno;
}

static inline bool dpl_intersect(const MDBX_txn *txn, pgno_t pgno, size_t npages) {
  tASSERT(txn, (txn->flags & MDBX_TXN_RDONLY) == 0);
  tASSERT(txn, (txn->flags & MDBX_WRITEMAP) == 0 || MDBX_AVOID_MSYNC);

  dpl_t *dl = txn->tw.dirtylist;
  tASSERT(txn, dl->sorted == dl->length);
  tASSERT(txn, dl->items[0].pgno == 0 && dl->items[dl->length + 1].pgno == P_INVALID);
  size_t const n = dpl_search(txn, pgno);
  tASSERT(txn, n >= 1 && n <= dl->length + 1);
  tASSERT(txn, pgno <= dl->items[n].pgno);

@@ -92,8 +86,7 @@ static inline bool dpl_intersect(const MDBX_txn *txn, pgno_t pgno,
    bool check = false;
    for (size_t i = 1; i <= dl->length; ++i) {
      const page_t *const dp = dl->items[i].ptr;
      if (!(dp->pgno /* begin */ >= /* end */ pgno + npages || dpl_endpgno(dl, i) /* end */ <= /* begin */ pgno))
        check |= true;
    }
    tASSERT(txn, check == rc);

@@ -101,8 +94,7 @@ static inline bool dpl_intersect(const MDBX_txn *txn, pgno_t pgno,
  return rc;
}

MDBX_NOTHROW_PURE_FUNCTION static inline size_t dpl_exist(const MDBX_txn *txn, pgno_t pgno) {
  tASSERT(txn, (txn->flags & MDBX_WRITEMAP) == 0 || MDBX_AVOID_MSYNC);
  dpl_t *dl = txn->tw.dirtylist;
  size_t i = dpl_search(txn, pgno);

@@ -116,13 +108,11 @@ static inline void dpl_remove(const MDBX_txn *txn, size_t i) {
  dpl_remove_ex(txn, i, dpl_npages(txn->tw.dirtylist, i));
}

MDBX_INTERNAL int __must_check_result dpl_append(MDBX_txn *txn, pgno_t pgno, page_t *page, size_t npages);

MDBX_MAYBE_UNUSED MDBX_INTERNAL bool dpl_check(MDBX_txn *txn);

MDBX_NOTHROW_PURE_FUNCTION static inline uint32_t dpl_age(const MDBX_txn *txn, size_t i) {
  tASSERT(txn, (txn->flags & (MDBX_TXN_RDONLY | MDBX_WRITEMAP)) == 0);
  const dpl_t *dl = txn->tw.dirtylist;
  assert((intptr_t)i > 0 && i <= dl->length);

@@ -134,8 +124,7 @@ MDBX_INTERNAL void dpl_lru_reduce(MDBX_txn *txn);
static inline uint32_t dpl_lru_turn(MDBX_txn *txn) {
  txn->tw.dirtylru += 1;
  if (unlikely(txn->tw.dirtylru > UINT32_MAX / 3) && (txn->flags & MDBX_WRITEMAP) == 0)
    dpl_lru_reduce(txn);
  return txn->tw.dirtylru;
}
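
/* Illustration (not part of the commit): dpl_intersect() above asks whether the page range [pgno, pgno+npages)
 * touches any dirty entry; the heart of it is the usual half-open interval overlap test sketched here with
 * invented names. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool ranges_overlap(uint32_t a_begin, uint32_t a_len, uint32_t b_begin, uint32_t b_len) {
  /* no overlap if and only if one range ends at or before the other begins */
  return !(a_begin >= b_begin + b_len || b_begin >= a_begin + a_len);
}

int main(void) {
  printf("%d\n", ranges_overlap(10, 4, 13, 2)); /* 1: [10,14) and [13,15) share page 13 */
  printf("%d\n", ranges_overlap(10, 4, 14, 2)); /* 0: [10,14) and [14,16) only touch */
  return 0;
}
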

src/dxb.c
File diff suppressed because it is too large

@@ -6,17 +6,11 @@
__cold static unsigned default_rp_augment_limit(const MDBX_env *env) {
  const size_t timeframe = /* 16 seconds */ 16 << 16;
  const size_t remain_1sec =
      (env->options.gc_time_limit < timeframe) ? timeframe - (size_t)env->options.gc_time_limit : 0;
  const size_t minimum = (env->maxgc_large1page * 2 > MDBX_PNL_INITIAL) ? env->maxgc_large1page * 2 : MDBX_PNL_INITIAL;
  const size_t one_third = env->geo_in_bytes.now / 3 >> env->ps2ln;
  const size_t augment_limit =
      (one_third > minimum) ? minimum + (one_third - minimum) / timeframe * remain_1sec : minimum;
  eASSERT(env, augment_limit < PAGELIST_LIMIT);
  return pnl_bytes2size(pnl_size2bytes(augment_limit));
}
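
/* Illustration (not part of the commit): a worked instance of the interpolation used in
 * default_rp_augment_limit() above — the limit grows linearly from a floor toward one third of the database
 * (counted in pages) in proportion to the per-second share of the GC time budget that remains. All numbers
 * below are invented for the example. */
#include <stdio.h>

int main(void) {
  const size_t timeframe = (size_t)16 << 16;    /* 16 seconds in 16.16 fixed point */
  const size_t gc_time_limit = (size_t)4 << 16; /* assume a 4-second limit is configured */
  const size_t remain_1sec = (gc_time_limit < timeframe) ? timeframe - gc_time_limit : 0;
  const size_t minimum = 8192, one_third = 10000000; /* pages; example values only */
  const size_t limit = (one_third > minimum) ? minimum + (one_third - minimum) / timeframe * remain_1sec : minimum;
  printf("augment limit = %zu pages\n", limit); /* 8192 + 9 * 786432 = 7086080 */
  return 0;
}
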
@@ -86,29 +80,23 @@ void env_options_adjust_defaults(MDBX_env *env) {
  const size_t basis = env->geo_in_bytes.now;
  /* TODO: use options? */
  const unsigned factor = 9;
  size_t threshold = (basis < ((size_t)65536 << factor))  ? 65536 /* minimal threshold */
                     : (basis > (MEGABYTE * 4 << factor)) ? MEGABYTE * 4 /* maximal threshold */
                                                          : basis >> factor;
  threshold =
      (threshold < env->geo_in_bytes.shrink || !env->geo_in_bytes.shrink) ? threshold : env->geo_in_bytes.shrink;

  env->madv_threshold = bytes2pgno(env, bytes_align2os_bytes(env, threshold));
}

//------------------------------------------------------------------------------

__cold int mdbx_env_set_option(MDBX_env *env, const MDBX_option_t option, uint64_t value) {
  int err = check_env(env, false);
  if (unlikely(err != MDBX_SUCCESS))
    return LOG_IFERR(err);

  const bool lock_needed = ((env->flags & ENV_ACTIVE) && env->basal_txn && !env_txn0_owned(env));
  bool should_unlock = false;
  switch (option) {
  case MDBX_opt_sync_bytes:

@@ -121,10 +109,8 @@ __cold int mdbx_env_set_option(MDBX_env *env, const MDBX_option_t option,
    if (unlikely(value > SIZE_MAX - 65536))
      return LOG_IFERR(MDBX_EINVAL);
    value = bytes2pgno(env, (size_t)value + env->ps - 1);
    if ((uint32_t)value != atomic_load32(&env->lck->autosync_threshold, mo_AcquireRelease) &&
        atomic_store32(&env->lck->autosync_threshold, (uint32_t)value, mo_Relaxed)
        /* Trigger sync(force=off) only when a new non-zero value has been set
         * and we are outside of a transaction */
        && lock_needed) {

@@ -248,8 +234,7 @@ __cold int mdbx_env_set_option(MDBX_env *env, const MDBX_option_t option,
      err = MDBX_EPERM /* unable to change during transaction */;
    else {
      const pgno_t value32 = (pgno_t)value;
      if (option == MDBX_opt_txn_dp_initial && env->options.dp_initial != value32) {
        env->options.dp_initial = value32;
        if (env->options.dp_limit < value32) {
          env->options.dp_limit = value32;

@@ -308,8 +293,7 @@ __cold int mdbx_env_set_option(MDBX_env *env, const MDBX_option_t option,
#if defined(_WIN32) || defined(_WIN64)
    /* allow "setting" the default value, as well as a value matching the
     * behaviour implied by the current MDBX_NOMETASYNC setting */
    if (value == /* default */ UINT64_MAX && value != ((env->flags & MDBX_NOMETASYNC) ? 0 : UINT_MAX))
      err = MDBX_EINVAL;
#else
    if (value == /* default */ UINT64_MAX)

@@ -335,8 +319,7 @@ __cold int mdbx_env_set_option(MDBX_env *env, const MDBX_option_t option,
  case MDBX_opt_prefer_waf_insteadof_balance:
    if (value == /* default */ UINT64_MAX)
      env->options.prefer_waf_insteadof_balance = default_prefer_waf_insteadof_balance(env);
    else if (value > 1)
      err = MDBX_EINVAL;
    else

@@ -400,8 +383,7 @@ __cold int mdbx_env_set_option(MDBX_env *env, const MDBX_option_t option,
  return LOG_IFERR(err);
}

__cold int mdbx_env_get_option(const MDBX_env *env, const MDBX_option_t option, uint64_t *pvalue) {
  int err = check_env(env, false);
  if (unlikely(err != MDBX_SUCCESS))
    return LOG_IFERR(err);

@@ -412,15 +394,13 @@ __cold int mdbx_env_get_option(const MDBX_env *env, const MDBX_option_t option,
  case MDBX_opt_sync_bytes:
    if (unlikely(!(env->flags & ENV_ACTIVE)))
      return LOG_IFERR(MDBX_EPERM);
    *pvalue = pgno2bytes(env, atomic_load32(&env->lck->autosync_threshold, mo_Relaxed));
    break;

  case MDBX_opt_sync_period:
    if (unlikely(!(env->flags & ENV_ACTIVE)))
      return LOG_IFERR(MDBX_EPERM);
    *pvalue = osal_monotime_to_16dot16(atomic_load64(&env->lck->autosync_period, mo_Relaxed));
    break;

  case MDBX_opt_max_db:

src/env.c

@@ -4,17 +4,14 @@

#include "internals.h"

bool env_txn0_owned(const MDBX_env *env) {
  return (env->flags & MDBX_NOSTICKYTHREADS) ? (env->basal_txn->owner != 0)
                                              : (env->basal_txn->owner == osal_thread_self());
}

int env_page_auxbuffer(MDBX_env *env) {
  const int err = env->page_auxbuf
                      ? MDBX_SUCCESS
                      : osal_memalign_alloc(globals.sys_pagesize, env->ps * (size_t)NUM_METAS, &env->page_auxbuf);
  if (likely(err == MDBX_SUCCESS)) {
    memset(env->page_auxbuf, -1, env->ps * (size_t)2);
    memset(ptr_disp(env->page_auxbuf, env->ps * (size_t)2), 0, env->ps);

@@ -34,26 +31,19 @@ __cold unsigned env_setup_pagesize(MDBX_env *env, const size_t pagesize) {
  STATIC_ASSERT(MAX_GC1OVPAGE(MDBX_MIN_PAGESIZE) > 4);
  STATIC_ASSERT(MAX_GC1OVPAGE(MDBX_MAX_PAGESIZE) < PAGELIST_LIMIT);
  const intptr_t maxgc_ov1page = (pagesize - PAGEHDRSZ) / sizeof(pgno_t) - 1;
  ENSURE(env, maxgc_ov1page > 42 && maxgc_ov1page < (intptr_t)PAGELIST_LIMIT / 4);
  env->maxgc_large1page = (unsigned)maxgc_ov1page;
  env->maxgc_per_branch = (unsigned)((pagesize - PAGEHDRSZ) / (sizeof(indx_t) + sizeof(node_t) + sizeof(txnid_t)));

  STATIC_ASSERT(LEAF_NODE_MAX(MDBX_MIN_PAGESIZE) > sizeof(tree_t) + NODESIZE + 42);
  STATIC_ASSERT(LEAF_NODE_MAX(MDBX_MAX_PAGESIZE) < UINT16_MAX);
  STATIC_ASSERT(LEAF_NODE_MAX(MDBX_MIN_PAGESIZE) >= BRANCH_NODE_MAX(MDBX_MIN_PAGESIZE));
  STATIC_ASSERT(BRANCH_NODE_MAX(MDBX_MAX_PAGESIZE) > NODESIZE + 42);
  STATIC_ASSERT(BRANCH_NODE_MAX(MDBX_MAX_PAGESIZE) < UINT16_MAX);
  const intptr_t branch_nodemax = BRANCH_NODE_MAX(pagesize);
  const intptr_t leaf_nodemax = LEAF_NODE_MAX(pagesize);
  ENSURE(env, branch_nodemax > (intptr_t)(NODESIZE + 42) && branch_nodemax % 2 == 0 &&
                  leaf_nodemax > (intptr_t)(sizeof(tree_t) + NODESIZE + 42) && leaf_nodemax >= branch_nodemax &&
                  leaf_nodemax < (int)UINT16_MAX && leaf_nodemax % 2 == 0);
  env->leaf_nodemax = (uint16_t)leaf_nodemax;
  env->branch_nodemax = (uint16_t)branch_nodemax;

@@ -71,18 +61,14 @@ __cold unsigned env_setup_pagesize(MDBX_env *env, const size_t pagesize) {
    if (unlikely(err != MDBX_SUCCESS))
      ERROR("mdbx_get_sysraminfo(), rc %d", err);
    else {
      size_t reasonable_dpl_limit = (size_t)(total_ram_pages + avail_ram_pages) / 42;
      if (pagesize > globals.sys_pagesize)
        reasonable_dpl_limit /= pagesize / globals.sys_pagesize;
      else if (pagesize < globals.sys_pagesize)
        reasonable_dpl_limit *= globals.sys_pagesize / pagesize;
      reasonable_dpl_limit = (reasonable_dpl_limit < PAGELIST_LIMIT) ? reasonable_dpl_limit : PAGELIST_LIMIT;
      reasonable_dpl_limit =
          (reasonable_dpl_limit > CURSOR_STACK_SIZE * 4) ? reasonable_dpl_limit : CURSOR_STACK_SIZE * 4;
      env->options.dp_limit = (unsigned)reasonable_dpl_limit;
    }
  }

@@ -108,46 +94,36 @@ retry:;
    goto bailout;
  }

  const troika_t troika = (txn0_owned | should_unlock) ? env->basal_txn->tw.troika : meta_tap(env);
  const meta_ptr_t head = meta_recent(env, &troika);
  const uint64_t unsynced_pages = atomic_load64(&env->lck->unsynced_pages, mo_Relaxed);
  if (unsynced_pages == 0) {
    const uint32_t synched_meta_txnid_u32 = atomic_load32(&env->lck->meta_sync_txnid, mo_Relaxed);
    if (synched_meta_txnid_u32 == (uint32_t)head.txnid && head.is_steady)
      goto bailout;
  }

  if (should_unlock && (env->flags & MDBX_WRITEMAP) &&
      unlikely(head.ptr_c->geometry.first_unallocated > bytes2pgno(env, env->dxb_mmap.current))) {

    if (unlikely(env->stuck_meta >= 0) && troika.recent != (uint8_t)env->stuck_meta) {
      NOTICE("skip %s since wagering meta-page (%u) is mispatch the recent "
             "meta-page (%u)",
             "sync datafile", env->stuck_meta, troika.recent);
      rc = MDBX_RESULT_TRUE;
    } else {
      rc = dxb_resize(env, head.ptr_c->geometry.first_unallocated, head.ptr_c->geometry.now, head.ptr_c->geometry.upper,
                      implicit_grow);
      if (unlikely(rc != MDBX_SUCCESS))
        goto bailout;
    }
  }

  const size_t autosync_threshold = atomic_load32(&env->lck->autosync_threshold, mo_Relaxed);
  const uint64_t autosync_period = atomic_load64(&env->lck->autosync_period, mo_Relaxed);
  uint64_t eoos_timestamp;
  if (force || (autosync_threshold && unsynced_pages >= autosync_threshold) ||
      (autosync_period && (eoos_timestamp = atomic_load64(&env->lck->eoos_timestamp, mo_Relaxed)) &&
       osal_monotime() - eoos_timestamp >= autosync_period))
    flags &= MDBX_WRITEMAP /* clear flags for full steady sync */;
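
/* Illustration (not part of the commit): the trigger above fires when a sync is forced, when the number of
 * unsynced pages reaches the configured threshold, or when the configured period has elapsed since the
 * database first became out-of-sync. A tiny boolean model of that decision with invented units: */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool need_steady_sync(bool force, uint64_t unsynced_pages, uint64_t threshold_pages, uint64_t now,
                             uint64_t eoos_timestamp, uint64_t period) {
  return force || (threshold_pages && unsynced_pages >= threshold_pages) ||
         (period && eoos_timestamp && now - eoos_timestamp >= period);
}

int main(void) {
  printf("%d\n", need_steady_sync(false, 500, 1000, 170, 100, 50)); /* 1: 70 ticks out-of-sync >= period 50 */
  printf("%d\n", need_steady_sync(false, 500, 1000, 120, 100, 50)); /* 0: neither threshold nor period reached */
  return 0;
}
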
@@ -159,8 +135,7 @@ retry:;
    int err;
    /* pre-sync to avoid latency for writer */
    if (unsynced_pages > /* FIXME: define threshold */ 42 && (flags & MDBX_SAFE_NOSYNC) == 0) {
      eASSERT(env, ((flags ^ env->flags) & MDBX_WRITEMAP) == 0);
      if (flags & MDBX_WRITEMAP) {
        /* Acquire guard to avoid collision with remap */

@@ -171,8 +146,7 @@ retry:;
        if (unlikely(err != MDBX_SUCCESS))
          return err;
#endif
        const size_t usedbytes = pgno_align2os_bytes(env, head.ptr_c->geometry.first_unallocated);
        err = osal_msync(&env->dxb_mmap, 0, usedbytes, MDBX_SYNC_DATA);
#if defined(_WIN32) || defined(_WIN64)
        imports.srwl_ReleaseShared(&env->remap_guard);

@@ -215,8 +189,7 @@ retry:;
  eASSERT(env, txn0_owned || should_unlock);
  eASSERT(env, !txn0_owned || (flags & txn_shrink_allowed) == 0);

  if (!head.is_steady && unlikely(env->stuck_meta >= 0) && troika.recent != (uint8_t)env->stuck_meta) {
    NOTICE("skip %s since wagering meta-page (%u) is mispatch the recent "
           "meta-page (%u)",
           "sync datafile", env->stuck_meta, troika.recent);

@@ -224,9 +197,8 @@ retry:;
    goto bailout;
  }
  if (!head.is_steady || ((flags & MDBX_SAFE_NOSYNC) == 0 && unsynced_pages)) {
    DEBUG("meta-head %" PRIaPGNO ", %s, sync_pending %" PRIu64, data_page(head.ptr_c)->pgno,
          durable_caption(head.ptr_c), unsynced_pages);
    meta_t meta = *head.ptr_c;
    rc = dxb_sync_locked(env, flags, &meta, &env->basal_txn->tw.troika);
    if (unlikely(rc != MDBX_SUCCESS))

@@ -235,8 +207,7 @@ retry:;
  /* LY: sync meta-pages if MDBX_NOMETASYNC enabled
   * and someone was not synced above. */
  if (atomic_load32(&env->lck->meta_sync_txnid, mo_Relaxed) != (uint32_t)head.txnid)
    rc = meta_sync(env, head);

bailout:

@@ -334,9 +305,8 @@ __cold int env_open(MDBX_env *env, mdbx_mode_t mode) {
   */
  env->pid = osal_getpid();
  int rc = osal_openfile((env->flags & MDBX_RDONLY) ? MDBX_OPEN_DXB_READ : MDBX_OPEN_DXB_LAZY, env, env->pathname.dxb,
                         &env->lazy_fd, mode);
  if (unlikely(rc != MDBX_SUCCESS))
    return rc;

@@ -355,8 +325,7 @@ __cold int env_open(MDBX_env *env, mdbx_mode_t mode) {
#if defined(_WIN32) || defined(_WIN64)
  eASSERT(env, env->ioring.overlapped_fd == 0);
  bool ior_direct = false;
  if (!(env->flags & (MDBX_RDONLY | MDBX_SAFE_NOSYNC | MDBX_NOMETASYNC | MDBX_EXCLUSIVE))) {
    if (MDBX_AVOID_MSYNC && (env->flags & MDBX_WRITEMAP)) {
      /* The MDBX_SYNC_DURABLE | MDBX_WRITEMAP mode was requested while the
       * MDBX_AVOID_MSYNC option is active.

@@ -383,8 +352,7 @@ __cold int env_open(MDBX_env *env, mdbx_mode_t mode) {
      int err = dxb_read_header(env, &header, MDBX_SUCCESS, true);
      if ((err == MDBX_SUCCESS && header.pagesize >= globals.sys_pagesize) ||
          (err == MDBX_ENODATA && mode && env->ps >= globals.sys_pagesize &&
           osal_filesize(env->lazy_fd, &dxb_filesize) == MDBX_SUCCESS && dxb_filesize == 0))
        /* A collision is possible if two processes try to create the DB at the
         * same time with different page sizes, where one is smaller than the
         * system page and the other is NOT smaller. This is a permissible but very odd

@@ -392,9 +360,8 @@ __cold int env_open(MDBX_env *env, mdbx_mode_t mode) {
        ior_direct = true;
    }

    rc = osal_openfile(ior_direct ? MDBX_OPEN_DXB_OVERLAPPED_DIRECT : MDBX_OPEN_DXB_OVERLAPPED, env, env->pathname.dxb,
                       &env->ioring.overlapped_fd, 0);
    if (unlikely(rc != MDBX_SUCCESS))
      return rc;
    env->dxb_lock_event = CreateEventW(nullptr, true, false, nullptr);

@@ -410,8 +377,7 @@ __cold int env_open(MDBX_env *env, mdbx_mode_t mode) {
      return errno;
    mode = st.st_mode;
  }
  mode = (/* inherit read permissions for group and others */ mode & (S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH)) |
         /* always add read/write for owner */ S_IRUSR | S_IWUSR |
         ((mode & S_IRGRP) ? /* +write if readable by group */ S_IWGRP : 0) |
         ((mode & S_IROTH) ? /* +write if readable by others */ S_IWOTH : 0);
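
/* Illustration (not part of the commit): the permission derivation above applied to a concrete case — an
 * existing 0640 data file yields 0660 (owner always gets read/write, group read implies group write, "others"
 * stay without access). Plain POSIX constants, nothing libmdbx-specific. */
#include <stdio.h>
#include <sys/stat.h>

int main(void) {
  mode_t mode = 0640; /* assume the existing data file is owner rw, group r */
  mode = (mode & (S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH)) | S_IRUSR | S_IWUSR |
         ((mode & S_IRGRP) ? S_IWGRP : 0) | ((mode & S_IROTH) ? S_IWOTH : 0);
  printf("derived mode = %o\n", (unsigned)mode); /* prints 660 */
  return 0;
}
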
@@ -428,8 +394,7 @@ __cold int env_open(MDBX_env *env, mdbx_mode_t mode) {
                 | MDBX_EXCLUSIVE
#endif /* !Windows */
                 ))) {
    rc = osal_openfile(MDBX_OPEN_DXB_DSYNC, env, env->pathname.dxb, &env->dsync_fd, 0);
    if (unlikely(MDBX_IS_ERROR(rc)))
      return rc;
    if (env->dsync_fd != INVALID_HANDLE_VALUE) {

@@ -439,19 +404,14 @@ __cold int env_open(MDBX_env *env, mdbx_mode_t mode) {
    }
  }

  const MDBX_env_flags_t lazy_flags = MDBX_SAFE_NOSYNC | MDBX_UTTERLY_NOSYNC | MDBX_NOMETASYNC;
  const MDBX_env_flags_t mode_flags = lazy_flags | MDBX_LIFORECLAIM | MDBX_NORDAHEAD | MDBX_RDONLY | MDBX_WRITEMAP;

  lck_t *const lck = env->lck_mmap.lck;
  if (lck && lck_rc != MDBX_RESULT_TRUE && (env->flags & MDBX_RDONLY) == 0) {
    MDBX_env_flags_t snap_flags;
    while ((snap_flags = atomic_load32(&lck->envmode, mo_AcquireRelease)) == MDBX_RDONLY) {
      if (atomic_cas32(&lck->envmode, MDBX_RDONLY, (snap_flags = (env->flags & mode_flags)))) {
        /* The case:
         * - let's assume that for some reason the DB file is smaller
         * than it should be according to the geometry,

@@ -471,12 +431,9 @@ __cold int env_open(MDBX_env *env, mdbx_mode_t mode) {
    if (env->flags & MDBX_ACCEDE) {
      /* Pickup current mode-flags (MDBX_LIFORECLAIM, MDBX_NORDAHEAD, etc). */
      const MDBX_env_flags_t diff =
          (snap_flags ^ env->flags) & ((snap_flags & lazy_flags) ? mode_flags : mode_flags & ~MDBX_WRITEMAP);
      env->flags ^= diff;
      NOTICE("accede mode-flags: 0x%X, 0x%X -> 0x%X", diff, env->flags ^ diff, env->flags);
    }

    /* A previously overlooked and non-obvious point: when the DB operates in modes

@@ -498,12 +455,10 @@ __cold int env_open(MDBX_env *env, mdbx_mode_t mode) {
     * As a result, it is necessary either to forbid processes with different
     * MDBX_WRITEMAP settings from cooperating in lazy-write mode, or to track
     * such mixing and block steady marks - which is counterproductive. */
    const MDBX_env_flags_t rigorous_flags = (snap_flags & lazy_flags)
                                                ? MDBX_SAFE_NOSYNC | MDBX_UTTERLY_NOSYNC | MDBX_WRITEMAP
                                                : MDBX_SAFE_NOSYNC | MDBX_UTTERLY_NOSYNC;
    const MDBX_env_flags_t rigorous_diff = (snap_flags ^ env->flags) & rigorous_flags;
    if (rigorous_diff) {
      ERROR("current mode/flags 0x%X incompatible with requested 0x%X, "
            "rigorous diff 0x%X",

@@ -529,8 +484,7 @@ __cold int env_open(MDBX_env *env, mdbx_mode_t mode) {
  }

  if (unlikely(/* recovery mode */ env->stuck_meta >= 0) &&
      (lck_rc != /* exclusive */ MDBX_RESULT_TRUE || (env->flags & MDBX_EXCLUSIVE) == 0)) {
    ERROR("%s", "recovery requires exclusive mode");
    return MDBX_BUSY;
  }

@@ -545,8 +499,7 @@ __cold int env_open(MDBX_env *env, mdbx_mode_t mode) {
  if (lck) {
    if (lck_rc == MDBX_RESULT_TRUE) {
      rc = lck_downgrade(env);
      DEBUG("lck-downgrade-%s: rc %i", (env->flags & MDBX_EXCLUSIVE) ? "partial" : "full", rc);
      if (rc != MDBX_SUCCESS)
        return rc;
    } else {

@@ -556,14 +509,13 @@ __cold int env_open(MDBX_env *env, mdbx_mode_t mode) {
    }
  }

  rc = (env->flags & MDBX_RDONLY) ? MDBX_SUCCESS
                                  : osal_ioring_create(&env->ioring
#if defined(_WIN32) || defined(_WIN64)
                                                       ,
                                                       ior_direct, env->ioring.overlapped_fd
#endif /* Windows */
                                    );
  return rc;
}

@@ -606,8 +558,7 @@ __cold int env_close(MDBX_env *env, bool resurrect_after_fork) {
  }

#if defined(_WIN32) || defined(_WIN64)
  eASSERT(env, !env->ioring.overlapped_fd || env->ioring.overlapped_fd == INVALID_HANDLE_VALUE);
  if (env->dxb_lock_event != INVALID_HANDLE_VALUE) {
    CloseHandle(env->dxb_lock_event);
    env->dxb_lock_event = INVALID_HANDLE_VALUE;


@@ -110,27 +110,22 @@ extern struct libmdbx_imports imports;
extern LIBMDBX_API const char *const mdbx_sourcery_anchor;
#endif

#define MDBX_IS_ERROR(rc) ((rc) != MDBX_RESULT_TRUE && (rc) != MDBX_RESULT_FALSE)

/*----------------------------------------------------------------------------*/

MDBX_NOTHROW_CONST_FUNCTION MDBX_MAYBE_UNUSED static inline pgno_t int64pgno(int64_t i64) {
  if (likely(i64 >= (int64_t)MIN_PAGENO && i64 <= (int64_t)MAX_PAGENO + 1))
    return (pgno_t)i64;
  return (i64 < (int64_t)MIN_PAGENO) ? MIN_PAGENO : MAX_PAGENO;
}

MDBX_NOTHROW_CONST_FUNCTION MDBX_MAYBE_UNUSED static inline pgno_t pgno_add(size_t base, size_t augend) {
  assert(base <= MAX_PAGENO + 1 && augend < MAX_PAGENO);
  return int64pgno((int64_t)base + (int64_t)augend);
}

MDBX_NOTHROW_CONST_FUNCTION MDBX_MAYBE_UNUSED static inline pgno_t pgno_sub(size_t base, size_t subtrahend) {
  assert(base >= MIN_PAGENO && base <= MAX_PAGENO + 1 && subtrahend < MAX_PAGENO);
  return int64pgno((int64_t)base - (int64_t)subtrahend);
}
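A minimal standalone sketch (not part of the commit) of how the clamped page-number helpers above behave; pgno_t, MIN_PAGENO and MAX_PAGENO here are hypothetical stand-ins for the values defined in the libmdbx internal headers:

#include <assert.h>
#include <stdint.h>

typedef uint32_t pgno_t;
#define MIN_PAGENO 0u
#define MAX_PAGENO 0x7fffFFFFu /* hypothetical limit, for illustration only */

/* same saturation rule as int64pgno() above */
static pgno_t clamp_pgno(int64_t i64) {
  if (i64 >= (int64_t)MIN_PAGENO && i64 <= (int64_t)MAX_PAGENO + 1)
    return (pgno_t)i64;
  return (i64 < (int64_t)MIN_PAGENO) ? MIN_PAGENO : MAX_PAGENO;
}

int main(void) {
  assert(clamp_pgno((int64_t)1000 + 24) == 1024);             /* in-range result passes through */
  assert(clamp_pgno((int64_t)MAX_PAGENO + 42) == MAX_PAGENO); /* overflow saturates upward */
  assert(clamp_pgno((int64_t)5 - 10) == MIN_PAGENO);          /* underflow saturates downward */
  return 0;
}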


@@ -52,8 +52,7 @@ static bool mincore_fetch(MDBX_env *const env, const size_t unit_begin) {
  env->lck->pgops.mincore.weak += 1;
#endif /* MDBX_ENABLE_PGOP_STAT */
  uint8_t *const vector = alloca(pages);
  if (unlikely(mincore(ptr_disp(env->dxb_mmap.base, offset), length, (void *)vector))) {
    NOTICE("mincore(+%zu, %zu), err %d", offset, length, errno);
    return false;
  }
@@ -79,14 +78,10 @@ static bool mincore_fetch(MDBX_env *const env, const size_t unit_begin) {
}
#endif /* MDBX_USE_MINCORE */

MDBX_MAYBE_UNUSED static inline bool mincore_probe(MDBX_env *const env, const pgno_t pgno) {
#if MDBX_USE_MINCORE
  const size_t offset_aligned = floor_powerof2(pgno2bytes(env, pgno), globals.sys_pagesize);
  const unsigned unit_log2 = (env->ps2ln > globals.sys_pagesize_ln2) ? env->ps2ln : globals.sys_pagesize_ln2;
  const size_t unit_begin = offset_aligned >> unit_log2;
  eASSERT(env, (unit_begin << unit_log2) == offset_aligned);
  const ptrdiff_t dist = unit_begin - env->lck->mincore_cache.begin[0];
@@ -102,8 +97,7 @@ MDBX_MAYBE_UNUSED static inline bool mincore_probe(MDBX_env *const env,
/*----------------------------------------------------------------------------*/

MDBX_MAYBE_UNUSED __hot static pgno_t *scan4seq_fallback(pgno_t *range, const size_t len, const size_t seq) {
  assert(seq > 0 && len > seq);
#if MDBX_PNL_ASCENDING
  assert(range[-1] == len);
@@ -167,8 +161,7 @@ scan4seq_fallback(pgno_t *range, const size_t len, const size_t seq) {
  return nullptr;
}

MDBX_MAYBE_UNUSED static const pgno_t *scan4range_checker(const pnl_t pnl, const size_t seq) {
  size_t begin = MDBX_PNL_ASCENDING ? 1 : MDBX_PNL_GETSIZE(pnl);
#if MDBX_PNL_ASCENDING
  while (seq <= MDBX_PNL_GETSIZE(pnl) - begin) {
@@ -186,8 +179,7 @@ MDBX_MAYBE_UNUSED static const pgno_t *scan4range_checker(const pnl_t pnl,
  return nullptr;
}

#if defined(_MSC_VER) && !defined(__builtin_clz) && !__has_builtin(__builtin_clz)
MDBX_MAYBE_UNUSED static __always_inline size_t __builtin_clz(uint32_t value) {
  unsigned long index;
  _BitScanReverse(&index, value);
@@ -195,8 +187,7 @@ MDBX_MAYBE_UNUSED static __always_inline size_t __builtin_clz(uint32_t value) {
}
#endif /* _MSC_VER */

#if defined(_MSC_VER) && !defined(__builtin_clzl) && !__has_builtin(__builtin_clzl)
MDBX_MAYBE_UNUSED static __always_inline size_t __builtin_clzl(size_t value) {
  unsigned long index;
#ifdef _WIN64
@@ -213,8 +204,7 @@ MDBX_MAYBE_UNUSED static __always_inline size_t __builtin_clzl(size_t value) {
#if !MDBX_PNL_ASCENDING

#if !defined(MDBX_ATTRIBUTE_TARGET) && (__has_attribute(__target__) || __GNUC_PREREQ(5, 0))
#define MDBX_ATTRIBUTE_TARGET(target) __attribute__((__target__(target)))
#endif /* MDBX_ATTRIBUTE_TARGET */
@@ -223,9 +213,8 @@ MDBX_MAYBE_UNUSED static __always_inline size_t __builtin_clzl(size_t value) {
 * gcc/i686-buildroot-linux-gnu/12.2.0/include/xmmintrin.h:814:1:
 * error: inlining failed in call to 'always_inline' '_mm_movemask_ps':
 * target specific option mismatch */
#if !defined(__FAST_MATH__) || !__FAST_MATH__ || !defined(__GNUC__) || defined(__e2k__) || defined(__clang__) || \
    defined(__amd64__) || defined(__SSE2__)
#define MDBX_GCC_FASTMATH_i686_SIMD_WORKAROUND 0
#else
#define MDBX_GCC_FASTMATH_i686_SIMD_WORKAROUND 1
@@ -237,41 +226,36 @@ MDBX_MAYBE_UNUSED static __always_inline size_t __builtin_clzl(size_t value) {
#elif (defined(_M_IX86_FP) && _M_IX86_FP >= 2) || defined(__amd64__)
#define __SSE2__
#define MDBX_ATTRIBUTE_TARGET_SSE2 /* nope */
#elif defined(MDBX_ATTRIBUTE_TARGET) && defined(__ia32__) && !MDBX_GCC_FASTMATH_i686_SIMD_WORKAROUND
#define MDBX_ATTRIBUTE_TARGET_SSE2 MDBX_ATTRIBUTE_TARGET("sse,sse2")
#endif /* __SSE2__ */

#if defined(__AVX2__)
#define MDBX_ATTRIBUTE_TARGET_AVX2 /* nope */
#elif defined(MDBX_ATTRIBUTE_TARGET) && defined(__ia32__) && !MDBX_GCC_FASTMATH_i686_SIMD_WORKAROUND
#define MDBX_ATTRIBUTE_TARGET_AVX2 MDBX_ATTRIBUTE_TARGET("sse,sse2,avx,avx2")
#endif /* __AVX2__ */

#if defined(MDBX_ATTRIBUTE_TARGET_AVX2)
#if defined(__AVX512BW__)
#define MDBX_ATTRIBUTE_TARGET_AVX512BW /* nope */
#elif defined(MDBX_ATTRIBUTE_TARGET) && defined(__ia32__) && !MDBX_GCC_FASTMATH_i686_SIMD_WORKAROUND && \
    (__GNUC_PREREQ(6, 0) || __CLANG_PREREQ(5, 0))
#define MDBX_ATTRIBUTE_TARGET_AVX512BW MDBX_ATTRIBUTE_TARGET("sse,sse2,avx,avx2,avx512bw")
#endif /* __AVX512BW__ */
#endif /* MDBX_ATTRIBUTE_TARGET_AVX2 for MDBX_ATTRIBUTE_TARGET_AVX512BW */

#ifdef MDBX_ATTRIBUTE_TARGET_SSE2
MDBX_ATTRIBUTE_TARGET_SSE2 static __always_inline unsigned
diffcmp2mask_sse2(const pgno_t *const ptr, const ptrdiff_t offset, const __m128i pattern) {
  const __m128i f = _mm_loadu_si128((const __m128i *)ptr);
  const __m128i l = _mm_loadu_si128((const __m128i *)(ptr + offset));
  const __m128i cmp = _mm_cmpeq_epi32(_mm_sub_epi32(f, l), pattern);
  return _mm_movemask_ps(*(const __m128 *)&cmp);
}

MDBX_MAYBE_UNUSED __hot MDBX_ATTRIBUTE_TARGET_SSE2 static pgno_t *scan4seq_sse2(pgno_t *range, const size_t len,
                                                                                const size_t seq) {
  assert(seq > 0 && len > seq);
#if MDBX_PNL_ASCENDING
#error "FIXME: Not implemented"
@@ -303,8 +287,7 @@ scan4seq_sse2(pgno_t *range, const size_t len, const size_t seq) {
   * Поэтому проверяем смещение на странице, а с ASAN всегда страхуемся. */
#if !defined(ENABLE_MEMCHECK) && !defined(__SANITIZE_ADDRESS__)
  const unsigned on_page_safe_mask = 0xff0 /* enough for '-15' bytes offset */;
  if (likely(on_page_safe_mask & (uintptr_t)(range + offset)) && !RUNNING_ON_VALGRIND) {
    const unsigned extra = (unsigned)(detent + 4 - range);
    assert(extra > 0 && extra < 4);
    mask = 0xF << extra;
@@ -324,8 +307,7 @@ scan4seq_sse2(pgno_t *range, const size_t len, const size_t seq) {
#ifdef MDBX_ATTRIBUTE_TARGET_AVX2
MDBX_ATTRIBUTE_TARGET_AVX2 static __always_inline unsigned
diffcmp2mask_avx2(const pgno_t *const ptr, const ptrdiff_t offset, const __m256i pattern) {
  const __m256i f = _mm256_loadu_si256((const __m256i *)ptr);
  const __m256i l = _mm256_loadu_si256((const __m256i *)(ptr + offset));
  const __m256i cmp = _mm256_cmpeq_epi32(_mm256_sub_epi32(f, l), pattern);
@@ -333,16 +315,15 @@ diffcmp2mask_avx2(const pgno_t *const ptr, const ptrdiff_t offset,
}

MDBX_ATTRIBUTE_TARGET_AVX2 static __always_inline unsigned
diffcmp2mask_sse2avx(const pgno_t *const ptr, const ptrdiff_t offset, const __m128i pattern) {
  const __m128i f = _mm_loadu_si128((const __m128i *)ptr);
  const __m128i l = _mm_loadu_si128((const __m128i *)(ptr + offset));
  const __m128i cmp = _mm_cmpeq_epi32(_mm_sub_epi32(f, l), pattern);
  return _mm_movemask_ps(*(const __m128 *)&cmp);
}

MDBX_MAYBE_UNUSED __hot MDBX_ATTRIBUTE_TARGET_AVX2 static pgno_t *scan4seq_avx2(pgno_t *range, const size_t len,
                                                                                const size_t seq) {
  assert(seq > 0 && len > seq);
#if MDBX_PNL_ASCENDING
#error "FIXME: Not implemented"
@@ -374,8 +355,7 @@ scan4seq_avx2(pgno_t *range, const size_t len, const size_t seq) {
   * Поэтому проверяем смещение на странице, а с ASAN всегда страхуемся. */
#if !defined(ENABLE_MEMCHECK) && !defined(__SANITIZE_ADDRESS__)
  const unsigned on_page_safe_mask = 0xfe0 /* enough for '-31' bytes offset */;
  if (likely(on_page_safe_mask & (uintptr_t)(range + offset)) && !RUNNING_ON_VALGRIND) {
    const unsigned extra = (unsigned)(detent + 8 - range);
    assert(extra > 0 && extra < 8);
    mask = 0xFF << extra;
@@ -402,15 +382,14 @@ scan4seq_avx2(pgno_t *range, const size_t len, const size_t seq) {
#ifdef MDBX_ATTRIBUTE_TARGET_AVX512BW
MDBX_ATTRIBUTE_TARGET_AVX512BW static __always_inline unsigned
diffcmp2mask_avx512bw(const pgno_t *const ptr, const ptrdiff_t offset, const __m512i pattern) {
  const __m512i f = _mm512_loadu_si512((const __m512i *)ptr);
  const __m512i l = _mm512_loadu_si512((const __m512i *)(ptr + offset));
  return _mm512_cmpeq_epi32_mask(_mm512_sub_epi32(f, l), pattern);
}

MDBX_MAYBE_UNUSED __hot MDBX_ATTRIBUTE_TARGET_AVX512BW static pgno_t *scan4seq_avx512bw(pgno_t *range, const size_t len,
                                                                                        const size_t seq) {
  assert(seq > 0 && len > seq);
#if MDBX_PNL_ASCENDING
#error "FIXME: Not implemented"
@@ -442,8 +421,7 @@ scan4seq_avx512bw(pgno_t *range, const size_t len, const size_t seq) {
   * Поэтому проверяем смещение на странице, а с ASAN всегда страхуемся. */
#if !defined(ENABLE_MEMCHECK) && !defined(__SANITIZE_ADDRESS__)
  const unsigned on_page_safe_mask = 0xfc0 /* enough for '-63' bytes offset */;
  if (likely(on_page_safe_mask & (uintptr_t)(range + offset)) && !RUNNING_ON_VALGRIND) {
    const unsigned extra = (unsigned)(detent + 16 - range);
    assert(extra > 0 && extra < 16);
    mask = 0xFFFF << extra;
@@ -474,10 +452,8 @@ scan4seq_avx512bw(pgno_t *range, const size_t len, const size_t seq) {
}
#endif /* MDBX_ATTRIBUTE_TARGET_AVX512BW */

#if (defined(__ARM_NEON) || defined(__ARM_NEON__)) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
static __always_inline size_t diffcmp2mask_neon(const pgno_t *const ptr, const ptrdiff_t offset,
                                                const uint32x4_t pattern) {
  const uint32x4_t f = vld1q_u32(ptr);
  const uint32x4_t l = vld1q_u32(ptr + offset);
@@ -485,12 +461,10 @@ static __always_inline size_t diffcmp2mask_neon(const pgno_t *const ptr,
  if (sizeof(size_t) > 7)
    return vget_lane_u64(vreinterpret_u64_u16(cmp), 0);
  else
    return vget_lane_u32(vreinterpret_u32_u8(vmovn_u16(vcombine_u16(cmp, cmp))), 0);
}
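For orientation, a scalar sketch (illustrative only, not from the commit) of what the diffcmp2mask_* helpers above compute: the element-wise differences ptr[i] - ptr[offset + i] are compared against a pattern and the per-lane results are packed into a bitmask, one bit per 32-bit lane; the SSE2/AVX2/AVX-512/NEON variants do the same with vector registers.

#include <stddef.h>
#include <stdint.h>

typedef uint32_t pgno_t;

/* scalar stand-in for the 4-lane SSE2/NEON variants */
static unsigned diffcmp2mask_scalar(const pgno_t *ptr, ptrdiff_t offset, const pgno_t pattern[4]) {
  unsigned mask = 0;
  for (unsigned lane = 0; lane < 4; ++lane)
    if ((pgno_t)(ptr[lane] - ptr[offset + lane]) == pattern[lane])
      mask |= 1u << lane; /* set a bit for every lane where the difference matches */
  return mask;
}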
__hot static pgno_t *scan4seq_neon(pgno_t *range, const size_t len, const size_t seq) {
  assert(seq > 0 && len > seq);
#if MDBX_PNL_ASCENDING
#error "FIXME: Not implemented"
@@ -522,8 +496,7 @@ __hot static pgno_t *scan4seq_neon(pgno_t *range, const size_t len,
   * Поэтому проверяем смещение на странице, а с ASAN всегда страхуемся. */
#if !defined(ENABLE_MEMCHECK) && !defined(__SANITIZE_ADDRESS__)
  const unsigned on_page_safe_mask = 0xff0 /* enough for '-15' bytes offset */;
  if (likely(on_page_safe_mask & (uintptr_t)(range + offset)) && !RUNNING_ON_VALGRIND) {
    const unsigned extra = (unsigned)(detent + 4 - range);
    assert(extra > 0 && extra < 4);
    mask = (~(size_t)0) << (extra * sizeof(size_t) * 2);
@@ -548,8 +521,7 @@ __hot static pgno_t *scan4seq_neon(pgno_t *range, const size_t len,
#define scan4seq_default scan4seq_avx2
#elif defined(__SSE2__) && defined(MDBX_ATTRIBUTE_TARGET_SSE2)
#define scan4seq_default scan4seq_sse2
#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
#define scan4seq_default scan4seq_neon
/* Choosing of another variants should be added here. */
#endif /* scan4seq_default */
@@ -570,17 +542,12 @@ __hot static pgno_t *scan4seq_neon(pgno_t *range, const size_t len,
#else
/* Selecting the most appropriate implementation at runtime,
 * depending on the available CPU features. */
static pgno_t *scan4seq_resolver(pgno_t *range, const size_t len, const size_t seq);
static pgno_t *(*scan4seq_impl)(pgno_t *range, const size_t len, const size_t seq) = scan4seq_resolver;

static pgno_t *scan4seq_resolver(pgno_t *range, const size_t len, const size_t seq) {
  pgno_t *(*choice)(pgno_t *range, const size_t len, const size_t seq) = nullptr;
#if __has_builtin(__builtin_cpu_init) || defined(__BUILTIN_CPU_INIT__) || __GNUC_PREREQ(4, 8)
  __builtin_cpu_init();
#endif /* __builtin_cpu_init() */
#ifdef MDBX_ATTRIBUTE_TARGET_SSE2
@@ -607,12 +574,10 @@ static pgno_t *scan4seq_resolver(pgno_t *range, const size_t len,
#define ALLOC_SHOULD_SCAN 8 /* внутреннее состояние */
#define ALLOC_LIFO 16 /* внутреннее состояние */

static inline bool is_gc_usable(MDBX_txn *txn, const MDBX_cursor *mc, const uint8_t flags) {
  /* If txn is updating the GC, then the retired-list cannot play catch-up with
   * itself by growing while trying to save it. */
  if (mc->tree == &txn->dbs[FREE_DBI] && !(flags & ALLOC_RESERVE) && !(mc->flags & z_gcu_preparation))
    return false;

  /* avoid search inside empty tree and while tree is updating,
@@ -690,8 +655,7 @@ __hot static pgno_t relist_get_single(MDBX_txn *txn) {
#ifndef MDBX_ENABLE_SAVING_SEQUENCES
#define MDBX_ENABLE_SAVING_SEQUENCES 0
#endif
  if (MDBX_ENABLE_SAVING_SEQUENCES && unlikely(target[dir] == *target + 1) && len > 2) {
    /* Пытаемся пропускать последовательности при наличии одиночных элементов.
     * TODO: необходимо кэшировать пропускаемые последовательности
     * чтобы не сканировать список сначала при каждом выделении. */
@@ -719,8 +683,7 @@ __hot static pgno_t relist_get_single(MDBX_txn *txn) {
#if MDBX_PNL_ASCENDING
  /* вырезаем элемент с перемещением хвоста */
  MDBX_PNL_SETSIZE(txn->tw.relist, len - 1);
  for (const pgno_t *const end = txn->tw.relist + len - 1; target <= end; ++target)
    *target = target[1];
#else
  /* перемещать хвост не нужно, просто усекам список */
@@ -729,8 +692,7 @@ __hot static pgno_t relist_get_single(MDBX_txn *txn) {
  return pgno;
}

__hot static pgno_t relist_get_sequence(MDBX_txn *txn, const size_t num, uint8_t flags) {
  const size_t len = MDBX_PNL_GETSIZE(txn->tw.relist);
  pgno_t *edge = MDBX_PNL_EDGE(txn->tw.relist);
  assert(len >= num && num > 1);
@@ -754,8 +716,7 @@ __hot static pgno_t relist_get_sequence(MDBX_txn *txn, const size_t num,
  /* вырезаем найденную последовательность с перемещением хвоста */
  MDBX_PNL_SETSIZE(txn->tw.relist, len - num);
#if MDBX_PNL_ASCENDING
  for (const pgno_t *const end = txn->tw.relist + len - num; target <= end; ++target)
    *target = target[num];
#else
  for (const pgno_t *const end = txn->tw.relist + len; ++target <= end;)
@@ -766,16 +727,13 @@ __hot static pgno_t relist_get_sequence(MDBX_txn *txn, const size_t num,
  return 0;
}

static inline pgr_t page_alloc_finalize(MDBX_env *const env, MDBX_txn *const txn, const MDBX_cursor *const mc,
                                        const pgno_t pgno, const size_t num) {
#if MDBX_ENABLE_PROFGC
  size_t majflt_before;
  const uint64_t cputime_before = osal_cputime(&majflt_before);
  gc_prof_stat_t *const prof =
      (cursor_dbi(mc) == FREE_DBI) ? &env->lck->pgops.gc_prof.self : &env->lck->pgops.gc_prof.work;
#else
  (void)mc;
#endif /* MDBX_ENABLE_PROFGC */
@@ -811,8 +769,7 @@ static inline pgr_t page_alloc_finalize(MDBX_env *const env,
   * грязной I/O очереди. Из-за этого штраф за лишнюю запись может быть
   * сравним с избегаемым ненужным чтением. */
  if (env->prefault_write_activated) {
    void *const pattern = ptr_disp(env->page_auxbuf, need_clean ? env->ps : env->ps * 2);
    size_t file_offset = pgno2bytes(env, pgno);
    if (likely(num == 1)) {
      if (!mincore_probe(env, pgno)) {
@@ -831,8 +788,7 @@ static inline pgr_t page_alloc_finalize(MDBX_env *const env,
        iov[n].iov_len = env->ps;
        iov[n].iov_base = pattern;
        if (unlikely(++n == MDBX_AUXILARY_IOV_MAX)) {
          osal_pwritev(env->lazy_fd, iov, MDBX_AUXILARY_IOV_MAX, file_offset);
#if MDBX_ENABLE_PGOP_STAT
          env->lck->pgops.prefault.weak += 1;
#endif /* MDBX_ENABLE_PGOP_STAT */
@@ -873,8 +829,7 @@ static inline pgr_t page_alloc_finalize(MDBX_env *const env,
  ret.err = page_dirty(txn, ret.page, (pgno_t)num);
bailout:
  tASSERT(txn, pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
#if MDBX_ENABLE_PROFGC
  size_t majflt_after;
  prof->xtime_cpu += osal_cputime(&majflt_after) - cputime_before;
@@ -883,32 +838,25 @@ bailout:
  return ret;
}

pgr_t gc_alloc_ex(const MDBX_cursor *const mc, const size_t num, uint8_t flags) {
  pgr_t ret;
  MDBX_txn *const txn = mc->txn;
  MDBX_env *const env = txn->env;
#if MDBX_ENABLE_PROFGC
  gc_prof_stat_t *const prof =
      (cursor_dbi(mc) == FREE_DBI) ? &env->lck->pgops.gc_prof.self : &env->lck->pgops.gc_prof.work;
  prof->spe_counter += 1;
#endif /* MDBX_ENABLE_PROFGC */

  eASSERT(env, num > 0 || (flags & ALLOC_RESERVE));
  eASSERT(env, pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));

  size_t newnext;
  const uint64_t monotime_begin = (MDBX_ENABLE_PROFGC || (num > 1 && env->options.gc_time_limit)) ? osal_monotime() : 0;
  struct monotime_cache now_cache;
  now_cache.expire_countdown = 1 /* старт с 1 позволяет избавиться как от лишних системных вызовов когда
                                    лимит времени задан нулевой или уже исчерпан, так и от подсчета
                                    времени при не-достижении rp_augment_limit */
      ;
  now_cache.value = monotime_begin;
  pgno_t pgno = 0;
@@ -917,9 +865,8 @@ pgr_t gc_alloc_ex(const MDBX_cursor *const mc, const size_t num,
    prof->xpages += 1;
#endif /* MDBX_ENABLE_PROFGC */
    if (MDBX_PNL_GETSIZE(txn->tw.relist) >= num) {
      eASSERT(env, MDBX_PNL_LAST(txn->tw.relist) < txn->geo.first_unallocated &&
                       MDBX_PNL_FIRST(txn->tw.relist) < txn->geo.first_unallocated);
      pgno = relist_get_sequence(txn, num, flags);
      if (likely(pgno))
        goto done;
@@ -936,16 +883,14 @@ pgr_t gc_alloc_ex(const MDBX_cursor *const mc, const size_t num,
    goto no_gc;
  }

  eASSERT(env, (flags & (ALLOC_COALESCE | ALLOC_LIFO | ALLOC_SHOULD_SCAN)) == 0);
  flags += (env->flags & MDBX_LIFORECLAIM) ? ALLOC_LIFO : 0;

  if (/* Не коагулируем записи при подготовке резерва для обновления GC.
       * Иначе попытка увеличить резерв может приводить к необходимости ещё
       * большего резерва из-за увеличения списка переработанных страниц. */
      (flags & ALLOC_RESERVE) == 0) {
    if (txn->dbs[FREE_DBI].branch_pages && MDBX_PNL_GETSIZE(txn->tw.relist) < env->maxgc_large1page / 2)
      flags += ALLOC_COALESCE;
  }
@@ -976,9 +921,7 @@ retry_gc_refresh_oldest:;
  txnid_t oldest = txn_snapshot_oldest(txn);
retry_gc_have_oldest:
  if (unlikely(oldest >= txn->txnid)) {
    ERROR("unexpected/invalid oldest-readed txnid %" PRIaTXN " for current-txnid %" PRIaTXN, oldest, txn->txnid);
    ret.err = MDBX_PROBLEM;
    goto fail;
  }
@@ -1026,8 +969,7 @@ next_gc:;
      goto depleted_gc;
    }
    if (unlikely(key.iov_len != sizeof(txnid_t))) {
      ERROR("%s/%d: %s", "MDBX_CORRUPTED", MDBX_CORRUPTED, "invalid GC key-length");
      ret.err = MDBX_CORRUPTED;
      goto fail;
    }
@@ -1046,26 +988,21 @@ next_gc:;
  /* Reading next GC record */
  MDBX_val data;
  page_t *const mp = gc->pg[gc->top];
  if (unlikely((ret.err = node_read(gc, page_node(mp, gc->ki[gc->top]), &data, mp)) != MDBX_SUCCESS))
    goto fail;

  pgno_t *gc_pnl = (pgno_t *)data.iov_base;
  if (unlikely(data.iov_len % sizeof(pgno_t) || data.iov_len < MDBX_PNL_SIZEOF(gc_pnl) ||
               !pnl_check(gc_pnl, txn->geo.first_unallocated))) {
    ERROR("%s/%d: %s", "MDBX_CORRUPTED", MDBX_CORRUPTED, "invalid GC value-length");
    ret.err = MDBX_CORRUPTED;
    goto fail;
  }

  const size_t gc_len = MDBX_PNL_GETSIZE(gc_pnl);
  TRACE("gc-read: id #%" PRIaTXN " len %zu, re-list will %zu ", id, gc_len, gc_len + MDBX_PNL_GETSIZE(txn->tw.relist));

  if (unlikely(gc_len + MDBX_PNL_GETSIZE(txn->tw.relist) >= env->maxgc_large1page)) {
    /* Don't try to coalesce too much. */
    if (flags & ALLOC_SHOULD_SCAN) {
      eASSERT(env, flags & ALLOC_COALESCE);
@@ -1076,10 +1013,8 @@ next_gc:;
#endif /* MDBX_ENABLE_PROFGC */
      TRACE("clear %s %s", "ALLOC_COALESCE", "since got threshold");
      if (MDBX_PNL_GETSIZE(txn->tw.relist) >= num) {
        eASSERT(env, MDBX_PNL_LAST(txn->tw.relist) < txn->geo.first_unallocated &&
                         MDBX_PNL_FIRST(txn->tw.relist) < txn->geo.first_unallocated);
        if (likely(num == 1)) {
          pgno = relist_get_single(txn);
          goto done;
@@ -1090,25 +1025,19 @@ next_gc:;
      }
      flags -= ALLOC_COALESCE | ALLOC_SHOULD_SCAN;
    }
    if (unlikely(/* list is too long already */ MDBX_PNL_GETSIZE(txn->tw.relist) >= env->options.rp_augment_limit) &&
        ((/* not a slot-request from gc-update */ num &&
          /* have enough unallocated space */ txn->geo.upper >= txn->geo.first_unallocated + num &&
          monotime_since_cached(monotime_begin, &now_cache) + txn->tw.gc.time_acc >= env->options.gc_time_limit) ||
         gc_len + MDBX_PNL_GETSIZE(txn->tw.relist) >= PAGELIST_LIMIT)) {
      /* Stop reclaiming to avoid large/overflow the page list. This is a rare
       * case while search for a continuously multi-page region in a
       * large database, see https://libmdbx.dqdkfa.ru/dead-github/issues/123 */
      NOTICE("stop reclaiming %s: %zu (current) + %zu "
             "(chunk) -> %zu, rp_augment_limit %u",
             likely(gc_len + MDBX_PNL_GETSIZE(txn->tw.relist) < PAGELIST_LIMIT) ? "since rp_augment_limit was reached"
                                                                                : "to avoid PNL overflow",
             MDBX_PNL_GETSIZE(txn->tw.relist), gc_len, gc_len + MDBX_PNL_GETSIZE(txn->tw.relist),
             env->options.rp_augment_limit);
      goto depleted_gc;
    }
@@ -1128,9 +1057,7 @@ next_gc:;
    goto fail;
  if (LOG_ENABLED(MDBX_LOG_EXTRA)) {
    DEBUG_EXTRA("readed GC-pnl txn %" PRIaTXN " root %" PRIaPGNO " len %zu, PNL", id, txn->dbs[FREE_DBI].root, gc_len);
    for (size_t i = gc_len; i; i--)
      DEBUG_EXTRA_PRINT(" %" PRIaPGNO, gc_pnl[i]);
    DEBUG_EXTRA_PRINT(", first_unallocated %u\n", txn->geo.first_unallocated);
@@ -1141,33 +1068,27 @@ next_gc:;
  flags |= ALLOC_SHOULD_SCAN;
  if (AUDIT_ENABLED()) {
    if (unlikely(!pnl_check(txn->tw.relist, txn->geo.first_unallocated))) {
      ERROR("%s/%d: %s", "MDBX_CORRUPTED", MDBX_CORRUPTED, "invalid txn retired-list");
      ret.err = MDBX_CORRUPTED;
      goto fail;
    }
  } else {
    eASSERT(env, pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated));
  }
  eASSERT(env, dpl_check(txn));

  eASSERT(env, MDBX_PNL_GETSIZE(txn->tw.relist) == 0 || MDBX_PNL_MOST(txn->tw.relist) < txn->geo.first_unallocated);
  if (MDBX_ENABLE_REFUND && MDBX_PNL_GETSIZE(txn->tw.relist) &&
      unlikely(MDBX_PNL_MOST(txn->tw.relist) == txn->geo.first_unallocated - 1)) {
    /* Refund suitable pages into "unallocated" space */
    txn_refund(txn);
  }
  eASSERT(env, pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));

  /* Done for a kick-reclaim mode, actually no page needed */
  if (unlikely(num == 0)) {
    eASSERT(env, ret.err == MDBX_SUCCESS);
    TRACE("%s: last id #%" PRIaTXN ", re-len %zu", "early-exit for slot", id, MDBX_PNL_GETSIZE(txn->tw.relist));
    goto early_exit;
  }
@@ -1175,8 +1096,7 @@ next_gc:;
  eASSERT(env, op == MDBX_PREV || op == MDBX_NEXT);
  if (flags & ALLOC_COALESCE) {
    TRACE("%s: last id #%" PRIaTXN ", re-len %zu", "coalesce-continue", id, MDBX_PNL_GETSIZE(txn->tw.relist));
    goto next_gc;
  }
@@ -1184,9 +1104,8 @@ scan:
  eASSERT(env, flags & ALLOC_SHOULD_SCAN);
  eASSERT(env, num > 0);
  if (MDBX_PNL_GETSIZE(txn->tw.relist) >= num) {
    eASSERT(env, MDBX_PNL_LAST(txn->tw.relist) < txn->geo.first_unallocated &&
                     MDBX_PNL_FIRST(txn->tw.relist) < txn->geo.first_unallocated);
    if (likely(num == 1)) {
      eASSERT(env, !(flags & ALLOC_RESERVE));
      pgno = relist_get_single(txn);
@@ -1198,14 +1117,12 @@ scan:
  }
  flags -= ALLOC_SHOULD_SCAN;
  if (ret.err == MDBX_SUCCESS) {
    TRACE("%s: last id #%" PRIaTXN ", re-len %zu", "continue-search", id, MDBX_PNL_GETSIZE(txn->tw.relist));
    goto next_gc;
  }

depleted_gc:
  TRACE("%s: last id #%" PRIaTXN ", re-len %zu", "gc-depleted", id, MDBX_PNL_GETSIZE(txn->tw.relist));
  ret.err = MDBX_NOTFOUND;
  if (flags & ALLOC_SHOULD_SCAN)
    goto scan;
@@ -1226,16 +1143,11 @@ depleted_gc:
  /* Does reclaiming stopped at the last steady point? */
  const meta_ptr_t recent = meta_recent(env, &txn->tw.troika);
  const meta_ptr_t prefer_steady = meta_prefer_steady(env, &txn->tw.troika);
  if (recent.ptr_c != prefer_steady.ptr_c && prefer_steady.is_steady && detent == prefer_steady.txnid + 1) {
    DEBUG("gc-kick-steady: recent %" PRIaTXN "-%s, steady %" PRIaTXN "-%s, detent %" PRIaTXN, recent.txnid,
          durable_caption(recent.ptr_c), prefer_steady.txnid, durable_caption(prefer_steady.ptr_c), detent);
    const pgno_t autosync_threshold = atomic_load32(&env->lck->autosync_threshold, mo_Relaxed);
    const uint64_t autosync_period = atomic_load64(&env->lck->autosync_period, mo_Relaxed);
    uint64_t eoos_timestamp;
    /* wipe the last steady-point if one of:
     * - UTTERLY_NOSYNC mode AND auto-sync threshold is NOT specified
@@ -1246,8 +1158,7 @@ depleted_gc:
     * - database is full (with the current file size)
     *   AND auto-sync threshold it NOT specified */
    if (F_ISSET(env->flags, MDBX_UTTERLY_NOSYNC) &&
        ((autosync_threshold | autosync_period) == 0 || newnext >= prefer_steady.ptr_c->geometry.now)) {
      /* wipe steady checkpoint in MDBX_UTTERLY_NOSYNC mode
       * without any auto-sync threshold(s). */
#if MDBX_ENABLE_PROFGC
@@ -1257,39 +1168,30 @@ depleted_gc:
      DEBUG("gc-wipe-steady, rc %d", ret.err);
      if (unlikely(ret.err != MDBX_SUCCESS))
        goto fail;
      eASSERT(env, prefer_steady.ptr_c != meta_prefer_steady(env, &txn->tw.troika).ptr_c);
      goto retry_gc_refresh_oldest;
    }
    if ((autosync_threshold && atomic_load64(&env->lck->unsynced_pages, mo_Relaxed) >= autosync_threshold) ||
        (autosync_period && (eoos_timestamp = atomic_load64(&env->lck->eoos_timestamp, mo_Relaxed)) &&
         osal_monotime() - eoos_timestamp >= autosync_period) ||
        newnext >= txn->geo.upper ||
        ((num == 0 || newnext >= txn->geo.end_pgno) && (autosync_threshold | autosync_period) == 0)) {
      /* make steady checkpoint. */
#if MDBX_ENABLE_PROFGC
      env->lck->pgops.gc_prof.flushes += 1;
#endif /* MDBX_ENABLE_PROFGC */
      meta_t meta = *recent.ptr_c;
      ret.err = dxb_sync_locked(env, env->flags & MDBX_WRITEMAP, &meta, &txn->tw.troika);
      DEBUG("gc-make-steady, rc %d", ret.err);
      eASSERT(env, ret.err != MDBX_RESULT_TRUE);
      if (unlikely(ret.err != MDBX_SUCCESS))
        goto fail;
      eASSERT(env, prefer_steady.ptr_c != meta_prefer_steady(env, &txn->tw.troika).ptr_c);
      goto retry_gc_refresh_oldest;
    }
  }

  if (unlikely(true == atomic_load32(&env->lck->rdt_refresh_flag, mo_AcquireRelease))) {
    oldest = txn_snapshot_oldest(txn);
    if (oldest >= detent)
      goto retry_gc_have_oldest;
@@ -1315,8 +1217,7 @@ no_gc:
#ifndef MDBX_ENABLE_BACKLOG_DEPLETED
#define MDBX_ENABLE_BACKLOG_DEPLETED 0
#endif /* MDBX_ENABLE_BACKLOG_DEPLETED*/
  if (MDBX_ENABLE_BACKLOG_DEPLETED && unlikely(!(txn->flags & txn_gc_drained))) {
    ret.err = MDBX_BACKLOG_DEPLETED;
    goto fail;
  }
@@ -1338,20 +1239,16 @@ no_gc:
  eASSERT(env, newnext > txn->geo.end_pgno);
  const size_t grow_step = pv2pages(txn->geo.grow_pv);
  size_t aligned = pgno_align2os_pgno(env, (pgno_t)(newnext + grow_step - newnext % grow_step));

  if (aligned > txn->geo.upper)
    aligned = txn->geo.upper;
  eASSERT(env, aligned >= newnext);

  VERBOSE("try growth datafile to %zu pages (+%zu)", aligned, aligned - txn->geo.end_pgno);
  ret.err = dxb_resize(env, txn->geo.first_unallocated, (pgno_t)aligned, txn->geo.upper, implicit_grow);
  if (ret.err != MDBX_SUCCESS) {
    ERROR("unable growth datafile to %zu pages (+%zu), errcode %d", aligned, aligned - txn->geo.end_pgno, ret.err);
    goto fail;
  }
  env->txn->geo.end_pgno = (pgno_t)aligned;
@@ -1363,26 +1260,20 @@ done:
  ret.err = MDBX_SUCCESS;
  if (likely((flags & ALLOC_RESERVE) == 0)) {
    if (pgno) {
      eASSERT(env, pgno + num <= txn->geo.first_unallocated && pgno >= NUM_METAS);
      eASSERT(env, pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
    } else {
      pgno = txn->geo.first_unallocated;
      txn->geo.first_unallocated += (pgno_t)num;
      eASSERT(env, txn->geo.first_unallocated <= txn->geo.end_pgno);
      eASSERT(env, pgno >= NUM_METAS && pgno + num <= txn->geo.first_unallocated);
    }

    ret = page_alloc_finalize(env, txn, mc, pgno, num);
    if (unlikely(ret.err != MDBX_SUCCESS)) {
    fail:
      eASSERT(env, ret.err != MDBX_SUCCESS);
      eASSERT(env, pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
      int level;
      const char *what;
      if (flags & ALLOC_RESERVE) {
@@ -1398,12 +1289,9 @@ done:
            "unable alloc %zu %s, alloc-flags 0x%x, err %d, txn-flags "
            "0x%x, re-list-len %zu, loose-count %zu, gc: height %u, "
            "branch %zu, leaf %zu, large %zu, entries %zu\n",
            num, what, flags, ret.err, txn->flags, MDBX_PNL_GETSIZE(txn->tw.relist), txn->tw.loose_count,
            txn->dbs[FREE_DBI].height, (size_t)txn->dbs[FREE_DBI].branch_pages,
            (size_t)txn->dbs[FREE_DBI].leaf_pages, (size_t)txn->dbs[FREE_DBI].large_pages,
            (size_t)txn->dbs[FREE_DBI].items);
      ret.page = nullptr;
    }
@@ -1411,8 +1299,7 @@ done:
    txn->tw.gc.time_acc += monotime_since_cached(monotime_begin, &now_cache);
  } else {
  early_exit:
    DEBUG("return nullptr for %zu pages for ALLOC_%s, rc %d", num, num ? "RESERVE" : "SLOT", ret.err);
    ret.page = nullptr;
  }
@@ -1425,8 +1312,7 @@ done:
__hot pgr_t gc_alloc_single(const MDBX_cursor *const mc) {
  MDBX_txn *const txn = mc->txn;
  tASSERT(txn, mc->txn->flags & MDBX_TXN_DIRTY);
  tASSERT(txn, F_ISSET(*cursor_dbi_state(mc), DBI_LINDO | DBI_VALID | DBI_DIRTY));

  /* If there are any loose pages, just use them */
  while (likely(txn->tw.loose_pages)) {
@@ -1443,8 +1329,7 @@ __hot pgr_t gc_alloc_single(const MDBX_cursor *const mc) {
    VALGRIND_MAKE_MEM_DEFINED(&page_next(lp), sizeof(page_t *));
    txn->tw.loose_pages = page_next(lp);
    txn->tw.loose_count--;
    DEBUG_EXTRA("db %d use loose page %" PRIaPGNO, cursor_dbi_dbg(mc), lp->pgno);
    tASSERT(txn, lp->pgno < txn->geo.first_unallocated);
    tASSERT(txn, lp->pgno >= NUM_METAS);
    VALGRIND_MAKE_MEM_UNDEFINED(page_data(lp), page_space(txn->env));
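The loose-page reuse above is a plain singly-linked stack pop; a simplified standalone sketch (illustrative only, the struct and field names below are stand-ins rather than the libmdbx types):

#include <stddef.h>

struct loose_page {
  struct loose_page *next; /* stands in for page_next(lp) */
  unsigned pgno;
};

struct txn_loose {
  struct loose_page *head; /* stands in for txn->tw.loose_pages */
  size_t count;            /* stands in for txn->tw.loose_count */
};

/* pop the most recently freed page, or return NULL when the list is empty */
static struct loose_page *loose_pop(struct txn_loose *tl) {
  struct loose_page *lp = tl->head;
  if (lp) {
    tl->head = lp->next;
    tl->count--;
  }
  return lp;
}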


@@ -11,9 +11,7 @@ MDBX_MAYBE_UNUSED static inline const char *dbg_prefix(const gcu_t *ctx) {
  return is_lifo(ctx->cursor.txn) ? " lifo" : " fifo";
}

static inline size_t backlog_size(MDBX_txn *txn) { return MDBX_PNL_GETSIZE(txn->tw.relist) + txn->tw.loose_count; }

static int clean_stored_retired(MDBX_txn *txn, gcu_t *ctx) {
  int err = MDBX_SUCCESS;
@@ -53,8 +51,7 @@ static int clean_stored_retired(MDBX_txn *txn, gcu_t *ctx) {
}

static int touch_gc(gcu_t *ctx) {
  tASSERT(ctx->cursor.txn, is_pointed(&ctx->cursor) || ctx->cursor.txn->dbs[FREE_DBI].leaf_pages == 0);
  MDBX_val key, val;
  key.iov_base = val.iov_base = nullptr;
  key.iov_len = sizeof(txnid_t);
@@ -70,24 +67,19 @@ static int touch_gc(gcu_t *ctx) {
 * during a deleting, when GC tree is unbalanced. */
static int prepare_backlog(MDBX_txn *txn, gcu_t *ctx) {
  const size_t for_cow = txn->dbs[FREE_DBI].height;
  const size_t for_rebalance = for_cow + 1 + (txn->dbs[FREE_DBI].height + 1ul >= txn->dbs[FREE_DBI].branch_pages);
  size_t for_split = ctx->retired_stored == 0;
  tASSERT(txn, is_pointed(&ctx->cursor) || txn->dbs[FREE_DBI].leaf_pages == 0);

  const intptr_t retired_left = MDBX_PNL_SIZEOF(txn->tw.retired_pages) - ctx->retired_stored;
  size_t for_relist = 0;
  if (MDBX_ENABLE_BIGFOOT && retired_left > 0) {
    for_relist = (retired_left + txn->env->maxgc_large1page - 1) / txn->env->maxgc_large1page;
    const size_t per_branch_page = txn->env->maxgc_per_branch;
    for (size_t entries = for_relist; entries > 1; for_split += entries)
      entries = (entries + per_branch_page - 1) / per_branch_page;
  } else if (!MDBX_ENABLE_BIGFOOT && retired_left != 0) {
    for_relist = largechunk_npages(txn->env, MDBX_PNL_SIZEOF(txn->tw.retired_pages));
  }
  const size_t for_tree_before_touch = for_cow + for_rebalance + for_split;
@@ -96,23 +88,20 @@ static int prepare_backlog(MDBX_txn *txn, gcu_t *ctx) {
  const size_t for_all_after_touch = for_relist + for_tree_after_touch;

  if (likely(for_relist < 2 && backlog_size(txn) > for_all_before_touch) &&
      (ctx->cursor.top < 0 || is_modifable(txn, ctx->cursor.pg[ctx->cursor.top])))
    return MDBX_SUCCESS;

  TRACE(">> retired-stored %zu, left %zi, backlog %zu, need %zu (4list %zu, "
        "4split %zu, "
        "4cow %zu, 4tree %zu)",
        ctx->retired_stored, retired_left, backlog_size(txn), for_all_before_touch, for_relist, for_split, for_cow,
        for_tree_before_touch);

  int err = touch_gc(ctx);
  TRACE("== after-touch, backlog %zu, err %d", backlog_size(txn), err);

  if (!MDBX_ENABLE_BIGFOOT && unlikely(for_relist > 1) &&
      MDBX_PNL_GETSIZE(txn->tw.retired_pages) != ctx->retired_stored && err == MDBX_SUCCESS) {
    if (unlikely(ctx->retired_stored)) {
      err = clean_stored_retired(txn, ctx);
      if (unlikely(err != MDBX_SUCCESS))
@@ -122,8 +111,7 @@ static int prepare_backlog(MDBX_txn *txn, gcu_t *ctx) {
    }
    err = gc_alloc_ex(&ctx->cursor, for_relist, ALLOC_RESERVE).err;
TRACE("== after-4linear, backlog %zu, err %d", backlog_size(txn), err); TRACE("== after-4linear, backlog %zu, err %d", backlog_size(txn), err);
cASSERT(&ctx->cursor, cASSERT(&ctx->cursor, backlog_size(txn) >= for_relist || err != MDBX_SUCCESS);
backlog_size(txn) >= for_relist || err != MDBX_SUCCESS);
} }
while (backlog_size(txn) < for_all_after_touch && err == MDBX_SUCCESS) while (backlog_size(txn) < for_all_after_touch && err == MDBX_SUCCESS)
@ -131,10 +119,8 @@ static int prepare_backlog(MDBX_txn *txn, gcu_t *ctx) {
TRACE("<< backlog %zu, err %d, gc: height %u, branch %zu, leaf %zu, large " TRACE("<< backlog %zu, err %d, gc: height %u, branch %zu, leaf %zu, large "
"%zu, entries %zu", "%zu, entries %zu",
backlog_size(txn), err, txn->dbs[FREE_DBI].height, backlog_size(txn), err, txn->dbs[FREE_DBI].height, (size_t)txn->dbs[FREE_DBI].branch_pages,
(size_t)txn->dbs[FREE_DBI].branch_pages, (size_t)txn->dbs[FREE_DBI].leaf_pages, (size_t)txn->dbs[FREE_DBI].large_pages,
(size_t)txn->dbs[FREE_DBI].leaf_pages,
(size_t)txn->dbs[FREE_DBI].large_pages,
(size_t)txn->dbs[FREE_DBI].items); (size_t)txn->dbs[FREE_DBI].items);
tASSERT(txn, err != MDBX_NOTFOUND || (txn->flags & txn_gc_drained) != 0); tASSERT(txn, err != MDBX_NOTFOUND || (txn->flags & txn_gc_drained) != 0);
return (err != MDBX_NOTFOUND) ? err : MDBX_SUCCESS; return (err != MDBX_NOTFOUND) ? err : MDBX_SUCCESS;
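The prepare_backlog() hunk above estimates how many pages must already be on hand before the GC tree itself is modified: a ceiling division of the still-unstored retired entries by the per-page capacity, plus an allowance for splits on each tree level. A minimal standalone sketch of that arithmetic, with assumed capacities rather than the real environment-derived values:

#include <stdio.h>
#include <stddef.h>

static size_t div_ceil(size_t a, size_t b) { return (a + b - 1) / b; }

int main(void) {
  const size_t maxgc_large1page = 1018; /* assumed: pgno_t slots per large GC page */
  const size_t maxgc_per_branch = 292;  /* assumed: entries per GC branch page */
  const size_t retired_left = 5000;     /* retired entries not yet stored */

  const size_t for_relist = div_ceil(retired_left, maxgc_large1page);
  size_t for_split = 0;
  for (size_t entries = for_relist; entries > 1; for_split += entries)
    entries = div_ceil(entries, maxgc_per_branch); /* pages per upper tree level */

  printf("relist chunks %zu, split allowance %zu\n", for_relist, for_split);
  return 0;
}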
@ -164,12 +150,10 @@ static int gcu_loose(MDBX_txn *txn, gcu_t *ctx) {
* though usually none are left at this point. * though usually none are left at this point.
* The pages themselves remain in dirtylist. */ * The pages themselves remain in dirtylist. */
if (unlikely(!txn->tw.gc.reclaimed && txn->tw.gc.last_reclaimed < 1)) { if (unlikely(!txn->tw.gc.reclaimed && txn->tw.gc.last_reclaimed < 1)) {
TRACE("%s: try allocate gc-slot for %zu loose-pages", dbg_prefix(ctx), TRACE("%s: try allocate gc-slot for %zu loose-pages", dbg_prefix(ctx), txn->tw.loose_count);
txn->tw.loose_count);
int err = gc_alloc_ex(&ctx->cursor, 0, ALLOC_RESERVE).err; int err = gc_alloc_ex(&ctx->cursor, 0, ALLOC_RESERVE).err;
if (err == MDBX_SUCCESS) { if (err == MDBX_SUCCESS) {
TRACE("%s: retry since gc-slot for %zu loose-pages available", TRACE("%s: retry since gc-slot for %zu loose-pages available", dbg_prefix(ctx), txn->tw.loose_count);
dbg_prefix(ctx), txn->tw.loose_count);
return MDBX_RESULT_TRUE; return MDBX_RESULT_TRUE;
} }
@ -183,15 +167,13 @@ static int gcu_loose(MDBX_txn *txn, gcu_t *ctx) {
MDBX_ASAN_UNPOISON_MEMORY_REGION(&page_next(lp), sizeof(page_t *)); MDBX_ASAN_UNPOISON_MEMORY_REGION(&page_next(lp), sizeof(page_t *));
VALGRIND_MAKE_MEM_DEFINED(&page_next(lp), sizeof(page_t *)); VALGRIND_MAKE_MEM_DEFINED(&page_next(lp), sizeof(page_t *));
} }
TRACE("%s: append %zu loose-pages to retired-pages", dbg_prefix(ctx), TRACE("%s: append %zu loose-pages to retired-pages", dbg_prefix(ctx), txn->tw.loose_count);
txn->tw.loose_count);
} else { } else {
/* Room for loose pages + temp PNL with same */ /* Room for loose pages + temp PNL with same */
int err = pnl_need(&txn->tw.relist, 2 * txn->tw.loose_count + 2); int err = pnl_need(&txn->tw.relist, 2 * txn->tw.loose_count + 2);
if (unlikely(err != MDBX_SUCCESS)) if (unlikely(err != MDBX_SUCCESS))
return err; return err;
pnl_t loose = txn->tw.relist + MDBX_PNL_ALLOCLEN(txn->tw.relist) - pnl_t loose = txn->tw.relist + MDBX_PNL_ALLOCLEN(txn->tw.relist) - txn->tw.loose_count - 1;
txn->tw.loose_count - 1;
size_t count = 0; size_t count = 0;
for (page_t *lp = txn->tw.loose_pages; lp; lp = page_next(lp)) { for (page_t *lp = txn->tw.loose_pages; lp; lp = page_next(lp)) {
tASSERT(txn, lp->flags == P_LOOSE); tASSERT(txn, lp->flags == P_LOOSE);
@ -203,8 +185,7 @@ static int gcu_loose(MDBX_txn *txn, gcu_t *ctx) {
MDBX_PNL_SETSIZE(loose, count); MDBX_PNL_SETSIZE(loose, count);
pnl_sort(loose, txn->geo.first_unallocated); pnl_sort(loose, txn->geo.first_unallocated);
pnl_merge(txn->tw.relist, loose); pnl_merge(txn->tw.relist, loose);
TRACE("%s: append %zu loose-pages to reclaimed-pages", dbg_prefix(ctx), TRACE("%s: append %zu loose-pages to reclaimed-pages", dbg_prefix(ctx), txn->tw.loose_count);
txn->tw.loose_count);
} }
/* filter-out list of dirty-pages from loose-pages */ /* filter-out list of dirty-pages from loose-pages */
@ -227,8 +208,7 @@ static int gcu_loose(MDBX_txn *txn, gcu_t *ctx) {
page_shadow_release(txn->env, dp, 1); page_shadow_release(txn->env, dp, 1);
} }
} }
TRACE("%s: filtered-out loose-pages from %zu -> %zu dirty-pages", TRACE("%s: filtered-out loose-pages from %zu -> %zu dirty-pages", dbg_prefix(ctx), dl->length, w);
dbg_prefix(ctx), dl->length, w);
tASSERT(txn, txn->tw.loose_count == dl->length - w); tASSERT(txn, txn->tw.loose_count == dl->length - w);
dl->sorted -= sorted_out; dl->sorted -= sorted_out;
tASSERT(txn, dl->sorted <= w); tASSERT(txn, dl->sorted <= w);
@ -236,8 +216,7 @@ static int gcu_loose(MDBX_txn *txn, gcu_t *ctx) {
dl->pages_including_loose -= txn->tw.loose_count; dl->pages_including_loose -= txn->tw.loose_count;
txn->tw.dirtyroom += txn->tw.loose_count; txn->tw.dirtyroom += txn->tw.loose_count;
tASSERT(txn, txn->tw.dirtyroom + txn->tw.dirtylist->length == tASSERT(txn, txn->tw.dirtyroom + txn->tw.dirtylist->length ==
(txn->parent ? txn->parent->tw.dirtyroom (txn->parent ? txn->parent->tw.dirtyroom : txn->env->options.dp_limit));
: txn->env->options.dp_limit));
} else { } else {
tASSERT(txn, (txn->flags & MDBX_WRITEMAP) != 0 && !MDBX_AVOID_MSYNC); tASSERT(txn, (txn->flags & MDBX_WRITEMAP) != 0 && !MDBX_AVOID_MSYNC);
} }
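The gcu_loose() hunks above walk the loose-page list, which is threaded through the pages themselves via page_next(), and either append the page numbers to the retired list or sort them and merge them back into the reclaimed list. A toy sketch of that collect-sort-merge step; the struct below is a stand-in for illustration, not the real page_t:

#include <stdio.h>
#include <stdlib.h>

typedef struct toy_page {
  unsigned pgno;
  struct toy_page *next; /* the real page_t keeps this link inside the page payload */
} toy_page;

static int cmp_pgno(const void *a, const void *b) {
  const unsigned x = *(const unsigned *)a, y = *(const unsigned *)b;
  return (x > y) - (x < y);
}

int main(void) {
  toy_page p3 = {7, NULL}, p2 = {42, &p3}, p1 = {13, &p2};
  const toy_page *loose = &p1; /* head of the singly-linked loose-page list */

  unsigned collected[16];
  size_t count = 0;
  for (const toy_page *lp = loose; lp; lp = lp->next)
    collected[count++] = lp->pgno; /* gather page numbers from the list */

  qsort(collected, count, sizeof(collected[0]), cmp_pgno);
  for (size_t i = 0; i < count; ++i)
    printf("%u ", collected[i]); /* 7 13 42 — sorted, ready to merge */
  printf("\n");
  return 0;
}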
@ -276,8 +255,8 @@ static int gcu_retired(MDBX_txn *txn, gcu_t *ctx) {
if (unlikely(err != MDBX_SUCCESS)) if (unlikely(err != MDBX_SUCCESS))
return err; return err;
if (retired_pages_before != MDBX_PNL_GETSIZE(txn->tw.retired_pages)) { if (retired_pages_before != MDBX_PNL_GETSIZE(txn->tw.retired_pages)) {
TRACE("%s: retired-list changed (%zu -> %zu), retry", dbg_prefix(ctx), TRACE("%s: retired-list changed (%zu -> %zu), retry", dbg_prefix(ctx), retired_pages_before,
retired_pages_before, MDBX_PNL_GETSIZE(txn->tw.retired_pages)); MDBX_PNL_GETSIZE(txn->tw.retired_pages));
break; break;
} }
@ -290,19 +269,16 @@ static int gcu_retired(MDBX_txn *txn, gcu_t *ctx) {
if (unlikely(err != MDBX_SUCCESS)) if (unlikely(err != MDBX_SUCCESS))
return err; return err;
if (ctx->retired_stored >= MDBX_PNL_GETSIZE(txn->tw.retired_pages)) { if (ctx->retired_stored >= MDBX_PNL_GETSIZE(txn->tw.retired_pages)) {
TRACE("%s: retired-list changed (%zu -> %zu), retry", dbg_prefix(ctx), TRACE("%s: retired-list changed (%zu -> %zu), retry", dbg_prefix(ctx), retired_pages_before,
retired_pages_before, MDBX_PNL_GETSIZE(txn->tw.retired_pages)); MDBX_PNL_GETSIZE(txn->tw.retired_pages));
break; break;
} }
} }
key.iov_len = sizeof(txnid_t); key.iov_len = sizeof(txnid_t);
key.iov_base = &ctx->bigfoot; key.iov_base = &ctx->bigfoot;
const size_t left = const size_t left = MDBX_PNL_GETSIZE(txn->tw.retired_pages) - ctx->retired_stored;
MDBX_PNL_GETSIZE(txn->tw.retired_pages) - ctx->retired_stored;
const size_t chunk = const size_t chunk =
(left > txn->env->maxgc_large1page && ctx->bigfoot < MAX_TXNID) (left > txn->env->maxgc_large1page && ctx->bigfoot < MAX_TXNID) ? txn->env->maxgc_large1page : left;
? txn->env->maxgc_large1page
: left;
data.iov_len = (chunk + 1) * sizeof(pgno_t); data.iov_len = (chunk + 1) * sizeof(pgno_t);
err = cursor_put(&ctx->cursor, &key, &data, MDBX_RESERVE); err = cursor_put(&ctx->cursor, &key, &data, MDBX_RESERVE);
if (unlikely(err != MDBX_SUCCESS)) if (unlikely(err != MDBX_SUCCESS))
@ -318,9 +294,7 @@ static int gcu_retired(MDBX_txn *txn, gcu_t *ctx) {
#endif /* MDBX_DEBUG && (ENABLE_MEMCHECK || __SANITIZE_ADDRESS__) */ #endif /* MDBX_DEBUG && (ENABLE_MEMCHECK || __SANITIZE_ADDRESS__) */
if (retired_pages_before == MDBX_PNL_GETSIZE(txn->tw.retired_pages)) { if (retired_pages_before == MDBX_PNL_GETSIZE(txn->tw.retired_pages)) {
const size_t at = (is_lifo(txn) == MDBX_PNL_ASCENDING) const size_t at = (is_lifo(txn) == MDBX_PNL_ASCENDING) ? left - chunk : ctx->retired_stored;
? left - chunk
: ctx->retired_stored;
pgno_t *const begin = txn->tw.retired_pages + at; pgno_t *const begin = txn->tw.retired_pages + at;
/* MDBX_PNL_ASCENDING == false && LIFO == false: /* MDBX_PNL_ASCENDING == false && LIFO == false:
* - the larger pgno is at the beginning of retired list * - the larger pgno is at the beginning of retired list
@ -332,15 +306,11 @@ static int gcu_retired(MDBX_txn *txn, gcu_t *ctx) {
*begin = (pgno_t)chunk; *begin = (pgno_t)chunk;
memcpy(data.iov_base, begin, data.iov_len); memcpy(data.iov_base, begin, data.iov_len);
*begin = save; *begin = save;
TRACE("%s: put-retired/bigfoot @ %" PRIaTXN TRACE("%s: put-retired/bigfoot @ %" PRIaTXN " (slice #%u) #%zu [%zu..%zu] of %zu", dbg_prefix(ctx),
" (slice #%u) #%zu [%zu..%zu] of %zu", ctx->bigfoot, (unsigned)(ctx->bigfoot - txn->txnid), chunk, at, at + chunk, retired_pages_before);
dbg_prefix(ctx), ctx->bigfoot,
(unsigned)(ctx->bigfoot - txn->txnid), chunk, at, at + chunk,
retired_pages_before);
} }
ctx->retired_stored += chunk; ctx->retired_stored += chunk;
} while (ctx->retired_stored < MDBX_PNL_GETSIZE(txn->tw.retired_pages) && } while (ctx->retired_stored < MDBX_PNL_GETSIZE(txn->tw.retired_pages) && (++ctx->bigfoot, true));
(++ctx->bigfoot, true));
} while (retired_pages_before != MDBX_PNL_GETSIZE(txn->tw.retired_pages)); } while (retired_pages_before != MDBX_PNL_GETSIZE(txn->tw.retired_pages));
#else #else
/* Write to last page of GC */ /* Write to last page of GC */
@ -369,13 +339,11 @@ static int gcu_retired(MDBX_txn *txn, gcu_t *ctx) {
tASSERT(txn, data.iov_len == MDBX_PNL_SIZEOF(txn->tw.retired_pages)); tASSERT(txn, data.iov_len == MDBX_PNL_SIZEOF(txn->tw.retired_pages));
memcpy(data.iov_base, txn->tw.retired_pages, data.iov_len); memcpy(data.iov_base, txn->tw.retired_pages, data.iov_len);
TRACE("%s: put-retired #%zu @ %" PRIaTXN, dbg_prefix(ctx), TRACE("%s: put-retired #%zu @ %" PRIaTXN, dbg_prefix(ctx), ctx->retired_stored, txn->txnid);
ctx->retired_stored, txn->txnid);
#endif /* MDBX_ENABLE_BIGFOOT */ #endif /* MDBX_ENABLE_BIGFOOT */
if (LOG_ENABLED(MDBX_LOG_EXTRA)) { if (LOG_ENABLED(MDBX_LOG_EXTRA)) {
size_t i = ctx->retired_stored; size_t i = ctx->retired_stored;
DEBUG_EXTRA("txn %" PRIaTXN " root %" PRIaPGNO " num %zu, retired-PNL", DEBUG_EXTRA("txn %" PRIaTXN " root %" PRIaPGNO " num %zu, retired-PNL", txn->txnid, txn->dbs[FREE_DBI].root, i);
txn->txnid, txn->dbs[FREE_DBI].root, i);
for (; i; i--) for (; i; i--)
DEBUG_EXTRA_PRINT(" %" PRIaPGNO, txn->tw.retired_pages[i]); DEBUG_EXTRA_PRINT(" %" PRIaPGNO, txn->tw.retired_pages[i]);
DEBUG_EXTRA_PRINT("%s\n", "."); DEBUG_EXTRA_PRINT("%s\n", ".");
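With MDBX_ENABLE_BIGFOOT the hunks above store the retired list as several chunks under consecutive keys starting at the committing transaction id, each chunk bounded by the per-page capacity; without BIGFOOT the whole list goes into a single record. A small sketch of the slicing loop under assumed sizes:

#include <stdio.h>
#include <stddef.h>

int main(void) {
  const size_t total = 2500;            /* retired entries to store (assumed) */
  const size_t maxgc_large1page = 1018; /* assumed per-chunk capacity */
  unsigned long long bigfoot = 1000;    /* assumed committing txnid, used as the first key */

  size_t stored = 0;
  while (stored < total) {
    const size_t left = total - stored;
    const size_t chunk = (left > maxgc_large1page) ? maxgc_large1page : left;
    printf("key %llu: %zu entries [%zu..%zu)\n", bigfoot, chunk, stored, stored + chunk);
    stored += chunk;
    ++bigfoot; /* the next slice is keyed by the next txnid */
  }
  return 0;
}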
@ -388,8 +356,7 @@ typedef struct gcu_rid_result {
txnid_t rid; txnid_t rid;
} rid_t; } rid_t;
static rid_t get_rid_for_reclaimed(MDBX_txn *txn, gcu_t *ctx, static rid_t get_rid_for_reclaimed(MDBX_txn *txn, gcu_t *ctx, const size_t left) {
const size_t left) {
rid_t r; rid_t r;
if (is_lifo(txn)) { if (is_lifo(txn)) {
if (txn->tw.gc.reclaimed == nullptr) { if (txn->tw.gc.reclaimed == nullptr) {
@ -400,8 +367,7 @@ static rid_t get_rid_for_reclaimed(MDBX_txn *txn, gcu_t *ctx,
} }
} }
if (MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) < txl_max && if (MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) < txl_max &&
left > (MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) - ctx->reused_slot) * left > (MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) - ctx->reused_slot) * txn->env->maxgc_large1page &&
txn->env->maxgc_large1page &&
!ctx->dense) { !ctx->dense) {
/* Need a free slot for saving the list of pages. */
bool need_cleanup = false; bool need_cleanup = false;
@ -411,15 +377,11 @@ static rid_t get_rid_for_reclaimed(MDBX_txn *txn, gcu_t *ctx,
r.err = gc_alloc_ex(&ctx->cursor, 0, ALLOC_RESERVE).err; r.err = gc_alloc_ex(&ctx->cursor, 0, ALLOC_RESERVE).err;
snap_oldest = txn->env->lck->cached_oldest.weak; snap_oldest = txn->env->lck->cached_oldest.weak;
if (likely(r.err == MDBX_SUCCESS)) { if (likely(r.err == MDBX_SUCCESS)) {
TRACE("%s: took @%" PRIaTXN " from GC", dbg_prefix(ctx), TRACE("%s: took @%" PRIaTXN " from GC", dbg_prefix(ctx), MDBX_PNL_LAST(txn->tw.gc.reclaimed));
MDBX_PNL_LAST(txn->tw.gc.reclaimed));
need_cleanup = true; need_cleanup = true;
} }
} while (r.err == MDBX_SUCCESS && } while (r.err == MDBX_SUCCESS && MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) < txl_max &&
MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) < txl_max && left > (MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) - ctx->reused_slot) * txn->env->maxgc_large1page);
left >
(MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) - ctx->reused_slot) *
txn->env->maxgc_large1page);
if (likely(r.err == MDBX_SUCCESS)) { if (likely(r.err == MDBX_SUCCESS)) {
TRACE("%s: got enough from GC.", dbg_prefix(ctx)); TRACE("%s: got enough from GC.", dbg_prefix(ctx));
@ -443,20 +405,16 @@ static rid_t get_rid_for_reclaimed(MDBX_txn *txn, gcu_t *ctx,
/* no reclaimable GC entries, /* no reclaimable GC entries,
* therefore no entries with ID < mdbx_find_oldest(txn) */ * therefore no entries with ID < mdbx_find_oldest(txn) */
txn->tw.gc.last_reclaimed = ctx->rid = snap_oldest; txn->tw.gc.last_reclaimed = ctx->rid = snap_oldest;
TRACE("%s: none recycled yet, set rid to @%" PRIaTXN, dbg_prefix(ctx), TRACE("%s: none recycled yet, set rid to @%" PRIaTXN, dbg_prefix(ctx), ctx->rid);
ctx->rid);
} }
/* There are no records in the GC suitable for recycling,
 * so free ids will be used in reverse order. */
while (MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) < txl_max && while (MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) < txl_max &&
left > left > (MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) - ctx->reused_slot) * txn->env->maxgc_large1page) {
(MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) - ctx->reused_slot) *
txn->env->maxgc_large1page) {
if (unlikely(ctx->rid <= MIN_TXNID)) { if (unlikely(ctx->rid <= MIN_TXNID)) {
ctx->dense = true; ctx->dense = true;
if (unlikely(MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) <= if (unlikely(MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) <= ctx->reused_slot)) {
ctx->reused_slot)) {
NOTICE("** restart: reserve depleted (reused_gc_slot %zu >= " NOTICE("** restart: reserve depleted (reused_gc_slot %zu >= "
"gc.reclaimed %zu)", "gc.reclaimed %zu)",
ctx->reused_slot, MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed)); ctx->reused_slot, MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed));
@ -470,21 +428,16 @@ static rid_t get_rid_for_reclaimed(MDBX_txn *txn, gcu_t *ctx,
MDBX_val key = {&ctx->rid, sizeof(ctx->rid)}, data; MDBX_val key = {&ctx->rid, sizeof(ctx->rid)}, data;
r.err = cursor_seek(&ctx->cursor, &key, &data, MDBX_SET_KEY).err; r.err = cursor_seek(&ctx->cursor, &key, &data, MDBX_SET_KEY).err;
if (unlikely(r.err == MDBX_SUCCESS)) { if (unlikely(r.err == MDBX_SUCCESS)) {
DEBUG("%s: GC's id %" PRIaTXN " is present, going to first", DEBUG("%s: GC's id %" PRIaTXN " is present, going to first", dbg_prefix(ctx), ctx->rid);
dbg_prefix(ctx), ctx->rid);
r.err = outer_first(&ctx->cursor, &key, nullptr); r.err = outer_first(&ctx->cursor, &key, nullptr);
if (unlikely(r.err != MDBX_SUCCESS || if (unlikely(r.err != MDBX_SUCCESS || key.iov_len != sizeof(txnid_t))) {
key.iov_len != sizeof(txnid_t))) { ERROR("%s/%d: %s %u", "MDBX_CORRUPTED", MDBX_CORRUPTED, "invalid GC-key size", (unsigned)key.iov_len);
ERROR("%s/%d: %s %u", "MDBX_CORRUPTED", MDBX_CORRUPTED,
"invalid GC-key size", (unsigned)key.iov_len);
r.err = MDBX_CORRUPTED; r.err = MDBX_CORRUPTED;
goto return_error; goto return_error;
} }
const txnid_t gc_first = unaligned_peek_u64(4, key.iov_base); const txnid_t gc_first = unaligned_peek_u64(4, key.iov_base);
if (unlikely(gc_first <= INITIAL_TXNID)) { if (unlikely(gc_first <= INITIAL_TXNID)) {
NOTICE("%s: no free GC's id(s) less than %" PRIaTXN NOTICE("%s: no free GC's id(s) less than %" PRIaTXN " (going dense-mode)", dbg_prefix(ctx), ctx->rid);
" (going dense-mode)",
dbg_prefix(ctx), ctx->rid);
ctx->dense = true; ctx->dense = true;
goto return_restart; goto return_restart;
} }
@ -501,18 +454,15 @@ static rid_t get_rid_for_reclaimed(MDBX_txn *txn, gcu_t *ctx,
* with less fragmentation. */ * with less fragmentation. */
need_cleanup = true; need_cleanup = true;
else else
ctx->cleaned_slot += ctx->cleaned_slot += 1 /* mark cleanup is not needed for added slot. */;
1 /* mark cleanup is not needed for added slot. */;
TRACE("%s: append @%" PRIaTXN TRACE("%s: append @%" PRIaTXN " to lifo-reclaimed, cleaned-gc-slot = %zu", dbg_prefix(ctx), ctx->rid,
" to lifo-reclaimed, cleaned-gc-slot = %zu", ctx->cleaned_slot);
dbg_prefix(ctx), ctx->rid, ctx->cleaned_slot);
} }
if (need_cleanup) { if (need_cleanup) {
if (ctx->cleaned_slot) { if (ctx->cleaned_slot) {
TRACE("%s: restart to clear and re-create GC entries", TRACE("%s: restart to clear and re-create GC entries", dbg_prefix(ctx));
dbg_prefix(ctx));
goto return_restart; goto return_restart;
} }
goto return_continue; goto return_continue;
@ -522,8 +472,7 @@ static rid_t get_rid_for_reclaimed(MDBX_txn *txn, gcu_t *ctx,
const size_t i = MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) - ctx->reused_slot; const size_t i = MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) - ctx->reused_slot;
tASSERT(txn, i > 0 && i <= MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed)); tASSERT(txn, i > 0 && i <= MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed));
r.rid = txn->tw.gc.reclaimed[i]; r.rid = txn->tw.gc.reclaimed[i];
TRACE("%s: take @%" PRIaTXN " from lifo-reclaimed[%zu]", dbg_prefix(ctx), TRACE("%s: take @%" PRIaTXN " from lifo-reclaimed[%zu]", dbg_prefix(ctx), r.rid, i);
r.rid, i);
} else { } else {
tASSERT(txn, txn->tw.gc.reclaimed == nullptr); tASSERT(txn, txn->tw.gc.reclaimed == nullptr);
if (unlikely(ctx->rid == 0)) { if (unlikely(ctx->rid == 0)) {
@ -532,8 +481,7 @@ static rid_t get_rid_for_reclaimed(MDBX_txn *txn, gcu_t *ctx,
r.err = outer_first(&ctx->cursor, &key, nullptr); r.err = outer_first(&ctx->cursor, &key, nullptr);
if (likely(r.err == MDBX_SUCCESS)) { if (likely(r.err == MDBX_SUCCESS)) {
if (unlikely(key.iov_len != sizeof(txnid_t))) { if (unlikely(key.iov_len != sizeof(txnid_t))) {
ERROR("%s/%d: %s %u", "MDBX_CORRUPTED", MDBX_CORRUPTED, ERROR("%s/%d: %s %u", "MDBX_CORRUPTED", MDBX_CORRUPTED, "invalid GC-key size", (unsigned)key.iov_len);
"invalid GC-key size", (unsigned)key.iov_len);
r.err = MDBX_CORRUPTED; r.err = MDBX_CORRUPTED;
goto return_error; goto return_error;
} }
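When the GC holds nothing that can be recycled, get_rid_for_reclaimed() in the hunks above falls back to keying reservations with unused transaction ids taken downwards, and switches to dense mode once the id space runs out. A simplified sketch of that fallback; all numbers here are assumptions for illustration:

#include <stdio.h>

int main(void) {
  unsigned long long rid = 1200;       /* assumed: no GC entry exists below this id */
  const unsigned long long MIN_ID = 1; /* lowest usable id in this toy model */
  unsigned slots_needed = 4;

  while (slots_needed && rid > MIN_ID) {
    --rid; /* take the previous unused id for the next reservation slot */
    printf("reserve GC slot @%llu\n", rid);
    --slots_needed;
  }
  if (slots_needed)
    printf("id space exhausted: switch to dense mode\n");
  return 0;
}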
@ -600,18 +548,15 @@ retry:
ctx->loop += !(ctx->prev_first_unallocated > txn->geo.first_unallocated); ctx->loop += !(ctx->prev_first_unallocated > txn->geo.first_unallocated);
TRACE(">> restart, loop %u", ctx->loop); TRACE(">> restart, loop %u", ctx->loop);
tASSERT(txn, pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated - tASSERT(txn, pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
MDBX_ENABLE_REFUND));
tASSERT(txn, dpl_check(txn)); tASSERT(txn, dpl_check(txn));
if (unlikely(/* paranoia */ ctx->loop > ((MDBX_DEBUG > 0) ? 12 : 42))) { if (unlikely(/* paranoia */ ctx->loop > ((MDBX_DEBUG > 0) ? 12 : 42))) {
ERROR("txn #%" PRIaTXN " too more loops %u, bailout", txn->txnid, ERROR("txn #%" PRIaTXN " too more loops %u, bailout", txn->txnid, ctx->loop);
ctx->loop);
rc = MDBX_PROBLEM; rc = MDBX_PROBLEM;
goto bailout; goto bailout;
} }
if (unlikely(ctx->dense || if (unlikely(ctx->dense || ctx->prev_first_unallocated > txn->geo.first_unallocated)) {
ctx->prev_first_unallocated > txn->geo.first_unallocated)) {
rc = clean_stored_retired(txn, ctx); rc = clean_stored_retired(txn, ctx);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
goto bailout; goto bailout;
@ -630,13 +575,10 @@ retry:
/* Come back here after each Put() in case retired-list changed */ /* Come back here after each Put() in case retired-list changed */
TRACE("%s", " >> continue"); TRACE("%s", " >> continue");
tASSERT(txn, tASSERT(txn, pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated -
MDBX_ENABLE_REFUND));
MDBX_val key, data; MDBX_val key, data;
if (is_lifo(txn)) { if (is_lifo(txn)) {
if (ctx->cleaned_slot < if (ctx->cleaned_slot < (txn->tw.gc.reclaimed ? MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) : 0)) {
(txn->tw.gc.reclaimed ? MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) : 0)) {
ctx->reserved = 0; ctx->reserved = 0;
ctx->cleaned_slot = 0; ctx->cleaned_slot = 0;
ctx->reused_slot = 0; ctx->reused_slot = 0;
@ -644,8 +586,7 @@ retry:
/* LY: cleanup reclaimed records. */ /* LY: cleanup reclaimed records. */
do { do {
ctx->cleaned_id = txn->tw.gc.reclaimed[++ctx->cleaned_slot]; ctx->cleaned_id = txn->tw.gc.reclaimed[++ctx->cleaned_slot];
tASSERT(txn, ctx->cleaned_slot > 0 && tASSERT(txn, ctx->cleaned_slot > 0 && ctx->cleaned_id <= env->lck->cached_oldest.weak);
ctx->cleaned_id <= env->lck->cached_oldest.weak);
key.iov_base = &ctx->cleaned_id; key.iov_base = &ctx->cleaned_id;
key.iov_len = sizeof(ctx->cleaned_id); key.iov_len = sizeof(ctx->cleaned_id);
rc = cursor_seek(&ctx->cursor, &key, nullptr, MDBX_SET).err; rc = cursor_seek(&ctx->cursor, &key, nullptr, MDBX_SET).err;
@ -657,8 +598,7 @@ retry:
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
goto bailout; goto bailout;
tASSERT(txn, ctx->cleaned_id <= env->lck->cached_oldest.weak); tASSERT(txn, ctx->cleaned_id <= env->lck->cached_oldest.weak);
TRACE("%s: cleanup-reclaimed-id [%zu]%" PRIaTXN, dbg_prefix(ctx), TRACE("%s: cleanup-reclaimed-id [%zu]%" PRIaTXN, dbg_prefix(ctx), ctx->cleaned_slot, ctx->cleaned_id);
ctx->cleaned_slot, ctx->cleaned_id);
tASSERT(txn, *txn->cursors == &ctx->cursor); tASSERT(txn, *txn->cursors == &ctx->cursor);
rc = cursor_del(&ctx->cursor, 0); rc = cursor_del(&ctx->cursor, 0);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
@ -668,8 +608,7 @@ retry:
} }
} else { } else {
/* Delete the remaining records that were taken out of the GC. */
while (txn->tw.gc.last_reclaimed && while (txn->tw.gc.last_reclaimed && ctx->cleaned_id <= txn->tw.gc.last_reclaimed) {
ctx->cleaned_id <= txn->tw.gc.last_reclaimed) {
rc = outer_first(&ctx->cursor, &key, nullptr); rc = outer_first(&ctx->cursor, &key, nullptr);
if (rc == MDBX_NOTFOUND) { if (rc == MDBX_NOTFOUND) {
ctx->cleaned_id = txn->tw.gc.last_reclaimed + 1; ctx->cleaned_id = txn->tw.gc.last_reclaimed + 1;
@ -680,10 +619,8 @@ retry:
} }
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
goto bailout; goto bailout;
if (!MDBX_DISABLE_VALIDATION && if (!MDBX_DISABLE_VALIDATION && unlikely(key.iov_len != sizeof(txnid_t))) {
unlikely(key.iov_len != sizeof(txnid_t))) { ERROR("%s/%d: %s %u", "MDBX_CORRUPTED", MDBX_CORRUPTED, "invalid GC-key size", (unsigned)key.iov_len);
ERROR("%s/%d: %s %u", "MDBX_CORRUPTED", MDBX_CORRUPTED,
"invalid GC-key size", (unsigned)key.iov_len);
rc = MDBX_CORRUPTED; rc = MDBX_CORRUPTED;
goto bailout; goto bailout;
} }
@ -700,8 +637,7 @@ retry:
goto bailout; goto bailout;
tASSERT(txn, ctx->cleaned_id <= txn->tw.gc.last_reclaimed); tASSERT(txn, ctx->cleaned_id <= txn->tw.gc.last_reclaimed);
tASSERT(txn, ctx->cleaned_id <= env->lck->cached_oldest.weak); tASSERT(txn, ctx->cleaned_id <= env->lck->cached_oldest.weak);
TRACE("%s: cleanup-reclaimed-id %" PRIaTXN, dbg_prefix(ctx), TRACE("%s: cleanup-reclaimed-id %" PRIaTXN, dbg_prefix(ctx), ctx->cleaned_id);
ctx->cleaned_id);
tASSERT(txn, *txn->cursors == &ctx->cursor); tASSERT(txn, *txn->cursors == &ctx->cursor);
rc = cursor_del(&ctx->cursor, 0); rc = cursor_del(&ctx->cursor, 0);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
@ -709,9 +645,7 @@ retry:
} }
} }
tASSERT(txn, tASSERT(txn, pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated -
MDBX_ENABLE_REFUND));
tASSERT(txn, dpl_check(txn)); tASSERT(txn, dpl_check(txn));
if (AUDIT_ENABLED()) { if (AUDIT_ENABLED()) {
rc = audit_ex(txn, ctx->retired_stored, false); rc = audit_ex(txn, ctx->retired_stored, false);
@ -721,9 +655,7 @@ retry:
/* return suitable into unallocated space */ /* return suitable into unallocated space */
if (txn_refund(txn)) { if (txn_refund(txn)) {
tASSERT(txn, tASSERT(txn, pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated -
MDBX_ENABLE_REFUND));
if (AUDIT_ENABLED()) { if (AUDIT_ENABLED()) {
rc = audit_ex(txn, ctx->retired_stored, false); rc = audit_ex(txn, ctx->retired_stored, false);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
@ -743,10 +675,9 @@ retry:
} }
if (unlikely(ctx->reserved > MDBX_PNL_GETSIZE(txn->tw.relist)) && if (unlikely(ctx->reserved > MDBX_PNL_GETSIZE(txn->tw.relist)) &&
(ctx->loop < 5 || ctx->reserved - MDBX_PNL_GETSIZE(txn->tw.relist) > (ctx->loop < 5 || ctx->reserved - MDBX_PNL_GETSIZE(txn->tw.relist) > env->maxgc_large1page / 2)) {
env->maxgc_large1page / 2)) { TRACE("%s: reclaimed-list changed %zu -> %zu, retry", dbg_prefix(ctx), ctx->amount,
TRACE("%s: reclaimed-list changed %zu -> %zu, retry", dbg_prefix(ctx), MDBX_PNL_GETSIZE(txn->tw.relist));
ctx->amount, MDBX_PNL_GETSIZE(txn->tw.relist));
ctx->reserve_adj += ctx->reserved - MDBX_PNL_GETSIZE(txn->tw.relist); ctx->reserve_adj += ctx->reserved - MDBX_PNL_GETSIZE(txn->tw.relist);
goto retry; goto retry;
} }
@ -760,9 +691,7 @@ retry:
continue; continue;
} }
tASSERT(txn, tASSERT(txn, pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated -
MDBX_ENABLE_REFUND));
tASSERT(txn, txn->tw.loose_count == 0); tASSERT(txn, txn->tw.loose_count == 0);
TRACE("%s", " >> reserving"); TRACE("%s", " >> reserving");
@ -776,8 +705,7 @@ retry:
"lifo-reclaimed-slots %zu, " "lifo-reclaimed-slots %zu, "
"reused-gc-slots %zu", "reused-gc-slots %zu",
dbg_prefix(ctx), ctx->amount, ctx->reserved, ctx->reserve_adj, left, dbg_prefix(ctx), ctx->amount, ctx->reserved, ctx->reserve_adj, left,
txn->tw.gc.reclaimed ? MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) : 0, txn->tw.gc.reclaimed ? MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) : 0, ctx->reused_slot);
ctx->reused_slot);
if (0 >= (intptr_t)left) if (0 >= (intptr_t)left)
break; break;
@ -795,59 +723,46 @@ retry:
size_t chunk = left; size_t chunk = left;
if (unlikely(left > env->maxgc_large1page)) { if (unlikely(left > env->maxgc_large1page)) {
const size_t avail_gc_slots = const size_t avail_gc_slots = txn->tw.gc.reclaimed ? MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) - ctx->reused_slot + 1
txn->tw.gc.reclaimed : (ctx->rid < INT16_MAX) ? (size_t)ctx->rid
? MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) - ctx->reused_slot + 1 : INT16_MAX;
: (ctx->rid < INT16_MAX) ? (size_t)ctx->rid
: INT16_MAX;
if (likely(avail_gc_slots > 1)) { if (likely(avail_gc_slots > 1)) {
#if MDBX_ENABLE_BIGFOOT #if MDBX_ENABLE_BIGFOOT
chunk = env->maxgc_large1page; chunk = env->maxgc_large1page;
if (avail_gc_slots < INT16_MAX && if (avail_gc_slots < INT16_MAX && unlikely(left > env->maxgc_large1page * avail_gc_slots))
unlikely(left > env->maxgc_large1page * avail_gc_slots))
/* TODO: We could look at what lengths of page runs are present in relist
 * and try to cut chunks of the matching size.
 * The point is to avoid splitting runs of pages
 * and to use them whole instead. */
chunk = env->maxgc_large1page + chunk = env->maxgc_large1page + left / (env->maxgc_large1page * avail_gc_slots) * env->maxgc_large1page;
left / (env->maxgc_large1page * avail_gc_slots) *
env->maxgc_large1page;
#else #else
if (chunk < env->maxgc_large1page * 2) if (chunk < env->maxgc_large1page * 2)
chunk /= 2; chunk /= 2;
else { else {
const size_t prefer_max_scatter = 257; const size_t prefer_max_scatter = 257;
const size_t threshold = const size_t threshold =
env->maxgc_large1page * ((avail_gc_slots < prefer_max_scatter) env->maxgc_large1page * ((avail_gc_slots < prefer_max_scatter) ? avail_gc_slots : prefer_max_scatter);
? avail_gc_slots
: prefer_max_scatter);
if (left < threshold) if (left < threshold)
chunk = env->maxgc_large1page; chunk = env->maxgc_large1page;
else { else {
const size_t tail = left - threshold + env->maxgc_large1page + 1; const size_t tail = left - threshold + env->maxgc_large1page + 1;
size_t span = 1; size_t span = 1;
size_t avail = ((pgno2bytes(env, span) - PAGEHDRSZ) / size_t avail = ((pgno2bytes(env, span) - PAGEHDRSZ) / sizeof(pgno_t)) /* - 1 + span */;
sizeof(pgno_t)) /* - 1 + span */;
if (tail > avail) { if (tail > avail) {
for (size_t i = ctx->amount - span; i > 0; --i) { for (size_t i = ctx->amount - span; i > 0; --i) {
if (MDBX_PNL_ASCENDING ? (txn->tw.relist[i] + span) if (MDBX_PNL_ASCENDING ? (txn->tw.relist[i] + span)
: (txn->tw.relist[i] - span) == : (txn->tw.relist[i] - span) == txn->tw.relist[i + span]) {
txn->tw.relist[i + span]) {
span += 1; span += 1;
avail = avail = ((pgno2bytes(env, span) - PAGEHDRSZ) / sizeof(pgno_t)) - 1 + span;
((pgno2bytes(env, span) - PAGEHDRSZ) / sizeof(pgno_t)) -
1 + span;
if (avail >= tail) if (avail >= tail)
break; break;
} }
} }
} }
chunk = (avail >= tail) ? tail - span chunk = (avail >= tail) ? tail - span
: (avail_gc_slots > 3 && : (avail_gc_slots > 3 && ctx->reused_slot < prefer_max_scatter - 3) ? avail - span
ctx->reused_slot < prefer_max_scatter - 3) : tail;
? avail - span
: tail;
} }
} }
#endif /* MDBX_ENABLE_BIGFOOT */ #endif /* MDBX_ENABLE_BIGFOOT */
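In the non-BIGFOOT branch above the chunk size follows from how many pgno_t entries fit into a reservation spanning several consecutive pages: the payload of the spanned pages minus one header, minus the length word, plus the span itself. A standalone sketch of that capacity formula with assumed page and header sizes:

#include <stdio.h>
#include <stddef.h>

int main(void) {
  const size_t pagesize = 4096; /* assumed DB page size */
  const size_t hdrsz = 20;      /* assumed page-header size, stands in for PAGEHDRSZ */
  const size_t pgno_size = 4;   /* assumed sizeof(pgno_t) */

  for (size_t span = 1; span <= 4; ++span) {
    /* slots available in a large record covering `span` pages,
     * minus the length word, plus the span pages themselves */
    const size_t avail = (pagesize * span - hdrsz) / pgno_size - 1 + span;
    printf("span %zu -> avail %zu entries\n", span, avail);
  }
  return 0;
}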
@ -859,57 +774,43 @@ retry:
"%" PRIaTXN, "%" PRIaTXN,
dbg_prefix(ctx), ctx->rid, ctx->reused_slot, reservation_gc_id); dbg_prefix(ctx), ctx->rid, ctx->reused_slot, reservation_gc_id);
TRACE("%s: chunk %zu, gc-per-ovpage %u", dbg_prefix(ctx), chunk, TRACE("%s: chunk %zu, gc-per-ovpage %u", dbg_prefix(ctx), chunk, env->maxgc_large1page);
env->maxgc_large1page);
tASSERT(txn, reservation_gc_id <= env->lck->cached_oldest.weak); tASSERT(txn, reservation_gc_id <= env->lck->cached_oldest.weak);
if (unlikely(reservation_gc_id < MIN_TXNID || if (unlikely(reservation_gc_id < MIN_TXNID ||
reservation_gc_id > reservation_gc_id > atomic_load64(&env->lck->cached_oldest, mo_Relaxed))) {
atomic_load64(&env->lck->cached_oldest, mo_Relaxed))) { ERROR("** internal error (reservation_gc_id %" PRIaTXN ")", reservation_gc_id);
ERROR("** internal error (reservation_gc_id %" PRIaTXN ")",
reservation_gc_id);
rc = MDBX_PROBLEM; rc = MDBX_PROBLEM;
goto bailout; goto bailout;
} }
tASSERT(txn, tASSERT(txn, reservation_gc_id >= MIN_TXNID && reservation_gc_id <= MAX_TXNID);
reservation_gc_id >= MIN_TXNID && reservation_gc_id <= MAX_TXNID);
key.iov_len = sizeof(reservation_gc_id); key.iov_len = sizeof(reservation_gc_id);
key.iov_base = (void *)&reservation_gc_id; key.iov_base = (void *)&reservation_gc_id;
data.iov_len = (chunk + 1) * sizeof(pgno_t); data.iov_len = (chunk + 1) * sizeof(pgno_t);
TRACE("%s: reserve %zu [%zu...%zu) @%" PRIaTXN, dbg_prefix(ctx), chunk, TRACE("%s: reserve %zu [%zu...%zu) @%" PRIaTXN, dbg_prefix(ctx), chunk, ctx->reserved + 1,
ctx->reserved + 1, ctx->reserved + chunk + 1, reservation_gc_id); ctx->reserved + chunk + 1, reservation_gc_id);
prepare_backlog(txn, ctx); prepare_backlog(txn, ctx);
rc = cursor_put(&ctx->cursor, &key, &data, MDBX_RESERVE | MDBX_NOOVERWRITE); rc = cursor_put(&ctx->cursor, &key, &data, MDBX_RESERVE | MDBX_NOOVERWRITE);
tASSERT(txn, tASSERT(txn, pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated -
MDBX_ENABLE_REFUND));
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
goto bailout; goto bailout;
zeroize_reserved(env, data); zeroize_reserved(env, data);
ctx->reserved += chunk; ctx->reserved += chunk;
TRACE("%s: reserved %zu (+%zu), continue", dbg_prefix(ctx), ctx->reserved, TRACE("%s: reserved %zu (+%zu), continue", dbg_prefix(ctx), ctx->reserved, chunk);
chunk);
continue; continue;
} }
tASSERT( tASSERT(txn, ctx->cleaned_slot == (txn->tw.gc.reclaimed ? MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) : 0));
txn,
ctx->cleaned_slot ==
(txn->tw.gc.reclaimed ? MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) : 0));
TRACE("%s", " >> filling"); TRACE("%s", " >> filling");
/* Fill in the reserved records */ /* Fill in the reserved records */
size_t excess_slots = 0; size_t excess_slots = 0;
ctx->fill_idx = ctx->fill_idx = txn->tw.gc.reclaimed ? MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) - ctx->reused_slot : ctx->reused_slot;
txn->tw.gc.reclaimed
? MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) - ctx->reused_slot
: ctx->reused_slot;
rc = MDBX_SUCCESS; rc = MDBX_SUCCESS;
tASSERT(txn, pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated - tASSERT(txn, pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
MDBX_ENABLE_REFUND));
tASSERT(txn, dpl_check(txn)); tASSERT(txn, dpl_check(txn));
if (ctx->amount) { if (ctx->amount) {
MDBX_val key, data; MDBX_val key, data;
@ -930,20 +831,17 @@ retry:
while (true) { while (true) {
txnid_t fill_gc_id; txnid_t fill_gc_id;
TRACE("%s: left %zu of %zu", dbg_prefix(ctx), left, TRACE("%s: left %zu of %zu", dbg_prefix(ctx), left, MDBX_PNL_GETSIZE(txn->tw.relist));
MDBX_PNL_GETSIZE(txn->tw.relist));
if (txn->tw.gc.reclaimed == nullptr) { if (txn->tw.gc.reclaimed == nullptr) {
tASSERT(txn, is_lifo(txn) == 0); tASSERT(txn, is_lifo(txn) == 0);
fill_gc_id = fill_gc_id = key.iov_base ? unaligned_peek_u64(4, key.iov_base) : MIN_TXNID;
key.iov_base ? unaligned_peek_u64(4, key.iov_base) : MIN_TXNID;
if (ctx->fill_idx == 0 || fill_gc_id > txn->tw.gc.last_reclaimed) { if (ctx->fill_idx == 0 || fill_gc_id > txn->tw.gc.last_reclaimed) {
if (!left) if (!left)
break; break;
NOTICE("** restart: reserve depleted (fill_idx %zu, fill_id %" PRIaTXN NOTICE("** restart: reserve depleted (fill_idx %zu, fill_id %" PRIaTXN " > last_reclaimed %" PRIaTXN
" > last_reclaimed %" PRIaTXN ", left %zu", ", left %zu",
ctx->fill_idx, fill_gc_id, txn->tw.gc.last_reclaimed, left); ctx->fill_idx, fill_gc_id, txn->tw.gc.last_reclaimed, left);
ctx->reserve_adj = ctx->reserve_adj = (ctx->reserve_adj > left) ? ctx->reserve_adj - left : 0;
(ctx->reserve_adj > left) ? ctx->reserve_adj - left : 0;
goto retry; goto retry;
} }
ctx->fill_idx -= 1; ctx->fill_idx -= 1;
@ -955,26 +853,20 @@ retry:
NOTICE("** restart: reserve depleted (fill_idx %zu >= " NOTICE("** restart: reserve depleted (fill_idx %zu >= "
"gc.reclaimed %zu, left %zu", "gc.reclaimed %zu, left %zu",
ctx->fill_idx, MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed), left); ctx->fill_idx, MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed), left);
ctx->reserve_adj = ctx->reserve_adj = (ctx->reserve_adj > left) ? ctx->reserve_adj - left : 0;
(ctx->reserve_adj > left) ? ctx->reserve_adj - left : 0;
goto retry; goto retry;
} }
ctx->fill_idx += 1; ctx->fill_idx += 1;
fill_gc_id = txn->tw.gc.reclaimed[ctx->fill_idx]; fill_gc_id = txn->tw.gc.reclaimed[ctx->fill_idx];
TRACE("%s: seek-reservation @%" PRIaTXN " at gc.reclaimed[%zu]", TRACE("%s: seek-reservation @%" PRIaTXN " at gc.reclaimed[%zu]", dbg_prefix(ctx), fill_gc_id, ctx->fill_idx);
dbg_prefix(ctx), fill_gc_id, ctx->fill_idx);
key.iov_base = &fill_gc_id; key.iov_base = &fill_gc_id;
key.iov_len = sizeof(fill_gc_id); key.iov_len = sizeof(fill_gc_id);
rc = cursor_seek(&ctx->cursor, &key, &data, MDBX_SET_KEY).err; rc = cursor_seek(&ctx->cursor, &key, &data, MDBX_SET_KEY).err;
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
goto bailout; goto bailout;
} }
tASSERT(txn, tASSERT(txn, ctx->cleaned_slot == (txn->tw.gc.reclaimed ? MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) : 0));
ctx->cleaned_slot == (txn->tw.gc.reclaimed tASSERT(txn, fill_gc_id > 0 && fill_gc_id <= env->lck->cached_oldest.weak);
? MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed)
: 0));
tASSERT(txn,
fill_gc_id > 0 && fill_gc_id <= env->lck->cached_oldest.weak);
key.iov_base = &fill_gc_id; key.iov_base = &fill_gc_id;
key.iov_len = sizeof(fill_gc_id); key.iov_len = sizeof(fill_gc_id);
@ -983,14 +875,12 @@ retry:
if (unlikely(chunk > left)) { if (unlikely(chunk > left)) {
const size_t delta = chunk - left; const size_t delta = chunk - left;
excess += delta; excess += delta;
TRACE("%s: chunk %zu > left %zu, @%" PRIaTXN, dbg_prefix(ctx), chunk, TRACE("%s: chunk %zu > left %zu, @%" PRIaTXN, dbg_prefix(ctx), chunk, left, fill_gc_id);
left, fill_gc_id);
if (!left) { if (!left) {
excess_slots += 1; excess_slots += 1;
goto next; goto next;
} }
if ((ctx->loop < 5 && delta > (ctx->loop / 2)) || if ((ctx->loop < 5 && delta > (ctx->loop / 2)) || delta > env->maxgc_large1page)
delta > env->maxgc_large1page)
data.iov_len = (left + 1) * sizeof(pgno_t); data.iov_len = (left + 1) * sizeof(pgno_t);
chunk = left; chunk = left;
} }
@ -999,29 +889,23 @@ retry:
goto bailout; goto bailout;
zeroize_reserved(env, data); zeroize_reserved(env, data);
if (unlikely(txn->tw.loose_count || if (unlikely(txn->tw.loose_count || ctx->amount != MDBX_PNL_GETSIZE(txn->tw.relist))) {
ctx->amount != MDBX_PNL_GETSIZE(txn->tw.relist))) { NOTICE("** restart: reclaimed-list changed (%zu -> %zu, loose +%zu)", ctx->amount,
NOTICE("** restart: reclaimed-list changed (%zu -> %zu, loose +%zu)", MDBX_PNL_GETSIZE(txn->tw.relist), txn->tw.loose_count);
ctx->amount, MDBX_PNL_GETSIZE(txn->tw.relist),
txn->tw.loose_count);
if (ctx->loop < 5 || (ctx->loop > 10 && (ctx->loop & 1))) if (ctx->loop < 5 || (ctx->loop > 10 && (ctx->loop & 1)))
goto retry_clean_adj; goto retry_clean_adj;
goto retry; goto retry;
} }
if (unlikely(txn->tw.gc.reclaimed if (unlikely(txn->tw.gc.reclaimed ? ctx->cleaned_slot < MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed)
? ctx->cleaned_slot < : ctx->cleaned_id < txn->tw.gc.last_reclaimed)) {
MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed)
: ctx->cleaned_id < txn->tw.gc.last_reclaimed)) {
NOTICE("%s", "** restart: reclaimed-slots changed"); NOTICE("%s", "** restart: reclaimed-slots changed");
goto retry; goto retry;
} }
if (unlikely(ctx->retired_stored != if (unlikely(ctx->retired_stored != MDBX_PNL_GETSIZE(txn->tw.retired_pages))) {
MDBX_PNL_GETSIZE(txn->tw.retired_pages))) { tASSERT(txn, ctx->retired_stored < MDBX_PNL_GETSIZE(txn->tw.retired_pages));
tASSERT(txn, NOTICE("** restart: retired-list growth (%zu -> %zu)", ctx->retired_stored,
ctx->retired_stored < MDBX_PNL_GETSIZE(txn->tw.retired_pages)); MDBX_PNL_GETSIZE(txn->tw.retired_pages));
NOTICE("** restart: retired-list growth (%zu -> %zu)",
ctx->retired_stored, MDBX_PNL_GETSIZE(txn->tw.retired_pages));
goto retry; goto retry;
} }
@ -1030,9 +914,8 @@ retry:
pgno_t *src = MDBX_PNL_BEGIN(txn->tw.relist) + left - chunk; pgno_t *src = MDBX_PNL_BEGIN(txn->tw.relist) + left - chunk;
memcpy(dst, src, chunk * sizeof(pgno_t)); memcpy(dst, src, chunk * sizeof(pgno_t));
pgno_t *from = src, *to = src + chunk; pgno_t *from = src, *to = src + chunk;
TRACE("%s: fill %zu [ %zu:%" PRIaPGNO "...%zu:%" PRIaPGNO "] @%" PRIaTXN, TRACE("%s: fill %zu [ %zu:%" PRIaPGNO "...%zu:%" PRIaPGNO "] @%" PRIaTXN, dbg_prefix(ctx), chunk,
dbg_prefix(ctx), chunk, from - txn->tw.relist, from[0], from - txn->tw.relist, from[0], to - txn->tw.relist, to[-1], fill_gc_id);
to - txn->tw.relist, to[-1], fill_gc_id);
left -= chunk; left -= chunk;
if (AUDIT_ENABLED()) { if (AUDIT_ENABLED()) {
@ -1063,16 +946,14 @@ retry:
while (n >= env->maxgc_large1page) while (n >= env->maxgc_large1page)
adj -= n /= env->maxgc_large1page; adj -= n /= env->maxgc_large1page;
ctx->reserve_adj += adj; ctx->reserve_adj += adj;
TRACE("%s: extra %zu reserved space, adj +%zu (%zu)", dbg_prefix(ctx), TRACE("%s: extra %zu reserved space, adj +%zu (%zu)", dbg_prefix(ctx), excess, adj, ctx->reserve_adj);
excess, adj, ctx->reserve_adj);
} }
} }
tASSERT(txn, rc == MDBX_SUCCESS); tASSERT(txn, rc == MDBX_SUCCESS);
if (unlikely(txn->tw.loose_count != 0 || if (unlikely(txn->tw.loose_count != 0 || ctx->amount != MDBX_PNL_GETSIZE(txn->tw.relist))) {
ctx->amount != MDBX_PNL_GETSIZE(txn->tw.relist))) { NOTICE("** restart: got %zu loose pages (reclaimed-list %zu -> %zu)", txn->tw.loose_count, ctx->amount,
NOTICE("** restart: got %zu loose pages (reclaimed-list %zu -> %zu)", MDBX_PNL_GETSIZE(txn->tw.relist));
txn->tw.loose_count, ctx->amount, MDBX_PNL_GETSIZE(txn->tw.relist));
goto retry; goto retry;
} }
@ -1080,14 +961,12 @@ retry:
const bool will_retry = ctx->loop < 5 || excess_slots > 1; const bool will_retry = ctx->loop < 5 || excess_slots > 1;
NOTICE("** %s: reserve excess (excess-slots %zu, filled-slot %zu, adj %zu, " NOTICE("** %s: reserve excess (excess-slots %zu, filled-slot %zu, adj %zu, "
"loop %u)", "loop %u)",
will_retry ? "restart" : "ignore", excess_slots, ctx->fill_idx, will_retry ? "restart" : "ignore", excess_slots, ctx->fill_idx, ctx->reserve_adj, ctx->loop);
ctx->reserve_adj, ctx->loop);
if (will_retry) if (will_retry)
goto retry; goto retry;
} }
tASSERT(txn, txn->tw.gc.reclaimed == nullptr || tASSERT(txn, txn->tw.gc.reclaimed == nullptr || ctx->cleaned_slot == MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed));
ctx->cleaned_slot == MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed));
bailout: bailout:
txn->cursors[FREE_DBI] = ctx->cursor.next; txn->cursors[FREE_DBI] = ctx->cursor.next;
@ -34,8 +34,7 @@ static inline int gc_update_init(MDBX_txn *txn, gcu_t *ctx) {
#define ALLOC_DEFAULT 0 #define ALLOC_DEFAULT 0
#define ALLOC_RESERVE 1 #define ALLOC_RESERVE 1
#define ALLOC_UNIMPORTANT 2 #define ALLOC_UNIMPORTANT 2
MDBX_INTERNAL pgr_t gc_alloc_ex(const MDBX_cursor *const mc, const size_t num, MDBX_INTERNAL pgr_t gc_alloc_ex(const MDBX_cursor *const mc, const size_t num, uint8_t flags);
uint8_t flags);
MDBX_INTERNAL pgr_t gc_alloc_single(const MDBX_cursor *const mc); MDBX_INTERNAL pgr_t gc_alloc_single(const MDBX_cursor *const mc);
MDBX_INTERNAL int gc_update(MDBX_txn *txn, gcu_t *ctx); MDBX_INTERNAL int gc_update(MDBX_txn *txn, gcu_t *ctx);
@ -122,8 +122,7 @@ extern void __gmon_start__(void) __attribute__((__weak__));
#endif /* ENABLE_GPROF */ #endif /* ENABLE_GPROF */
MDBX_EXCLUDE_FOR_GPROF MDBX_EXCLUDE_FOR_GPROF
__cold static __attribute__((__constructor__)) void __cold static __attribute__((__constructor__)) void mdbx_global_constructor(void) {
mdbx_global_constructor(void) {
#ifdef ENABLE_GPROF #ifdef ENABLE_GPROF
if (!&__gmon_start__) if (!&__gmon_start__)
monstartup((uintptr_t)&_init, (uintptr_t)&_fini); monstartup((uintptr_t)&_init, (uintptr_t)&_fini);
@ -154,9 +153,8 @@ mdbx_global_constructor(void) {
* So, the REQUIREMENTS for this code: * So, the REQUIREMENTS for this code:
* 1. MUST detect WSL1 without false-negatives. * 1. MUST detect WSL1 without false-negatives.
* 2. DESIRABLE detect WSL2 but without the risk of violating the first. */ * 2. DESIRABLE detect WSL2 but without the risk of violating the first. */
globals.running_on_WSL1 = probe_for_WSL(buffer.version) == 1 || globals.running_on_WSL1 =
probe_for_WSL(buffer.sysname) == 1 || probe_for_WSL(buffer.version) == 1 || probe_for_WSL(buffer.sysname) == 1 || probe_for_WSL(buffer.release) == 1;
probe_for_WSL(buffer.release) == 1;
} }
#endif /* Linux */ #endif /* Linux */
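The constructor above only needs a conservative answer: WSL1 must never be missed, while WSL2 detection is best-effort. A rough standalone sketch of the underlying idea of probing uname() strings; the markers and return convention below are assumptions, not the real probe_for_WSL() implementation:

#include <stdio.h>
#include <string.h>
#include <sys/utsname.h>

static int probe_wsl_guess(const char *s) {
  /* assumed heuristic: WSL kernels advertise "Microsoft"/"microsoft" in uname fields */
  return (strstr(s, "Microsoft") || strstr(s, "microsoft")) ? 1 : 0;
}

int main(void) {
  struct utsname buf;
  if (uname(&buf) == 0) {
    const int wsl = probe_wsl_guess(buf.release) || probe_wsl_guess(buf.version);
    printf("running on WSL (guess): %s\n", wsl ? "yes" : "no");
  }
  return 0;
}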
@ -164,8 +162,7 @@ mdbx_global_constructor(void) {
} }
MDBX_EXCLUDE_FOR_GPROF MDBX_EXCLUDE_FOR_GPROF
__cold static __attribute__((__destructor__)) void __cold static __attribute__((__destructor__)) void mdbx_global_destructor(void) {
mdbx_global_destructor(void) {
mdbx_fini(); mdbx_fini();
#ifdef ENABLE_GPROF #ifdef ENABLE_GPROF
if (!&__gmon_start__) if (!&__gmon_start__)
@ -180,13 +177,11 @@ mdbx_global_destructor(void) {
struct libmdbx_globals globals; struct libmdbx_globals globals;
__cold static void mdbx_init(void) { __cold static void mdbx_init(void) {
globals.runtime_flags = ((MDBX_DEBUG) > 0) * MDBX_DBG_ASSERT + globals.runtime_flags = ((MDBX_DEBUG) > 0) * MDBX_DBG_ASSERT + ((MDBX_DEBUG) > 1) * MDBX_DBG_AUDIT;
((MDBX_DEBUG) > 1) * MDBX_DBG_AUDIT;
globals.loglevel = MDBX_LOG_FATAL; globals.loglevel = MDBX_LOG_FATAL;
ENSURE(nullptr, osal_fastmutex_init(&globals.debug_lock) == 0); ENSURE(nullptr, osal_fastmutex_init(&globals.debug_lock) == 0);
osal_ctor(); osal_ctor();
assert(globals.sys_pagesize > 0 && assert(globals.sys_pagesize > 0 && (globals.sys_pagesize & (globals.sys_pagesize - 1)) == 0);
(globals.sys_pagesize & (globals.sys_pagesize - 1)) == 0);
rthc_ctor(); rthc_ctor();
#if MDBX_DEBUG #if MDBX_DEBUG
ENSURE(nullptr, troika_verify_fsm()); ENSURE(nullptr, troika_verify_fsm());
@ -156,9 +156,8 @@ enum txn_flags {
txn_shrink_allowed = UINT32_C(0x40000000), txn_shrink_allowed = UINT32_C(0x40000000),
txn_parked = MDBX_TXN_PARKED, txn_parked = MDBX_TXN_PARKED,
txn_gc_drained = 0x40 /* GC was depleted up to oldest reader */, txn_gc_drained = 0x40 /* GC was depleted up to oldest reader */,
txn_state_flags = MDBX_TXN_FINISHED | MDBX_TXN_ERROR | MDBX_TXN_DIRTY | txn_state_flags = MDBX_TXN_FINISHED | MDBX_TXN_ERROR | MDBX_TXN_DIRTY | MDBX_TXN_SPILLS | MDBX_TXN_HAS_CHILD |
MDBX_TXN_SPILLS | MDBX_TXN_HAS_CHILD | MDBX_TXN_INVALID | MDBX_TXN_INVALID | txn_gc_drained
txn_gc_drained
}; };
/* A database transaction. /* A database transaction.
@ -336,12 +335,9 @@ enum env_flags {
/* Only a subset of the mdbx_env flags can be changed /* Only a subset of the mdbx_env flags can be changed
* at runtime. Changing other flags requires closing the * at runtime. Changing other flags requires closing the
* environment and re-opening it with the new flags. */ * environment and re-opening it with the new flags. */
ENV_CHANGEABLE_FLAGS = MDBX_SAFE_NOSYNC | MDBX_NOMETASYNC | ENV_CHANGEABLE_FLAGS = MDBX_SAFE_NOSYNC | MDBX_NOMETASYNC | DEPRECATED_MAPASYNC | MDBX_NOMEMINIT |
DEPRECATED_MAPASYNC | MDBX_NOMEMINIT | DEPRECATED_COALESCE | MDBX_PAGEPERTURB | MDBX_ACCEDE | MDBX_VALIDATION,
DEPRECATED_COALESCE | MDBX_PAGEPERTURB | MDBX_ACCEDE | ENV_CHANGELESS_FLAGS = MDBX_NOSUBDIR | MDBX_RDONLY | MDBX_WRITEMAP | MDBX_NOSTICKYTHREADS | MDBX_NORDAHEAD |
MDBX_VALIDATION,
ENV_CHANGELESS_FLAGS = MDBX_NOSUBDIR | MDBX_RDONLY | MDBX_WRITEMAP |
MDBX_NOSTICKYTHREADS | MDBX_NORDAHEAD |
MDBX_LIFORECLAIM | MDBX_EXCLUSIVE, MDBX_LIFORECLAIM | MDBX_EXCLUSIVE,
ENV_USABLE_FLAGS = ENV_CHANGEABLE_FLAGS | ENV_CHANGELESS_FLAGS ENV_USABLE_FLAGS = ENV_CHANGEABLE_FLAGS | ENV_CHANGELESS_FLAGS
}; };
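The comment above splits environment flags into those that may be toggled on a live environment and those fixed at open time. A tiny sketch of the mask check such a split implies; the flag values here are placeholders, not the real MDBX constants:

#include <stdio.h>

enum {
  FLAG_SAFE_NOSYNC = 0x1, /* placeholder for a runtime-changeable flag */
  FLAG_WRITEMAP = 0x2,    /* placeholder for a change-less flag */
  CHANGEABLE_MASK = FLAG_SAFE_NOSYNC
};

static int try_change(unsigned current, unsigned requested) {
  const unsigned diff = current ^ requested;
  if (diff & ~CHANGEABLE_MASK)
    return -1; /* would require closing and re-opening the environment */
  return 0;
}

int main(void) {
  printf("toggle NOSYNC: %d\n", try_change(0, FLAG_SAFE_NOSYNC)); /* 0: allowed */
  printf("toggle WRITEMAP: %d\n", try_change(0, FLAG_WRITEMAP));  /* -1: rejected */
  return 0;
}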
@ -368,8 +364,8 @@ struct MDBX_env {
uint16_t subpage_reserve_prereq; uint16_t subpage_reserve_prereq;
uint16_t subpage_reserve_limit; uint16_t subpage_reserve_limit;
atomic_pgno_t mlocked_pgno; atomic_pgno_t mlocked_pgno;
uint8_t ps2ln; /* log2 of DB page size */ uint8_t ps2ln; /* log2 of DB page size */
int8_t stuck_meta; /* recovery-only: target meta page or less that zero */ int8_t stuck_meta; /* recovery-only: target meta page or less that zero */
uint16_t merge_threshold, merge_threshold_gc; /* pages emptier than this are uint16_t merge_threshold, merge_threshold_gc; /* pages emptier than this are
candidates for merging */ candidates for merging */
unsigned max_readers; /* size of the reader table */ unsigned max_readers; /* size of the reader table */
@ -385,7 +381,7 @@ struct MDBX_env {
kvx_t *kvs; /* array of auxiliary key-value properties */ kvx_t *kvs; /* array of auxiliary key-value properties */
uint8_t *__restrict dbs_flags; /* array of flags from tree_t.flags */ uint8_t *__restrict dbs_flags; /* array of flags from tree_t.flags */
mdbx_atomic_uint32_t *dbi_seqs; /* array of dbi sequence numbers */ mdbx_atomic_uint32_t *dbi_seqs; /* array of dbi sequence numbers */
unsigned maxgc_large1page; /* Number of pgno_t fit in a single large page */ unsigned maxgc_large1page; /* Number of pgno_t fit in a single large page */
unsigned maxgc_per_branch; unsigned maxgc_per_branch;
uint32_t registered_reader_pid; /* have liveness lock in reader table */ uint32_t registered_reader_pid; /* have liveness lock in reader table */
void *userctx; /* User-settable context */ void *userctx; /* User-settable context */
@ -492,9 +488,7 @@ struct MDBX_env {
#endif #endif
/* ------------------------------------------------- stub for lck-less mode */ /* ------------------------------------------------- stub for lck-less mode */
mdbx_atomic_uint64_t mdbx_atomic_uint64_t lckless_placeholder[(sizeof(lck_t) + MDBX_CACHELINE_SIZE - 1) / sizeof(mdbx_atomic_uint64_t)];
lckless_placeholder[(sizeof(lck_t) + MDBX_CACHELINE_SIZE - 1) /
sizeof(mdbx_atomic_uint64_t)];
}; };
/*----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*/
@ -509,8 +503,8 @@ struct MDBX_env {
#define DEFAULT_READERS 61 #define DEFAULT_READERS 61
enum db_flags { enum db_flags {
DB_PERSISTENT_FLAGS = MDBX_REVERSEKEY | MDBX_DUPSORT | MDBX_INTEGERKEY | DB_PERSISTENT_FLAGS =
MDBX_DUPFIXED | MDBX_INTEGERDUP | MDBX_REVERSEDUP, MDBX_REVERSEKEY | MDBX_DUPSORT | MDBX_INTEGERKEY | MDBX_DUPFIXED | MDBX_INTEGERDUP | MDBX_REVERSEDUP,
/* mdbx_dbi_open() flags */ /* mdbx_dbi_open() flags */
DB_USABLE_FLAGS = DB_PERSISTENT_FLAGS | MDBX_CREATE | MDBX_DB_ACCEDE, DB_USABLE_FLAGS = DB_PERSISTENT_FLAGS | MDBX_CREATE | MDBX_DB_ACCEDE,
@ -524,27 +518,19 @@ enum db_flags {
MDBX_MAYBE_UNUSED static void static_checks(void) { MDBX_MAYBE_UNUSED static void static_checks(void) {
STATIC_ASSERT(MDBX_WORDBITS == sizeof(void *) * CHAR_BIT); STATIC_ASSERT(MDBX_WORDBITS == sizeof(void *) * CHAR_BIT);
STATIC_ASSERT(UINT64_C(0x80000000) == (uint32_t)ENV_FATAL_ERROR); STATIC_ASSERT(UINT64_C(0x80000000) == (uint32_t)ENV_FATAL_ERROR);
STATIC_ASSERT_MSG(INT16_MAX - CORE_DBS == MDBX_MAX_DBI, STATIC_ASSERT_MSG(INT16_MAX - CORE_DBS == MDBX_MAX_DBI, "Oops, MDBX_MAX_DBI or CORE_DBS?");
"Oops, MDBX_MAX_DBI or CORE_DBS?");
STATIC_ASSERT_MSG((unsigned)(MDBX_DB_ACCEDE | MDBX_CREATE) == STATIC_ASSERT_MSG((unsigned)(MDBX_DB_ACCEDE | MDBX_CREATE) ==
((DB_USABLE_FLAGS | DB_INTERNAL_FLAGS) & ((DB_USABLE_FLAGS | DB_INTERNAL_FLAGS) & (ENV_USABLE_FLAGS | ENV_INTERNAL_FLAGS)),
(ENV_USABLE_FLAGS | ENV_INTERNAL_FLAGS)),
"Oops, some flags overlapped or wrong");
STATIC_ASSERT_MSG((DB_INTERNAL_FLAGS & DB_USABLE_FLAGS) == 0,
"Oops, some flags overlapped or wrong");
STATIC_ASSERT_MSG((DB_PERSISTENT_FLAGS & ~DB_USABLE_FLAGS) == 0,
"Oops, some flags overlapped or wrong"); "Oops, some flags overlapped or wrong");
STATIC_ASSERT_MSG((DB_INTERNAL_FLAGS & DB_USABLE_FLAGS) == 0, "Oops, some flags overlapped or wrong");
STATIC_ASSERT_MSG((DB_PERSISTENT_FLAGS & ~DB_USABLE_FLAGS) == 0, "Oops, some flags overlapped or wrong");
STATIC_ASSERT(DB_PERSISTENT_FLAGS <= UINT8_MAX); STATIC_ASSERT(DB_PERSISTENT_FLAGS <= UINT8_MAX);
STATIC_ASSERT_MSG((ENV_INTERNAL_FLAGS & ENV_USABLE_FLAGS) == 0, STATIC_ASSERT_MSG((ENV_INTERNAL_FLAGS & ENV_USABLE_FLAGS) == 0, "Oops, some flags overlapped or wrong");
"Oops, some flags overlapped or wrong");
STATIC_ASSERT_MSG( STATIC_ASSERT_MSG((txn_state_flags & (txn_rw_begin_flags | txn_ro_begin_flags)) == 0,
(txn_state_flags & (txn_rw_begin_flags | txn_ro_begin_flags)) == 0, "Oops, some txn flags overlapped or wrong");
"Oops, some txn flags overlapped or wrong"); STATIC_ASSERT_MSG(((txn_rw_begin_flags | txn_ro_begin_flags | txn_state_flags) & txn_shrink_allowed) == 0,
STATIC_ASSERT_MSG( "Oops, some txn flags overlapped or wrong");
((txn_rw_begin_flags | txn_ro_begin_flags | txn_state_flags) &
txn_shrink_allowed) == 0,
"Oops, some txn flags overlapped or wrong");
STATIC_ASSERT(sizeof(reader_slot_t) == 32); STATIC_ASSERT(sizeof(reader_slot_t) == 32);
#if MDBX_LOCKING > 0 #if MDBX_LOCKING > 0
@ -17,10 +17,8 @@
/* FROZEN: The version number for a database's datafile format. */ /* FROZEN: The version number for a database's datafile format. */
#define MDBX_DATA_VERSION 3 #define MDBX_DATA_VERSION 3
#define MDBX_DATA_MAGIC \ #define MDBX_DATA_MAGIC ((MDBX_MAGIC << 8) + MDBX_PNL_ASCENDING * 64 + MDBX_DATA_VERSION)
((MDBX_MAGIC << 8) + MDBX_PNL_ASCENDING * 64 + MDBX_DATA_VERSION) #define MDBX_DATA_MAGIC_LEGACY_COMPAT ((MDBX_MAGIC << 8) + MDBX_PNL_ASCENDING * 64 + 2)
#define MDBX_DATA_MAGIC_LEGACY_COMPAT \
((MDBX_MAGIC << 8) + MDBX_PNL_ASCENDING * 64 + 2)
#define MDBX_DATA_MAGIC_LEGACY_DEVEL ((MDBX_MAGIC << 8) + 255) #define MDBX_DATA_MAGIC_LEGACY_DEVEL ((MDBX_MAGIC << 8) + 255)
/* handle for the DB used to track free pages. */ /* handle for the DB used to track free pages. */
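The MDBX_DATA_MAGIC macros a few lines up pack the engine magic, the build-time PNL ordering choice and the on-disk format version into one value that can be compared against what a datafile was written with. A sketch of that composition; the magic constant below is a placeholder, not the real MDBX_MAGIC:

#include <stdio.h>
#include <stdint.h>

int main(void) {
  const uint64_t MAGIC = 0x59659DBDEF4C11ull; /* placeholder 56-bit constant */
  const int pnl_ascending = 0;                /* build-time ordering choice */
  const int data_version = 3;                 /* datafile format version */

  const uint64_t data_magic = (MAGIC << 8) + pnl_ascending * 64 + data_version;
  printf("data magic: 0x%016llx (version %d)\n", (unsigned long long)data_magic, data_version);
  return 0;
}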
@ -261,40 +259,30 @@ typedef enum node_flags {
#pragma pack(pop) #pragma pack(pop)
MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline uint8_t MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline uint8_t page_type(const page_t *mp) { return mp->flags; }
page_type(const page_t *mp) {
return mp->flags;
}
MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline uint8_t MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline uint8_t page_type_compat(const page_t *mp) {
page_type_compat(const page_t *mp) {
/* Drop legacy P_DIRTY flag for sub-pages for compatibility,
 * for assertions only. */
return unlikely(mp->flags & P_SUBP) ? mp->flags & ~(P_SUBP | P_LEGACY_DIRTY) return unlikely(mp->flags & P_SUBP) ? mp->flags & ~(P_SUBP | P_LEGACY_DIRTY) : mp->flags;
: mp->flags;
} }
MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline bool MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline bool is_leaf(const page_t *mp) {
is_leaf(const page_t *mp) {
return (mp->flags & P_LEAF) != 0; return (mp->flags & P_LEAF) != 0;
} }
MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline bool MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline bool is_dupfix_leaf(const page_t *mp) {
is_dupfix_leaf(const page_t *mp) {
return (mp->flags & P_DUPFIX) != 0; return (mp->flags & P_DUPFIX) != 0;
} }
MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline bool MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline bool is_branch(const page_t *mp) {
is_branch(const page_t *mp) {
return (mp->flags & P_BRANCH) != 0; return (mp->flags & P_BRANCH) != 0;
} }
MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline bool MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline bool is_largepage(const page_t *mp) {
is_largepage(const page_t *mp) {
return (mp->flags & P_LARGE) != 0; return (mp->flags & P_LARGE) != 0;
} }
MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline bool MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline bool is_subpage(const page_t *mp) {
is_subpage(const page_t *mp) {
return (mp->flags & P_SUBP) != 0; return (mp->flags & P_SUBP) != 0;
} }


@ -19,8 +19,7 @@ typedef void osal_ipclock_t;
#define MDBX_LCK_SIGN UINT32_C(0xF18D) #define MDBX_LCK_SIGN UINT32_C(0xF18D)
typedef mdbx_pid_t osal_ipclock_t; typedef mdbx_pid_t osal_ipclock_t;
#elif MDBX_LOCKING == MDBX_LOCKING_POSIX2001 || \ #elif MDBX_LOCKING == MDBX_LOCKING_POSIX2001 || MDBX_LOCKING == MDBX_LOCKING_POSIX2008
MDBX_LOCKING == MDBX_LOCKING_POSIX2008
#define MDBX_LCK_SIGN UINT32_C(0x8017) #define MDBX_LCK_SIGN UINT32_C(0x8017)
typedef pthread_mutex_t osal_ipclock_t; typedef pthread_mutex_t osal_ipclock_t;
@ -64,19 +63,15 @@ typedef struct pgops {
mdbx_atomic_uint64_t merge; /* Page merges */ mdbx_atomic_uint64_t merge; /* Page merges */
mdbx_atomic_uint64_t spill; /* Quantity of spilled dirty pages */ mdbx_atomic_uint64_t spill; /* Quantity of spilled dirty pages */
mdbx_atomic_uint64_t unspill; /* Quantity of unspilled/reloaded pages */ mdbx_atomic_uint64_t unspill; /* Quantity of unspilled/reloaded pages */
mdbx_atomic_uint64_t mdbx_atomic_uint64_t wops; /* Number of explicit write operations (not pages) to disk */
wops; /* Number of explicit write operations (not pages) to disk */ mdbx_atomic_uint64_t msync; /* Number of explicit msync/flush-to-disk operations */
mdbx_atomic_uint64_t mdbx_atomic_uint64_t fsync; /* Number of explicit fsync/flush-to-disk operations */
msync; /* Number of explicit msync/flush-to-disk operations */
mdbx_atomic_uint64_t
fsync; /* Number of explicit fsync/flush-to-disk operations */
mdbx_atomic_uint64_t prefault; /* Number of prefault write operations */ mdbx_atomic_uint64_t prefault; /* Number of prefault write operations */
mdbx_atomic_uint64_t mincore; /* Number of mincore() calls */ mdbx_atomic_uint64_t mincore; /* Number of mincore() calls */
mdbx_atomic_uint32_t mdbx_atomic_uint32_t incoherence; /* number of https://libmdbx.dqdkfa.ru/dead-github/issues/269
incoherence; /* number of https://libmdbx.dqdkfa.ru/dead-github/issues/269 caught */
caught */
mdbx_atomic_uint32_t reserved; mdbx_atomic_uint32_t reserved;
/* Statistics for GC profiling. /* Statistics for GC profiling.
@ -202,8 +197,7 @@ typedef struct shared_lck {
* i.e. for sync-polling in the MDBX_NOMETASYNC mode. */ * i.e. for sync-polling in the MDBX_NOMETASYNC mode. */
#define MDBX_NOMETASYNC_LAZY_UNK (UINT32_MAX / 3) #define MDBX_NOMETASYNC_LAZY_UNK (UINT32_MAX / 3)
#define MDBX_NOMETASYNC_LAZY_FD (MDBX_NOMETASYNC_LAZY_UNK + UINT32_MAX / 8) #define MDBX_NOMETASYNC_LAZY_FD (MDBX_NOMETASYNC_LAZY_UNK + UINT32_MAX / 8)
#define MDBX_NOMETASYNC_LAZY_WRITEMAP \ #define MDBX_NOMETASYNC_LAZY_WRITEMAP (MDBX_NOMETASYNC_LAZY_UNK - UINT32_MAX / 8)
(MDBX_NOMETASYNC_LAZY_UNK - UINT32_MAX / 8)
mdbx_atomic_uint32_t meta_sync_txnid; mdbx_atomic_uint32_t meta_sync_txnid;
/* Period for timed auto-sync feature, i.e. at the every steady checkpoint /* Period for timed auto-sync feature, i.e. at the every steady checkpoint
@ -277,12 +271,10 @@ typedef struct shared_lck {
reader_slot_t rdt[] /* dynamic size */; reader_slot_t rdt[] /* dynamic size */;
/* Lockfile format signature: version, features and field layout */ /* Lockfile format signature: version, features and field layout */
#define MDBX_LOCK_FORMAT \ #define MDBX_LOCK_FORMAT \
(MDBX_LCK_SIGN * 27733 + (unsigned)sizeof(reader_slot_t) * 13 + \ (MDBX_LCK_SIGN * 27733 + (unsigned)sizeof(reader_slot_t) * 13 + \
(unsigned)offsetof(reader_slot_t, snapshot_pages_used) * 251 + \ (unsigned)offsetof(reader_slot_t, snapshot_pages_used) * 251 + (unsigned)offsetof(lck_t, cached_oldest) * 83 + \
(unsigned)offsetof(lck_t, cached_oldest) * 83 + \ (unsigned)offsetof(lck_t, rdt_length) * 37 + (unsigned)offsetof(lck_t, rdt) * 29)
(unsigned)offsetof(lck_t, rdt_length) * 37 + \
(unsigned)offsetof(lck_t, rdt) * 29)
#endif /* FLEXIBLE_ARRAY_MEMBERS */ #endif /* FLEXIBLE_ARRAY_MEMBERS */
} lck_t; } lck_t;
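MDBX_LOCK_FORMAT mixes struct sizes and field offsets into one number so that any layout drift of the shared lck-file is caught at open time as a signature mismatch. A small sketch of the same trick over a hypothetical struct (the 0x8017 multiplier echoes MDBX_LCK_SIGN above, everything else is illustrative):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct shared_header {
  uint64_t magic;
  uint32_t oldest;
  uint32_t readers_length;
  uint64_t readers[]; /* flexible array, like rdt[] above */
};

#define SHARED_FORMAT                                                          \
  (0x8017u * 27733u + (unsigned)sizeof(struct shared_header) * 13u +           \
   (unsigned)offsetof(struct shared_header, oldest) * 83u +                    \
   (unsigned)offsetof(struct shared_header, readers_length) * 37u +            \
   (unsigned)offsetof(struct shared_header, readers) * 29u)

int main(void) {
  /* any change to the struct layout changes this fingerprint */
  printf("layout signature: 0x%08X\n", SHARED_FORMAT);
  return 0;
}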


@ -71,11 +71,10 @@ __cold static void choice_fcntl(void) {
assert(!op_setlk && !op_setlkw && !op_getlk); assert(!op_setlk && !op_setlkw && !op_getlk);
if ((globals.runtime_flags & MDBX_DBG_LEGACY_MULTIOPEN) == 0 if ((globals.runtime_flags & MDBX_DBG_LEGACY_MULTIOPEN) == 0
#if defined(__linux__) || defined(__gnu_linux__) #if defined(__linux__) || defined(__gnu_linux__)
&& globals.linux_kernel_version > && globals.linux_kernel_version > 0x030f0000 /* OFD locks are available since 3.15, but engages here
0x030f0000 /* OFD locks are available since 3.15, but engages here only for 3.16 and later kernels (i.e. LTS) because
only for 3.16 and later kernels (i.e. LTS) because of reliability reasons */
of reliability reasons */ #endif /* linux */
#endif /* linux */
) { ) {
op_setlk = MDBX_F_OFD_SETLK; op_setlk = MDBX_F_OFD_SETLK;
op_setlkw = MDBX_F_OFD_SETLKW; op_setlkw = MDBX_F_OFD_SETLKW;
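A minimal standalone sketch of the same runtime choice, assuming Linux with glibc and _GNU_SOURCE; the kernel-version parsing and the variable names here are illustrative, not the library's actual helpers:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/utsname.h>

int setlk_cmd = F_SETLK, setlkw_cmd = F_SETLKW, getlk_cmd = F_GETLK;

void choose_lock_commands(void) {
#if defined(F_OFD_SETLK)
  struct utsname un;
  unsigned major = 0, minor = 0;
  if (uname(&un) == 0 && sscanf(un.release, "%u.%u", &major, &minor) == 2 &&
      (major > 3 || (major == 3 && minor >= 16))) {
    setlk_cmd = F_OFD_SETLK;   /* locks owned by the open file description */
    setlkw_cmd = F_OFD_SETLKW; /* survive thread exit, die with the last dup */
    getlk_cmd = F_OFD_GETLK;
  }
#endif /* F_OFD_SETLK */
}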
@ -92,32 +91,25 @@ __cold static void choice_fcntl(void) {
#define op_getlk MDBX_F_GETLK #define op_getlk MDBX_F_GETLK
#endif /* MDBX_USE_OFDLOCKS */ #endif /* MDBX_USE_OFDLOCKS */
static int lck_op(const mdbx_filehandle_t fd, int cmd, const int lck, static int lck_op(const mdbx_filehandle_t fd, int cmd, const int lck, const off_t offset, off_t len) {
const off_t offset, off_t len) { STATIC_ASSERT(sizeof(off_t) >= sizeof(void *) && sizeof(off_t) >= sizeof(size_t));
STATIC_ASSERT(sizeof(off_t) >= sizeof(void *) &&
sizeof(off_t) >= sizeof(size_t));
#ifdef __ANDROID_API__ #ifdef __ANDROID_API__
STATIC_ASSERT_MSG((sizeof(off_t) * 8 == MDBX_WORDBITS), STATIC_ASSERT_MSG((sizeof(off_t) * 8 == MDBX_WORDBITS), "The bitness of system `off_t` type is mismatch. Please "
"The bitness of system `off_t` type is mismatch. Please " "fix build and/or NDK configuration.");
"fix build and/or NDK configuration.");
#endif /* Android */ #endif /* Android */
assert(offset >= 0 && len > 0); assert(offset >= 0 && len > 0);
assert((uint64_t)offset < (uint64_t)INT64_MAX && assert((uint64_t)offset < (uint64_t)INT64_MAX && (uint64_t)len < (uint64_t)INT64_MAX &&
(uint64_t)len < (uint64_t)INT64_MAX &&
(uint64_t)(offset + len) > (uint64_t)offset); (uint64_t)(offset + len) > (uint64_t)offset);
assert((uint64_t)offset < (uint64_t)OFF_T_MAX && assert((uint64_t)offset < (uint64_t)OFF_T_MAX && (uint64_t)len <= (uint64_t)OFF_T_MAX &&
(uint64_t)len <= (uint64_t)OFF_T_MAX &&
(uint64_t)(offset + len) <= (uint64_t)OFF_T_MAX); (uint64_t)(offset + len) <= (uint64_t)OFF_T_MAX);
assert((uint64_t)((off_t)((uint64_t)offset + (uint64_t)len)) == assert((uint64_t)((off_t)((uint64_t)offset + (uint64_t)len)) == ((uint64_t)offset + (uint64_t)len));
((uint64_t)offset + (uint64_t)len));
jitter4testing(true); jitter4testing(true);
for (;;) { for (;;) {
MDBX_STRUCT_FLOCK lock_op; MDBX_STRUCT_FLOCK lock_op;
STATIC_ASSERT_MSG(sizeof(off_t) <= sizeof(lock_op.l_start) && STATIC_ASSERT_MSG(sizeof(off_t) <= sizeof(lock_op.l_start) && sizeof(off_t) <= sizeof(lock_op.l_len) &&
sizeof(off_t) <= sizeof(lock_op.l_len) &&
OFF_T_MAX == (off_t)OFF_T_MAX, OFF_T_MAX == (off_t)OFF_T_MAX,
"Support for large/64-bit-sized files is misconfigured " "Support for large/64-bit-sized files is misconfigured "
"for the target system and/or toolchain. " "for the target system and/or toolchain. "
@ -134,15 +126,13 @@ static int lck_op(const mdbx_filehandle_t fd, int cmd, const int lck,
/* Checks reader by pid. Returns: /* Checks reader by pid. Returns:
* MDBX_RESULT_TRUE - if pid is live (reader holds a lock). * MDBX_RESULT_TRUE - if pid is live (reader holds a lock).
* MDBX_RESULT_FALSE - if pid is dead (a lock could be placed). */ * MDBX_RESULT_FALSE - if pid is dead (a lock could be placed). */
return (lock_op.l_type == F_UNLCK) ? MDBX_RESULT_FALSE return (lock_op.l_type == F_UNLCK) ? MDBX_RESULT_FALSE : MDBX_RESULT_TRUE;
: MDBX_RESULT_TRUE;
} }
return MDBX_SUCCESS; return MDBX_SUCCESS;
} }
rc = errno; rc = errno;
#if MDBX_USE_OFDLOCKS #if MDBX_USE_OFDLOCKS
if (rc == EINVAL && (cmd == MDBX_F_OFD_SETLK || cmd == MDBX_F_OFD_SETLKW || if (rc == EINVAL && (cmd == MDBX_F_OFD_SETLK || cmd == MDBX_F_OFD_SETLKW || cmd == MDBX_F_OFD_GETLK)) {
cmd == MDBX_F_OFD_GETLK)) {
/* fallback to non-OFD locks */ /* fallback to non-OFD locks */
if (cmd == MDBX_F_OFD_SETLK) if (cmd == MDBX_F_OFD_SETLK)
cmd = MDBX_F_SETLK; cmd = MDBX_F_SETLK;
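The pid-liveness check above boils down to an F_GETLK probe: if nobody holds the byte, the former reader is gone. A hedged standalone sketch of that idea (names are made up, not libmdbx code):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>

/* Returns 1 if some process still holds a lock on the byte, 0 if the range is
 * free (the owner is gone), -1 on error. */
int probe_byte_lock(int fd, off_t offset) {
  struct flock probe;
  probe.l_type = F_WRLCK; /* ask "could I take a write lock here?" */
  probe.l_whence = SEEK_SET;
  probe.l_start = offset;
  probe.l_len = 1;
  probe.l_pid = 0;
  if (fcntl(fd, F_GETLK, &probe) == -1)
    return -1;
  if (probe.l_type == F_UNLCK)
    return 0; /* nobody holds it, i.e. the reader is dead */
  printf("byte %jd is held by pid %jd\n", (intmax_t)offset, (intmax_t)probe.l_pid);
  return 1; /* still held, i.e. the reader is alive */
}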
@ -197,8 +187,7 @@ MDBX_INTERNAL int lck_rpid_check(MDBX_env *env, uint32_t pid) {
MDBX_INTERNAL int lck_ipclock_stubinit(osal_ipclock_t *ipc) { MDBX_INTERNAL int lck_ipclock_stubinit(osal_ipclock_t *ipc) {
#if MDBX_LOCKING == MDBX_LOCKING_POSIX1988 #if MDBX_LOCKING == MDBX_LOCKING_POSIX1988
return sem_init(ipc, false, 1) ? errno : 0; return sem_init(ipc, false, 1) ? errno : 0;
#elif MDBX_LOCKING == MDBX_LOCKING_POSIX2001 || \ #elif MDBX_LOCKING == MDBX_LOCKING_POSIX2001 || MDBX_LOCKING == MDBX_LOCKING_POSIX2008
MDBX_LOCKING == MDBX_LOCKING_POSIX2008
return pthread_mutex_init(ipc, nullptr); return pthread_mutex_init(ipc, nullptr);
#else #else
#error "FIXME" #error "FIXME"
@ -208,8 +197,7 @@ MDBX_INTERNAL int lck_ipclock_stubinit(osal_ipclock_t *ipc) {
MDBX_INTERNAL int lck_ipclock_destroy(osal_ipclock_t *ipc) { MDBX_INTERNAL int lck_ipclock_destroy(osal_ipclock_t *ipc) {
#if MDBX_LOCKING == MDBX_LOCKING_POSIX1988 #if MDBX_LOCKING == MDBX_LOCKING_POSIX1988
return sem_destroy(ipc) ? errno : 0; return sem_destroy(ipc) ? errno : 0;
#elif MDBX_LOCKING == MDBX_LOCKING_POSIX2001 || \ #elif MDBX_LOCKING == MDBX_LOCKING_POSIX2001 || MDBX_LOCKING == MDBX_LOCKING_POSIX2008
MDBX_LOCKING == MDBX_LOCKING_POSIX2008
return pthread_mutex_destroy(ipc); return pthread_mutex_destroy(ipc);
#else #else
#error "FIXME" #error "FIXME"
@ -233,14 +221,12 @@ static int check_fstat(MDBX_env *env) {
#else #else
rc = EPERM; rc = EPERM;
#endif #endif
ERROR("%s %s, err %d", "DXB", ERROR("%s %s, err %d", "DXB", (st.st_nlink < 1) ? "file was removed" : "not a regular file", rc);
(st.st_nlink < 1) ? "file was removed" : "not a regular file", rc);
return rc; return rc;
} }
if (st.st_size < (off_t)(MDBX_MIN_PAGESIZE * NUM_METAS)) { if (st.st_size < (off_t)(MDBX_MIN_PAGESIZE * NUM_METAS)) {
VERBOSE("dxb-file is too short (%u), exclusive-lock needed", VERBOSE("dxb-file is too short (%u), exclusive-lock needed", (unsigned)st.st_size);
(unsigned)st.st_size);
rc = MDBX_RESULT_TRUE; rc = MDBX_RESULT_TRUE;
} }
@ -258,16 +244,14 @@ static int check_fstat(MDBX_env *env) {
#else #else
rc = EPERM; rc = EPERM;
#endif #endif
ERROR("%s %s, err %d", "LCK", ERROR("%s %s, err %d", "LCK", (st.st_nlink < 1) ? "file was removed" : "not a regular file", rc);
(st.st_nlink < 1) ? "file was removed" : "not a regular file", rc);
return rc; return rc;
} }
/* Checking file size to detect the situation when we got the shared lock /* Checking file size to detect the situation when we got the shared lock
* immediately after lck_destroy(). */ * immediately after lck_destroy(). */
if (st.st_size < (off_t)(sizeof(lck_t) + sizeof(reader_slot_t))) { if (st.st_size < (off_t)(sizeof(lck_t) + sizeof(reader_slot_t))) {
VERBOSE("lck-file is too short (%u), exclusive-lock needed", VERBOSE("lck-file is too short (%u), exclusive-lock needed", (unsigned)st.st_size);
(unsigned)st.st_size);
rc = MDBX_RESULT_TRUE; rc = MDBX_RESULT_TRUE;
} }
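A compact sketch of the fstat() sanity checks performed above — still linked, a regular file, and long enough — applied to an arbitrary descriptor; the error-code mapping is illustrative:

#include <errno.h>
#include <sys/stat.h>
#include <sys/types.h>

int sanity_check_fd(int fd, off_t minimal_size) {
  struct stat st;
  if (fstat(fd, &st))
    return errno;
  if (st.st_nlink < 1) /* the file was unlinked while we keep it open */
    return ENOENT;
  if (!S_ISREG(st.st_mode)) /* pipes, devices etc. are not acceptable */
    return EPERM;
  if (st.st_size < minimal_size) /* too short: caller should take the exclusive path */
    return EAGAIN;
  return 0;
}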
@ -298,8 +282,7 @@ __cold MDBX_INTERNAL int lck_seize(MDBX_env *env) {
if (env->lck_mmap.fd == INVALID_HANDLE_VALUE) { if (env->lck_mmap.fd == INVALID_HANDLE_VALUE) {
/* LY: without-lck mode (e.g. exclusive or on read-only filesystem) */ /* LY: without-lck mode (e.g. exclusive or on read-only filesystem) */
rc = lck_op(env->lazy_fd, op_setlk, rc = lck_op(env->lazy_fd, op_setlk, (env->flags & MDBX_RDONLY) ? F_RDLCK : F_WRLCK, 0, OFF_T_MAX);
(env->flags & MDBX_RDONLY) ? F_RDLCK : F_WRLCK, 0, OFF_T_MAX);
if (rc != MDBX_SUCCESS) { if (rc != MDBX_SUCCESS) {
ERROR("%s, err %u", "without-lck", rc); ERROR("%s, err %u", "without-lck", rc);
eASSERT(env, MDBX_IS_ERROR(rc)); eASSERT(env, MDBX_IS_ERROR(rc));
@ -329,8 +312,7 @@ retry:
return rc; return rc;
continue_dxb_exclusive: continue_dxb_exclusive:
rc = lck_op(env->lazy_fd, op_setlk, rc = lck_op(env->lazy_fd, op_setlk, (env->flags & MDBX_RDONLY) ? F_RDLCK : F_WRLCK, 0, OFF_T_MAX);
(env->flags & MDBX_RDONLY) ? F_RDLCK : F_WRLCK, 0, OFF_T_MAX);
if (rc == MDBX_SUCCESS) if (rc == MDBX_SUCCESS)
return MDBX_RESULT_TRUE /* Done: return with exclusive locking. */; return MDBX_RESULT_TRUE /* Done: return with exclusive locking. */;
@ -339,16 +321,14 @@ retry:
return err; return err;
/* the cause may be a collision with POSIX's file-lock recovery. */ /* the cause may be a collision with POSIX's file-lock recovery. */
if (!(rc == EAGAIN || rc == EACCES || rc == EBUSY || rc == EWOULDBLOCK || if (!(rc == EAGAIN || rc == EACCES || rc == EBUSY || rc == EWOULDBLOCK || rc == EDEADLK)) {
rc == EDEADLK)) {
ERROR("%s, err %u", "dxb-exclusive", rc); ERROR("%s, err %u", "dxb-exclusive", rc);
eASSERT(env, MDBX_IS_ERROR(rc)); eASSERT(env, MDBX_IS_ERROR(rc));
return rc; return rc;
} }
/* Fallback to lck-shared */ /* Fallback to lck-shared */
} else if (!(rc == EAGAIN || rc == EACCES || rc == EBUSY || } else if (!(rc == EAGAIN || rc == EACCES || rc == EBUSY || rc == EWOULDBLOCK || rc == EDEADLK)) {
rc == EWOULDBLOCK || rc == EDEADLK)) {
ERROR("%s, err %u", "try-exclusive", rc); ERROR("%s, err %u", "try-exclusive", rc);
eASSERT(env, MDBX_IS_ERROR(rc)); eASSERT(env, MDBX_IS_ERROR(rc));
return rc; return rc;
@ -384,16 +364,14 @@ retry:
if (rc == MDBX_SUCCESS) if (rc == MDBX_SUCCESS)
goto continue_dxb_exclusive; goto continue_dxb_exclusive;
if (!(rc == EAGAIN || rc == EACCES || rc == EBUSY || rc == EWOULDBLOCK || if (!(rc == EAGAIN || rc == EACCES || rc == EBUSY || rc == EWOULDBLOCK || rc == EDEADLK)) {
rc == EDEADLK)) {
ERROR("%s, err %u", "try-exclusive", rc); ERROR("%s, err %u", "try-exclusive", rc);
eASSERT(env, MDBX_IS_ERROR(rc)); eASSERT(env, MDBX_IS_ERROR(rc));
return rc; return rc;
} }
/* Lock against another process operating in without-lck or exclusive mode. */ /* Lock against another process operating in without-lck or exclusive mode. */
rc = lck_op(env->lazy_fd, op_setlk, rc = lck_op(env->lazy_fd, op_setlk, (env->flags & MDBX_RDONLY) ? F_RDLCK : F_WRLCK, env->pid, 1);
(env->flags & MDBX_RDONLY) ? F_RDLCK : F_WRLCK, env->pid, 1);
if (rc != MDBX_SUCCESS) { if (rc != MDBX_SUCCESS) {
ERROR("%s, err %u", "lock-against-without-lck", rc); ERROR("%s, err %u", "lock-against-without-lck", rc);
eASSERT(env, MDBX_IS_ERROR(rc)); eASSERT(env, MDBX_IS_ERROR(rc));
@ -413,8 +391,7 @@ MDBX_INTERNAL int lck_downgrade(MDBX_env *env) {
if ((env->flags & MDBX_EXCLUSIVE) == 0) { if ((env->flags & MDBX_EXCLUSIVE) == 0) {
rc = lck_op(env->lazy_fd, op_setlk, F_UNLCK, 0, env->pid); rc = lck_op(env->lazy_fd, op_setlk, F_UNLCK, 0, env->pid);
if (rc == MDBX_SUCCESS) if (rc == MDBX_SUCCESS)
rc = lck_op(env->lazy_fd, op_setlk, F_UNLCK, env->pid + 1, rc = lck_op(env->lazy_fd, op_setlk, F_UNLCK, env->pid + 1, OFF_T_MAX - env->pid - 1);
OFF_T_MAX - env->pid - 1);
} }
if (rc == MDBX_SUCCESS) if (rc == MDBX_SUCCESS)
rc = lck_op(env->lck_mmap.fd, op_setlk, F_RDLCK, 0, 1); rc = lck_op(env->lck_mmap.fd, op_setlk, F_RDLCK, 0, 1);
@ -433,13 +410,10 @@ MDBX_INTERNAL int lck_upgrade(MDBX_env *env, bool dont_wait) {
const int cmd = dont_wait ? op_setlk : op_setlkw; const int cmd = dont_wait ? op_setlk : op_setlkw;
int rc = lck_op(env->lck_mmap.fd, cmd, F_WRLCK, 0, 1); int rc = lck_op(env->lck_mmap.fd, cmd, F_WRLCK, 0, 1);
if (rc == MDBX_SUCCESS && (env->flags & MDBX_EXCLUSIVE) == 0) { if (rc == MDBX_SUCCESS && (env->flags & MDBX_EXCLUSIVE) == 0) {
rc = (env->pid > 1) ? lck_op(env->lazy_fd, cmd, F_WRLCK, 0, env->pid - 1) rc = (env->pid > 1) ? lck_op(env->lazy_fd, cmd, F_WRLCK, 0, env->pid - 1) : MDBX_SUCCESS;
: MDBX_SUCCESS;
if (rc == MDBX_SUCCESS) { if (rc == MDBX_SUCCESS) {
rc = lck_op(env->lazy_fd, cmd, F_WRLCK, env->pid + 1, rc = lck_op(env->lazy_fd, cmd, F_WRLCK, env->pid + 1, OFF_T_MAX - env->pid - 1);
OFF_T_MAX - env->pid - 1); if (rc != MDBX_SUCCESS && env->pid > 1 && lck_op(env->lazy_fd, op_setlk, F_UNLCK, 0, env->pid - 1))
if (rc != MDBX_SUCCESS && env->pid > 1 &&
lck_op(env->lazy_fd, op_setlk, F_UNLCK, 0, env->pid - 1))
rc = MDBX_PANIC; rc = MDBX_PANIC;
} }
if (rc != MDBX_SUCCESS && lck_op(env->lck_mmap.fd, op_setlk, F_RDLCK, 0, 1)) if (rc != MDBX_SUCCESS && lck_op(env->lck_mmap.fd, op_setlk, F_RDLCK, 0, 1))
@ -452,9 +426,7 @@ MDBX_INTERNAL int lck_upgrade(MDBX_env *env, bool dont_wait) {
return rc; return rc;
} }
__cold MDBX_INTERNAL int lck_destroy(MDBX_env *env, __cold MDBX_INTERNAL int lck_destroy(MDBX_env *env, MDBX_env *inprocess_neighbor, const uint32_t current_pid) {
MDBX_env *inprocess_neighbor,
const uint32_t current_pid) {
eASSERT(env, osal_getpid() == current_pid); eASSERT(env, osal_getpid() == current_pid);
int rc = MDBX_SUCCESS; int rc = MDBX_SUCCESS;
struct stat lck_info; struct stat lck_info;
@ -464,9 +436,7 @@ __cold MDBX_INTERNAL int lck_destroy(MDBX_env *env,
lck_op(env->lck_mmap.fd, op_setlk, F_WRLCK, 0, OFF_T_MAX) == 0 && lck_op(env->lck_mmap.fd, op_setlk, F_WRLCK, 0, OFF_T_MAX) == 0 &&
/* if LCK was not removed */ /* if LCK was not removed */
fstat(env->lck_mmap.fd, &lck_info) == 0 && lck_info.st_nlink > 0 && fstat(env->lck_mmap.fd, &lck_info) == 0 && lck_info.st_nlink > 0 &&
lck_op(env->lazy_fd, op_setlk, lck_op(env->lazy_fd, op_setlk, (env->flags & MDBX_RDONLY) ? F_RDLCK : F_WRLCK, 0, OFF_T_MAX) == 0) {
(env->flags & MDBX_RDONLY) ? F_RDLCK : F_WRLCK, 0,
OFF_T_MAX) == 0) {
VERBOSE("%p got exclusive, drown ipc-locks", (void *)env); VERBOSE("%p got exclusive, drown ipc-locks", (void *)env);
eASSERT(env, current_pid == env->pid); eASSERT(env, current_pid == env->pid);
@ -492,8 +462,7 @@ __cold MDBX_INTERNAL int lck_destroy(MDBX_env *env,
if (current_pid != env->pid) { if (current_pid != env->pid) {
eASSERT(env, !inprocess_neighbor); eASSERT(env, !inprocess_neighbor);
NOTICE("drown env %p after-fork pid %d -> %d", NOTICE("drown env %p after-fork pid %d -> %d", __Wpedantic_format_voidptr(env), env->pid, current_pid);
__Wpedantic_format_voidptr(env), env->pid, current_pid);
inprocess_neighbor = nullptr; inprocess_neighbor = nullptr;
} }
@ -516,11 +485,8 @@ __cold MDBX_INTERNAL int lck_destroy(MDBX_env *env,
env->lazy_fd = INVALID_HANDLE_VALUE; env->lazy_fd = INVALID_HANDLE_VALUE;
if (op_setlk == F_SETLK && inprocess_neighbor && rc == MDBX_SUCCESS) { if (op_setlk == F_SETLK && inprocess_neighbor && rc == MDBX_SUCCESS) {
/* restore file-lock */ /* restore file-lock */
rc = lck_op(inprocess_neighbor->lazy_fd, F_SETLKW, rc = lck_op(inprocess_neighbor->lazy_fd, F_SETLKW, (inprocess_neighbor->flags & MDBX_RDONLY) ? F_RDLCK : F_WRLCK,
(inprocess_neighbor->flags & MDBX_RDONLY) ? F_RDLCK : F_WRLCK, (inprocess_neighbor->flags & MDBX_EXCLUSIVE) ? 0 : inprocess_neighbor->pid,
(inprocess_neighbor->flags & MDBX_EXCLUSIVE)
? 0
: inprocess_neighbor->pid,
(inprocess_neighbor->flags & MDBX_EXCLUSIVE) ? OFF_T_MAX : 1); (inprocess_neighbor->flags & MDBX_EXCLUSIVE) ? OFF_T_MAX : 1);
} }
} }
@ -545,8 +511,7 @@ __cold MDBX_INTERNAL int lck_destroy(MDBX_env *env,
/*---------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------*/
__cold MDBX_INTERNAL int lck_init(MDBX_env *env, MDBX_env *inprocess_neighbor, __cold MDBX_INTERNAL int lck_init(MDBX_env *env, MDBX_env *inprocess_neighbor, int global_uniqueness_flag) {
int global_uniqueness_flag) {
#if MDBX_LOCKING == MDBX_LOCKING_SYSV #if MDBX_LOCKING == MDBX_LOCKING_SYSV
int semid = -1; int semid = -1;
/* don't initialize semaphores twice */ /* don't initialize semaphores twice */
@ -556,9 +521,7 @@ __cold MDBX_INTERNAL int lck_init(MDBX_env *env, MDBX_env *inprocess_neighbor,
if (fstat(env->lazy_fd, &st)) if (fstat(env->lazy_fd, &st))
return errno; return errno;
sysv_retry_create: sysv_retry_create:
semid = semget(env->me_sysv_ipc.key, 2, semid = semget(env->me_sysv_ipc.key, 2, IPC_CREAT | IPC_EXCL | (st.st_mode & (S_IRWXU | S_IRWXG | S_IRWXO)));
IPC_CREAT | IPC_EXCL |
(st.st_mode & (S_IRWXU | S_IRWXG | S_IRWXO)));
if (unlikely(semid == -1)) { if (unlikely(semid == -1)) {
int err = errno; int err = errno;
if (err != EEXIST) if (err != EEXIST)
@ -614,8 +577,7 @@ __cold MDBX_INTERNAL int lck_init(MDBX_env *env, MDBX_env *inprocess_neighbor,
} }
return MDBX_SUCCESS; return MDBX_SUCCESS;
#elif MDBX_LOCKING == MDBX_LOCKING_POSIX2001 || \ #elif MDBX_LOCKING == MDBX_LOCKING_POSIX2001 || MDBX_LOCKING == MDBX_LOCKING_POSIX2008
MDBX_LOCKING == MDBX_LOCKING_POSIX2008
if (inprocess_neighbor) if (inprocess_neighbor)
return MDBX_SUCCESS /* don't need any initialization for mutexes return MDBX_SUCCESS /* don't need any initialization for mutexes
if LCK already opened/used inside current process */ if LCK already opened/used inside current process */
@ -653,8 +615,7 @@ __cold MDBX_INTERNAL int lck_init(MDBX_env *env, MDBX_env *inprocess_neighbor,
#if MDBX_LOCKING == MDBX_LOCKING_POSIX2008 #if MDBX_LOCKING == MDBX_LOCKING_POSIX2008
#if defined(PTHREAD_MUTEX_ROBUST) || defined(pthread_mutexattr_setrobust) #if defined(PTHREAD_MUTEX_ROBUST) || defined(pthread_mutexattr_setrobust)
rc = pthread_mutexattr_setrobust(&ma, PTHREAD_MUTEX_ROBUST); rc = pthread_mutexattr_setrobust(&ma, PTHREAD_MUTEX_ROBUST);
#elif defined(PTHREAD_MUTEX_ROBUST_NP) || \ #elif defined(PTHREAD_MUTEX_ROBUST_NP) || defined(pthread_mutexattr_setrobust_np)
defined(pthread_mutexattr_setrobust_np)
rc = pthread_mutexattr_setrobust_np(&ma, PTHREAD_MUTEX_ROBUST_NP); rc = pthread_mutexattr_setrobust_np(&ma, PTHREAD_MUTEX_ROBUST_NP);
#elif _POSIX_THREAD_PROCESS_SHARED < 200809L #elif _POSIX_THREAD_PROCESS_SHARED < 200809L
rc = pthread_mutexattr_setrobust_np(&ma, PTHREAD_MUTEX_ROBUST_NP); rc = pthread_mutexattr_setrobust_np(&ma, PTHREAD_MUTEX_ROBUST_NP);
@ -665,8 +626,7 @@ __cold MDBX_INTERNAL int lck_init(MDBX_env *env, MDBX_env *inprocess_neighbor,
goto bailout; goto bailout;
#endif /* MDBX_LOCKING == MDBX_LOCKING_POSIX2008 */ #endif /* MDBX_LOCKING == MDBX_LOCKING_POSIX2008 */
#if defined(_POSIX_THREAD_PRIO_INHERIT) && _POSIX_THREAD_PRIO_INHERIT >= 0 && \ #if defined(_POSIX_THREAD_PRIO_INHERIT) && _POSIX_THREAD_PRIO_INHERIT >= 0 && !defined(MDBX_SAFE4QEMU)
!defined(MDBX_SAFE4QEMU)
rc = pthread_mutexattr_setprotocol(&ma, PTHREAD_PRIO_INHERIT); rc = pthread_mutexattr_setprotocol(&ma, PTHREAD_PRIO_INHERIT);
if (rc == ENOTSUP) if (rc == ENOTSUP)
rc = pthread_mutexattr_setprotocol(&ma, PTHREAD_PRIO_NONE); rc = pthread_mutexattr_setprotocol(&ma, PTHREAD_PRIO_NONE);
@ -691,8 +651,7 @@ bailout:
#endif /* MDBX_LOCKING > 0 */ #endif /* MDBX_LOCKING > 0 */
} }
__cold static int osal_ipclock_failed(MDBX_env *env, osal_ipclock_t *ipc, __cold static int osal_ipclock_failed(MDBX_env *env, osal_ipclock_t *ipc, const int err) {
const int err) {
int rc = err; int rc = err;
#if MDBX_LOCKING == MDBX_LOCKING_POSIX2008 || MDBX_LOCKING == MDBX_LOCKING_SYSV #if MDBX_LOCKING == MDBX_LOCKING_POSIX2008 || MDBX_LOCKING == MDBX_LOCKING_SYSV
@ -712,8 +671,7 @@ __cold static int osal_ipclock_failed(MDBX_env *env, osal_ipclock_t *ipc,
rc = MDBX_PANIC; rc = MDBX_PANIC;
} }
} }
WARNING("%clock owner died, %s", (rlocked ? 'r' : 'w'), WARNING("%clock owner died, %s", (rlocked ? 'r' : 'w'), (rc ? "this process' env is hosed" : "recovering"));
(rc ? "this process' env is hosed" : "recovering"));
int check_rc = mvcc_cleanup_dead(env, rlocked, nullptr); int check_rc = mvcc_cleanup_dead(env, rlocked, nullptr);
check_rc = (check_rc == MDBX_SUCCESS) ? MDBX_RESULT_TRUE : check_rc; check_rc = (check_rc == MDBX_SUCCESS) ? MDBX_RESULT_TRUE : check_rc;
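The recovery branch above relies on robust process-shared mutexes: a dead owner surfaces as EOWNERDEAD and the survivor must call pthread_mutex_consistent() after repairing shared state. A minimal sketch, assuming POSIX-2008 robust mutex support:

#include <errno.h>
#include <pthread.h>

int init_robust(pthread_mutex_t *ipc) {
  pthread_mutexattr_t ma;
  int rc = pthread_mutexattr_init(&ma);
  if (rc == 0)
    rc = pthread_mutexattr_setpshared(&ma, PTHREAD_PROCESS_SHARED);
  if (rc == 0)
    rc = pthread_mutexattr_setrobust(&ma, PTHREAD_MUTEX_ROBUST);
  if (rc == 0)
    rc = pthread_mutex_init(ipc, &ma);
  pthread_mutexattr_destroy(&ma);
  return rc;
}

int lock_robust(pthread_mutex_t *ipc) {
  int rc = pthread_mutex_lock(ipc);
  if (rc == EOWNERDEAD) {
    /* the previous owner died holding the mutex: repair shared state here,
     * then mark the mutex consistent so it can be used again */
    rc = pthread_mutex_consistent(ipc);
  }
  return rc;
}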
@ -781,10 +739,8 @@ MDBX_INTERNAL int osal_check_tid4bionic(void) {
} }
#endif /* __ANDROID_API__ || ANDROID) || BIONIC */ #endif /* __ANDROID_API__ || ANDROID) || BIONIC */
static int osal_ipclock_lock(MDBX_env *env, osal_ipclock_t *ipc, static int osal_ipclock_lock(MDBX_env *env, osal_ipclock_t *ipc, const bool dont_wait) {
const bool dont_wait) { #if MDBX_LOCKING == MDBX_LOCKING_POSIX2001 || MDBX_LOCKING == MDBX_LOCKING_POSIX2008
#if MDBX_LOCKING == MDBX_LOCKING_POSIX2001 || \
MDBX_LOCKING == MDBX_LOCKING_POSIX2008
int rc = osal_check_tid4bionic(); int rc = osal_check_tid4bionic();
if (likely(rc == 0)) if (likely(rc == 0))
rc = dont_wait ? pthread_mutex_trylock(ipc) : pthread_mutex_lock(ipc); rc = dont_wait ? pthread_mutex_trylock(ipc) : pthread_mutex_lock(ipc);
@ -800,9 +756,8 @@ static int osal_ipclock_lock(MDBX_env *env, osal_ipclock_t *ipc,
} else if (sem_wait(ipc)) } else if (sem_wait(ipc))
rc = errno; rc = errno;
#elif MDBX_LOCKING == MDBX_LOCKING_SYSV #elif MDBX_LOCKING == MDBX_LOCKING_SYSV
struct sembuf op = {.sem_num = (ipc != &env->lck->wrt_lock), struct sembuf op = {
.sem_op = -1, .sem_num = (ipc != &env->lck->wrt_lock), .sem_op = -1, .sem_flg = dont_wait ? IPC_NOWAIT | SEM_UNDO : SEM_UNDO};
.sem_flg = dont_wait ? IPC_NOWAIT | SEM_UNDO : SEM_UNDO};
int rc; int rc;
if (semop(env->me_sysv_ipc.semid, &op, 1)) { if (semop(env->me_sysv_ipc.semid, &op, 1)) {
rc = errno; rc = errno;
@ -823,8 +778,7 @@ static int osal_ipclock_lock(MDBX_env *env, osal_ipclock_t *ipc,
int osal_ipclock_unlock(MDBX_env *env, osal_ipclock_t *ipc) { int osal_ipclock_unlock(MDBX_env *env, osal_ipclock_t *ipc) {
int err = MDBX_ENOSYS; int err = MDBX_ENOSYS;
#if MDBX_LOCKING == MDBX_LOCKING_POSIX2001 || \ #if MDBX_LOCKING == MDBX_LOCKING_POSIX2001 || MDBX_LOCKING == MDBX_LOCKING_POSIX2008
MDBX_LOCKING == MDBX_LOCKING_POSIX2008
err = pthread_mutex_unlock(ipc); err = pthread_mutex_unlock(ipc);
#elif MDBX_LOCKING == MDBX_LOCKING_POSIX1988 #elif MDBX_LOCKING == MDBX_LOCKING_POSIX1988
err = sem_post(ipc) ? errno : MDBX_SUCCESS; err = sem_post(ipc) ? errno : MDBX_SUCCESS;
@ -833,9 +787,7 @@ int osal_ipclock_unlock(MDBX_env *env, osal_ipclock_t *ipc) {
err = EPERM; err = EPERM;
else { else {
*ipc = 0; *ipc = 0;
struct sembuf op = {.sem_num = (ipc != &env->lck->wrt_lock), struct sembuf op = {.sem_num = (ipc != &env->lck->wrt_lock), .sem_op = 1, .sem_flg = SEM_UNDO};
.sem_op = 1,
.sem_flg = SEM_UNDO};
err = semop(env->me_sysv_ipc.semid, &op, 1) ? errno : MDBX_SUCCESS; err = semop(env->me_sysv_ipc.semid, &op, 1) ? errno : MDBX_SUCCESS;
} }
#else #else
@ -845,13 +797,9 @@ int osal_ipclock_unlock(MDBX_env *env, osal_ipclock_t *ipc) {
if (unlikely(rc != MDBX_SUCCESS)) { if (unlikely(rc != MDBX_SUCCESS)) {
const uint32_t current_pid = osal_getpid(); const uint32_t current_pid = osal_getpid();
if (current_pid == env->pid || LOG_ENABLED(MDBX_LOG_NOTICE)) if (current_pid == env->pid || LOG_ENABLED(MDBX_LOG_NOTICE))
debug_log((current_pid == env->pid) debug_log((current_pid == env->pid) ? MDBX_LOG_FATAL : (rc = MDBX_SUCCESS, MDBX_LOG_NOTICE), "ipc-unlock()",
? MDBX_LOG_FATAL __LINE__, "failed: env %p, lck-%s %p, err %d\n", __Wpedantic_format_voidptr(env),
: (rc = MDBX_SUCCESS, MDBX_LOG_NOTICE), (env->lck == env->lck_mmap.lck) ? "mmap" : "stub", __Wpedantic_format_voidptr(env->lck), err);
"ipc-unlock()", __LINE__, "failed: env %p, lck-%s %p, err %d\n",
__Wpedantic_format_voidptr(env),
(env->lck == env->lck_mmap.lck) ? "mmap" : "stub",
__Wpedantic_format_voidptr(env->lck), err);
} }
return rc; return rc;
} }
@ -879,10 +827,9 @@ int lck_txn_lock(MDBX_env *env, bool dont_wait) {
const int err = osal_ipclock_lock(env, &env->lck->wrt_lock, dont_wait); const int err = osal_ipclock_lock(env, &env->lck->wrt_lock, dont_wait);
int rc = err; int rc = err;
if (likely(!MDBX_IS_ERROR(err))) { if (likely(!MDBX_IS_ERROR(err))) {
eASSERT(env, !env->basal_txn->owner || eASSERT(env, !env->basal_txn->owner || err == /* if another thread in this same process terminated
err == /* if another thread in this same process terminated without releasing the lock */
without releasing the lock */ MDBX_RESULT_TRUE);
MDBX_RESULT_TRUE);
env->basal_txn->owner = osal_thread_self(); env->basal_txn->owner = osal_thread_self();
rc = MDBX_SUCCESS; rc = MDBX_SUCCESS;
} }


@ -16,10 +16,8 @@
#define LCK_WAITFOR 0 #define LCK_WAITFOR 0
#define LCK_DONTWAIT LOCKFILE_FAIL_IMMEDIATELY #define LCK_DONTWAIT LOCKFILE_FAIL_IMMEDIATELY
static int flock_with_event(HANDLE fd, HANDLE event, unsigned flags, static int flock_with_event(HANDLE fd, HANDLE event, unsigned flags, size_t offset, size_t bytes) {
size_t offset, size_t bytes) { TRACE("lock>>: fd %p, event %p, flags 0x%x offset %zu, bytes %zu >>", fd, event, flags, offset, bytes);
TRACE("lock>>: fd %p, event %p, flags 0x%x offset %zu, bytes %zu >>", fd,
event, flags, offset, bytes);
OVERLAPPED ov; OVERLAPPED ov;
ov.Internal = 0; ov.Internal = 0;
ov.InternalHigh = 0; ov.InternalHigh = 0;
@ -27,8 +25,7 @@ static int flock_with_event(HANDLE fd, HANDLE event, unsigned flags,
ov.Offset = (DWORD)offset; ov.Offset = (DWORD)offset;
ov.OffsetHigh = HIGH_DWORD(offset); ov.OffsetHigh = HIGH_DWORD(offset);
if (LockFileEx(fd, flags, 0, (DWORD)bytes, HIGH_DWORD(bytes), &ov)) { if (LockFileEx(fd, flags, 0, (DWORD)bytes, HIGH_DWORD(bytes), &ov)) {
TRACE("lock<<: fd %p, event %p, flags 0x%x offset %zu, bytes %zu << %s", fd, TRACE("lock<<: fd %p, event %p, flags 0x%x offset %zu, bytes %zu << %s", fd, event, flags, offset, bytes, "done");
event, flags, offset, bytes, "done");
return MDBX_SUCCESS; return MDBX_SUCCESS;
} }
@ -36,37 +33,32 @@ static int flock_with_event(HANDLE fd, HANDLE event, unsigned flags,
if (rc == ERROR_IO_PENDING) { if (rc == ERROR_IO_PENDING) {
if (event) { if (event) {
if (GetOverlappedResult(fd, &ov, &rc, true)) { if (GetOverlappedResult(fd, &ov, &rc, true)) {
TRACE("lock<<: fd %p, event %p, flags 0x%x offset %zu, bytes %zu << %s", TRACE("lock<<: fd %p, event %p, flags 0x%x offset %zu, bytes %zu << %s", fd, event, flags, offset, bytes,
fd, event, flags, offset, bytes, "overlapped-done"); "overlapped-done");
return MDBX_SUCCESS; return MDBX_SUCCESS;
} }
rc = GetLastError(); rc = GetLastError();
} else } else
CancelIo(fd); CancelIo(fd);
} }
TRACE("lock<<: fd %p, event %p, flags 0x%x offset %zu, bytes %zu << err %d", TRACE("lock<<: fd %p, event %p, flags 0x%x offset %zu, bytes %zu << err %d", fd, event, flags, offset, bytes,
fd, event, flags, offset, bytes, (int)rc); (int)rc);
return (int)rc; return (int)rc;
} }
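flock_with_event() above wraps LockFileEx with an OVERLAPPED structure and an optional event for overlapped handles. A hedged sketch of the same primitive in its plain synchronous form (helper names are illustrative, not the library's API):

#include <stdint.h>
#include <windows.h>

DWORD lock_range_exclusive(HANDLE fd, uint64_t offset, uint64_t bytes) {
  OVERLAPPED ov = {0};
  ov.Offset = (DWORD)offset; /* low 32 bits of the range start */
  ov.OffsetHigh = (DWORD)(offset >> 32);
  if (!LockFileEx(fd, LOCKFILE_EXCLUSIVE_LOCK, 0, (DWORD)bytes, (DWORD)(bytes >> 32), &ov))
    return GetLastError(); /* on a synchronous handle this blocks until acquired */
  return ERROR_SUCCESS;
}

DWORD unlock_range(HANDLE fd, uint64_t offset, uint64_t bytes) {
  return UnlockFile(fd, (DWORD)offset, (DWORD)(offset >> 32), (DWORD)bytes, (DWORD)(bytes >> 32))
             ? ERROR_SUCCESS
             : GetLastError();
}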
static inline int flock(HANDLE fd, unsigned flags, size_t offset, static inline int flock(HANDLE fd, unsigned flags, size_t offset, size_t bytes) {
size_t bytes) {
return flock_with_event(fd, 0, flags, offset, bytes); return flock_with_event(fd, 0, flags, offset, bytes);
} }
static inline int flock_data(const MDBX_env *env, unsigned flags, size_t offset, static inline int flock_data(const MDBX_env *env, unsigned flags, size_t offset, size_t bytes) {
size_t bytes) { const HANDLE fd4data = env->ioring.overlapped_fd ? env->ioring.overlapped_fd : env->lazy_fd;
const HANDLE fd4data =
env->ioring.overlapped_fd ? env->ioring.overlapped_fd : env->lazy_fd;
return flock_with_event(fd4data, env->dxb_lock_event, flags, offset, bytes); return flock_with_event(fd4data, env->dxb_lock_event, flags, offset, bytes);
} }
static int funlock(mdbx_filehandle_t fd, size_t offset, size_t bytes) { static int funlock(mdbx_filehandle_t fd, size_t offset, size_t bytes) {
TRACE("unlock: fd %p, offset %zu, bytes %zu", fd, offset, bytes); TRACE("unlock: fd %p, offset %zu, bytes %zu", fd, offset, bytes);
return UnlockFile(fd, (DWORD)offset, HIGH_DWORD(offset), (DWORD)bytes, return UnlockFile(fd, (DWORD)offset, HIGH_DWORD(offset), (DWORD)bytes, HIGH_DWORD(bytes)) ? MDBX_SUCCESS
HIGH_DWORD(bytes)) : (int)GetLastError();
? MDBX_SUCCESS
: (int)GetLastError();
} }
/*----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*/
@ -88,9 +80,7 @@ int lck_txn_lock(MDBX_env *env, bool dontwait) {
} else { } else {
__try { __try {
EnterCriticalSection(&env->windowsbug_lock); EnterCriticalSection(&env->windowsbug_lock);
} } __except ((GetExceptionCode() == 0xC0000194 /* STATUS_POSSIBLE_DEADLOCK / EXCEPTION_POSSIBLE_DEADLOCK */)
__except ((GetExceptionCode() ==
0xC0000194 /* STATUS_POSSIBLE_DEADLOCK / EXCEPTION_POSSIBLE_DEADLOCK */)
? EXCEPTION_EXECUTE_HANDLER ? EXCEPTION_EXECUTE_HANDLER
: EXCEPTION_CONTINUE_SEARCH) { : EXCEPTION_CONTINUE_SEARCH) {
return MDBX_EDEADLK; return MDBX_EDEADLK;
@ -101,20 +91,15 @@ int lck_txn_lock(MDBX_env *env, bool dontwait) {
if (env->flags & MDBX_EXCLUSIVE) if (env->flags & MDBX_EXCLUSIVE)
goto done; goto done;
const HANDLE fd4data = const HANDLE fd4data = env->ioring.overlapped_fd ? env->ioring.overlapped_fd : env->lazy_fd;
env->ioring.overlapped_fd ? env->ioring.overlapped_fd : env->lazy_fd;
int rc = flock_with_event(fd4data, env->dxb_lock_event, int rc = flock_with_event(fd4data, env->dxb_lock_event,
dontwait ? (LCK_EXCLUSIVE | LCK_DONTWAIT) dontwait ? (LCK_EXCLUSIVE | LCK_DONTWAIT) : (LCK_EXCLUSIVE | LCK_WAITFOR), DXB_BODY);
: (LCK_EXCLUSIVE | LCK_WAITFOR),
DXB_BODY);
if (rc == ERROR_LOCK_VIOLATION && dontwait) { if (rc == ERROR_LOCK_VIOLATION && dontwait) {
SleepEx(0, true); SleepEx(0, true);
rc = flock_with_event(fd4data, env->dxb_lock_event, rc = flock_with_event(fd4data, env->dxb_lock_event, LCK_EXCLUSIVE | LCK_DONTWAIT, DXB_BODY);
LCK_EXCLUSIVE | LCK_DONTWAIT, DXB_BODY);
if (rc == ERROR_LOCK_VIOLATION) { if (rc == ERROR_LOCK_VIOLATION) {
SleepEx(0, true); SleepEx(0, true);
rc = flock_with_event(fd4data, env->dxb_lock_event, rc = flock_with_event(fd4data, env->dxb_lock_event, LCK_EXCLUSIVE | LCK_DONTWAIT, DXB_BODY);
LCK_EXCLUSIVE | LCK_DONTWAIT, DXB_BODY);
} }
} }
if (rc == MDBX_SUCCESS) { if (rc == MDBX_SUCCESS) {
@ -133,8 +118,7 @@ int lck_txn_lock(MDBX_env *env, bool dontwait) {
void lck_txn_unlock(MDBX_env *env) { void lck_txn_unlock(MDBX_env *env) {
eASSERT(env, env->basal_txn->owner == osal_thread_self()); eASSERT(env, env->basal_txn->owner == osal_thread_self());
if ((env->flags & MDBX_EXCLUSIVE) == 0) { if ((env->flags & MDBX_EXCLUSIVE) == 0) {
const HANDLE fd4data = const HANDLE fd4data = env->ioring.overlapped_fd ? env->ioring.overlapped_fd : env->lazy_fd;
env->ioring.overlapped_fd ? env->ioring.overlapped_fd : env->lazy_fd;
int err = funlock(fd4data, DXB_BODY); int err = funlock(fd4data, DXB_BODY);
if (err != MDBX_SUCCESS) if (err != MDBX_SUCCESS)
mdbx_panic("%s failed: err %u", __func__, err); mdbx_panic("%s failed: err %u", __func__, err);
@ -173,8 +157,7 @@ MDBX_INTERNAL int lck_rdt_lock(MDBX_env *env) {
} }
MDBX_INTERNAL void lck_rdt_unlock(MDBX_env *env) { MDBX_INTERNAL void lck_rdt_unlock(MDBX_env *env) {
if (env->lck_mmap.fd != INVALID_HANDLE_VALUE && if (env->lck_mmap.fd != INVALID_HANDLE_VALUE && (env->flags & MDBX_EXCLUSIVE) == 0) {
(env->flags & MDBX_EXCLUSIVE) == 0) {
/* transition from S-E (locked) to S-? (used), e.g. unlock upper-part */ /* transition from S-E (locked) to S-? (used), e.g. unlock upper-part */
int err = funlock(env->lck_mmap.fd, LCK_UPPER); int err = funlock(env->lck_mmap.fd, LCK_UPPER);
if (err != MDBX_SUCCESS) if (err != MDBX_SUCCESS)
@ -184,22 +167,15 @@ MDBX_INTERNAL void lck_rdt_unlock(MDBX_env *env) {
} }
MDBX_INTERNAL int osal_lockfile(mdbx_filehandle_t fd, bool wait) { MDBX_INTERNAL int osal_lockfile(mdbx_filehandle_t fd, bool wait) {
return flock( return flock(fd, wait ? LCK_EXCLUSIVE | LCK_WAITFOR : LCK_EXCLUSIVE | LCK_DONTWAIT, 0, DXB_MAXLEN);
fd, wait ? LCK_EXCLUSIVE | LCK_WAITFOR : LCK_EXCLUSIVE | LCK_DONTWAIT, 0,
DXB_MAXLEN);
} }
static int suspend_and_append(mdbx_handle_array_t **array, static int suspend_and_append(mdbx_handle_array_t **array, const DWORD ThreadId) {
const DWORD ThreadId) {
const unsigned limit = (*array)->limit; const unsigned limit = (*array)->limit;
if ((*array)->count == limit) { if ((*array)->count == limit) {
mdbx_handle_array_t *const ptr = mdbx_handle_array_t *const ptr = osal_realloc(
osal_realloc((limit > ARRAY_LENGTH((*array)->handles)) (limit > ARRAY_LENGTH((*array)->handles)) ? *array : /* don't free initial array on the stack */ nullptr,
? *array sizeof(mdbx_handle_array_t) + sizeof(HANDLE) * (limit * (size_t)2 - ARRAY_LENGTH((*array)->handles)));
: /* don't free initial array on the stack */ nullptr,
sizeof(mdbx_handle_array_t) +
sizeof(HANDLE) * (limit * (size_t)2 -
ARRAY_LENGTH((*array)->handles)));
if (!ptr) if (!ptr)
return MDBX_ENOMEM; return MDBX_ENOMEM;
if (limit == ARRAY_LENGTH((*array)->handles)) if (limit == ARRAY_LENGTH((*array)->handles))
@ -208,16 +184,15 @@ static int suspend_and_append(mdbx_handle_array_t **array,
(*array)->limit = limit * 2; (*array)->limit = limit * 2;
} }
HANDLE hThread = OpenThread(THREAD_SUSPEND_RESUME | THREAD_QUERY_INFORMATION, HANDLE hThread = OpenThread(THREAD_SUSPEND_RESUME | THREAD_QUERY_INFORMATION, FALSE, ThreadId);
FALSE, ThreadId);
if (hThread == nullptr) if (hThread == nullptr)
return (int)GetLastError(); return (int)GetLastError();
if (SuspendThread(hThread) == (DWORD)-1) { if (SuspendThread(hThread) == (DWORD)-1) {
int err = (int)GetLastError(); int err = (int)GetLastError();
DWORD ExitCode; DWORD ExitCode;
if (err == /* workaround for Win10 UCRT bug */ ERROR_ACCESS_DENIED || if (err == /* workaround for Win10 UCRT bug */ ERROR_ACCESS_DENIED || !GetExitCodeThread(hThread, &ExitCode) ||
!GetExitCodeThread(hThread, &ExitCode) || ExitCode != STILL_ACTIVE) ExitCode != STILL_ACTIVE)
err = MDBX_SUCCESS; err = MDBX_SUCCESS;
CloseHandle(hThread); CloseHandle(hThread);
return err; return err;
@ -227,21 +202,17 @@ static int suspend_and_append(mdbx_handle_array_t **array,
return MDBX_SUCCESS; return MDBX_SUCCESS;
} }
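suspend_and_append() above pauses every other thread of the process before the mapping is moved. A sketch of the per-thread step, assuming only the Win32 API; the tolerance for threads that already exited mirrors the workaround in the code:

#include <windows.h>

DWORD suspend_thread_by_id(DWORD tid) {
  HANDLE th = OpenThread(THREAD_SUSPEND_RESUME | THREAD_QUERY_INFORMATION, FALSE, tid);
  if (!th)
    return GetLastError();
  DWORD rc = ERROR_SUCCESS;
  if (SuspendThread(th) == (DWORD)-1) {
    rc = GetLastError();
    DWORD exit_code;
    /* a thread that has already terminated is not a failure for our purposes */
    if (GetExitCodeThread(th, &exit_code) && exit_code != STILL_ACTIVE)
      rc = ERROR_SUCCESS;
  }
  CloseHandle(th);
  return rc;
}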
MDBX_INTERNAL int MDBX_INTERNAL int osal_suspend_threads_before_remap(MDBX_env *env, mdbx_handle_array_t **array) {
osal_suspend_threads_before_remap(MDBX_env *env, mdbx_handle_array_t **array) {
eASSERT(env, (env->flags & MDBX_NOSTICKYTHREADS) == 0); eASSERT(env, (env->flags & MDBX_NOSTICKYTHREADS) == 0);
const uintptr_t CurrentTid = GetCurrentThreadId(); const uintptr_t CurrentTid = GetCurrentThreadId();
int rc; int rc;
if (env->lck_mmap.lck) { if (env->lck_mmap.lck) {
/* Scan LCK for threads of the current process */ /* Scan LCK for threads of the current process */
const reader_slot_t *const begin = env->lck_mmap.lck->rdt; const reader_slot_t *const begin = env->lck_mmap.lck->rdt;
const reader_slot_t *const end = const reader_slot_t *const end = begin + atomic_load32(&env->lck_mmap.lck->rdt_length, mo_AcquireRelease);
begin +
atomic_load32(&env->lck_mmap.lck->rdt_length, mo_AcquireRelease);
const uintptr_t WriteTxnOwner = env->basal_txn ? env->basal_txn->owner : 0; const uintptr_t WriteTxnOwner = env->basal_txn ? env->basal_txn->owner : 0;
for (const reader_slot_t *reader = begin; reader < end; ++reader) { for (const reader_slot_t *reader = begin; reader < end; ++reader) {
if (reader->pid.weak != env->pid || !reader->tid.weak || if (reader->pid.weak != env->pid || !reader->tid.weak || reader->tid.weak >= MDBX_TID_TXN_OUSTED) {
reader->tid.weak >= MDBX_TID_TXN_OUSTED) {
skip_lck: skip_lck:
continue; continue;
} }
@ -280,8 +251,7 @@ osal_suspend_threads_before_remap(MDBX_env *env, mdbx_handle_array_t **array) {
} }
do { do {
if (entry.th32OwnerProcessID != env->pid || if (entry.th32OwnerProcessID != env->pid || entry.th32ThreadID == CurrentTid)
entry.th32ThreadID == CurrentTid)
continue; continue;
rc = suspend_and_append(array, entry.th32ThreadID); rc = suspend_and_append(array, entry.th32ThreadID);
@ -306,8 +276,8 @@ MDBX_INTERNAL int osal_resume_threads_after_remap(mdbx_handle_array_t *array) {
if (ResumeThread(hThread) == (DWORD)-1) { if (ResumeThread(hThread) == (DWORD)-1) {
const int err = (int)GetLastError(); const int err = (int)GetLastError();
DWORD ExitCode; DWORD ExitCode;
if (err != /* workaround for Win10 UCRT bug */ ERROR_ACCESS_DENIED && if (err != /* workaround for Win10 UCRT bug */ ERROR_ACCESS_DENIED && GetExitCodeThread(hThread, &ExitCode) &&
GetExitCodeThread(hThread, &ExitCode) && ExitCode == STILL_ACTIVE) ExitCode == STILL_ACTIVE)
rc = err; rc = err;
} }
CloseHandle(hThread); CloseHandle(hThread);
@ -358,35 +328,30 @@ static void lck_unlock(MDBX_env *env) {
do do
err = funlock(env->lck_mmap.fd, LCK_LOWER); err = funlock(env->lck_mmap.fd, LCK_LOWER);
while (err == MDBX_SUCCESS); while (err == MDBX_SUCCESS);
assert(err == ERROR_NOT_LOCKED || assert(err == ERROR_NOT_LOCKED || (globals.running_under_Wine && err == ERROR_LOCK_VIOLATION));
(globals.running_under_Wine && err == ERROR_LOCK_VIOLATION));
SetLastError(ERROR_SUCCESS); SetLastError(ERROR_SUCCESS);
do do
err = funlock(env->lck_mmap.fd, LCK_UPPER); err = funlock(env->lck_mmap.fd, LCK_UPPER);
while (err == MDBX_SUCCESS); while (err == MDBX_SUCCESS);
assert(err == ERROR_NOT_LOCKED || assert(err == ERROR_NOT_LOCKED || (globals.running_under_Wine && err == ERROR_LOCK_VIOLATION));
(globals.running_under_Wine && err == ERROR_LOCK_VIOLATION));
SetLastError(ERROR_SUCCESS); SetLastError(ERROR_SUCCESS);
} }
const HANDLE fd4data = const HANDLE fd4data = env->ioring.overlapped_fd ? env->ioring.overlapped_fd : env->lazy_fd;
env->ioring.overlapped_fd ? env->ioring.overlapped_fd : env->lazy_fd;
if (fd4data != INVALID_HANDLE_VALUE) { if (fd4data != INVALID_HANDLE_VALUE) {
/* explicitly unlock to avoid latency for other processes (windows kernel /* explicitly unlock to avoid latency for other processes (windows kernel
* releases such locks via deferred queues) */ * releases such locks via deferred queues) */
do do
err = funlock(fd4data, DXB_BODY); err = funlock(fd4data, DXB_BODY);
while (err == MDBX_SUCCESS); while (err == MDBX_SUCCESS);
assert(err == ERROR_NOT_LOCKED || assert(err == ERROR_NOT_LOCKED || (globals.running_under_Wine && err == ERROR_LOCK_VIOLATION));
(globals.running_under_Wine && err == ERROR_LOCK_VIOLATION));
SetLastError(ERROR_SUCCESS); SetLastError(ERROR_SUCCESS);
do do
err = funlock(fd4data, DXB_WHOLE); err = funlock(fd4data, DXB_WHOLE);
while (err == MDBX_SUCCESS); while (err == MDBX_SUCCESS);
assert(err == ERROR_NOT_LOCKED || assert(err == ERROR_NOT_LOCKED || (globals.running_under_Wine && err == ERROR_LOCK_VIOLATION));
(globals.running_under_Wine && err == ERROR_LOCK_VIOLATION));
SetLastError(ERROR_SUCCESS); SetLastError(ERROR_SUCCESS);
} }
} }
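A short sketch of the unlock-until-ERROR_NOT_LOCKED loop used above, which drains every lock this process stacked on a range instead of waiting for the kernel's deferred cleanup (the 32-bit offset and length are illustrative):

#include <windows.h>

void drain_own_locks(HANDLE fd, DWORD offset, DWORD bytes) {
  /* UnlockFile() removes one lock per call; keep going until it reports
   * ERROR_NOT_LOCKED, i.e. this process holds nothing on the range anymore. */
  while (UnlockFile(fd, offset, 0, bytes, 0))
    ;
  SetLastError(ERROR_SUCCESS);
}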
@ -418,8 +383,7 @@ static int internal_seize_lck(HANDLE lfd) {
/* 6) something went wrong, give up */ /* 6) something went wrong, give up */
rc = funlock(lfd, LCK_UPPER); rc = funlock(lfd, LCK_UPPER);
if (rc != MDBX_SUCCESS) if (rc != MDBX_SUCCESS)
mdbx_panic("%s(%s) failed: err %u", __func__, "?-E(middle) >> ?-?(free)", mdbx_panic("%s(%s) failed: err %u", __func__, "?-E(middle) >> ?-?(free)", rc);
rc);
return rc; return rc;
} }
@ -435,16 +399,14 @@ static int internal_seize_lck(HANDLE lfd) {
* transition to S-? (used) or ?-? (free) */ * transition to S-? (used) or ?-? (free) */
int err = funlock(lfd, LCK_UPPER); int err = funlock(lfd, LCK_UPPER);
if (err != MDBX_SUCCESS) if (err != MDBX_SUCCESS)
mdbx_panic("%s(%s) failed: err %u", __func__, mdbx_panic("%s(%s) failed: err %u", __func__, "X-E(locked/middle) >> X-?(used/free)", err);
"X-E(locked/middle) >> X-?(used/free)", err);
/* 9) now on S-? (used, DONE) or ?-? (free, FAILURE) */ /* 9) now on S-? (used, DONE) or ?-? (free, FAILURE) */
return rc; return rc;
} }
MDBX_INTERNAL int lck_seize(MDBX_env *env) { MDBX_INTERNAL int lck_seize(MDBX_env *env) {
const HANDLE fd4data = const HANDLE fd4data = env->ioring.overlapped_fd ? env->ioring.overlapped_fd : env->lazy_fd;
env->ioring.overlapped_fd ? env->ioring.overlapped_fd : env->lazy_fd;
assert(fd4data != INVALID_HANDLE_VALUE); assert(fd4data != INVALID_HANDLE_VALUE);
if (env->flags & MDBX_EXCLUSIVE) if (env->flags & MDBX_EXCLUSIVE)
return MDBX_RESULT_TRUE /* nope since files must be opened return MDBX_RESULT_TRUE /* nope since files must be opened
@ -479,16 +441,14 @@ MDBX_INTERNAL int lck_seize(MDBX_env *env) {
jitter4testing(false); jitter4testing(false);
err = funlock(fd4data, DXB_WHOLE); err = funlock(fd4data, DXB_WHOLE);
if (err != MDBX_SUCCESS) if (err != MDBX_SUCCESS)
mdbx_panic("%s(%s) failed: err %u", __func__, mdbx_panic("%s(%s) failed: err %u", __func__, "unlock-against-without-lck", err);
"unlock-against-without-lck", err);
} }
return rc; return rc;
} }
MDBX_INTERNAL int lck_downgrade(MDBX_env *env) { MDBX_INTERNAL int lck_downgrade(MDBX_env *env) {
const HANDLE fd4data = const HANDLE fd4data = env->ioring.overlapped_fd ? env->ioring.overlapped_fd : env->lazy_fd;
env->ioring.overlapped_fd ? env->ioring.overlapped_fd : env->lazy_fd;
/* Transition from exclusive-write state (E-E) to used (S-?) */ /* Transition from exclusive-write state (E-E) to used (S-?) */
assert(fd4data != INVALID_HANDLE_VALUE); assert(fd4data != INVALID_HANDLE_VALUE);
assert(env->lck_mmap.fd != INVALID_HANDLE_VALUE); assert(env->lck_mmap.fd != INVALID_HANDLE_VALUE);
@ -499,8 +459,7 @@ MDBX_INTERNAL int lck_downgrade(MDBX_env *env) {
/* 1) now at E-E (exclusive-write), transition to ?_E (middle) */ /* 1) now at E-E (exclusive-write), transition to ?_E (middle) */
int rc = funlock(env->lck_mmap.fd, LCK_LOWER); int rc = funlock(env->lck_mmap.fd, LCK_LOWER);
if (rc != MDBX_SUCCESS) if (rc != MDBX_SUCCESS)
mdbx_panic("%s(%s) failed: err %u", __func__, mdbx_panic("%s(%s) failed: err %u", __func__, "E-E(exclusive-write) >> ?-E(middle)", rc);
"E-E(exclusive-write) >> ?-E(middle)", rc);
/* 2) now at ?-E (middle), transition to S-E (locked) */ /* 2) now at ?-E (middle), transition to S-E (locked) */
rc = flock(env->lck_mmap.fd, LCK_SHARED | LCK_DONTWAIT, LCK_LOWER); rc = flock(env->lck_mmap.fd, LCK_SHARED | LCK_DONTWAIT, LCK_LOWER);
@ -513,8 +472,7 @@ MDBX_INTERNAL int lck_downgrade(MDBX_env *env) {
/* 4) got S-E (locked), continue transition to S-? (used) */ /* 4) got S-E (locked), continue transition to S-? (used) */
rc = funlock(env->lck_mmap.fd, LCK_UPPER); rc = funlock(env->lck_mmap.fd, LCK_UPPER);
if (rc != MDBX_SUCCESS) if (rc != MDBX_SUCCESS)
mdbx_panic("%s(%s) failed: err %u", __func__, "S-E(locked) >> S-?(used)", mdbx_panic("%s(%s) failed: err %u", __func__, "S-E(locked) >> S-?(used)", rc);
rc);
return MDBX_SUCCESS /* 5) now at S-? (used), done */; return MDBX_SUCCESS /* 5) now at S-? (used), done */;
} }
@ -529,9 +487,7 @@ MDBX_INTERNAL int lck_upgrade(MDBX_env *env, bool dont_wait) {
/* 1) now on S-? (used), try S-E (locked) */ /* 1) now on S-? (used), try S-E (locked) */
jitter4testing(false); jitter4testing(false);
int rc = flock(env->lck_mmap.fd, int rc = flock(env->lck_mmap.fd, dont_wait ? LCK_EXCLUSIVE | LCK_DONTWAIT : LCK_EXCLUSIVE, LCK_UPPER);
dont_wait ? LCK_EXCLUSIVE | LCK_DONTWAIT : LCK_EXCLUSIVE,
LCK_UPPER);
if (rc != MDBX_SUCCESS) { if (rc != MDBX_SUCCESS) {
/* 2) something went wrong, give up */; /* 2) something went wrong, give up */;
VERBOSE("%s, err %u", "S-?(used) >> S-E(locked)", rc); VERBOSE("%s, err %u", "S-?(used) >> S-E(locked)", rc);
@ -541,14 +497,11 @@ MDBX_INTERNAL int lck_upgrade(MDBX_env *env, bool dont_wait) {
/* 3) now on S-E (locked), transition to ?-E (middle) */ /* 3) now on S-E (locked), transition to ?-E (middle) */
rc = funlock(env->lck_mmap.fd, LCK_LOWER); rc = funlock(env->lck_mmap.fd, LCK_LOWER);
if (rc != MDBX_SUCCESS) if (rc != MDBX_SUCCESS)
mdbx_panic("%s(%s) failed: err %u", __func__, "S-E(locked) >> ?-E(middle)", mdbx_panic("%s(%s) failed: err %u", __func__, "S-E(locked) >> ?-E(middle)", rc);
rc);
/* 4) now on ?-E (middle), try E-E (exclusive-write) */ /* 4) now on ?-E (middle), try E-E (exclusive-write) */
jitter4testing(false); jitter4testing(false);
rc = flock(env->lck_mmap.fd, rc = flock(env->lck_mmap.fd, dont_wait ? LCK_EXCLUSIVE | LCK_DONTWAIT : LCK_EXCLUSIVE, LCK_LOWER);
dont_wait ? LCK_EXCLUSIVE | LCK_DONTWAIT : LCK_EXCLUSIVE,
LCK_LOWER);
if (rc != MDBX_SUCCESS) { if (rc != MDBX_SUCCESS) {
/* 5) something went wrong, give up */; /* 5) something went wrong, give up */;
VERBOSE("%s, err %u", "?-E(middle) >> E-E(exclusive-write)", rc); VERBOSE("%s, err %u", "?-E(middle) >> E-E(exclusive-write)", rc);
@ -558,8 +511,7 @@ MDBX_INTERNAL int lck_upgrade(MDBX_env *env, bool dont_wait) {
return MDBX_SUCCESS /* 6) now at E-E (exclusive-write), done */; return MDBX_SUCCESS /* 6) now at E-E (exclusive-write), done */;
} }
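The numbered comments in lck_downgrade()/lck_upgrade() walk a small state machine over two byte-range slots (LOWER/UPPER). A toy model — not libmdbx code — that names those states and prints the upgrade path:

#include <stdio.h>

typedef enum { SLOT_FREE = '?', SLOT_SHARED = 'S', SLOT_EXCLUSIVE = 'E' } slot_t;

typedef struct {
  slot_t lower, upper;
} lck_state_t;

const char *describe(lck_state_t s) {
  if (s.lower == SLOT_FREE && s.upper == SLOT_FREE) return "free";
  if (s.lower == SLOT_SHARED && s.upper == SLOT_FREE) return "used (cooperative)";
  if (s.lower == SLOT_SHARED && s.upper == SLOT_EXCLUSIVE) return "locked (upgrading)";
  if (s.lower == SLOT_FREE && s.upper == SLOT_EXCLUSIVE) return "middle (transient)";
  if (s.lower == SLOT_EXCLUSIVE && s.upper == SLOT_EXCLUSIVE) return "exclusive-write";
  return "unexpected";
}

int main(void) {
  /* upgrade path from the code above: S-? -> S-E -> ?-E -> E-E */
  lck_state_t path[] = {{SLOT_SHARED, SLOT_FREE},
                        {SLOT_SHARED, SLOT_EXCLUSIVE},
                        {SLOT_FREE, SLOT_EXCLUSIVE},
                        {SLOT_EXCLUSIVE, SLOT_EXCLUSIVE}};
  for (unsigned i = 0; i < sizeof(path) / sizeof(path[0]); ++i)
    printf("%c-%c : %s\n", path[i].lower, path[i].upper, describe(path[i]));
  return 0;
}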
MDBX_INTERNAL int lck_init(MDBX_env *env, MDBX_env *inprocess_neighbor, MDBX_INTERNAL int lck_init(MDBX_env *env, MDBX_env *inprocess_neighbor, int global_uniqueness_flag) {
int global_uniqueness_flag) {
(void)env; (void)env;
(void)inprocess_neighbor; (void)inprocess_neighbor;
(void)global_uniqueness_flag; (void)global_uniqueness_flag;
@ -568,12 +520,9 @@ MDBX_INTERNAL int lck_init(MDBX_env *env, MDBX_env *inprocess_neighbor,
TOKEN_PRIVILEGES privileges; TOKEN_PRIVILEGES privileges;
privileges.PrivilegeCount = 1; privileges.PrivilegeCount = 1;
privileges.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED; privileges.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
if (!OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES, if (!OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES, &token) ||
&token) || !LookupPrivilegeValue(nullptr, SE_LOCK_MEMORY_NAME, &privileges.Privileges[0].Luid) ||
!LookupPrivilegeValue(nullptr, SE_LOCK_MEMORY_NAME, !AdjustTokenPrivileges(token, FALSE, &privileges, sizeof(privileges), nullptr, nullptr) ||
&privileges.Privileges[0].Luid) ||
!AdjustTokenPrivileges(token, FALSE, &privileges, sizeof(privileges),
nullptr, nullptr) ||
GetLastError() != ERROR_SUCCESS) GetLastError() != ERROR_SUCCESS)
imports.SetFileIoOverlappedRange = nullptr; imports.SetFileIoOverlappedRange = nullptr;
@ -583,8 +532,7 @@ MDBX_INTERNAL int lck_init(MDBX_env *env, MDBX_env *inprocess_neighbor,
return MDBX_SUCCESS; return MDBX_SUCCESS;
} }
MDBX_INTERNAL int lck_destroy(MDBX_env *env, MDBX_env *inprocess_neighbor, MDBX_INTERNAL int lck_destroy(MDBX_env *env, MDBX_env *inprocess_neighbor, const uint32_t current_pid) {
const uint32_t current_pid) {
(void)current_pid; (void)current_pid;
/* LY: should unmap before releasing the locks to avoid race condition and /* LY: should unmap before releasing the locks to avoid race condition and
* STATUS_USER_MAPPED_FILE/ERROR_USER_MAPPED_FILE */ * STATUS_USER_MAPPED_FILE/ERROR_USER_MAPPED_FILE */
@ -593,8 +541,7 @@ MDBX_INTERNAL int lck_destroy(MDBX_env *env, MDBX_env *inprocess_neighbor,
if (env->lck_mmap.lck) { if (env->lck_mmap.lck) {
const bool synced = env->lck_mmap.lck->unsynced_pages.weak == 0; const bool synced = env->lck_mmap.lck->unsynced_pages.weak == 0;
osal_munmap(&env->lck_mmap); osal_munmap(&env->lck_mmap);
if (synced && !inprocess_neighbor && if (synced && !inprocess_neighbor && env->lck_mmap.fd != INVALID_HANDLE_VALUE &&
env->lck_mmap.fd != INVALID_HANDLE_VALUE &&
lck_upgrade(env, true) == MDBX_SUCCESS) lck_upgrade(env, true) == MDBX_SUCCESS)
/* this will fail if LCK is used/mmapped by other process(es) */ /* this will fail if LCK is used/mmapped by other process(es) */
osal_ftruncate(env->lck_mmap.fd, 0); osal_ftruncate(env->lck_mmap.fd, 0);


@ -15,14 +15,12 @@ __cold static int lck_setup_locked(MDBX_env *env) {
if (env->lck_mmap.fd == INVALID_HANDLE_VALUE) { if (env->lck_mmap.fd == INVALID_HANDLE_VALUE) {
env->lck = lckless_stub(env); env->lck = lckless_stub(env);
env->max_readers = UINT_MAX; env->max_readers = UINT_MAX;
DEBUG("lck-setup:%s%s%s", " lck-less", DEBUG("lck-setup:%s%s%s", " lck-less", (env->flags & MDBX_RDONLY) ? " readonly" : "",
(env->flags & MDBX_RDONLY) ? " readonly" : "",
(lck_seize_rc == MDBX_RESULT_TRUE) ? " exclusive" : " cooperative"); (lck_seize_rc == MDBX_RESULT_TRUE) ? " exclusive" : " cooperative");
return lck_seize_rc; return lck_seize_rc;
} }
DEBUG("lck-setup:%s%s%s", " with-lck", DEBUG("lck-setup:%s%s%s", " with-lck", (env->flags & MDBX_RDONLY) ? " readonly" : "",
(env->flags & MDBX_RDONLY) ? " readonly" : "",
(lck_seize_rc == MDBX_RESULT_TRUE) ? " exclusive" : " cooperative"); (lck_seize_rc == MDBX_RESULT_TRUE) ? " exclusive" : " cooperative");
MDBX_env *inprocess_neighbor = nullptr; MDBX_env *inprocess_neighbor = nullptr;
@ -30,8 +28,7 @@ __cold static int lck_setup_locked(MDBX_env *env) {
if (unlikely(MDBX_IS_ERROR(err))) if (unlikely(MDBX_IS_ERROR(err)))
return err; return err;
if (inprocess_neighbor) { if (inprocess_neighbor) {
if ((globals.runtime_flags & MDBX_DBG_LEGACY_MULTIOPEN) == 0 || if ((globals.runtime_flags & MDBX_DBG_LEGACY_MULTIOPEN) == 0 || (inprocess_neighbor->flags & MDBX_EXCLUSIVE) != 0)
(inprocess_neighbor->flags & MDBX_EXCLUSIVE) != 0)
return MDBX_BUSY; return MDBX_BUSY;
if (lck_seize_rc == MDBX_RESULT_TRUE) { if (lck_seize_rc == MDBX_RESULT_TRUE) {
err = lck_downgrade(env); err = lck_downgrade(env);
@ -47,52 +44,41 @@ __cold static int lck_setup_locked(MDBX_env *env) {
return err; return err;
if (lck_seize_rc == MDBX_RESULT_TRUE) { if (lck_seize_rc == MDBX_RESULT_TRUE) {
size = size = ceil_powerof2(env->max_readers * sizeof(reader_slot_t) + sizeof(lck_t), globals.sys_pagesize);
ceil_powerof2(env->max_readers * sizeof(reader_slot_t) + sizeof(lck_t),
globals.sys_pagesize);
jitter4testing(false); jitter4testing(false);
} else { } else {
if (env->flags & MDBX_EXCLUSIVE) if (env->flags & MDBX_EXCLUSIVE)
return MDBX_BUSY; return MDBX_BUSY;
if (size > INT_MAX || (size & (globals.sys_pagesize - 1)) != 0 || if (size > INT_MAX || (size & (globals.sys_pagesize - 1)) != 0 || size < globals.sys_pagesize) {
size < globals.sys_pagesize) {
ERROR("lck-file has invalid size %" PRIu64 " bytes", size); ERROR("lck-file has invalid size %" PRIu64 " bytes", size);
return MDBX_PROBLEM; return MDBX_PROBLEM;
} }
} }
const size_t maxreaders = const size_t maxreaders = ((size_t)size - sizeof(lck_t)) / sizeof(reader_slot_t);
((size_t)size - sizeof(lck_t)) / sizeof(reader_slot_t);
if (maxreaders < 4) { if (maxreaders < 4) {
ERROR("lck-size too small (up to %" PRIuPTR " readers)", maxreaders); ERROR("lck-size too small (up to %" PRIuPTR " readers)", maxreaders);
return MDBX_PROBLEM; return MDBX_PROBLEM;
} }
env->max_readers = (maxreaders <= MDBX_READERS_LIMIT) env->max_readers = (maxreaders <= MDBX_READERS_LIMIT) ? (unsigned)maxreaders : (unsigned)MDBX_READERS_LIMIT;
? (unsigned)maxreaders
: (unsigned)MDBX_READERS_LIMIT;
err = osal_mmap((env->flags & MDBX_EXCLUSIVE) | MDBX_WRITEMAP, &env->lck_mmap, err = osal_mmap((env->flags & MDBX_EXCLUSIVE) | MDBX_WRITEMAP, &env->lck_mmap, (size_t)size, (size_t)size,
(size_t)size, (size_t)size, lck_seize_rc ? MMAP_OPTION_TRUNCATE | MMAP_OPTION_SEMAPHORE : MMAP_OPTION_SEMAPHORE);
lck_seize_rc ? MMAP_OPTION_TRUNCATE | MMAP_OPTION_SEMAPHORE
: MMAP_OPTION_SEMAPHORE);
if (unlikely(err != MDBX_SUCCESS)) if (unlikely(err != MDBX_SUCCESS))
return err; return err;
#ifdef MADV_DODUMP #ifdef MADV_DODUMP
err = madvise(env->lck_mmap.lck, size, MADV_DODUMP) ? ignore_enosys(errno) err = madvise(env->lck_mmap.lck, size, MADV_DODUMP) ? ignore_enosys(errno) : MDBX_SUCCESS;
: MDBX_SUCCESS;
if (unlikely(MDBX_IS_ERROR(err))) if (unlikely(MDBX_IS_ERROR(err)))
return err; return err;
#endif /* MADV_DODUMP */ #endif /* MADV_DODUMP */
#ifdef MADV_WILLNEED #ifdef MADV_WILLNEED
err = madvise(env->lck_mmap.lck, size, MADV_WILLNEED) ? ignore_enosys(errno) err = madvise(env->lck_mmap.lck, size, MADV_WILLNEED) ? ignore_enosys(errno) : MDBX_SUCCESS;
: MDBX_SUCCESS;
if (unlikely(MDBX_IS_ERROR(err))) if (unlikely(MDBX_IS_ERROR(err)))
return err; return err;
#elif defined(POSIX_MADV_WILLNEED) #elif defined(POSIX_MADV_WILLNEED)
err = ignore_enosys( err = ignore_enosys(posix_madvise(env->lck_mmap.lck, size, POSIX_MADV_WILLNEED));
posix_madvise(env->lck_mmap.lck, size, POSIX_MADV_WILLNEED));
if (unlikely(MDBX_IS_ERROR(err))) if (unlikely(MDBX_IS_ERROR(err)))
return err; return err;
#endif /* MADV_WILLNEED */ #endif /* MADV_WILLNEED */
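The setup above sizes the lck-file as ceil_powerof2(max_readers * sizeof(reader_slot_t) + sizeof(lck_t), page size) and later derives the reader capacity back from the file size. A back-of-the-envelope sketch with placeholder sizes (reader_slot_t is 32 bytes per the STATIC_ASSERT earlier; the header and page sizes below are assumptions):

#include <stddef.h>
#include <stdio.h>

size_t ceil_to(size_t value, size_t granularity) {
  return (value + granularity - 1) / granularity * granularity;
}

int main(void) {
  const size_t slot_size = 32;     /* sizeof(reader_slot_t) */
  const size_t header_size = 4096; /* placeholder for sizeof(lck_t) */
  const size_t page_size = 4096;   /* typical system page */
  const size_t wanted_readers = 122;

  const size_t file_size = ceil_to(wanted_readers * slot_size + header_size, page_size);
  const size_t max_readers = (file_size - header_size) / slot_size;
  printf("lck-file: %zu bytes, fits %zu reader slots\n", file_size, max_readers);
  return 0;
}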
@ -108,8 +94,7 @@ __cold static int lck_setup_locked(MDBX_env *env) {
#if MDBX_ENABLE_PGOP_STAT #if MDBX_ENABLE_PGOP_STAT
lck->pgops.wops.weak = 1; lck->pgops.wops.weak = 1;
#endif /* MDBX_ENABLE_PGOP_STAT */ #endif /* MDBX_ENABLE_PGOP_STAT */
err = osal_msync(&env->lck_mmap, 0, (size_t)size, err = osal_msync(&env->lck_mmap, 0, (size_t)size, MDBX_SYNC_DATA | MDBX_SYNC_SIZE);
MDBX_SYNC_DATA | MDBX_SYNC_SIZE);
if (unlikely(err != MDBX_SUCCESS)) { if (unlikely(err != MDBX_SUCCESS)) {
ERROR("initial-%s for lck-file failed, err %d", "msync/fsync", err); ERROR("initial-%s for lck-file failed, err %d", "msync/fsync", err);
eASSERT(env, MDBX_IS_ERROR(err)); eASSERT(env, MDBX_IS_ERROR(err));
@ -118,17 +103,14 @@ __cold static int lck_setup_locked(MDBX_env *env) {
} else { } else {
if (lck->magic_and_version != MDBX_LOCK_MAGIC) { if (lck->magic_and_version != MDBX_LOCK_MAGIC) {
const bool invalid = (lck->magic_and_version >> 8) != MDBX_MAGIC; const bool invalid = (lck->magic_and_version >> 8) != MDBX_MAGIC;
ERROR("lock region has %s", ERROR("lock region has %s", invalid ? "invalid magic"
invalid : "incompatible version (only applications with nearly or the "
? "invalid magic" "same versions of libmdbx can share the same database)");
: "incompatible version (only applications with nearly or the "
"same versions of libmdbx can share the same database)");
return invalid ? MDBX_INVALID : MDBX_VERSION_MISMATCH; return invalid ? MDBX_INVALID : MDBX_VERSION_MISMATCH;
} }
if (lck->os_and_format != MDBX_LOCK_FORMAT) { if (lck->os_and_format != MDBX_LOCK_FORMAT) {
ERROR("lock region has os/format signature 0x%" PRIx32 ERROR("lock region has os/format signature 0x%" PRIx32 ", expected 0x%" PRIx32, lck->os_and_format,
", expected 0x%" PRIx32, MDBX_LOCK_FORMAT);
lck->os_and_format, MDBX_LOCK_FORMAT);
return MDBX_VERSION_MISMATCH; return MDBX_VERSION_MISMATCH;
} }
} }
@ -148,8 +130,7 @@ __cold int lck_setup(MDBX_env *env, mdbx_mode_t mode) {
eASSERT(env, env->lazy_fd != INVALID_HANDLE_VALUE); eASSERT(env, env->lazy_fd != INVALID_HANDLE_VALUE);
eASSERT(env, env->lck_mmap.fd == INVALID_HANDLE_VALUE); eASSERT(env, env->lck_mmap.fd == INVALID_HANDLE_VALUE);
int err = osal_openfile(MDBX_OPEN_LCK, env, env->pathname.lck, int err = osal_openfile(MDBX_OPEN_LCK, env, env->pathname.lck, &env->lck_mmap.fd, mode);
&env->lck_mmap.fd, mode);
if (err != MDBX_SUCCESS) { if (err != MDBX_SUCCESS) {
switch (err) { switch (err) {
default: default:
@ -186,6 +167,5 @@ __cold int lck_setup(MDBX_env *env, mdbx_mode_t mode) {
} }
void mincore_clean_cache(const MDBX_env *const env) { void mincore_clean_cache(const MDBX_env *const env) {
memset(env->lck->mincore_cache.begin, -1, memset(env->lck->mincore_cache.begin, -1, sizeof(env->lck->mincore_cache.begin));
sizeof(env->lck->mincore_cache.begin));
} }
@ -23,8 +23,7 @@ MDBX_INTERNAL int lck_ipclock_destroy(osal_ipclock_t *ipc);
/// MUST NOT initialize shared synchronization objects in memory-mapped /// MUST NOT initialize shared synchronization objects in memory-mapped
/// LCK-file that are already in use. /// LCK-file that are already in use.
/// \return Error code or zero on success. /// \return Error code or zero on success.
MDBX_INTERNAL int lck_init(MDBX_env *env, MDBX_env *inprocess_neighbor, MDBX_INTERNAL int lck_init(MDBX_env *env, MDBX_env *inprocess_neighbor, int global_uniqueness_flag);
int global_uniqueness_flag);
/// \brief Disconnects from shared interprocess objects and destructs /// \brief Disconnects from shared interprocess objects and destructs
/// synchronization objects linked with MDBX_env instance /// synchronization objects linked with MDBX_env instance
@ -43,8 +42,7 @@ MDBX_INTERNAL int lck_init(MDBX_env *env, MDBX_env *inprocess_neighbor,
/// of other instances of MDBX_env within the current process, e.g. /// of other instances of MDBX_env within the current process, e.g.
/// restore POSIX-fcntl locks after the closing of file descriptors. /// restore POSIX-fcntl locks after the closing of file descriptors.
/// \return Error code (MDBX_PANIC) or zero on success. /// \return Error code (MDBX_PANIC) or zero on success.
MDBX_INTERNAL int lck_destroy(MDBX_env *env, MDBX_env *inprocess_neighbor, MDBX_INTERNAL int lck_destroy(MDBX_env *env, MDBX_env *inprocess_neighbor, const uint32_t current_pid);
const uint32_t current_pid);
/// \brief Connects to shared interprocess locking objects and tries to acquire /// \brief Connects to shared interprocess locking objects and tries to acquire
/// the maximum lock level (shared if exclusive is not available) /// the maximum lock level (shared if exclusive is not available)
@ -3,15 +3,13 @@
#include "internals.h" #include "internals.h"
__cold void debug_log_va(int level, const char *function, int line, __cold void debug_log_va(int level, const char *function, int line, const char *fmt, va_list args) {
const char *fmt, va_list args) {
ENSURE(nullptr, osal_fastmutex_acquire(&globals.debug_lock) == 0); ENSURE(nullptr, osal_fastmutex_acquire(&globals.debug_lock) == 0);
if (globals.logger.ptr) { if (globals.logger.ptr) {
if (globals.logger_buffer == nullptr) if (globals.logger_buffer == nullptr)
globals.logger.fmt(level, function, line, fmt, args); globals.logger.fmt(level, function, line, fmt, args);
else { else {
const int len = vsnprintf(globals.logger_buffer, const int len = vsnprintf(globals.logger_buffer, globals.logger_buffer_size, fmt, args);
globals.logger_buffer_size, fmt, args);
if (len > 0) if (len > 0)
globals.logger.nofmt(level, function, line, globals.logger_buffer, len); globals.logger.nofmt(level, function, line, globals.logger_buffer, len);
} }
@ -51,8 +49,7 @@ __cold void debug_log_va(int level, const char *function, int line,
ENSURE(nullptr, osal_fastmutex_release(&globals.debug_lock) == 0); ENSURE(nullptr, osal_fastmutex_release(&globals.debug_lock) == 0);
} }
__cold void debug_log(int level, const char *function, int line, __cold void debug_log(int level, const char *function, int line, const char *fmt, ...) {
const char *fmt, ...) {
va_list args; va_list args;
va_start(args, fmt); va_start(args, fmt);
debug_log_va(level, function, line, fmt, args); debug_log_va(level, function, line, fmt, args);
@ -62,18 +59,15 @@ __cold void debug_log(int level, const char *function, int line,
__cold int log_error(const int err, const char *func, unsigned line) { __cold int log_error(const int err, const char *func, unsigned line) {
assert(err != MDBX_SUCCESS); assert(err != MDBX_SUCCESS);
if (unlikely(globals.loglevel >= MDBX_LOG_DEBUG) && if (unlikely(globals.loglevel >= MDBX_LOG_DEBUG) &&
(globals.loglevel >= MDBX_LOG_TRACE || (globals.loglevel >= MDBX_LOG_TRACE || !(err == MDBX_RESULT_TRUE || err == MDBX_NOTFOUND))) {
!(err == MDBX_RESULT_TRUE || err == MDBX_NOTFOUND))) {
char buf[256]; char buf[256];
debug_log(MDBX_LOG_ERROR, func, line, "error %d (%s)\n", err, debug_log(MDBX_LOG_ERROR, func, line, "error %d (%s)\n", err, mdbx_strerror_r(err, buf, sizeof(buf)));
mdbx_strerror_r(err, buf, sizeof(buf)));
} }
return err; return err;
} }
/* Dump a val in ascii or hexadecimal. */ /* Dump a val in ascii or hexadecimal. */
__cold const char *mdbx_dump_val(const MDBX_val *val, char *const buf, __cold const char *mdbx_dump_val(const MDBX_val *val, char *const buf, const size_t bufsize) {
const size_t bufsize) {
if (!val) if (!val)
return "<null>"; return "<null>";
if (!val->iov_len) if (!val->iov_len)
@ -97,9 +91,7 @@ __cold const char *mdbx_dump_val(const MDBX_val *val, char *const buf,
} }
if (is_ascii) { if (is_ascii) {
int len = int len = snprintf(buf, bufsize, "%.*s", (val->iov_len > INT_MAX) ? INT_MAX : (int)val->iov_len, data);
snprintf(buf, bufsize, "%.*s",
(val->iov_len > INT_MAX) ? INT_MAX : (int)val->iov_len, data);
assert(len > 0 && (size_t)len < bufsize); assert(len > 0 && (size_t)len < bufsize);
(void)len; (void)len;
} else { } else {
@ -107,8 +99,7 @@ __cold const char *mdbx_dump_val(const MDBX_val *val, char *const buf,
char *ptr = buf; char *ptr = buf;
*ptr++ = '<'; *ptr++ = '<';
for (size_t i = 0; i < val->iov_len && ptr < detent; i++) { for (size_t i = 0; i < val->iov_len && ptr < detent; i++) {
const char hex[16] = {'0', '1', '2', '3', '4', '5', '6', '7', const char hex[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
'8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
*ptr++ = hex[data[i] >> 4]; *ptr++ = hex[data[i] >> 4];
*ptr++ = hex[data[i] & 15]; *ptr++ = hex[data[i] & 15];
} }
@ -145,11 +136,8 @@ __cold const char *pagetype_caption(const uint8_t type, char buf4unknown[16]) {
} }
__cold static const char *leafnode_type(node_t *n) { __cold static const char *leafnode_type(node_t *n) {
static const char *const tp[2][2] = {{"", ": DB"}, static const char *const tp[2][2] = {{"", ": DB"}, {": sub-page", ": sub-DB"}};
{": sub-page", ": sub-DB"}}; return (node_flags(n) & N_BIG) ? ": large page" : tp[!!(node_flags(n) & N_DUP)][!!(node_flags(n) & N_TREE)];
return (node_flags(n) & N_BIG)
? ": large page"
: tp[!!(node_flags(n) & N_DUP)][!!(node_flags(n) & N_TREE)];
} }
/* Display all the keys in the page. */ /* Display all the keys in the page. */
@ -181,8 +169,7 @@ __cold void page_list(page_t *mp) {
VERBOSE("Overflow page %" PRIaPGNO " pages %u\n", pgno, mp->pages); VERBOSE("Overflow page %" PRIaPGNO " pages %u\n", pgno, mp->pages);
return; return;
case P_META: case P_META:
VERBOSE("Meta-page %" PRIaPGNO " txnid %" PRIu64 "\n", pgno, VERBOSE("Meta-page %" PRIaPGNO " txnid %" PRIu64 "\n", pgno, unaligned_peek_u64(4, page_meta(mp)->txnid_a));
unaligned_peek_u64(4, page_meta(mp)->txnid_a));
return; return;
default: default:
VERBOSE("Bad page %" PRIaPGNO " flags 0x%X\n", pgno, mp->flags); VERBOSE("Bad page %" PRIaPGNO " flags 0x%X\n", pgno, mp->flags);
@ -193,8 +180,7 @@ __cold void page_list(page_t *mp) {
VERBOSE("%s %" PRIaPGNO " numkeys %zu\n", type, pgno, nkeys); VERBOSE("%s %" PRIaPGNO " numkeys %zu\n", type, pgno, nkeys);
for (i = 0; i < nkeys; i++) { for (i = 0; i < nkeys; i++) {
if (is_dupfix_leaf( if (is_dupfix_leaf(mp)) { /* DUPFIX pages have no entries[] or node headers */
mp)) { /* DUPFIX pages have no entries[] or node headers */
key = page_dupfix_key(mp, i, nsize = mp->dupfix_ksize); key = page_dupfix_key(mp, i, nsize = mp->dupfix_ksize);
total += nsize; total += nsize;
VERBOSE("key %zu: nsize %zu, %s\n", i, nsize, DKEY(&key)); VERBOSE("key %zu: nsize %zu, %s\n", i, nsize, DKEY(&key));
@ -205,8 +191,7 @@ __cold void page_list(page_t *mp) {
key.iov_base = node->payload; key.iov_base = node->payload;
nsize = NODESIZE + key.iov_len; nsize = NODESIZE + key.iov_len;
if (is_branch(mp)) { if (is_branch(mp)) {
VERBOSE("key %zu: page %" PRIaPGNO ", %s\n", i, node_pgno(node), VERBOSE("key %zu: page %" PRIaPGNO ", %s\n", i, node_pgno(node), DKEY(&key));
DKEY(&key));
total += nsize; total += nsize;
} else { } else {
if (node_flags(node) & N_BIG) if (node_flags(node) & N_BIG)
@ -215,18 +200,15 @@ __cold void page_list(page_t *mp) {
nsize += node_ds(node); nsize += node_ds(node);
total += nsize; total += nsize;
nsize += sizeof(indx_t); nsize += sizeof(indx_t);
VERBOSE("key %zu: nsize %zu, %s%s\n", i, nsize, DKEY(&key), VERBOSE("key %zu: nsize %zu, %s%s\n", i, nsize, DKEY(&key), leafnode_type(node));
leafnode_type(node));
} }
total = EVEN_CEIL(total); total = EVEN_CEIL(total);
} }
VERBOSE("Total: header %u + contents %zu + unused %zu\n", VERBOSE("Total: header %u + contents %zu + unused %zu\n", is_dupfix_leaf(mp) ? PAGEHDRSZ : PAGEHDRSZ + mp->lower,
is_dupfix_leaf(mp) ? PAGEHDRSZ : PAGEHDRSZ + mp->lower, total, total, page_room(mp));
page_room(mp));
} }
__cold static int setup_debug(MDBX_log_level_t level, MDBX_debug_flags_t flags, __cold static int setup_debug(MDBX_log_level_t level, MDBX_debug_flags_t flags, union logger_union logger, char *buffer,
union logger_union logger, char *buffer,
size_t buffer_size) { size_t buffer_size) {
ENSURE(nullptr, osal_fastmutex_acquire(&globals.debug_lock) == 0); ENSURE(nullptr, osal_fastmutex_acquire(&globals.debug_lock) == 0);
@ -239,8 +221,7 @@ __cold static int setup_debug(MDBX_log_level_t level, MDBX_debug_flags_t flags,
#if MDBX_DEBUG #if MDBX_DEBUG
MDBX_DBG_ASSERT | MDBX_DBG_AUDIT | MDBX_DBG_JITTER | MDBX_DBG_ASSERT | MDBX_DBG_AUDIT | MDBX_DBG_JITTER |
#endif #endif
MDBX_DBG_DUMP | MDBX_DBG_LEGACY_MULTIOPEN | MDBX_DBG_LEGACY_OVERLAP | MDBX_DBG_DUMP | MDBX_DBG_LEGACY_MULTIOPEN | MDBX_DBG_LEGACY_OVERLAP | MDBX_DBG_DONT_UPGRADE;
MDBX_DBG_DONT_UPGRADE;
globals.runtime_flags = (uint8_t)flags; globals.runtime_flags = (uint8_t)flags;
} }
@ -255,18 +236,14 @@ __cold static int setup_debug(MDBX_log_level_t level, MDBX_debug_flags_t flags,
return rc; return rc;
} }
__cold int mdbx_setup_debug_nofmt(MDBX_log_level_t level, __cold int mdbx_setup_debug_nofmt(MDBX_log_level_t level, MDBX_debug_flags_t flags, MDBX_debug_func_nofmt *logger,
MDBX_debug_flags_t flags, char *buffer, size_t buffer_size) {
MDBX_debug_func_nofmt *logger, char *buffer,
size_t buffer_size) {
union logger_union thunk; union logger_union thunk;
thunk.nofmt = thunk.nofmt = (logger && buffer && buffer_size) ? logger : MDBX_LOGGER_NOFMT_DONTCHANGE;
(logger && buffer && buffer_size) ? logger : MDBX_LOGGER_NOFMT_DONTCHANGE;
return setup_debug(level, flags, thunk, buffer, buffer_size); return setup_debug(level, flags, thunk, buffer, buffer_size);
} }
__cold int mdbx_setup_debug(MDBX_log_level_t level, MDBX_debug_flags_t flags, __cold int mdbx_setup_debug(MDBX_log_level_t level, MDBX_debug_flags_t flags, MDBX_debug_func *logger) {
MDBX_debug_func *logger) {
union logger_union thunk; union logger_union thunk;
thunk.fmt = logger; thunk.fmt = logger;
return setup_debug(level, flags, thunk, nullptr, 0); return setup_debug(level, flags, thunk, nullptr, 0);
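As a usage note, the setup_debug() plumbing above is reached through the public mdbx_setup_debug() / mdbx_setup_debug_nofmt() entry points. A minimal sketch of installing an application-side logger; the callback shape follows MDBX_debug_func from mdbx.h, and the particular level/flag combination here is only an example:

    #include <cstdarg>
    #include <cstdio>
    #include "mdbx.h"

    // Application-side logger with the MDBX_debug_func shape.
    static void app_logger(MDBX_log_level_t level, const char *function, int line, const char *fmt, va_list args) {
      std::fprintf(stderr, "mdbx[%d] %s:%d: ", (int)level, function ? function : "?", line);
      std::vfprintf(stderr, fmt, args);
    }

    void enable_mdbx_logging() {
      // Verbose logging plus assertion checks; other MDBX_DBG_* flags may be OR-ed in.
      mdbx_setup_debug(MDBX_LOG_VERBOSE, MDBX_DBG_ASSERT, app_logger);
    }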
@ -6,23 +6,17 @@
#include "essentials.h" #include "essentials.h"
#ifndef __Wpedantic_format_voidptr #ifndef __Wpedantic_format_voidptr
MDBX_MAYBE_UNUSED static inline const void * MDBX_MAYBE_UNUSED static inline const void *__Wpedantic_format_voidptr(const void *ptr) { return ptr; }
__Wpedantic_format_voidptr(const void *ptr) {
return ptr;
}
#define __Wpedantic_format_voidptr(ARG) __Wpedantic_format_voidptr(ARG) #define __Wpedantic_format_voidptr(ARG) __Wpedantic_format_voidptr(ARG)
#endif /* __Wpedantic_format_voidptr */ #endif /* __Wpedantic_format_voidptr */
MDBX_INTERNAL void MDBX_PRINTF_ARGS(4, 5) MDBX_INTERNAL void MDBX_PRINTF_ARGS(4, 5) debug_log(int level, const char *function, int line, const char *fmt, ...)
debug_log(int level, const char *function, int line, const char *fmt, ...) MDBX_PRINTF_ARGS(4, 5);
MDBX_PRINTF_ARGS(4, 5); MDBX_INTERNAL void debug_log_va(int level, const char *function, int line, const char *fmt, va_list args);
MDBX_INTERNAL void debug_log_va(int level, const char *function, int line,
const char *fmt, va_list args);
#if MDBX_DEBUG #if MDBX_DEBUG
#define LOG_ENABLED(LVL) unlikely(LVL <= globals.loglevel) #define LOG_ENABLED(LVL) unlikely(LVL <= globals.loglevel)
#define AUDIT_ENABLED() \ #define AUDIT_ENABLED() unlikely((globals.runtime_flags & (unsigned)MDBX_DBG_AUDIT))
unlikely((globals.runtime_flags & (unsigned)MDBX_DBG_AUDIT))
#else /* MDBX_DEBUG */ #else /* MDBX_DEBUG */
#define LOG_ENABLED(LVL) (LVL < MDBX_LOG_VERBOSE && LVL <= globals.loglevel) #define LOG_ENABLED(LVL) (LVL < MDBX_LOG_VERBOSE && LVL <= globals.loglevel)
#define AUDIT_ENABLED() (0) #define AUDIT_ENABLED() (0)
@ -31,91 +25,88 @@ MDBX_INTERNAL void debug_log_va(int level, const char *function, int line,
#if MDBX_FORCE_ASSERTIONS #if MDBX_FORCE_ASSERTIONS
#define ASSERT_ENABLED() (1) #define ASSERT_ENABLED() (1)
#elif MDBX_DEBUG #elif MDBX_DEBUG
#define ASSERT_ENABLED() \ #define ASSERT_ENABLED() likely((globals.runtime_flags & (unsigned)MDBX_DBG_ASSERT))
likely((globals.runtime_flags & (unsigned)MDBX_DBG_ASSERT))
#else #else
#define ASSERT_ENABLED() (0) #define ASSERT_ENABLED() (0)
#endif /* ASSERT_ENABLED() */ #endif /* ASSERT_ENABLED() */
#define DEBUG_EXTRA(fmt, ...) \ #define DEBUG_EXTRA(fmt, ...) \
do { \ do { \
if (LOG_ENABLED(MDBX_LOG_EXTRA)) \ if (LOG_ENABLED(MDBX_LOG_EXTRA)) \
debug_log(MDBX_LOG_EXTRA, __func__, __LINE__, fmt, __VA_ARGS__); \ debug_log(MDBX_LOG_EXTRA, __func__, __LINE__, fmt, __VA_ARGS__); \
} while (0) } while (0)
#define DEBUG_EXTRA_PRINT(fmt, ...) \ #define DEBUG_EXTRA_PRINT(fmt, ...) \
do { \ do { \
if (LOG_ENABLED(MDBX_LOG_EXTRA)) \ if (LOG_ENABLED(MDBX_LOG_EXTRA)) \
debug_log(MDBX_LOG_EXTRA, nullptr, 0, fmt, __VA_ARGS__); \ debug_log(MDBX_LOG_EXTRA, nullptr, 0, fmt, __VA_ARGS__); \
} while (0) } while (0)
#define TRACE(fmt, ...) \ #define TRACE(fmt, ...) \
do { \ do { \
if (LOG_ENABLED(MDBX_LOG_TRACE)) \ if (LOG_ENABLED(MDBX_LOG_TRACE)) \
debug_log(MDBX_LOG_TRACE, __func__, __LINE__, fmt "\n", __VA_ARGS__); \ debug_log(MDBX_LOG_TRACE, __func__, __LINE__, fmt "\n", __VA_ARGS__); \
} while (0) } while (0)
#define DEBUG(fmt, ...) \ #define DEBUG(fmt, ...) \
do { \ do { \
if (LOG_ENABLED(MDBX_LOG_DEBUG)) \ if (LOG_ENABLED(MDBX_LOG_DEBUG)) \
debug_log(MDBX_LOG_DEBUG, __func__, __LINE__, fmt "\n", __VA_ARGS__); \ debug_log(MDBX_LOG_DEBUG, __func__, __LINE__, fmt "\n", __VA_ARGS__); \
} while (0) } while (0)
#define VERBOSE(fmt, ...) \ #define VERBOSE(fmt, ...) \
do { \ do { \
if (LOG_ENABLED(MDBX_LOG_VERBOSE)) \ if (LOG_ENABLED(MDBX_LOG_VERBOSE)) \
debug_log(MDBX_LOG_VERBOSE, __func__, __LINE__, fmt "\n", __VA_ARGS__); \ debug_log(MDBX_LOG_VERBOSE, __func__, __LINE__, fmt "\n", __VA_ARGS__); \
} while (0) } while (0)
#define NOTICE(fmt, ...) \ #define NOTICE(fmt, ...) \
do { \ do { \
if (LOG_ENABLED(MDBX_LOG_NOTICE)) \ if (LOG_ENABLED(MDBX_LOG_NOTICE)) \
debug_log(MDBX_LOG_NOTICE, __func__, __LINE__, fmt "\n", __VA_ARGS__); \ debug_log(MDBX_LOG_NOTICE, __func__, __LINE__, fmt "\n", __VA_ARGS__); \
} while (0) } while (0)
#define WARNING(fmt, ...) \ #define WARNING(fmt, ...) \
do { \ do { \
if (LOG_ENABLED(MDBX_LOG_WARN)) \ if (LOG_ENABLED(MDBX_LOG_WARN)) \
debug_log(MDBX_LOG_WARN, __func__, __LINE__, fmt "\n", __VA_ARGS__); \ debug_log(MDBX_LOG_WARN, __func__, __LINE__, fmt "\n", __VA_ARGS__); \
} while (0) } while (0)
#undef ERROR /* wingdi.h \ #undef ERROR /* wingdi.h \
Yeah, morons from M$ put such definition to the public header. */ Yeah, morons from M$ put such definition to the public header. */
#define ERROR(fmt, ...) \ #define ERROR(fmt, ...) \
do { \ do { \
if (LOG_ENABLED(MDBX_LOG_ERROR)) \ if (LOG_ENABLED(MDBX_LOG_ERROR)) \
debug_log(MDBX_LOG_ERROR, __func__, __LINE__, fmt "\n", __VA_ARGS__); \ debug_log(MDBX_LOG_ERROR, __func__, __LINE__, fmt "\n", __VA_ARGS__); \
} while (0) } while (0)
#define FATAL(fmt, ...) \ #define FATAL(fmt, ...) debug_log(MDBX_LOG_FATAL, __func__, __LINE__, fmt "\n", __VA_ARGS__);
debug_log(MDBX_LOG_FATAL, __func__, __LINE__, fmt "\n", __VA_ARGS__);
#if MDBX_DEBUG #if MDBX_DEBUG
#define ASSERT_FAIL(env, msg, func, line) mdbx_assert_fail(env, msg, func, line) #define ASSERT_FAIL(env, msg, func, line) mdbx_assert_fail(env, msg, func, line)
#else /* MDBX_DEBUG */ #else /* MDBX_DEBUG */
MDBX_NORETURN __cold void assert_fail(const char *msg, const char *func, MDBX_NORETURN __cold void assert_fail(const char *msg, const char *func, unsigned line);
unsigned line); #define ASSERT_FAIL(env, msg, func, line) \
#define ASSERT_FAIL(env, msg, func, line) \ do { \
do { \ (void)(env); \
(void)(env); \ assert_fail(msg, func, line); \
assert_fail(msg, func, line); \
} while (0) } while (0)
#endif /* MDBX_DEBUG */ #endif /* MDBX_DEBUG */
#define ENSURE_MSG(env, expr, msg) \ #define ENSURE_MSG(env, expr, msg) \
do { \ do { \
if (unlikely(!(expr))) \ if (unlikely(!(expr))) \
ASSERT_FAIL(env, msg, __func__, __LINE__); \ ASSERT_FAIL(env, msg, __func__, __LINE__); \
} while (0) } while (0)
#define ENSURE(env, expr) ENSURE_MSG(env, expr, #expr) #define ENSURE(env, expr) ENSURE_MSG(env, expr, #expr)
/* assert(3) variant in environment context */ /* assert(3) variant in environment context */
#define eASSERT(env, expr) \ #define eASSERT(env, expr) \
do { \ do { \
if (ASSERT_ENABLED()) \ if (ASSERT_ENABLED()) \
ENSURE(env, expr); \ ENSURE(env, expr); \
} while (0) } while (0)
/* assert(3) variant in cursor context */ /* assert(3) variant in cursor context */
@ -140,14 +131,12 @@ MDBX_MAYBE_UNUSED static inline void jitter4testing(bool tiny) {
MDBX_MAYBE_UNUSED MDBX_INTERNAL void page_list(page_t *mp); MDBX_MAYBE_UNUSED MDBX_INTERNAL void page_list(page_t *mp);
MDBX_INTERNAL const char *pagetype_caption(const uint8_t type, MDBX_INTERNAL const char *pagetype_caption(const uint8_t type, char buf4unknown[16]);
char buf4unknown[16]);
/* Key size which fits in a DKBUF (debug key buffer). */ /* Key size which fits in a DKBUF (debug key buffer). */
#define DKBUF_MAX 127 #define DKBUF_MAX 127
#define DKBUF char dbg_kbuf[DKBUF_MAX * 4 + 2] #define DKBUF char dbg_kbuf[DKBUF_MAX * 4 + 2]
#define DKEY(x) mdbx_dump_val(x, dbg_kbuf, DKBUF_MAX * 2 + 1) #define DKEY(x) mdbx_dump_val(x, dbg_kbuf, DKBUF_MAX * 2 + 1)
#define DVAL(x) \ #define DVAL(x) mdbx_dump_val(x, dbg_kbuf + DKBUF_MAX * 2 + 1, DKBUF_MAX * 2 + 1)
mdbx_dump_val(x, dbg_kbuf + DKBUF_MAX * 2 + 1, DKBUF_MAX * 2 + 1)
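For context, DKBUF reserves one stack buffer that DKEY() and DVAL() split into two equal halves, so a key dump and a value dump can appear in the same log statement without clobbering each other. A tiny stand-alone sketch of just that arithmetic (the macros themselves are internal and omitted here):

    #include <cstdio>

    int main() {
      constexpr int DKBUF_MAX = 127;
      char dbg_kbuf[DKBUF_MAX * 4 + 2];               // what DKBUF declares
      char *key_area = dbg_kbuf;                      // DKEY() renders into the first half
      char *val_area = dbg_kbuf + DKBUF_MAX * 2 + 1;  // DVAL() renders into the second half
      std::printf("key area at +%td, value area at +%td, total %zu bytes\n",
                  key_area - dbg_kbuf, val_area - dbg_kbuf, sizeof(dbg_kbuf));
      return 0;
    }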
#if MDBX_DEBUG #if MDBX_DEBUG
#define DKBUF_DEBUG DKBUF #define DKBUF_DEBUG DKBUF
@ -161,8 +150,7 @@ MDBX_INTERNAL const char *pagetype_caption(const uint8_t type,
MDBX_INTERNAL int log_error(const int err, const char *func, unsigned line); MDBX_INTERNAL int log_error(const int err, const char *func, unsigned line);
MDBX_MAYBE_UNUSED static inline int MDBX_MAYBE_UNUSED static inline int log_if_error(const int err, const char *func, unsigned line) {
log_if_error(const int err, const char *func, unsigned line) {
if (likely(err == MDBX_SUCCESS)) if (likely(err == MDBX_SUCCESS))
return err; return err;
int rc = log_error(err, func, line); int rc = log_error(err, func, line);
@ -187,21 +187,19 @@ __cold bug::~bug() noexcept {}
throw bug(what_and_where); throw bug(what_and_where);
} }
#define RAISE_BUG(line, condition, function, file) \ #define RAISE_BUG(line, condition, function, file) \
do { \ do { \
static MDBX_CXX11_CONSTEXPR_VAR trouble_location bug(line, condition, \ static MDBX_CXX11_CONSTEXPR_VAR trouble_location bug(line, condition, function, file); \
function, file); \ raise_bug(bug); \
raise_bug(bug); \
} while (0) } while (0)
#define ENSURE(condition) \ #define ENSURE(condition) \
do \ do \
if (MDBX_UNLIKELY(!(condition))) \ if (MDBX_UNLIKELY(!(condition))) \
MDBX_CXX20_UNLIKELY RAISE_BUG(__LINE__, #condition, __func__, __FILE__); \ MDBX_CXX20_UNLIKELY RAISE_BUG(__LINE__, #condition, __func__, __FILE__); \
while (0) while (0)
#define NOT_IMPLEMENTED() \ #define NOT_IMPLEMENTED() RAISE_BUG(__LINE__, "not_implemented", __func__, __FILE__);
RAISE_BUG(__LINE__, "not_implemented", __func__, __FILE__);
#endif /* Unused*/ #endif /* Unused*/
@ -226,14 +224,12 @@ struct line_wrapper {
} }
}; };
template <typename TYPE, unsigned INPLACE_BYTES = unsigned(sizeof(void *) * 64)> template <typename TYPE, unsigned INPLACE_BYTES = unsigned(sizeof(void *) * 64)> struct temp_buffer {
struct temp_buffer {
TYPE inplace[(INPLACE_BYTES + sizeof(TYPE) - 1) / sizeof(TYPE)]; TYPE inplace[(INPLACE_BYTES + sizeof(TYPE) - 1) / sizeof(TYPE)];
const size_t size; const size_t size;
TYPE *const area; TYPE *const area;
temp_buffer(size_t bytes) temp_buffer(size_t bytes)
: size((bytes + sizeof(TYPE) - 1) / sizeof(TYPE)), : size((bytes + sizeof(TYPE) - 1) / sizeof(TYPE)), area((bytes > sizeof(inplace)) ? new TYPE[size] : inplace) {
area((bytes > sizeof(inplace)) ? new TYPE[size] : inplace) {
memset(area, 0, sizeof(TYPE) * size); memset(area, 0, sizeof(TYPE) * size);
} }
~temp_buffer() { ~temp_buffer() {
@ -265,8 +261,7 @@ struct temp_buffer {
namespace mdbx { namespace mdbx {
[[noreturn]] __cold void throw_max_length_exceeded() { [[noreturn]] __cold void throw_max_length_exceeded() {
throw std::length_error( throw std::length_error("mdbx:: Exceeded the maximal length of data/slice/buffer.");
"mdbx:: Exceeded the maximal length of data/slice/buffer.");
} }
[[noreturn]] __cold void throw_too_small_target_buffer() { [[noreturn]] __cold void throw_too_small_target_buffer() {
@ -279,38 +274,31 @@ namespace mdbx {
} }
[[noreturn]] __cold void throw_allocators_mismatch() { [[noreturn]] __cold void throw_allocators_mismatch() {
throw std::logic_error( throw std::logic_error("mdbx:: An allocators mismatch, so an object could not be transferred "
"mdbx:: An allocators mismatch, so an object could not be transferred " "into an incompatible memory allocation scheme.");
"into an incompatible memory allocation scheme.");
} }
[[noreturn]] __cold void throw_incomparable_cursors() { [[noreturn]] __cold void throw_incomparable_cursors() {
throw std::logic_error( throw std::logic_error("mdbx:: incomparable and/or invalid cursors to compare positions.");
"mdbx:: incomparable and/or invalid cursors to compare positions.");
} }
[[noreturn]] __cold void throw_bad_value_size() { [[noreturn]] __cold void throw_bad_value_size() { throw bad_value_size(MDBX_BAD_VALSIZE); }
throw bad_value_size(MDBX_BAD_VALSIZE);
}
__cold exception::exception(const ::mdbx::error &error) noexcept __cold exception::exception(const ::mdbx::error &error) noexcept : base(error.what()), error_(error) {}
: base(error.what()), error_(error) {}
__cold exception::~exception() noexcept {} __cold exception::~exception() noexcept {}
static std::atomic_int fatal_countdown; static std::atomic_int fatal_countdown;
__cold fatal::fatal(const ::mdbx::error &error) noexcept : base(error) { __cold fatal::fatal(const ::mdbx::error &error) noexcept : base(error) { ++fatal_countdown; }
++fatal_countdown;
}
__cold fatal::~fatal() noexcept { __cold fatal::~fatal() noexcept {
if (--fatal_countdown == 0) if (--fatal_countdown == 0)
std::terminate(); std::terminate();
} }
#define DEFINE_EXCEPTION(NAME) \ #define DEFINE_EXCEPTION(NAME) \
__cold NAME::NAME(const ::mdbx::error &rc) : exception(rc) {} \ __cold NAME::NAME(const ::mdbx::error &rc) : exception(rc) {} \
__cold NAME::~NAME() noexcept {} __cold NAME::~NAME() noexcept {}
DEFINE_EXCEPTION(bad_map_id) DEFINE_EXCEPTION(bad_map_id)
@ -352,8 +340,8 @@ __cold const char *error::what() const noexcept {
return mdbx_liberr2str(code()); return mdbx_liberr2str(code());
switch (code()) { switch (code()) {
#define ERROR_CASE(CODE) \ #define ERROR_CASE(CODE) \
case CODE: \ case CODE: \
return MDBX_STRINGIFY(CODE) return MDBX_STRINGIFY(CODE)
ERROR_CASE(MDBX_ENODATA); ERROR_CASE(MDBX_ENODATA);
ERROR_CASE(MDBX_EINVAL); ERROR_CASE(MDBX_EINVAL);
@ -379,8 +367,7 @@ __cold std::string error::message() const {
return std::string(msg ? msg : "unknown"); return std::string(msg ? msg : "unknown");
} }
[[noreturn]] __cold void error::panic(const char *context, [[noreturn]] __cold void error::panic(const char *context, const char *func) const noexcept {
const char *func) const noexcept {
assert(code() != MDBX_SUCCESS); assert(code() != MDBX_SUCCESS);
::mdbx_panic("mdbx::%s.%s(): \"%s\" (%d)", context, func, what(), code()); ::mdbx_panic("mdbx::%s.%s(): \"%s\" (%d)", context, func, what(), code());
std::terminate(); std::terminate();
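The throw_exception() switch in the next hunk maps concrete codes such as MDBX_BAD_TXN onto these typed exceptions. A usage-side sketch of funnelling an error code through that machinery; only classes visible in this file are assumed:

    #include <iostream>
    #include "mdbx.h++"

    // Sketch: turn an MDBX error code into the C++ exception hierarchy and catch it.
    void demo() {
      try {
        ::mdbx::error::success_or_throw(MDBX_BAD_TXN);
      } catch (const ::mdbx::bad_transaction &e) {
        std::cerr << "bad transaction: " << e.what() << std::endl;
      } catch (const ::mdbx::exception &e) {
        std::cerr << "other mdbx exception: " << e.what() << std::endl;
      }
    }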
@ -397,8 +384,8 @@ __cold void error::throw_exception() const {
throw std::logic_error("MDBX_SUCCESS (MDBX_RESULT_FALSE)"); throw std::logic_error("MDBX_SUCCESS (MDBX_RESULT_FALSE)");
case MDBX_RESULT_TRUE: case MDBX_RESULT_TRUE:
throw std::logic_error("MDBX_RESULT_TRUE"); throw std::logic_error("MDBX_RESULT_TRUE");
#define CASE_EXCEPTION(NAME, CODE) \ #define CASE_EXCEPTION(NAME, CODE) \
case CODE: \ case CODE: \
throw NAME(code()) throw NAME(code())
CASE_EXCEPTION(bad_map_id, MDBX_BAD_DBI); CASE_EXCEPTION(bad_map_id, MDBX_BAD_DBI);
CASE_EXCEPTION(bad_transaction, MDBX_BAD_TXN); CASE_EXCEPTION(bad_transaction, MDBX_BAD_TXN);
@ -702,27 +689,23 @@ char *to_hex::write_bytes(char *__restrict const dest, size_t dest_size) const {
return out; return out;
} }
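The decoder follows below; as a usage-side sketch of the encoder whose write_bytes() just ended, assuming to_hex is constructible from a slice and that envisage_result_length() bounds the output as its name suggests:

    #include <string>
    #include <iostream>
    #include "mdbx.h++"

    // Sketch: hex-encode a slice via mdbx::to_hex into a caller-provided buffer.
    std::string hex_of(const ::mdbx::slice &data) {
      ::mdbx::to_hex encoder(data);
      std::string out(encoder.envisage_result_length(), '\0');
      char *end = encoder.write_bytes(&out[0], out.size());
      out.resize(static_cast<size_t>(end - &out[0]));
      return out;
    }

    int main() {
      std::cout << hex_of(::mdbx::slice("libmdbx")) << std::endl;
      return 0;
    }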
char *from_hex::write_bytes(char *__restrict const dest, char *from_hex::write_bytes(char *__restrict const dest, size_t dest_size) const {
size_t dest_size) const {
if (MDBX_UNLIKELY(source.length() % 2 && !ignore_spaces)) if (MDBX_UNLIKELY(source.length() % 2 && !ignore_spaces))
MDBX_CXX20_UNLIKELY throw std::domain_error( MDBX_CXX20_UNLIKELY throw std::domain_error("mdbx::from_hex:: odd length of hexadecimal string");
"mdbx::from_hex:: odd length of hexadecimal string");
if (MDBX_UNLIKELY(envisage_result_length() > dest_size)) if (MDBX_UNLIKELY(envisage_result_length() > dest_size))
MDBX_CXX20_UNLIKELY throw_too_small_target_buffer(); MDBX_CXX20_UNLIKELY throw_too_small_target_buffer();
auto ptr = dest; auto ptr = dest;
auto src = source.byte_ptr(); auto src = source.byte_ptr();
for (auto left = source.length(); left > 0;) { for (auto left = source.length(); left > 0;) {
if (MDBX_UNLIKELY(*src <= ' ') && if (MDBX_UNLIKELY(*src <= ' ') && MDBX_LIKELY(ignore_spaces && isspace(*src))) {
MDBX_LIKELY(ignore_spaces && isspace(*src))) {
++src; ++src;
--left; --left;
continue; continue;
} }
if (MDBX_UNLIKELY(left < 1 || !isxdigit(src[0]) || !isxdigit(src[1]))) if (MDBX_UNLIKELY(left < 1 || !isxdigit(src[0]) || !isxdigit(src[1])))
MDBX_CXX20_UNLIKELY throw std::domain_error( MDBX_CXX20_UNLIKELY throw std::domain_error("mdbx::from_hex:: invalid hexadecimal string");
"mdbx::from_hex:: invalid hexadecimal string");
int8_t hi = src[0]; int8_t hi = src[0];
hi = (hi | 0x20) - 'a'; hi = (hi | 0x20) - 'a';
@ -747,8 +730,7 @@ bool from_hex::is_erroneous() const noexcept {
bool got = false; bool got = false;
auto src = source.byte_ptr(); auto src = source.byte_ptr();
for (auto left = source.length(); left > 0;) { for (auto left = source.length(); left > 0;) {
if (MDBX_UNLIKELY(*src <= ' ') && if (MDBX_UNLIKELY(*src <= ' ') && MDBX_LIKELY(ignore_spaces && isspace(*src))) {
MDBX_LIKELY(ignore_spaces && isspace(*src))) {
++src; ++src;
--left; --left;
continue; continue;
@ -780,25 +762,21 @@ using b58_uint = uint_fast32_t;
#endif #endif
struct b58_buffer : public temp_buffer<b58_uint> { struct b58_buffer : public temp_buffer<b58_uint> {
b58_buffer(size_t bytes, size_t estimation_ratio_numerator, b58_buffer(size_t bytes, size_t estimation_ratio_numerator, size_t estimation_ratio_denominator, size_t extra = 0)
size_t estimation_ratio_denominator, size_t extra = 0) : temp_buffer(
: temp_buffer((/* recompute using the specified ratio */ (/* recompute using the specified ratio */
bytes = (bytes * estimation_ratio_numerator + bytes =
estimation_ratio_denominator - 1) / (bytes * estimation_ratio_numerator + estimation_ratio_denominator - 1) / estimation_ratio_denominator,
estimation_ratio_denominator, /* account for the reserved high byte in each word */
/* account for the reserved high byte in each word */ ((bytes + sizeof(b58_uint) - 2) / (sizeof(b58_uint) - 1) * sizeof(b58_uint) + extra) * sizeof(b58_uint))) {
((bytes + sizeof(b58_uint) - 2) / (sizeof(b58_uint) - 1) * }
sizeof(b58_uint) +
extra) *
sizeof(b58_uint))) {}
}; };
static byte b58_8to11(b58_uint &v) noexcept { static byte b58_8to11(b58_uint &v) noexcept {
static const char b58_alphabet[58] = { static const char b58_alphabet[58] = {'1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F',
'1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W',
'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'm',
'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'};
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'};
const auto i = size_t(v % 58); const auto i = size_t(v % 58);
v /= 58; v /= 58;
@ -807,9 +785,8 @@ static byte b58_8to11(b58_uint &v) noexcept {
static slice b58_encode(b58_buffer &buf, const byte *begin, const byte *end) { static slice b58_encode(b58_buffer &buf, const byte *begin, const byte *end) {
auto high = buf.end(); auto high = buf.end();
const auto modulo = const auto modulo = b58_uint((sizeof(b58_uint) > 4) ? UINT64_C(0x1A636A90B07A00) /* 58^9 */
b58_uint((sizeof(b58_uint) > 4) ? UINT64_C(0x1A636A90B07A00) /* 58^9 */ : UINT32_C(0xACAD10) /* 58^4 */);
: UINT32_C(0xACAD10) /* 58^4 */);
static_assert(sizeof(modulo) == 4 || sizeof(modulo) == 8, "WTF?"); static_assert(sizeof(modulo) == 4 || sizeof(modulo) == 8, "WTF?");
while (begin < end) { while (begin < end) {
b58_uint carry = *begin++; b58_uint carry = *begin++;
@ -855,8 +832,7 @@ static slice b58_encode(b58_buffer &buf, const byte *begin, const byte *end) {
return slice(output, ptr); return slice(output, ptr);
} }
char *to_base58::write_bytes(char *__restrict const dest, char *to_base58::write_bytes(char *__restrict const dest, size_t dest_size) const {
size_t dest_size) const {
if (MDBX_UNLIKELY(envisage_result_length() > dest_size)) if (MDBX_UNLIKELY(envisage_result_length() > dest_size))
MDBX_CXX20_UNLIKELY throw_too_small_target_buffer(); MDBX_CXX20_UNLIKELY throw_too_small_target_buffer();
@ -927,8 +903,7 @@ const signed char b58_map[256] = {
IL, IL, IL, IL, IL, IL, IL, IL, IL, IL, IL, IL, IL, IL, IL, IL // f0 IL, IL, IL, IL, IL, IL, IL, IL, IL, IL, IL, IL, IL, IL, IL, IL // f0
}; };
static slice b58_decode(b58_buffer &buf, const byte *begin, const byte *end, static slice b58_decode(b58_buffer &buf, const byte *begin, const byte *end, bool ignore_spaces) {
bool ignore_spaces) {
auto high = buf.end(); auto high = buf.end();
while (begin < end) { while (begin < end) {
const auto c = b58_map[*begin++]; const auto c = b58_map[*begin++];
@ -969,8 +944,7 @@ static slice b58_decode(b58_buffer &buf, const byte *begin, const byte *end,
return slice(output, ptr); return slice(output, ptr);
} }
char *from_base58::write_bytes(char *__restrict const dest, char *from_base58::write_bytes(char *__restrict const dest, size_t dest_size) const {
size_t dest_size) const {
if (MDBX_UNLIKELY(envisage_result_length() > dest_size)) if (MDBX_UNLIKELY(envisage_result_length() > dest_size))
MDBX_CXX20_UNLIKELY throw_too_small_target_buffer(); MDBX_CXX20_UNLIKELY throw_too_small_target_buffer();
@ -996,8 +970,7 @@ bool from_base58::is_erroneous() const noexcept {
auto begin = source.byte_ptr(); auto begin = source.byte_ptr();
auto const end = source.end_byte_ptr(); auto const end = source.end_byte_ptr();
while (begin < end) { while (begin < end) {
if (MDBX_UNLIKELY(b58_map[*begin] < 0 && if (MDBX_UNLIKELY(b58_map[*begin] < 0 && !(ignore_spaces && isspace(*begin))))
!(ignore_spaces && isspace(*begin))))
return true; return true;
++begin; ++begin;
} }
@ -1006,22 +979,18 @@ bool from_base58::is_erroneous() const noexcept {
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
static inline void b64_3to4(const byte x, const byte y, const byte z, static inline void b64_3to4(const byte x, const byte y, const byte z, char *__restrict dest) noexcept {
char *__restrict dest) noexcept { static const byte alphabet[64] = {'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P',
static const byte alphabet[64] = { 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f',
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', '/'};
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', '/'};
dest[0] = alphabet[(x & 0xfc) >> 2]; dest[0] = alphabet[(x & 0xfc) >> 2];
dest[1] = alphabet[((x & 0x03) << 4) + ((y & 0xf0) >> 4)]; dest[1] = alphabet[((x & 0x03) << 4) + ((y & 0xf0) >> 4)];
dest[2] = alphabet[((y & 0x0f) << 2) + ((z & 0xc0) >> 6)]; dest[2] = alphabet[((y & 0x0f) << 2) + ((z & 0xc0) >> 6)];
dest[3] = alphabet[z & 0x3f]; dest[3] = alphabet[z & 0x3f];
} }
char *to_base64::write_bytes(char *__restrict const dest, char *to_base64::write_bytes(char *__restrict const dest, size_t dest_size) const {
size_t dest_size) const {
if (MDBX_UNLIKELY(envisage_result_length() > dest_size)) if (MDBX_UNLIKELY(envisage_result_length() > dest_size))
MDBX_CXX20_UNLIKELY throw_too_small_target_buffer(); MDBX_CXX20_UNLIKELY throw_too_small_target_buffer();
@ -1115,8 +1084,7 @@ static const signed char b64_map[256] = {
IL, IL, IL, IL, IL, IL, IL, IL, IL, IL, IL, IL, IL, IL, IL, IL // f0 IL, IL, IL, IL, IL, IL, IL, IL, IL, IL, IL, IL, IL, IL, IL, IL // f0
}; };
static inline signed char b64_4to3(signed char a, signed char b, signed char c, static inline signed char b64_4to3(signed char a, signed char b, signed char c, signed char d,
signed char d,
char *__restrict dest) noexcept { char *__restrict dest) noexcept {
dest[0] = byte((a << 2) + ((b & 0x30) >> 4)); dest[0] = byte((a << 2) + ((b & 0x30) >> 4));
dest[1] = byte(((b & 0xf) << 4) + ((c & 0x3c) >> 2)); dest[1] = byte(((b & 0xf) << 4) + ((c & 0x3c) >> 2));
@ -1124,19 +1092,16 @@ static inline signed char b64_4to3(signed char a, signed char b, signed char c,
return a | b | c | d; return a | b | c | d;
} }
char *from_base64::write_bytes(char *__restrict const dest, char *from_base64::write_bytes(char *__restrict const dest, size_t dest_size) const {
size_t dest_size) const {
if (MDBX_UNLIKELY(source.length() % 4 && !ignore_spaces)) if (MDBX_UNLIKELY(source.length() % 4 && !ignore_spaces))
MDBX_CXX20_UNLIKELY throw std::domain_error( MDBX_CXX20_UNLIKELY throw std::domain_error("mdbx::from_base64:: odd length of base64 string");
"mdbx::from_base64:: odd length of base64 string");
if (MDBX_UNLIKELY(envisage_result_length() > dest_size)) if (MDBX_UNLIKELY(envisage_result_length() > dest_size))
MDBX_CXX20_UNLIKELY throw_too_small_target_buffer(); MDBX_CXX20_UNLIKELY throw_too_small_target_buffer();
auto ptr = dest; auto ptr = dest;
auto src = source.byte_ptr(); auto src = source.byte_ptr();
for (auto left = source.length(); left > 0;) { for (auto left = source.length(); left > 0;) {
if (MDBX_UNLIKELY(*src <= ' ') && if (MDBX_UNLIKELY(*src <= ' ') && MDBX_LIKELY(ignore_spaces && isspace(*src))) {
MDBX_LIKELY(ignore_spaces && isspace(*src))) {
++src; ++src;
--left; --left;
continue; continue;
@ -1147,8 +1112,7 @@ char *from_base64::write_bytes(char *__restrict const dest,
bailout: bailout:
throw std::domain_error("mdbx::from_base64:: invalid base64 string"); throw std::domain_error("mdbx::from_base64:: invalid base64 string");
} }
const signed char a = b64_map[src[0]], b = b64_map[src[1]], const signed char a = b64_map[src[0]], b = b64_map[src[1]], c = b64_map[src[2]], d = b64_map[src[3]];
c = b64_map[src[2]], d = b64_map[src[3]];
if (MDBX_UNLIKELY(b64_4to3(a, b, c, d, ptr) < 0)) { if (MDBX_UNLIKELY(b64_4to3(a, b, c, d, ptr) < 0)) {
if (left == 4 && (a | b) >= 0 && d == EQ) { if (left == 4 && (a | b) >= 0 && d == EQ) {
if (c >= 0) { if (c >= 0) {
@ -1177,8 +1141,7 @@ bool from_base64::is_erroneous() const noexcept {
bool got = false; bool got = false;
auto src = source.byte_ptr(); auto src = source.byte_ptr();
for (auto left = source.length(); left > 0;) { for (auto left = source.length(); left > 0;) {
if (MDBX_UNLIKELY(*src <= ' ') && if (MDBX_UNLIKELY(*src <= ' ') && MDBX_LIKELY(ignore_spaces && isspace(*src))) {
MDBX_LIKELY(ignore_spaces && isspace(*src))) {
++src; ++src;
--left; --left;
continue; continue;
@ -1186,8 +1149,7 @@ bool from_base64::is_erroneous() const noexcept {
if (MDBX_UNLIKELY(left < 3)) if (MDBX_UNLIKELY(left < 3))
MDBX_CXX20_UNLIKELY return false; MDBX_CXX20_UNLIKELY return false;
const signed char a = b64_map[src[0]], b = b64_map[src[1]], const signed char a = b64_map[src[0]], b = b64_map[src[1]], c = b64_map[src[2]], d = b64_map[src[3]];
c = b64_map[src[2]], d = b64_map[src[3]];
if (MDBX_UNLIKELY((a | b | c | d) < 0)) if (MDBX_UNLIKELY((a | b | c | d) < 0))
MDBX_CXX20_UNLIKELY { MDBX_CXX20_UNLIKELY {
if (left == 4 && (a | b) >= 0 && d == EQ && (c >= 0 || c == d)) if (left == 4 && (a | b) >= 0 && d == EQ && (c >= 0 || c == d))
@ -1205,8 +1167,7 @@ bool from_base64::is_erroneous() const noexcept {
template class LIBMDBX_API_TYPE buffer<legacy_allocator>; template class LIBMDBX_API_TYPE buffer<legacy_allocator>;
#if defined(__cpp_lib_memory_resource) && \ #if defined(__cpp_lib_memory_resource) && __cpp_lib_memory_resource >= 201603L && _GLIBCXX_USE_CXX11_ABI
__cpp_lib_memory_resource >= 201603L && _GLIBCXX_USE_CXX11_ABI
template class LIBMDBX_API_TYPE buffer<polymorphic_allocator>; template class LIBMDBX_API_TYPE buffer<polymorphic_allocator>;
#endif /* __cpp_lib_memory_resource >= 201603L */ #endif /* __cpp_lib_memory_resource >= 201603L */
@ -1225,8 +1186,7 @@ static inline MDBX_env_flags_t mode2flags(env::mode mode) {
} }
} }
__cold MDBX_env_flags_t __cold MDBX_env_flags_t env::operate_parameters::make_flags(bool accede, bool use_subdirectory) const {
env::operate_parameters::make_flags(bool accede, bool use_subdirectory) const {
MDBX_env_flags_t flags = mode2flags(mode); MDBX_env_flags_t flags = mode2flags(mode);
if (accede) if (accede)
flags |= MDBX_ACCEDE; flags |= MDBX_ACCEDE;
@ -1252,8 +1212,7 @@ env::operate_parameters::make_flags(bool accede, bool use_subdirectory) const {
flags |= MDBX_LIFORECLAIM; flags |= MDBX_LIFORECLAIM;
switch (durability) { switch (durability) {
default: default:
MDBX_CXX20_UNLIKELY throw std::invalid_argument( MDBX_CXX20_UNLIKELY throw std::invalid_argument("db::durability is invalid");
"db::durability is invalid");
case env::durability::robust_synchronous: case env::durability::robust_synchronous:
break; break;
case env::durability::half_synchronous_weak_last: case env::durability::half_synchronous_weak_last:
@ -1271,16 +1230,13 @@ env::operate_parameters::make_flags(bool accede, bool use_subdirectory) const {
return flags; return flags;
} }
env::mode env::mode env::operate_parameters::mode_from_flags(MDBX_env_flags_t flags) noexcept {
env::operate_parameters::mode_from_flags(MDBX_env_flags_t flags) noexcept {
if (flags & MDBX_RDONLY) if (flags & MDBX_RDONLY)
return env::mode::readonly; return env::mode::readonly;
return (flags & MDBX_WRITEMAP) ? env::mode::write_mapped_io return (flags & MDBX_WRITEMAP) ? env::mode::write_mapped_io : env::mode::write_file_io;
: env::mode::write_file_io;
} }
env::durability env::operate_parameters::durability_from_flags( env::durability env::operate_parameters::durability_from_flags(MDBX_env_flags_t flags) noexcept {
MDBX_env_flags_t flags) noexcept {
if ((flags & MDBX_UTTERLY_NOSYNC) == MDBX_UTTERLY_NOSYNC) if ((flags & MDBX_UTTERLY_NOSYNC) == MDBX_UTTERLY_NOSYNC)
return env::durability::whole_fragile; return env::durability::whole_fragile;
if (flags & MDBX_SAFE_NOSYNC) if (flags & MDBX_SAFE_NOSYNC)
@ -1291,71 +1247,51 @@ env::durability env::operate_parameters::durability_from_flags(
} }
env::reclaiming_options::reclaiming_options(MDBX_env_flags_t flags) noexcept env::reclaiming_options::reclaiming_options(MDBX_env_flags_t flags) noexcept
: lifo((flags & MDBX_LIFORECLAIM) ? true : false), : lifo((flags & MDBX_LIFORECLAIM) ? true : false), coalesce((flags & MDBX_COALESCE) ? true : false) {}
coalesce((flags & MDBX_COALESCE) ? true : false) {}
env::operate_options::operate_options(MDBX_env_flags_t flags) noexcept env::operate_options::operate_options(MDBX_env_flags_t flags) noexcept
: no_sticky_threads(((flags & (MDBX_NOSTICKYTHREADS | MDBX_EXCLUSIVE)) == : no_sticky_threads(((flags & (MDBX_NOSTICKYTHREADS | MDBX_EXCLUSIVE)) == MDBX_NOSTICKYTHREADS) ? true : false),
MDBX_NOSTICKYTHREADS) nested_write_transactions((flags & (MDBX_WRITEMAP | MDBX_RDONLY)) ? false : true),
? true exclusive((flags & MDBX_EXCLUSIVE) ? true : false), disable_readahead((flags & MDBX_NORDAHEAD) ? true : false),
: false),
nested_write_transactions((flags & (MDBX_WRITEMAP | MDBX_RDONLY)) ? false
: true),
exclusive((flags & MDBX_EXCLUSIVE) ? true : false),
disable_readahead((flags & MDBX_NORDAHEAD) ? true : false),
disable_clear_memory((flags & MDBX_NOMEMINIT) ? true : false) {} disable_clear_memory((flags & MDBX_NOMEMINIT) ? true : false) {}
bool env::is_pristine() const { bool env::is_pristine() const { return get_stat().ms_mod_txnid == 0 && get_info().mi_recent_txnid == INITIAL_TXNID; }
return get_stat().ms_mod_txnid == 0 &&
get_info().mi_recent_txnid == INITIAL_TXNID;
}
bool env::is_empty() const { return get_stat().ms_leaf_pages == 0; } bool env::is_empty() const { return get_stat().ms_leaf_pages == 0; }
__cold env &env::copy(filehandle fd, bool compactify, bool force_dynamic_size) { __cold env &env::copy(filehandle fd, bool compactify, bool force_dynamic_size) {
error::success_or_throw( error::success_or_throw(::mdbx_env_copy2fd(handle_, fd,
::mdbx_env_copy2fd(handle_, fd, (compactify ? MDBX_CP_COMPACT : MDBX_CP_DEFAULTS) |
(compactify ? MDBX_CP_COMPACT : MDBX_CP_DEFAULTS) | (force_dynamic_size ? MDBX_CP_FORCE_DYNAMIC_SIZE : MDBX_CP_DEFAULTS)));
(force_dynamic_size ? MDBX_CP_FORCE_DYNAMIC_SIZE
: MDBX_CP_DEFAULTS)));
return *this; return *this;
} }
__cold env &env::copy(const char *destination, bool compactify, __cold env &env::copy(const char *destination, bool compactify, bool force_dynamic_size) {
bool force_dynamic_size) { error::success_or_throw(::mdbx_env_copy(handle_, destination,
error::success_or_throw( (compactify ? MDBX_CP_COMPACT : MDBX_CP_DEFAULTS) |
::mdbx_env_copy(handle_, destination, (force_dynamic_size ? MDBX_CP_FORCE_DYNAMIC_SIZE : MDBX_CP_DEFAULTS)));
(compactify ? MDBX_CP_COMPACT : MDBX_CP_DEFAULTS) |
(force_dynamic_size ? MDBX_CP_FORCE_DYNAMIC_SIZE
: MDBX_CP_DEFAULTS)));
return *this; return *this;
} }
__cold env &env::copy(const ::std::string &destination, bool compactify, __cold env &env::copy(const ::std::string &destination, bool compactify, bool force_dynamic_size) {
bool force_dynamic_size) {
return copy(destination.c_str(), compactify, force_dynamic_size); return copy(destination.c_str(), compactify, force_dynamic_size);
} }
#if defined(_WIN32) || defined(_WIN64) #if defined(_WIN32) || defined(_WIN64)
__cold env &env::copy(const wchar_t *destination, bool compactify, __cold env &env::copy(const wchar_t *destination, bool compactify, bool force_dynamic_size) {
bool force_dynamic_size) { error::success_or_throw(::mdbx_env_copyW(handle_, destination,
error::success_or_throw( (compactify ? MDBX_CP_COMPACT : MDBX_CP_DEFAULTS) |
::mdbx_env_copyW(handle_, destination, (force_dynamic_size ? MDBX_CP_FORCE_DYNAMIC_SIZE : MDBX_CP_DEFAULTS)));
(compactify ? MDBX_CP_COMPACT : MDBX_CP_DEFAULTS) |
(force_dynamic_size ? MDBX_CP_FORCE_DYNAMIC_SIZE
: MDBX_CP_DEFAULTS)));
return *this; return *this;
} }
env &env::copy(const ::std::wstring &destination, bool compactify, env &env::copy(const ::std::wstring &destination, bool compactify, bool force_dynamic_size) {
bool force_dynamic_size) {
return copy(destination.c_str(), compactify, force_dynamic_size); return copy(destination.c_str(), compactify, force_dynamic_size);
} }
#endif /* Windows */ #endif /* Windows */
#ifdef MDBX_STD_FILESYSTEM_PATH #ifdef MDBX_STD_FILESYSTEM_PATH
__cold env &env::copy(const MDBX_STD_FILESYSTEM_PATH &destination, __cold env &env::copy(const MDBX_STD_FILESYSTEM_PATH &destination, bool compactify, bool force_dynamic_size) {
bool compactify, bool force_dynamic_size) {
return copy(destination.native(), compactify, force_dynamic_size); return copy(destination.native(), compactify, force_dynamic_size);
} }
#endif /* MDBX_STD_FILESYSTEM_PATH */ #endif /* MDBX_STD_FILESYSTEM_PATH */
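The copy() overloads above produce a point-in-time backup of a live environment; a minimal usage sketch (the destination path is a placeholder):

    #include "mdbx.h++"

    // Sketch: write a compacted backup of an open environment to a new file.
    void backup(::mdbx::env &env) {
      env.copy("/tmp/mdbx-backup.db", /*compactify=*/true, /*force_dynamic_size=*/false);
    }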
@ -1375,8 +1311,7 @@ __cold path env::get_path() const {
} }
__cold bool env::remove(const char *pathname, const remove_mode mode) { __cold bool env::remove(const char *pathname, const remove_mode mode) {
return !error::boolean_or_throw( return !error::boolean_or_throw(::mdbx_env_delete(pathname, MDBX_env_delete_mode_t(mode)));
::mdbx_env_delete(pathname, MDBX_env_delete_mode_t(mode)));
} }
__cold bool env::remove(const ::std::string &pathname, const remove_mode mode) { __cold bool env::remove(const ::std::string &pathname, const remove_mode mode) {
@ -1385,19 +1320,16 @@ __cold bool env::remove(const ::std::string &pathname, const remove_mode mode) {
#if defined(_WIN32) || defined(_WIN64) #if defined(_WIN32) || defined(_WIN64)
__cold bool env::remove(const wchar_t *pathname, const remove_mode mode) { __cold bool env::remove(const wchar_t *pathname, const remove_mode mode) {
return !error::boolean_or_throw( return !error::boolean_or_throw(::mdbx_env_deleteW(pathname, MDBX_env_delete_mode_t(mode)));
::mdbx_env_deleteW(pathname, MDBX_env_delete_mode_t(mode)));
} }
__cold bool env::remove(const ::std::wstring &pathname, __cold bool env::remove(const ::std::wstring &pathname, const remove_mode mode) {
const remove_mode mode) {
return remove(pathname.c_str(), mode); return remove(pathname.c_str(), mode);
} }
#endif /* Windows */ #endif /* Windows */
#ifdef MDBX_STD_FILESYSTEM_PATH #ifdef MDBX_STD_FILESYSTEM_PATH
__cold bool env::remove(const MDBX_STD_FILESYSTEM_PATH &pathname, __cold bool env::remove(const MDBX_STD_FILESYSTEM_PATH &pathname, const remove_mode mode) {
const remove_mode mode) {
return remove(pathname.native(), mode); return remove(pathname.native(), mode);
} }
#endif /* MDBX_STD_FILESYSTEM_PATH */ #endif /* MDBX_STD_FILESYSTEM_PATH */
@ -1413,13 +1345,11 @@ static inline MDBX_env *create_env() {
__cold env_managed::~env_managed() noexcept { __cold env_managed::~env_managed() noexcept {
if (MDBX_UNLIKELY(handle_)) if (MDBX_UNLIKELY(handle_))
MDBX_CXX20_UNLIKELY error::success_or_panic( MDBX_CXX20_UNLIKELY error::success_or_panic(::mdbx_env_close(handle_), "mdbx::~env()", "mdbx_env_close");
::mdbx_env_close(handle_), "mdbx::~env()", "mdbx_env_close");
} }
__cold void env_managed::close(bool dont_sync) { __cold void env_managed::close(bool dont_sync) {
const error rc = const error rc = static_cast<MDBX_error_t>(::mdbx_env_close_ex(handle_, dont_sync));
static_cast<MDBX_error_t>(::mdbx_env_close_ex(handle_, dont_sync));
switch (rc.code()) { switch (rc.code()) {
case MDBX_EBADSIGN: case MDBX_EBADSIGN:
MDBX_CXX20_UNLIKELY handle_ = nullptr; MDBX_CXX20_UNLIKELY handle_ = nullptr;
@ -1438,87 +1368,69 @@ __cold void env_managed::setup(unsigned max_maps, unsigned max_readers) {
error::success_or_throw(::mdbx_env_set_maxdbs(handle_, max_maps)); error::success_or_throw(::mdbx_env_set_maxdbs(handle_, max_maps));
} }
__cold env_managed::env_managed(const char *pathname, __cold env_managed::env_managed(const char *pathname, const operate_parameters &op, bool accede)
const operate_parameters &op, bool accede)
: env_managed(create_env()) { : env_managed(create_env()) {
setup(op.max_maps, op.max_readers); setup(op.max_maps, op.max_readers);
error::success_or_throw( error::success_or_throw(::mdbx_env_open(handle_, pathname, op.make_flags(accede), 0));
::mdbx_env_open(handle_, pathname, op.make_flags(accede), 0));
if (op.options.nested_write_transactions && if (op.options.nested_write_transactions && !get_options().nested_write_transactions)
!get_options().nested_write_transactions)
MDBX_CXX20_UNLIKELY error::throw_exception(MDBX_INCOMPATIBLE); MDBX_CXX20_UNLIKELY error::throw_exception(MDBX_INCOMPATIBLE);
} }
__cold env_managed::env_managed(const char *pathname, __cold env_managed::env_managed(const char *pathname, const env_managed::create_parameters &cp,
const env_managed::create_parameters &cp,
const env::operate_parameters &op, bool accede) const env::operate_parameters &op, bool accede)
: env_managed(create_env()) { : env_managed(create_env()) {
setup(op.max_maps, op.max_readers); setup(op.max_maps, op.max_readers);
set_geometry(cp.geometry); set_geometry(cp.geometry);
error::success_or_throw(::mdbx_env_open( error::success_or_throw(
handle_, pathname, op.make_flags(accede, cp.use_subdirectory), ::mdbx_env_open(handle_, pathname, op.make_flags(accede, cp.use_subdirectory), cp.file_mode_bits));
cp.file_mode_bits));
if (op.options.nested_write_transactions && if (op.options.nested_write_transactions && !get_options().nested_write_transactions)
!get_options().nested_write_transactions)
MDBX_CXX20_UNLIKELY error::throw_exception(MDBX_INCOMPATIBLE); MDBX_CXX20_UNLIKELY error::throw_exception(MDBX_INCOMPATIBLE);
} }
__cold env_managed::env_managed(const ::std::string &pathname, __cold env_managed::env_managed(const ::std::string &pathname, const operate_parameters &op, bool accede)
const operate_parameters &op, bool accede)
: env_managed(pathname.c_str(), op, accede) {} : env_managed(pathname.c_str(), op, accede) {}
__cold env_managed::env_managed(const ::std::string &pathname, __cold env_managed::env_managed(const ::std::string &pathname, const env_managed::create_parameters &cp,
const env_managed::create_parameters &cp,
const env::operate_parameters &op, bool accede) const env::operate_parameters &op, bool accede)
: env_managed(pathname.c_str(), cp, op, accede) {} : env_managed(pathname.c_str(), cp, op, accede) {}
#if defined(_WIN32) || defined(_WIN64) #if defined(_WIN32) || defined(_WIN64)
__cold env_managed::env_managed(const wchar_t *pathname, __cold env_managed::env_managed(const wchar_t *pathname, const operate_parameters &op, bool accede)
const operate_parameters &op, bool accede)
: env_managed(create_env()) { : env_managed(create_env()) {
setup(op.max_maps, op.max_readers); setup(op.max_maps, op.max_readers);
error::success_or_throw( error::success_or_throw(::mdbx_env_openW(handle_, pathname, op.make_flags(accede), 0));
::mdbx_env_openW(handle_, pathname, op.make_flags(accede), 0));
if (op.options.nested_write_transactions && if (op.options.nested_write_transactions && !get_options().nested_write_transactions)
!get_options().nested_write_transactions)
MDBX_CXX20_UNLIKELY error::throw_exception(MDBX_INCOMPATIBLE); MDBX_CXX20_UNLIKELY error::throw_exception(MDBX_INCOMPATIBLE);
} }
__cold env_managed::env_managed(const wchar_t *pathname, __cold env_managed::env_managed(const wchar_t *pathname, const env_managed::create_parameters &cp,
const env_managed::create_parameters &cp,
const env::operate_parameters &op, bool accede) const env::operate_parameters &op, bool accede)
: env_managed(create_env()) { : env_managed(create_env()) {
setup(op.max_maps, op.max_readers); setup(op.max_maps, op.max_readers);
set_geometry(cp.geometry); set_geometry(cp.geometry);
error::success_or_throw(::mdbx_env_openW( error::success_or_throw(
handle_, pathname, op.make_flags(accede, cp.use_subdirectory), ::mdbx_env_openW(handle_, pathname, op.make_flags(accede, cp.use_subdirectory), cp.file_mode_bits));
cp.file_mode_bits));
if (op.options.nested_write_transactions && if (op.options.nested_write_transactions && !get_options().nested_write_transactions)
!get_options().nested_write_transactions)
MDBX_CXX20_UNLIKELY error::throw_exception(MDBX_INCOMPATIBLE); MDBX_CXX20_UNLIKELY error::throw_exception(MDBX_INCOMPATIBLE);
} }
__cold env_managed::env_managed(const ::std::wstring &pathname, __cold env_managed::env_managed(const ::std::wstring &pathname, const operate_parameters &op, bool accede)
const operate_parameters &op, bool accede)
: env_managed(pathname.c_str(), op, accede) {} : env_managed(pathname.c_str(), op, accede) {}
__cold env_managed::env_managed(const ::std::wstring &pathname, __cold env_managed::env_managed(const ::std::wstring &pathname, const env_managed::create_parameters &cp,
const env_managed::create_parameters &cp,
const env::operate_parameters &op, bool accede) const env::operate_parameters &op, bool accede)
: env_managed(pathname.c_str(), cp, op, accede) {} : env_managed(pathname.c_str(), cp, op, accede) {}
#endif /* Windows */ #endif /* Windows */
#ifdef MDBX_STD_FILESYSTEM_PATH #ifdef MDBX_STD_FILESYSTEM_PATH
__cold env_managed::env_managed(const MDBX_STD_FILESYSTEM_PATH &pathname, __cold env_managed::env_managed(const MDBX_STD_FILESYSTEM_PATH &pathname, const operate_parameters &op, bool accede)
const operate_parameters &op, bool accede)
: env_managed(pathname.native(), op, accede) {} : env_managed(pathname.native(), op, accede) {}
__cold env_managed::env_managed(const MDBX_STD_FILESYSTEM_PATH &pathname, __cold env_managed::env_managed(const MDBX_STD_FILESYSTEM_PATH &pathname, const env_managed::create_parameters &cp,
const env_managed::create_parameters &cp,
const env::operate_parameters &op, bool accede) const env::operate_parameters &op, bool accede)
: env_managed(pathname.native(), cp, op, accede) {} : env_managed(pathname.native(), cp, op, accede) {}
#endif /* MDBX_STD_FILESYSTEM_PATH */ #endif /* MDBX_STD_FILESYSTEM_PATH */
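These constructors are the usual application entry point; a minimal sketch of creating an environment and writing one key/value pair, with the path, map name and option values chosen purely for illustration and library defaults assumed elsewhere:

    #include "mdbx.h++"

    int main() {
      ::mdbx::env::operate_parameters op;
      op.max_maps = 4;
      ::mdbx::env_managed::create_parameters cp; // default geometry and file mode
      ::mdbx::env_managed env("./demo-db", cp, op);

      ::mdbx::txn_managed txn = env.start_write();
      ::mdbx::map_handle map = txn.create_map("demo", ::mdbx::key_mode::usual, ::mdbx::value_mode::single);
      txn.upsert(map, ::mdbx::slice("key"), ::mdbx::slice("value"));
      txn.commit();
      return 0;
    }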
@ -1528,16 +1440,14 @@ __cold env_managed::env_managed(const MDBX_STD_FILESYSTEM_PATH &pathname,
txn_managed txn::start_nested() {
  MDBX_txn *nested;
  error::throw_on_nullptr(handle_, MDBX_BAD_TXN);
  error::success_or_throw(::mdbx_txn_begin(mdbx_txn_env(handle_), handle_, MDBX_TXN_READWRITE, &nested));
  assert(nested != nullptr);
  return txn_managed(nested);
}

txn_managed::~txn_managed() noexcept {
  if (MDBX_UNLIKELY(handle_))
    MDBX_CXX20_UNLIKELY error::success_or_panic(::mdbx_txn_abort(handle_), "mdbx::~txn", "mdbx_txn_abort");
}

void txn_managed::abort() {
@ -1557,8 +1467,7 @@ void txn_managed::commit() {
}

void txn_managed::commit(commit_latency *latency) {
  const error err = static_cast<MDBX_error_t>(::mdbx_txn_commit_ex(handle_, latency));
  if (MDBX_LIKELY(err.code() != MDBX_THREAD_MISMATCH))
    MDBX_CXX20_LIKELY handle_ = nullptr;
  if (MDBX_UNLIKELY(err.code() != MDBX_SUCCESS))
@ -1568,8 +1477,7 @@ void txn_managed::commit(commit_latency *latency) {
void txn_managed::commit_embark_read() {
  auto env = this->env();
  commit();
  error::success_or_throw(::mdbx_txn_begin(env, nullptr, MDBX_TXN_RDONLY, &handle_));
}
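A sketch of how start_nested() and commit_embark_read() are intended to be driven from user code (illustrative only; `env` is assumed to be an already-opened mdbx::env_managed whose mode permits nested write transactions):

void example(mdbx::env_managed &env) {
  mdbx::txn_managed parent = env.start_write();
  {
    mdbx::txn_managed nested = parent.start_nested();
    // ... tentative changes made through `nested` ...
    nested.abort(); // discards only the nested level; `parent` stays usable
  }
  parent.commit(); // or parent.commit_embark_read() to continue with a
                   // read-only transaction right after committing
}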
//------------------------------------------------------------------------------
@ -1608,8 +1516,7 @@ __cold bool txn::clear_map(const char *name, bool throw_if_absent) {
  }
}

__cold bool txn::rename_map(const char *old_name, const char *new_name, bool throw_if_absent) {
  map_handle map;
  const int err = ::mdbx_dbi_open(handle_, old_name, MDBX_DB_ACCEDE, &map.dbi);
  switch (err) {
@ -1660,9 +1567,7 @@ __cold bool txn::clear_map(const ::mdbx::slice &name, bool throw_if_absent) {
  }
}

__cold bool txn::rename_map(const ::mdbx::slice &old_name, const ::mdbx::slice &new_name, bool throw_if_absent) {
  map_handle map;
  const int err = ::mdbx_dbi_open2(handle_, old_name, MDBX_DB_ACCEDE, &map.dbi);
  switch (err) {
@ -1679,11 +1584,8 @@ __cold bool txn::rename_map(const ::mdbx::slice &old_name,
  }
}

__cold bool txn::rename_map(const ::std::string &old_name, const ::std::string &new_name, bool throw_if_absent) {
  return rename_map(::mdbx::slice(old_name), ::mdbx::slice(new_name), throw_if_absent);
}
//------------------------------------------------------------------------------
@ -1723,12 +1625,10 @@ __cold ::std::ostream &operator<<(::std::ostream &out, const pair &it) {
}

__cold ::std::ostream &operator<<(::std::ostream &out, const pair_result &it) {
  return out << "{" << (it.done ? "done: " : "non-done: ") << it.key << " => " << it.value << "}";
}

__cold ::std::ostream &operator<<(::std::ostream &out, const ::mdbx::env::geometry::size &it) {
  switch (it.bytes) {
  case ::mdbx::env::geometry::default_value:
    return out << "default";
@ -1738,8 +1638,7 @@ __cold ::std::ostream &operator<<(::std::ostream &out,
    return out << "maximal";
  }

  const auto bytes = (it.bytes < 0) ? out << "-", size_t(-it.bytes) : size_t(it.bytes);
  struct {
    size_t one;
    const char *suffix;
@ -1769,8 +1668,7 @@ __cold ::std::ostream &operator<<(::std::ostream &out,
  return out;
}

__cold ::std::ostream &operator<<(::std::ostream &out, const env::geometry &it) {
  return //
      out << "\tlower " << env::geometry::size(it.size_lower) //
          << ",\n\tnow " << env::geometry::size(it.size_now)  //
@ -1780,8 +1678,7 @@ __cold ::std::ostream &operator<<(::std::ostream &out,
          << ",\n\tpagesize " << env::geometry::size(it.pagesize) << "\n";
}

__cold ::std::ostream &operator<<(::std::ostream &out, const env::operate_parameters &it) {
  return out << "{\n"                                 //
             << "\tmax_maps " << it.max_maps          //
             << ",\n\tmax_readers " << it.max_readers //
@ -1805,8 +1702,7 @@ __cold ::std::ostream &operator<<(::std::ostream &out, const env::mode &it) {
  }
}

__cold ::std::ostream &operator<<(::std::ostream &out, const env::durability &it) {
  switch (it) {
  case env::durability::robust_synchronous:
    return out << "robust_synchronous";
@ -1821,16 +1717,14 @@ __cold ::std::ostream &operator<<(::std::ostream &out,
  }
}

__cold ::std::ostream &operator<<(::std::ostream &out, const env::reclaiming_options &it) {
  return out << "{"                                            //
             << "lifo: " << (it.lifo ? "yes" : "no")           //
             << ", coalesce: " << (it.coalesce ? "yes" : "no") //
             << "}";
}

__cold ::std::ostream &operator<<(::std::ostream &out, const env::operate_options &it) {
  static const char comma[] = ", ";
  const char *delimiter = "";
  out << "{";
@ -1859,8 +1753,7 @@ __cold ::std::ostream &operator<<(::std::ostream &out,
  return out << "}";
}

__cold ::std::ostream &operator<<(::std::ostream &out, const env_managed::create_parameters &it) {
  return out << "{\n"                                                        //
             << "\tfile_mode " << std::oct << it.file_mode_bits << std::dec  //
             << ",\n\tsubdirectory " << (it.use_subdirectory ? "yes" : "no") //
@ -1868,8 +1761,7 @@ __cold ::std::ostream &operator<<(::std::ostream &out,
             << it.geometry << "}";
}

__cold ::std::ostream &operator<<(::std::ostream &out, const MDBX_log_level_t &it) {
  switch (it) {
  case MDBX_LOG_FATAL:
    return out << "LOG_FATAL";
@ -1894,8 +1786,7 @@ __cold ::std::ostream &operator<<(::std::ostream &out,
  }
}

__cold ::std::ostream &operator<<(::std::ostream &out, const MDBX_debug_flags_t &it) {
  if (it == MDBX_DBG_DONTCHANGE)
    return out << "DBG_DONTCHANGE";
@ -1931,8 +1822,7 @@ __cold ::std::ostream &operator<<(::std::ostream &out,
  return out << "}";
}

__cold ::std::ostream &operator<<(::std::ostream &out, const ::mdbx::error &err) {
  return out << err.what() << " (" << long(err.code()) << ")";
}
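These inserters make the configuration types printable directly; a small illustrative use (assuming env::operate_parameters is default-constructible, as the public mdbx.h++ header suggests):

#include <iostream>
#include "mdbx.h++"

int main() {
  mdbx::env::operate_parameters op; // assumed default-constructible
  op.max_maps = 8;                  // illustrative value
  std::cout << op << std::endl;     // prints max_maps, max_readers and the remaining fields
  return 0;
}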


@ -9,15 +9,11 @@ typedef struct meta_snap {
} meta_snap_t;

static inline txnid_t fetch_txnid(const volatile mdbx_atomic_uint32_t *ptr) {
#if (defined(__amd64__) || defined(__e2k__)) && !defined(ENABLE_UBSAN) && MDBX_UNALIGNED_OK >= 8
  return atomic_load64((const volatile mdbx_atomic_uint64_t *)ptr, mo_AcquireRelease);
#else
  const uint32_t l = atomic_load32(&ptr[__BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__], mo_AcquireRelease);
  const uint32_t h = atomic_load32(&ptr[__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__], mo_AcquireRelease);
  return (uint64_t)h << 32 | l;
#endif
}
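The fallback branch assembles the 64-bit txnid from two 32-bit acquire loads, picking the low/high halves by endianness. A standalone model of the same composition (names here are illustrative, not the library's); on its own such a pair of loads could observe a torn value, which callers tolerate because constmeta_txnid() only accepts a txnid whose two stored copies agree:

#include <atomic>
#include <cstdint>

// Model: one 64-bit id stored as two adjacent 32-bit atomics (little-endian layout).
static std::atomic<uint32_t> id_lo{0}, id_hi{0};

uint64_t read_id_model() {
  const uint32_t l = id_lo.load(std::memory_order_acquire);
  const uint32_t h = id_hi.load(std::memory_order_acquire);
  return (uint64_t)h << 32 | l; // same composition as the #else branch above
}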
@ -33,9 +29,7 @@ static inline meta_snap_t meta_snap(const volatile meta_t *meta) {
  return r;
}

txnid_t meta_txnid(const volatile meta_t *meta) { return meta_snap(meta).txnid; }

meta_ptr_t meta_ptr(const MDBX_env *env, unsigned n) {
  eASSERT(env, n < NUM_METAS);
@ -46,16 +40,13 @@ meta_ptr_t meta_ptr(const MDBX_env *env, unsigned n) {
  return r;
}

static uint8_t meta_cmp2pack(uint8_t c01, uint8_t c02, uint8_t c12, bool s0, bool s1, bool s2) {
  assert(c01 < 3 && c02 < 3 && c12 < 3);
  /* assert(s0 < 2 && s1 < 2 && s2 < 2); */
  const uint8_t recent =
      meta_cmp2recent(c01, s0, s1) ? (meta_cmp2recent(c02, s0, s2) ? 0 : 2) : (meta_cmp2recent(c12, s1, s2) ? 1 : 2);
  const uint8_t prefer_steady =
      meta_cmp2steady(c01, s0, s1) ? (meta_cmp2steady(c02, s0, s2) ? 0 : 2) : (meta_cmp2steady(c12, s1, s2) ? 1 : 2);

  uint8_t tail;
  if (recent == 0)
@ -65,10 +56,8 @@ static uint8_t meta_cmp2pack(uint8_t c01, uint8_t c02, uint8_t c12, bool s0,
  else
    tail = meta_cmp2steady(c01, s0, s1) ? 1 : 0;

  const bool valid = c01 != 1 || s0 != s1 || c02 != 1 || s0 != s2 || c12 != 1 || s1 != s2;
  const bool strict = (c01 != 1 || s0 != s1) && (c02 != 1 || s0 != s2) && (c12 != 1 || s1 != s2);
  return tail | recent << 2 | prefer_steady << 4 | strict << 6 | valid << 7;
}
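The final return packs the whole decision into a single byte: bits 0-1 hold the tail index, bits 2-3 the most recent meta, bits 4-5 the preferred steady meta, bit 6 the strict flag and bit 7 the validity flag. A standalone sketch of reading such a byte back (the struct and field names are illustrative, not the library's):

#include <cstdint>
#include <cstdio>

struct troika_view {
  unsigned tail, recent, prefer_steady;
  bool strict, valid;
};

static troika_view unpack(uint8_t packed) {
  return {packed & 3u, (packed >> 2) & 3u, (packed >> 4) & 3u,
          ((packed >> 6) & 1u) != 0, ((packed >> 7) & 1u) != 0};
}

int main() {
  // 232 == 0b11101000: tail 0, recent 2, prefer_steady 2, strict and valid set.
  const troika_view v = unpack(232);
  std::printf("tail=%u recent=%u steady=%u strict=%d valid=%d\n",
              v.tail, v.recent, v.prefer_steady, v.strict, v.valid);
  return 0;
}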
@ -82,21 +71,16 @@ static inline void meta_troika_unpack(troika_t *troika, const uint8_t packed) {
}
static const uint8_t troika_fsm_map[2 * 2 * 2 * 3 * 3 * 3] = { static const uint8_t troika_fsm_map[2 * 2 * 2 * 3 * 3 * 3] = {
232, 201, 216, 216, 232, 233, 232, 232, 168, 201, 216, 152, 168, 233, 232, 232, 201, 216, 216, 232, 233, 232, 232, 168, 201, 216, 152, 168, 233, 232, 168, 233, 201, 216, 201, 233, 233,
168, 233, 201, 216, 201, 233, 233, 232, 233, 168, 201, 152, 216, 232, 169, 232, 233, 168, 201, 152, 216, 232, 169, 232, 168, 168, 193, 152, 152, 168, 169, 232, 168, 169, 193, 152, 194,
232, 168, 168, 193, 152, 152, 168, 169, 232, 168, 169, 193, 152, 194, 233, 233, 169, 232, 169, 232, 201, 216, 216, 232, 201, 232, 232, 168, 193, 216, 152, 168, 193, 232, 168, 193, 193,
169, 232, 169, 232, 201, 216, 216, 232, 201, 232, 232, 168, 193, 216, 152, 210, 194, 225, 193, 225, 193, 168, 137, 212, 214, 232, 233, 168, 168, 168, 137, 212, 150, 168, 233, 168, 168,
168, 193, 232, 168, 193, 193, 210, 194, 225, 193, 225, 193, 168, 137, 212, 169, 137, 216, 201, 233, 233, 168, 169, 168, 137, 148, 214, 232, 169, 168, 168, 40, 129, 148, 150, 168, 169,
214, 232, 233, 168, 168, 168, 137, 212, 150, 168, 233, 168, 168, 169, 137, 168, 40, 169, 129, 152, 194, 233, 169, 168, 169, 168, 137, 214, 214, 232, 201, 168, 168, 168, 129, 214, 150,
216, 201, 233, 233, 168, 169, 168, 137, 148, 214, 232, 169, 168, 168, 40, 168, 193, 168, 168, 129, 129, 210, 194, 225, 193, 161, 129, 212, 198, 212, 214, 228, 228, 212, 212, 148, 201,
129, 148, 150, 168, 169, 168, 40, 169, 129, 152, 194, 233, 169, 168, 169, 212, 150, 164, 233, 212, 148, 233, 201, 216, 201, 233, 233, 216, 233, 148, 198, 148, 214, 228, 164, 212, 148,
168, 137, 214, 214, 232, 201, 168, 168, 168, 129, 214, 150, 168, 193, 168, 148, 194, 148, 150, 164, 169, 212, 148, 169, 194, 152, 194, 233, 169, 216, 169, 214, 198, 214, 214, 228, 198,
168, 129, 129, 210, 194, 225, 193, 161, 129, 212, 198, 212, 214, 228, 228, 212, 214, 150, 194, 214, 150, 164, 193, 212, 150, 194, 194, 210, 194, 225, 193, 210, 194};
212, 212, 148, 201, 212, 150, 164, 233, 212, 148, 233, 201, 216, 201, 233,
233, 216, 233, 148, 198, 148, 214, 228, 164, 212, 148, 148, 194, 148, 150,
164, 169, 212, 148, 169, 194, 152, 194, 233, 169, 216, 169, 214, 198, 214,
214, 228, 198, 212, 214, 150, 194, 214, 150, 164, 193, 212, 150, 194, 194,
210, 194, 225, 193, 210, 194};
__cold bool troika_verify_fsm(void) {
  bool ok = true;
@ -117,12 +101,10 @@ __cold bool troika_verify_fsm(void) {
    const bool strict = TROIKA_STRICT_VALID(&troika);
    const bool valid = TROIKA_VALID(&troika);

    const uint8_t recent_chk =
        meta_cmp2recent(c01, s0, s1) ? (meta_cmp2recent(c02, s0, s2) ? 0 : 2) : (meta_cmp2recent(c12, s1, s2) ? 1 : 2);
    const uint8_t prefer_steady_chk =
        meta_cmp2steady(c01, s0, s1) ? (meta_cmp2steady(c02, s0, s2) ? 0 : 2) : (meta_cmp2steady(c12, s1, s2) ? 1 : 2);

    uint8_t tail_chk;
    if (recent_chk == 0)
@ -132,20 +114,16 @@ __cold bool troika_verify_fsm(void) {
    else
      tail_chk = meta_cmp2steady(c01, s0, s1) ? 1 : 0;

    const bool valid_chk = c01 != 1 || s0 != s1 || c02 != 1 || s0 != s2 || c12 != 1 || s1 != s2;
    const bool strict_chk = (c01 != 1 || s0 != s1) && (c02 != 1 || s0 != s2) && (c12 != 1 || s1 != s2);
    assert(troika.recent == recent_chk);
    assert(troika.prefer_steady == prefer_steady_chk);
    assert(tail == tail_chk);
    assert(valid == valid_chk);
    assert(strict == strict_chk);
    assert(troika_fsm_map[troika.fsm] == packed);
    if (troika.recent != recent_chk || troika.prefer_steady != prefer_steady_chk || tail != tail_chk ||
        valid != valid_chk || strict != strict_chk || troika_fsm_map[troika.fsm] != packed) {
      ok = false;
    }
  }
@ -181,27 +159,24 @@ txnid_t recent_committed_txnid(const MDBX_env *env) {
static inline bool meta_eq(const troika_t *troika, size_t a, size_t b) {
  assert(a < NUM_METAS && b < NUM_METAS);
  return troika->txnid[a] == troika->txnid[b] && (((troika->fsm >> a) ^ (troika->fsm >> b)) & 1) == 0 &&
         troika->txnid[a];
}

unsigned meta_eq_mask(const troika_t *troika) {
  return meta_eq(troika, 0, 1) | meta_eq(troika, 1, 2) << 1 | meta_eq(troika, 2, 0) << 2;
}

__hot bool meta_should_retry(const MDBX_env *env, troika_t *troika) {
  const troika_t prev = *troika;
  *troika = meta_tap(env);
  return prev.fsm != troika->fsm || prev.txnid[0] != troika->txnid[0] || prev.txnid[1] != troika->txnid[1] ||
         prev.txnid[2] != troika->txnid[2];
}

const char *durable_caption(const meta_t *const meta) {
  if (meta_is_steady(meta))
    return (meta_sign_get(meta) == meta_sign_calculate(meta)) ? "Steady" : "Tainted";
  return "Weak";
}
@ -214,20 +189,16 @@ __cold void meta_troika_dump(const MDBX_env *env, const troika_t *troika) {
"base=%d-%" PRIaTXN ".%c, " "base=%d-%" PRIaTXN ".%c, "
"tail=%d-%" PRIaTXN ".%c, " "tail=%d-%" PRIaTXN ".%c, "
"valid %c, strict %c", "valid %c, strict %c",
          troika->txnid[0], (troika->fsm & 1) ? 's' : 'w', troika->txnid[1], (troika->fsm & 2) ? 's' : 'w',
          troika->txnid[2], (troika->fsm & 4) ? 's' : 'w', troika->fsm, troika->recent, recent.txnid,
          recent.is_steady ? 's' : 'w', troika->prefer_steady, prefer_steady.txnid, prefer_steady.is_steady ? 's' : 'w',
          troika->tail_and_flags % NUM_METAS, tail.txnid, tail.is_steady ? 's' : 'w', TROIKA_VALID(troika) ? 'Y' : 'N',
          TROIKA_STRICT_VALID(troika) ? 'Y' : 'N');
}
/*----------------------------------------------------------------------------*/

static int meta_unsteady(MDBX_env *env, const txnid_t inclusive_upto, const pgno_t pgno) {
  meta_t *const meta = METAPAGE(env, pgno);
  const txnid_t txnid = constmeta_txnid(meta);
  if (!meta_is_steady(meta) || txnid > inclusive_upto)
@ -236,8 +207,7 @@ static int meta_unsteady(MDBX_env *env, const txnid_t inclusive_upto,
  WARNING("wipe txn #%" PRIaTXN ", meta %" PRIaPGNO, txnid, pgno);
  const uint64_t wipe = DATASIGN_NONE;
  const void *ptr = &wipe;
  size_t bytes = sizeof(meta->sign), offset = ptr_dist(&meta->sign, env->dxb_mmap.base);
  if (env->flags & MDBX_WRITEMAP) {
    unaligned_poke_u64(4, meta->sign, wipe);
    osal_flush_incoherent_cpu_writeback();
@ -265,8 +235,7 @@ __cold int meta_wipe_steady(MDBX_env *env, txnid_t inclusive_upto) {
  if (err == MDBX_RESULT_TRUE) {
    err = MDBX_SUCCESS;
    if (!MDBX_AVOID_MSYNC && (env->flags & MDBX_WRITEMAP)) {
      err = osal_msync(&env->dxb_mmap, 0, pgno_align2os_bytes(env, NUM_METAS), MDBX_SYNC_DATA | MDBX_SYNC_IODQ);
#if MDBX_ENABLE_PGOP_STAT
      env->lck->pgops.msync.weak += 1;
#endif /* MDBX_ENABLE_PGOP_STAT */
@ -278,8 +247,7 @@ __cold int meta_wipe_steady(MDBX_env *env, txnid_t inclusive_upto) {
    }
  }

  osal_flush_incoherent_mmap(env->dxb_mmap.base, pgno2bytes(env, NUM_METAS), globals.sys_pagesize);

  /* force oldest refresh */
  atomic_store32(&env->lck->rdt_refresh_flag, true, mo_Relaxed);
@ -291,8 +259,7 @@ __cold int meta_wipe_steady(MDBX_env *env, txnid_t inclusive_upto) {
}

int meta_sync(const MDBX_env *env, const meta_ptr_t head) {
  eASSERT(env, atomic_load32(&env->lck->meta_sync_txnid, mo_Relaxed) != (uint32_t)head.txnid);

  /* This function may be called (among other cases) with (env->flags &
   * MDBX_NOMETASYNC) == 0 and env->fd4meta == env->dsync_fd, e.g. when the
   * previous transaction was committed with the MDBX_NOMETASYNC flag. */
@ -300,8 +267,7 @@ int meta_sync(const MDBX_env *env, const meta_ptr_t head) {
  int rc = MDBX_RESULT_TRUE;
  if (env->flags & MDBX_WRITEMAP) {
    if (!MDBX_AVOID_MSYNC) {
      rc = osal_msync(&env->dxb_mmap, 0, pgno_align2os_bytes(env, NUM_METAS), MDBX_SYNC_DATA | MDBX_SYNC_IODQ);
#if MDBX_ENABLE_PGOP_STAT
      env->lck->pgops.msync.weak += 1;
#endif /* MDBX_ENABLE_PGOP_STAT */
@ -310,8 +276,7 @@ int meta_sync(const MDBX_env *env, const meta_ptr_t head) {
      env->lck->pgops.wops.weak += 1;
#endif /* MDBX_ENABLE_PGOP_STAT */
      const page_t *page = data_page(head.ptr_c);
      rc = osal_pwrite(env->fd4meta, page, env->ps, ptr_dist(page, env->dxb_mmap.base));
      if (likely(rc == MDBX_SUCCESS) && env->fd4meta == env->lazy_fd) {
        rc = osal_fsync(env->lazy_fd, MDBX_SYNC_DATA | MDBX_SYNC_IODQ);
@ -332,8 +297,7 @@ int meta_sync(const MDBX_env *env, const meta_ptr_t head) {
  return rc;
}

__cold static page_t *meta_model(const MDBX_env *env, page_t *model, size_t num, const bin128_t *guid) {
  ENSURE(env, is_powerof2(env->ps));
  ENSURE(env, env->ps >= MDBX_MIN_PAGESIZE);
  ENSURE(env, env->ps <= MDBX_MAX_PAGESIZE);
@ -350,10 +314,8 @@ __cold static page_t *meta_model(const MDBX_env *env, page_t *model, size_t num,
  model_meta->geometry.lower = bytes2pgno(env, env->geo_in_bytes.lower);
  model_meta->geometry.upper = bytes2pgno(env, env->geo_in_bytes.upper);
  model_meta->geometry.grow_pv = pages2pv(bytes2pgno(env, env->geo_in_bytes.grow));
  model_meta->geometry.shrink_pv = pages2pv(bytes2pgno(env, env->geo_in_bytes.shrink));
  model_meta->geometry.now = bytes2pgno(env, env->geo_in_bytes.now);
  model_meta->geometry.first_unallocated = NUM_METAS;
@ -362,12 +324,9 @@ __cold static page_t *meta_model(const MDBX_env *env, page_t *model, size_t num,
  ENSURE(env, model_meta->geometry.now >= model_meta->geometry.lower);
  ENSURE(env, model_meta->geometry.now <= model_meta->geometry.upper);
  ENSURE(env, model_meta->geometry.first_unallocated >= MIN_PAGENO);
  ENSURE(env, model_meta->geometry.first_unallocated <= model_meta->geometry.now);
  ENSURE(env, model_meta->geometry.grow_pv == pages2pv(pv2pages(model_meta->geometry.grow_pv)));
  ENSURE(env, model_meta->geometry.shrink_pv == pages2pv(pv2pages(model_meta->geometry.shrink_pv)));

  model_meta->pagesize = env->ps;
  model_meta->trees.gc.flags = MDBX_INTEGERKEY;
@ -389,12 +348,9 @@ __cold meta_t *meta_init_triplet(const MDBX_env *env, void *buffer) {
  return page_meta(page2);
}

__cold int __must_check_result meta_override(MDBX_env *env, size_t target, txnid_t txnid, const meta_t *shape) {
  page_t *const page = env->page_auxbuf;
  meta_model(env, page, target, &((target == 0 && shape) ? shape : METAPAGE(env, 0))->dxbid);
  meta_t *const model = page_meta(page);
  meta_set_txnid(env, model, txnid);
  if (txnid)
@ -407,21 +363,18 @@ __cold int __must_check_result meta_override(MDBX_env *env, size_t target,
      return MDBX_PROBLEM;
    }
    if (globals.runtime_flags & MDBX_DBG_DONT_UPGRADE)
      memcpy(&model->magic_and_version, &shape->magic_and_version, sizeof(model->magic_and_version));
    model->reserve16 = shape->reserve16;
    model->validator_id = shape->validator_id;
    model->extra_pagehdr = shape->extra_pagehdr;
    memcpy(&model->geometry, &shape->geometry, sizeof(model->geometry));
    memcpy(&model->trees, &shape->trees, sizeof(model->trees));
    memcpy(&model->canary, &shape->canary, sizeof(model->canary));
    memcpy(&model->pages_retired, &shape->pages_retired, sizeof(model->pages_retired));
    if (txnid) {
      if ((!model->trees.gc.mod_txnid && model->trees.gc.root != P_INVALID) ||
          (!model->trees.main.mod_txnid && model->trees.main.root != P_INVALID))
        memcpy(&model->magic_and_version, &shape->magic_and_version, sizeof(model->magic_and_version));
      if (unlikely(!coherency_check_meta(env, model, false))) {
        ERROR("bailout overriding meta-%zu since model failed "
              "FreeDB/MainDB %s-check for txnid #%" PRIaTXN,
@ -452,8 +405,7 @@ __cold int __must_check_result meta_override(MDBX_env *env, size_t target,
#if MDBX_ENABLE_PGOP_STAT
    env->lck->pgops.msync.weak += 1;
#endif /* MDBX_ENABLE_PGOP_STAT */
    rc = osal_msync(&env->dxb_mmap, 0, pgno_align2os_bytes(env, model->geometry.first_unallocated),
                    MDBX_SYNC_DATA | MDBX_SYNC_IODQ);
    if (unlikely(rc != MDBX_SUCCESS))
      return rc;
@ -465,8 +417,7 @@ __cold int __must_check_result meta_override(MDBX_env *env, size_t target,
#if MDBX_ENABLE_PGOP_STAT
    env->lck->pgops.msync.weak += 1;
#endif /* MDBX_ENABLE_PGOP_STAT */
    rc = osal_msync(&env->dxb_mmap, 0, pgno_align2os_bytes(env, target + 1), MDBX_SYNC_DATA | MDBX_SYNC_IODQ);
  } else {
#if MDBX_ENABLE_PGOP_STAT
    env->lck->pgops.wops.weak += 1;
@ -478,28 +429,20 @@ __cold int __must_check_result meta_override(MDBX_env *env, size_t target,
#endif /* MDBX_ENABLE_PGOP_STAT */
      rc = osal_fsync(env->lazy_fd, MDBX_SYNC_DATA | MDBX_SYNC_IODQ);
    }
    osal_flush_incoherent_mmap(env->dxb_mmap.base, pgno2bytes(env, NUM_METAS), globals.sys_pagesize);
  }
  eASSERT(env, (!env->txn && (env->flags & ENV_ACTIVE) == 0) ||
                   (env->stuck_meta == (int)target && (env->flags & (MDBX_EXCLUSIVE | MDBX_RDONLY)) == MDBX_EXCLUSIVE));
  return rc;
}
__cold int meta_validate(MDBX_env *env, meta_t *const meta, const page_t *const page, const unsigned meta_number,
                         unsigned *guess_pagesize) {
  const uint64_t magic_and_version = unaligned_peek_u64(4, &meta->magic_and_version);
  if (unlikely(magic_and_version != MDBX_DATA_MAGIC && magic_and_version != MDBX_DATA_MAGIC_LEGACY_COMPAT &&
               magic_and_version != MDBX_DATA_MAGIC_LEGACY_DEVEL)) {
    ERROR("meta[%u] has invalid magic/version %" PRIx64, meta_number, magic_and_version);
    return ((magic_and_version >> 8) != MDBX_MAGIC) ? MDBX_INVALID : MDBX_VERSION_MISMATCH;
  }

  if (unlikely(page->pgno != meta_number)) {
@ -512,11 +455,9 @@ __cold int meta_validate(MDBX_env *env, meta_t *const meta,
    return MDBX_INVALID;
  }

  if (unlikely(!is_powerof2(meta->pagesize) || meta->pagesize < MDBX_MIN_PAGESIZE ||
               meta->pagesize > MDBX_MAX_PAGESIZE)) {
    WARNING("meta[%u] has invalid pagesize (%u), skip it", meta_number, meta->pagesize);
    return is_powerof2(meta->pagesize) ? MDBX_VERSION_MISMATCH : MDBX_INVALID;
  }
@ -535,81 +476,63 @@ __cold int meta_validate(MDBX_env *env, meta_t *const meta,
  const uint64_t sign = meta_sign_get(meta);
  const uint64_t sign_stready = meta_sign_calculate(meta);
  if (SIGN_IS_STEADY(sign) && unlikely(sign != sign_stready)) {
    WARNING("meta[%u] has invalid steady-checksum (0x%" PRIx64 " != 0x%" PRIx64 "), skip it", meta_number, sign,
            sign_stready);
    return MDBX_RESULT_TRUE;
  }

  if (unlikely(meta->trees.gc.flags != MDBX_INTEGERKEY) &&
      ((meta->trees.gc.flags & DB_PERSISTENT_FLAGS) != MDBX_INTEGERKEY || magic_and_version == MDBX_DATA_MAGIC)) {
    WARNING("meta[%u] has invalid %s flags 0x%x, skip it", meta_number, "GC/FreeDB", meta->trees.gc.flags);
    return MDBX_INCOMPATIBLE;
  }

  if (unlikely(!check_table_flags(meta->trees.main.flags))) {
    WARNING("meta[%u] has invalid %s flags 0x%x, skip it", meta_number, "MainDB", meta->trees.main.flags);
    return MDBX_INCOMPATIBLE;
  }

  DEBUG("checking meta%" PRIaPGNO " = root %" PRIaPGNO "/%" PRIaPGNO ", geo %" PRIaPGNO "/%" PRIaPGNO "-%" PRIaPGNO
        "/%" PRIaPGNO " +%u -%u, txn_id %" PRIaTXN ", %s",
        page->pgno, meta->trees.main.root, meta->trees.gc.root, meta->geometry.lower, meta->geometry.first_unallocated,
        meta->geometry.now, meta->geometry.upper, pv2pages(meta->geometry.grow_pv), pv2pages(meta->geometry.shrink_pv),
        txnid, durable_caption(meta));

  if (unlikely(txnid < MIN_TXNID || txnid > MAX_TXNID)) {
    WARNING("meta[%u] has invalid txnid %" PRIaTXN ", skip it", meta_number, txnid);
    return MDBX_RESULT_TRUE;
  }

  if (unlikely(meta->geometry.lower < MIN_PAGENO || meta->geometry.lower > MAX_PAGENO + 1)) {
    WARNING("meta[%u] has invalid min-pages (%" PRIaPGNO "), skip it", meta_number, meta->geometry.lower);
    return MDBX_INVALID;
  }

  if (unlikely(meta->geometry.upper < MIN_PAGENO || meta->geometry.upper > MAX_PAGENO + 1 ||
               meta->geometry.upper < meta->geometry.lower)) {
    WARNING("meta[%u] has invalid max-pages (%" PRIaPGNO "), skip it", meta_number, meta->geometry.upper);
    return MDBX_INVALID;
  }

  if (unlikely(meta->geometry.first_unallocated < MIN_PAGENO || meta->geometry.first_unallocated - 1 > MAX_PAGENO)) {
    WARNING("meta[%u] has invalid next-pageno (%" PRIaPGNO "), skip it", meta_number, meta->geometry.first_unallocated);
    return MDBX_CORRUPTED;
  }

  const uint64_t used_bytes = meta->geometry.first_unallocated * (uint64_t)meta->pagesize;
  if (unlikely(used_bytes > env->dxb_mmap.filesize)) {
    /* Here could be a race with DB-shrinking performed by other process */
    int err = osal_filesize(env->lazy_fd, &env->dxb_mmap.filesize);
    if (unlikely(err != MDBX_SUCCESS))
      return err;
    if (unlikely(used_bytes > env->dxb_mmap.filesize)) {
      WARNING("meta[%u] used-bytes (%" PRIu64 ") beyond filesize (%" PRIu64 "), skip it", meta_number, used_bytes,
              env->dxb_mmap.filesize);
      return MDBX_CORRUPTED;
    }
  }
  if (unlikely(meta->geometry.first_unallocated - 1 > MAX_PAGENO || used_bytes > MAX_MAPSIZE)) {
    WARNING("meta[%u] has too large used-space (%" PRIu64 "), skip it", meta_number, used_bytes);
    return MDBX_TOO_LARGE;
  }
@ -617,13 +540,10 @@ __cold int meta_validate(MDBX_env *env, meta_t *const meta,
  uint64_t mapsize_min = geo_lower * (uint64_t)meta->pagesize;
  STATIC_ASSERT(MAX_MAPSIZE < PTRDIFF_MAX - MDBX_MAX_PAGESIZE);
  STATIC_ASSERT(MIN_MAPSIZE < MAX_MAPSIZE);
  STATIC_ASSERT((uint64_t)(MAX_PAGENO + 1) * MDBX_MIN_PAGESIZE % (4ul << 20) == 0);
  if (unlikely(mapsize_min < MIN_MAPSIZE || mapsize_min > MAX_MAPSIZE)) {
    if (MAX_MAPSIZE != MAX_MAPSIZE64 && mapsize_min > MAX_MAPSIZE && mapsize_min <= MAX_MAPSIZE64) {
      eASSERT(env, meta->geometry.first_unallocated - 1 <= MAX_PAGENO && used_bytes <= MAX_MAPSIZE);
      WARNING("meta[%u] has too large min-mapsize (%" PRIu64 "), "
              "but size of used space still acceptable (%" PRIu64 ")",
              meta_number, mapsize_min, used_bytes);
@ -632,14 +552,12 @@ __cold int meta_validate(MDBX_env *env, meta_t *const meta,
        geo_lower = MAX_PAGENO + 1;
        mapsize_min = geo_lower * (uint64_t)meta->pagesize;
      }
      WARNING("meta[%u] consider get-%s pageno is %" PRIaPGNO " instead of wrong %" PRIaPGNO
              ", will be corrected on next commit(s)",
              meta_number, "lower", geo_lower, meta->geometry.lower);
      meta->geometry.lower = geo_lower;
    } else {
      WARNING("meta[%u] has invalid min-mapsize (%" PRIu64 "), skip it", meta_number, mapsize_min);
      return MDBX_VERSION_MISMATCH;
    }
  }
@ -648,17 +566,13 @@ __cold int meta_validate(MDBX_env *env, meta_t *const meta,
  uint64_t mapsize_max = geo_upper * (uint64_t)meta->pagesize;
  STATIC_ASSERT(MIN_MAPSIZE < MAX_MAPSIZE);
  if (unlikely(mapsize_max > MAX_MAPSIZE ||
               (MAX_PAGENO + 1) < ceil_powerof2((size_t)mapsize_max, globals.sys_pagesize) / (size_t)meta->pagesize)) {
    if (mapsize_max > MAX_MAPSIZE64) {
      WARNING("meta[%u] has invalid max-mapsize (%" PRIu64 "), skip it", meta_number, mapsize_max);
      return MDBX_VERSION_MISMATCH;
    }
    /* allow to open large DB from a 32-bit environment */
    eASSERT(env, meta->geometry.first_unallocated - 1 <= MAX_PAGENO && used_bytes <= MAX_MAPSIZE);
    WARNING("meta[%u] has too large max-mapsize (%" PRIu64 "), "
            "but size of used space still acceptable (%" PRIu64 ")",
            meta_number, mapsize_max, used_bytes);
@ -667,8 +581,7 @@ __cold int meta_validate(MDBX_env *env, meta_t *const meta,
      geo_upper = MAX_PAGENO + 1;
      mapsize_max = geo_upper * (uint64_t)meta->pagesize;
    }
    WARNING("meta[%u] consider get-%s pageno is %" PRIaPGNO " instead of wrong %" PRIaPGNO
            ", will be corrected on next commit(s)",
            meta_number, "upper", geo_upper, meta->geometry.upper);
    meta->geometry.upper = geo_upper;
@ -688,14 +601,12 @@ __cold int meta_validate(MDBX_env *env, meta_t *const meta,
    geo_now = geo_upper;

  if (unlikely(meta->geometry.first_unallocated > geo_now)) {
    WARNING("meta[%u] next-pageno (%" PRIaPGNO ") is beyond end-pgno (%" PRIaPGNO "), skip it", meta_number,
            meta->geometry.first_unallocated, geo_now);
    return MDBX_CORRUPTED;
  }
  if (meta->geometry.now != geo_now) {
    WARNING("meta[%u] consider geo-%s pageno is %" PRIaPGNO " instead of wrong %" PRIaPGNO
            ", will be corrected on next commit(s)",
            meta_number, "now", geo_now, meta->geometry.now);
    meta->geometry.now = geo_now;
@ -703,43 +614,36 @@ __cold int meta_validate(MDBX_env *env, meta_t *const meta,
  /* GC */
  if (meta->trees.gc.root == P_INVALID) {
    if (unlikely(meta->trees.gc.branch_pages || meta->trees.gc.height || meta->trees.gc.items ||
                 meta->trees.gc.leaf_pages || meta->trees.gc.large_pages)) {
      WARNING("meta[%u] has false-empty %s, skip it", meta_number, "GC");
      return MDBX_CORRUPTED;
    }
  } else if (unlikely(meta->trees.gc.root >= meta->geometry.first_unallocated)) {
    WARNING("meta[%u] has invalid %s-root %" PRIaPGNO ", skip it", meta_number, "GC", meta->trees.gc.root);
    return MDBX_CORRUPTED;
  }

  /* MainDB */
  if (meta->trees.main.root == P_INVALID) {
    if (unlikely(meta->trees.main.branch_pages || meta->trees.main.height || meta->trees.main.items ||
                 meta->trees.main.leaf_pages || meta->trees.main.large_pages)) {
      WARNING("meta[%u] has false-empty %s", meta_number, "MainDB");
      return MDBX_CORRUPTED;
    }
  } else if (unlikely(meta->trees.main.root >= meta->geometry.first_unallocated)) {
    WARNING("meta[%u] has invalid %s-root %" PRIaPGNO ", skip it", meta_number, "MainDB", meta->trees.main.root);
    return MDBX_CORRUPTED;
  }

  if (unlikely(meta->trees.gc.mod_txnid > txnid)) {
    WARNING("meta[%u] has wrong mod_txnid %" PRIaTXN " for %s, skip it", meta_number, meta->trees.gc.mod_txnid, "GC");
    return MDBX_CORRUPTED;
  }
  if (unlikely(meta->trees.main.mod_txnid > txnid)) {
    WARNING("meta[%u] has wrong mod_txnid %" PRIaTXN " for %s, skip it", meta_number, meta->trees.main.mod_txnid,
            "MainDB");
    return MDBX_CORRUPTED;
  }
@ -748,7 +652,5 @@ __cold int meta_validate(MDBX_env *env, meta_t *const meta,
__cold int meta_validate_copy(MDBX_env *env, const meta_t *meta, meta_t *dest) {
  *dest = *meta;
  return meta_validate(env, dest, data_page(meta), bytes2pgno(env, ptr_dist(meta, env->dxb_mmap.base)), nullptr);
}


@ -16,17 +16,11 @@ static inline uint64_t meta_sign_calculate(const meta_t *meta) {
  return (sign > DATASIGN_WEAK) ? sign : ~sign;
}

static inline uint64_t meta_sign_get(const volatile meta_t *meta) { return unaligned_peek_u64_volatile(4, meta->sign); }

static inline void meta_sign_as_steady(meta_t *meta) { unaligned_poke_u64(4, meta->sign, meta_sign_calculate(meta)); }

static inline bool meta_is_steady(const volatile meta_t *meta) { return SIGN_IS_STEADY(meta_sign_get(meta)); }

MDBX_INTERNAL troika_t meta_tap(const MDBX_env *env);
MDBX_INTERNAL unsigned meta_eq_mask(const troika_t *troika);
@ -48,14 +42,12 @@ MDBX_INTERNAL txnid_t recent_committed_txnid(const MDBX_env *env);
MDBX_INTERNAL int meta_sync(const MDBX_env *env, const meta_ptr_t head);

MDBX_INTERNAL const char *durable_caption(const meta_t *const meta);

MDBX_INTERNAL void meta_troika_dump(const MDBX_env *env, const troika_t *troika);

#define METAPAGE(env, n) page_meta(pgno2page(env, n))
#define METAPAGE_END(env) METAPAGE(env, NUM_METAS)

static inline meta_ptr_t meta_recent(const MDBX_env *env, const troika_t *troika) {
  meta_ptr_t r;
  r.txnid = troika->txnid[troika->recent];
  r.ptr_v = METAPAGE(env, troika->recent);
@ -63,8 +55,7 @@ static inline meta_ptr_t meta_recent(const MDBX_env *env,
  return r;
}

static inline meta_ptr_t meta_prefer_steady(const MDBX_env *env, const troika_t *troika) {
  meta_ptr_t r;
  r.txnid = troika->txnid[troika->prefer_steady];
  r.ptr_v = METAPAGE(env, troika->prefer_steady);
@ -72,8 +63,7 @@ static inline meta_ptr_t meta_prefer_steady(const MDBX_env *env,
  return r;
}

static inline meta_ptr_t meta_tail(const MDBX_env *env, const troika_t *troika) {
  const uint8_t tail = troika->tail_and_flags & 3;
  MDBX_ANALYSIS_ASSUME(tail < NUM_METAS);
  meta_ptr_t r;
@ -89,72 +79,53 @@ static inline bool meta_is_used(const troika_t *troika, unsigned n) {
static inline bool meta_bootid_match(const meta_t *meta) {
  return memcmp(&meta->bootid, &globals.bootid, 16) == 0 && (globals.bootid.x | globals.bootid.y) != 0;
}

static inline bool meta_weak_acceptable(const MDBX_env *env, const meta_t *meta, const int lck_exclusive) {
  return lck_exclusive
             ? /* exclusive lock */ meta_bootid_match(meta)
             : /* db already opened */ env->lck_mmap.lck && (env->lck_mmap.lck->envmode.weak & MDBX_RDONLY) == 0;
}

MDBX_NOTHROW_PURE_FUNCTION static inline txnid_t constmeta_txnid(const meta_t *meta) {
  const txnid_t a = unaligned_peek_u64(4, &meta->txnid_a);
  const txnid_t b = unaligned_peek_u64(4, &meta->txnid_b);
  return likely(a == b) ? a : 0;
}
static inline void meta_update_begin(const MDBX_env *env, meta_t *meta, txnid_t txnid) {
  eASSERT(env, meta >= METAPAGE(env, 0) && meta < METAPAGE_END(env));
  eASSERT(env, unaligned_peek_u64(4, meta->txnid_a) < txnid && unaligned_peek_u64(4, meta->txnid_b) < txnid);
  (void)env;
#if (defined(__amd64__) || defined(__e2k__)) && !defined(ENABLE_UBSAN) && MDBX_UNALIGNED_OK >= 8
  atomic_store64((mdbx_atomic_uint64_t *)&meta->txnid_b, 0, mo_AcquireRelease);
  atomic_store64((mdbx_atomic_uint64_t *)&meta->txnid_a, txnid, mo_AcquireRelease);
#else
  atomic_store32(&meta->txnid_b[__BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__], 0, mo_AcquireRelease);
  atomic_store32(&meta->txnid_b[__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__], 0, mo_AcquireRelease);
  atomic_store32(&meta->txnid_a[__BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__], (uint32_t)txnid, mo_AcquireRelease);
  atomic_store32(&meta->txnid_a[__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__], (uint32_t)(txnid >> 32), mo_AcquireRelease);
#endif
}

static inline void meta_update_end(const MDBX_env *env, meta_t *meta, txnid_t txnid) {
  eASSERT(env, meta >= METAPAGE(env, 0) && meta < METAPAGE_END(env));
  eASSERT(env, unaligned_peek_u64(4, meta->txnid_a) == txnid);
  eASSERT(env, unaligned_peek_u64(4, meta->txnid_b) < txnid);
  (void)env;
  jitter4testing(true);
  memcpy(&meta->bootid, &globals.bootid, 16);
#if (defined(__amd64__) || defined(__e2k__)) && !defined(ENABLE_UBSAN) && MDBX_UNALIGNED_OK >= 8
  atomic_store64((mdbx_atomic_uint64_t *)&meta->txnid_b, txnid, mo_AcquireRelease);
#else
  atomic_store32(&meta->txnid_b[__BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__], (uint32_t)txnid, mo_AcquireRelease);
  atomic_store32(&meta->txnid_b[__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__], (uint32_t)(txnid >> 32), mo_AcquireRelease);
#endif
}

static inline void meta_set_txnid(const MDBX_env *env, meta_t *meta, const txnid_t txnid) {
  eASSERT(env, !env->dxb_mmap.base || meta < METAPAGE(env, 0) || meta >= METAPAGE_END(env));
  (void)env;
  /* update inconsistently since this function used ONLY for filling meta-image
   * for writing, but not the actual meta-page */
@ -167,42 +138,31 @@ static inline uint8_t meta_cmp2int(txnid_t a, txnid_t b, uint8_t s) {
  return unlikely(a == b) ? 1 * s : (a > b) ? 2 * s : 0 * s;
}

static inline uint8_t meta_cmp2recent(uint8_t ab_cmp2int, bool a_steady, bool b_steady) {
  assert(ab_cmp2int < 3 /* && a_steady< 2 && b_steady < 2 */);
  return ab_cmp2int > 1 || (ab_cmp2int == 1 && a_steady > b_steady);
}

static inline uint8_t meta_cmp2steady(uint8_t ab_cmp2int, bool a_steady, bool b_steady) {
  assert(ab_cmp2int < 3 /* && a_steady< 2 && b_steady < 2 */);
  return a_steady > b_steady || (a_steady == b_steady && ab_cmp2int > 1);
}

static inline bool meta_choice_recent(txnid_t a_txnid, bool a_steady, txnid_t b_txnid, bool b_steady) {
  return meta_cmp2recent(meta_cmp2int(a_txnid, b_txnid, 1), a_steady, b_steady);
}

static inline bool meta_choice_steady(txnid_t a_txnid, bool a_steady, txnid_t b_txnid, bool b_steady) {
  return meta_cmp2steady(meta_cmp2int(a_txnid, b_txnid, 1), a_steady, b_steady);
}

MDBX_INTERNAL meta_t *meta_init_triplet(const MDBX_env *env, void *buffer);

MDBX_INTERNAL int meta_validate(MDBX_env *env, meta_t *const meta, const page_t *const page, const unsigned meta_number,
                                unsigned *guess_pagesize);

MDBX_INTERNAL int __must_check_result meta_validate_copy(MDBX_env *env, const meta_t *meta, meta_t *dest);

MDBX_INTERNAL int __must_check_result meta_override(MDBX_env *env, size_t target, txnid_t txnid, const meta_t *shape);

MDBX_INTERNAL int meta_wipe_steady(MDBX_env *env, txnid_t inclusive_upto);
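For illustration, a tiny stand-alone program (not part of the diff) that mirrors the selection logic of the cmp2int/cmp2recent helpers above: the larger txnid wins, and a steady meta wins a tie. The names and values below are hypothetical; only the arithmetic is taken from the code.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t txnid_t;

/* same 0/1/2 encoding as meta_cmp2int() above */
static inline uint8_t cmp2int(txnid_t a, txnid_t b, uint8_t s) { return (a == b) ? 1 * s : (a > b) ? 2 * s : 0 * s; }

/* same decision as meta_cmp2recent()/meta_choice_recent() above */
static inline bool choice_recent(txnid_t a, bool a_steady, txnid_t b, bool b_steady) {
  const uint8_t c = cmp2int(a, b, 1);
  return c > 1 || (c == 1 && a_steady > b_steady);
}

int main(void) {
  assert(choice_recent(42, false, 41, true)); /* the newer txnid wins regardless of steadiness */
  assert(choice_recent(42, true, 42, false)); /* equal txnids: the steady meta wins the tie */
  return 0;
}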

View File

@ -14,11 +14,9 @@ __cold int mdbx_is_readahead_reasonable(size_t volume, intptr_t redundancy) {
  const int log2page = log2n_powerof2(pagesize);
  const intptr_t volume_pages = (volume + pagesize - 1) >> log2page;
  const intptr_t redundancy_pages = (redundancy < 0) ? -(intptr_t)((-redundancy + pagesize - 1) >> log2page)
                                                     : (intptr_t)(redundancy + pagesize - 1) >> log2page;
  if (volume_pages >= total_ram_pages || volume_pages + redundancy_pages >= total_ram_pages)
    return MDBX_RESULT_FALSE;

  intptr_t avail_ram_pages;
@ -26,13 +24,10 @@ __cold int mdbx_is_readahead_reasonable(size_t volume, intptr_t redundancy) {
  if (unlikely(err != MDBX_SUCCESS))
    return LOG_IFERR(err);

  return (volume_pages + redundancy_pages >= avail_ram_pages) ? MDBX_RESULT_FALSE : MDBX_RESULT_TRUE;
}

int mdbx_dbi_sequence(MDBX_txn *txn, MDBX_dbi dbi, uint64_t *result, uint64_t increment) {
  int rc = check_txn(txn, MDBX_TXN_BLOCKED);
  if (unlikely(rc != MDBX_SUCCESS)) {
  bailout:
@ -111,30 +106,23 @@ int mdbx_dbi_sequence(MDBX_txn *txn, MDBX_dbi dbi, uint64_t *result,
  return MDBX_SUCCESS;
}

int mdbx_cmp(const MDBX_txn *txn, MDBX_dbi dbi, const MDBX_val *a, const MDBX_val *b) {
  eASSERT(nullptr, txn->signature == txn_signature);
  tASSERT(txn, (dbi_state(txn, dbi) & DBI_VALID) && !dbi_changed(txn, dbi));
  tASSERT(txn, dbi < txn->env->n_dbi && (txn->env->dbs_flags[dbi] & DB_VALID) != 0);
  return txn->env->kvs[dbi].clc.k.cmp(a, b);
}

int mdbx_dcmp(const MDBX_txn *txn, MDBX_dbi dbi, const MDBX_val *a, const MDBX_val *b) {
  eASSERT(nullptr, txn->signature == txn_signature);
  tASSERT(txn, (dbi_state(txn, dbi) & DBI_VALID) && !dbi_changed(txn, dbi));
  tASSERT(txn, dbi < txn->env->n_dbi && (txn->env->dbs_flags[dbi] & DB_VALID));
  return txn->env->kvs[dbi].clc.v.cmp(a, b);
}

__cold MDBX_cmp_func *mdbx_get_keycmp(MDBX_db_flags_t flags) { return builtin_keycmp(flags); }

__cold MDBX_cmp_func *mdbx_get_datacmp(MDBX_db_flags_t flags) { return builtin_datacmp(flags); }

/*----------------------------------------------------------------------------*/
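For illustration, a hypothetical usage sketch of the public comparison entry points above. The table name "example" is made up, the environment is assumed to be created and opened elsewhere with a non-zero maxdbs, and error handling is reduced to early returns.

#include "mdbx.h"
#include <stddef.h>

/* Compare two candidate keys using the key ordering of a (hypothetical) named table. */
static int compare_with_db_order(MDBX_env *env, const MDBX_val *a, const MDBX_val *b, int *sign) {
  MDBX_txn *txn = NULL;
  MDBX_dbi dbi;
  int rc = mdbx_txn_begin(env, NULL, MDBX_TXN_RDONLY, &txn);
  if (rc != MDBX_SUCCESS)
    return rc;
  rc = mdbx_dbi_open(txn, "example", MDBX_DB_DEFAULTS, &dbi);
  if (rc == MDBX_SUCCESS)
    *sign = mdbx_cmp(txn, dbi, a, b); /* <0, 0 or >0, memcmp-style */
  mdbx_txn_abort(txn);
  return rc;
}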
@ -227,10 +215,8 @@ __cold const char *mdbx_strerror_r(int errnum, char *buf, size_t buflen) {
  const char *msg = mdbx_liberr2str(errnum);
  if (!msg && buflen > 0 && buflen < INT_MAX) {
#if defined(_WIN32) || defined(_WIN64)
    DWORD size = FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, nullptr, errnum,
                                MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), buf, (DWORD)buflen, nullptr);
    while (size && buf[size - 1] <= ' ')
      --size;
    buf[size] = 0;
@ -284,10 +270,8 @@ __cold const char *mdbx_strerror(int errnum) {
const char *mdbx_strerror_r_ANSI2OEM(int errnum, char *buf, size_t buflen) {
  const char *msg = mdbx_liberr2str(errnum);
  if (!msg && buflen > 0 && buflen < INT_MAX) {
    DWORD size = FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, nullptr, errnum,
                                MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), buf, (DWORD)buflen, nullptr);
    while (size && buf[size - 1] <= ' ')
      --size;
    buf[size] = 0;

View File

@ -45,8 +45,7 @@ bsr_t mvcc_bind_slot(MDBX_env *env) {
      result.err = mvcc_cleanup_dead(env, true, nullptr);
      if (result.err != MDBX_RESULT_TRUE) {
        lck_rdt_unlock(env);
        result.err = (result.err == MDBX_SUCCESS) ? MDBX_READERS_FULL : result.err;
        return result;
      }
    }
@ -61,8 +60,7 @@ bsr_t mvcc_bind_slot(MDBX_env *env) {
  safe64_reset(&result.rslot->txnid, true);
  if (slot == nreaders)
    env->lck->rdt_length.weak = (uint32_t)++nreaders;
  result.rslot->tid.weak = (env->flags & MDBX_NOSTICKYTHREADS) ? 0 : osal_thread_self();
  atomic_store32(&result.rslot->pid, env->pid, mo_AcquireRelease);

  lck_rdt_unlock(env);
@ -84,17 +82,14 @@ __hot txnid_t mvcc_shapshot_oldest(MDBX_env *const env, const txnid_t steady) {
    return env->lck->cached_oldest.weak = steady;
  }

  const txnid_t prev_oldest = atomic_load64(&lck->cached_oldest, mo_AcquireRelease);
  eASSERT(env, steady >= prev_oldest);

  txnid_t new_oldest = prev_oldest;
  while (nothing_changed != atomic_load32(&lck->rdt_refresh_flag, mo_AcquireRelease)) {
    lck->rdt_refresh_flag.weak = nothing_changed;
    jitter4testing(false);
    const size_t snap_nreaders = atomic_load32(&lck->rdt_length, mo_AcquireRelease);
    new_oldest = steady;

    for (size_t i = 0; i < snap_nreaders; ++i) {
@ -105,11 +100,9 @@ __hot txnid_t mvcc_shapshot_oldest(MDBX_env *const env, const txnid_t steady) {
      const txnid_t rtxn = safe64_read(&lck->rdt[i].txnid);
      if (unlikely(rtxn < prev_oldest)) {
        if (unlikely(nothing_changed == atomic_load32(&lck->rdt_refresh_flag, mo_AcquireRelease)) &&
            safe64_reset_compare(&lck->rdt[i].txnid, rtxn)) {
          NOTICE("kick stuck reader[%zu of %zu].pid_%u %" PRIaTXN " < prev-oldest %" PRIaTXN ", steady-txn %" PRIaTXN,
                 i, snap_nreaders, pid, rtxn, prev_oldest, steady);
        }
        continue;
@ -135,17 +128,13 @@ pgno_t mvcc_snapshot_largest(const MDBX_env *env, pgno_t last_used_page) {
  lck_t *const lck = env->lck_mmap.lck;
  if (likely(lck != nullptr /* check for exclusive without-lck mode */)) {
  retry:;
    const size_t snap_nreaders = atomic_load32(&lck->rdt_length, mo_AcquireRelease);
    for (size_t i = 0; i < snap_nreaders; ++i) {
      if (atomic_load32(&lck->rdt[i].pid, mo_AcquireRelease)) {
        /* jitter4testing(true); */
        const pgno_t snap_pages = atomic_load32(&lck->rdt[i].snapshot_pages_used, mo_Relaxed);
        const txnid_t snap_txnid = safe64_read(&lck->rdt[i].txnid);
        if (unlikely(snap_pages != atomic_load32(&lck->rdt[i].snapshot_pages_used, mo_AcquireRelease) ||
                     snap_txnid != safe64_read(&lck->rdt[i].txnid)))
          goto retry;
        if (last_used_page < snap_pages && snap_txnid <= env->basal_txn->txnid)
@ -161,18 +150,14 @@ pgno_t mvcc_snapshot_largest(const MDBX_env *env, pgno_t last_used_page) {
pgno_t mvcc_largest_this(MDBX_env *env, pgno_t largest) {
  lck_t *const lck = env->lck_mmap.lck;
  if (likely(lck != nullptr /* exclusive mode */)) {
    const size_t snap_nreaders = atomic_load32(&lck->rdt_length, mo_AcquireRelease);
    for (size_t i = 0; i < snap_nreaders; ++i) {
    retry:
      if (atomic_load32(&lck->rdt[i].pid, mo_AcquireRelease) == env->pid) {
        /* jitter4testing(true); */
        const pgno_t snap_pages = atomic_load32(&lck->rdt[i].snapshot_pages_used, mo_Relaxed);
        const txnid_t snap_txnid = safe64_read(&lck->rdt[i].txnid);
        if (unlikely(snap_pages != atomic_load32(&lck->rdt[i].snapshot_pages_used, mo_AcquireRelease) ||
                     snap_txnid != safe64_read(&lck->rdt[i].txnid)))
          goto retry;
        if (largest < snap_pages &&
@ -219,8 +204,7 @@ static bool pid_insert(uint32_t *list, uint32_t pid) {
  return true;
}

__cold MDBX_INTERNAL int mvcc_cleanup_dead(MDBX_env *env, int rdt_locked, int *dead) {
  int rc = check_env(env, true);
  if (unlikely(rc != MDBX_SUCCESS))
    return rc;
@ -234,13 +218,11 @@ __cold MDBX_INTERNAL int mvcc_cleanup_dead(MDBX_env *env, int rdt_locked,
    return MDBX_SUCCESS;
  }

  const size_t snap_nreaders = atomic_load32(&lck->rdt_length, mo_AcquireRelease);
  uint32_t pidsbuf_onstask[142];
  uint32_t *const pids = (snap_nreaders < ARRAY_LENGTH(pidsbuf_onstask))
                             ? pidsbuf_onstask
                             : osal_malloc((snap_nreaders + 1) * sizeof(uint32_t));
  if (unlikely(!pids))
    return MDBX_ENOMEM;
@ -296,8 +278,7 @@ __cold MDBX_INTERNAL int mvcc_cleanup_dead(MDBX_env *env, int rdt_locked,
        /* clean it */
        for (size_t ii = i; ii < snap_nreaders; ii++) {
          if (lck->rdt[ii].pid.weak == pid) {
            DEBUG("clear stale reader pid %" PRIuPTR " txn %" PRIaTXN, (size_t)pid, lck->rdt[ii].txnid.weak);
            atomic_store32(&lck->rdt[ii].pid, 0, mo_Relaxed);
            atomic_store32(&lck->rdt_refresh_flag, true, mo_AcquireRelease);
            count++;
@ -321,11 +302,9 @@ __cold MDBX_INTERNAL int mvcc_cleanup_dead(MDBX_env *env, int rdt_locked,
int txn_park(MDBX_txn *txn, bool autounpark) {
  reader_slot_t *const rslot = txn->to.reader;
  tASSERT(txn, (txn->flags & (MDBX_TXN_FINISHED | MDBX_TXN_RDONLY | MDBX_TXN_PARKED)) == MDBX_TXN_RDONLY);
  tASSERT(txn, txn->to.reader->tid.weak < MDBX_TID_TXN_OUSTED);
  if (unlikely((txn->flags & (MDBX_TXN_FINISHED | MDBX_TXN_RDONLY | MDBX_TXN_PARKED)) != MDBX_TXN_RDONLY))
    return MDBX_BAD_TXN;

  const uint32_t pid = atomic_load32(&rslot->pid, mo_Relaxed);
@ -344,14 +323,12 @@ int txn_park(MDBX_txn *txn, bool autounpark) {
  atomic_store64(&rslot->tid, MDBX_TID_TXN_PARKED, mo_AcquireRelease);
  atomic_store32(&txn->env->lck->rdt_refresh_flag, true, mo_Relaxed);
  txn->flags += autounpark ? MDBX_TXN_PARKED | MDBX_TXN_AUTOUNPARK : MDBX_TXN_PARKED;
  return MDBX_SUCCESS;
}

int txn_unpark(MDBX_txn *txn) {
  if (unlikely((txn->flags & (MDBX_TXN_FINISHED | MDBX_TXN_HAS_CHILD | MDBX_TXN_RDONLY | MDBX_TXN_PARKED)) !=
               (MDBX_TXN_RDONLY | MDBX_TXN_PARKED)))
    return MDBX_BAD_TXN;
@ -363,14 +340,11 @@ int txn_unpark(MDBX_txn *txn) {
ERROR("unexpected pid %u%s%u", pid, " != expected ", txn->env->pid); ERROR("unexpected pid %u%s%u", pid, " != expected ", txn->env->pid);
return MDBX_PROBLEM; return MDBX_PROBLEM;
} }
if (unlikely(tid == MDBX_TID_TXN_OUSTED || if (unlikely(tid == MDBX_TID_TXN_OUSTED || txnid >= SAFE64_INVALID_THRESHOLD))
txnid >= SAFE64_INVALID_THRESHOLD))
break; break;
if (unlikely(tid != MDBX_TID_TXN_PARKED || txnid != txn->txnid)) { if (unlikely(tid != MDBX_TID_TXN_PARKED || txnid != txn->txnid)) {
ERROR("unexpected thread-id 0x%" PRIx64 "%s0x%" PRIx64 ERROR("unexpected thread-id 0x%" PRIx64 "%s0x%" PRIx64 " and/or txn-id %" PRIaTXN "%s%" PRIaTXN, tid, " != must ",
" and/or txn-id %" PRIaTXN "%s%" PRIaTXN, MDBX_TID_TXN_OUSTED, txnid, " != must ", txn->txnid);
tid, " != must ", MDBX_TID_TXN_OUSTED, txnid, " != must ",
txn->txnid);
break; break;
} }
if (unlikely((txn->flags & MDBX_TXN_ERROR))) if (unlikely((txn->flags & MDBX_TXN_ERROR)))
@ -380,12 +354,9 @@ int txn_unpark(MDBX_txn *txn) {
    if (unlikely(!atomic_cas64(&rslot->tid, MDBX_TID_TXN_PARKED, txn->owner)))
      continue;
#else
    atomic_store32(&rslot->tid.high, (uint32_t)((uint64_t)txn->owner >> 32), mo_Relaxed);
    if (unlikely(!atomic_cas32(&rslot->tid.low, (uint32_t)MDBX_TID_TXN_PARKED, (uint32_t)txn->owner))) {
      atomic_store32(&rslot->tid.high, (uint32_t)(MDBX_TID_TXN_PARKED >> 32), mo_AcquireRelease);
      continue;
    }
#endif
@ -413,8 +384,7 @@ __cold txnid_t mvcc_kick_laggards(MDBX_env *env, const txnid_t straggler) {
  bool notify_eof_of_loop = false;
  int retry = 0;
  do {
    const txnid_t steady = env->txn->tw.troika.txnid[env->txn->tw.troika.prefer_steady];
    env->lck->rdt_refresh_flag.weak = /* force refresh */ true;
    oldest = mvcc_shapshot_oldest(env, steady);
    eASSERT(env, oldest < env->basal_txn->txnid);
@ -435,8 +405,7 @@ __cold txnid_t mvcc_kick_laggards(MDBX_env *env, const txnid_t straggler) {
      reader_slot_t *const rslot = &lck->rdt[i];
      txnid_t rtxn = safe64_read(&rslot->txnid);
    retry:
      if (rtxn == straggler && (pid = atomic_load32(&rslot->pid, mo_AcquireRelease)) != 0) {
        const uint64_t tid = safe64_read(&rslot->tid);
        if (tid == MDBX_TID_TXN_PARKED) {
          /* The reading transaction was marked by its owner as "parked",
@ -454,25 +423,21 @@ __cold txnid_t mvcc_kick_laggards(MDBX_env *env, const txnid_t straggler) {
           */
          bool ousted =
#if MDBX_64BIT_CAS
              atomic_cas64(&rslot->tid, MDBX_TID_TXN_PARKED, MDBX_TID_TXN_OUSTED);
#else
              atomic_cas32(&rslot->tid.low, (uint32_t)MDBX_TID_TXN_PARKED, (uint32_t)MDBX_TID_TXN_OUSTED);
#endif
          if (likely(ousted)) {
            ousted = safe64_reset_compare(&rslot->txnid, rtxn);
            NOTICE("ousted-%s parked read-txn %" PRIaTXN ", pid %u, tid 0x%" PRIx64, ousted ? "complete" : "half", rtxn,
                   pid, tid);
            eASSERT(env, ousted || safe64_read(&rslot->txnid) > straggler);
            continue;
          }
          rtxn = safe64_read(&rslot->txnid);
          goto retry;
        }
        hold_retired = atomic_load64(&lck->rdt[i].snapshot_pages_retired, mo_Relaxed);
        stucked = rslot;
      }
    }
@ -487,15 +452,10 @@ __cold txnid_t mvcc_kick_laggards(MDBX_env *env, const txnid_t straggler) {
    const meta_ptr_t head = meta_recent(env, &env->txn->tw.troika);
    const txnid_t gap = (head.txnid - straggler) / xMDBX_TXNID_STEP;
    const uint64_t head_retired = unaligned_peek_u64(4, head.ptr_c->pages_retired);
    const size_t space = (head_retired > hold_retired) ? pgno2bytes(env, (pgno_t)(head_retired - hold_retired)) : 0;
    int rc = callback(env, env->txn, pid, (mdbx_tid_t)((intptr_t)tid), straggler,
                      (gap < UINT_MAX) ? (unsigned)gap : UINT_MAX, space, retry);
    if (rc < 0)
      /* hsr returned error and/or agree MDBX_MAP_FULL error */
      break;
@ -523,10 +483,8 @@ __cold txnid_t mvcc_kick_laggards(MDBX_env *env, const txnid_t straggler) {
    /* notify end of hsr-loop */
    const txnid_t turn = oldest - straggler;
    if (turn)
      NOTICE("hsr-kick: done turn %" PRIaTXN " -> %" PRIaTXN " +%" PRIaTXN, straggler, oldest, turn);
    callback(env, env->txn, 0, 0, straggler, (turn < UINT_MAX) ? (unsigned)turn : UINT_MAX, 0, -retry);
  }
  return oldest;
}
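The loop above drives the user-supplied Handle-Slow-Readers callback. Below is a hedged sketch of such a callback; the parameter list mirrors the call site above (see the MDBX_hsr_func description in mdbx.h for the authoritative contract), while the example_ name, the logging, and the give-up threshold are made up.

#include "mdbx.h"
#include <inttypes.h>
#include <stdio.h>

/* Returning a negative value makes the kick-loop above stop (the writer then
 * gets MDBX_MAP_FULL); a non-negative value lets it keep retrying.
 * Register the callback with mdbx_env_set_hsr(). */
static int example_hsr(const MDBX_env *env, const MDBX_txn *txn, mdbx_pid_t pid, mdbx_tid_t tid, uint64_t laggard,
                       unsigned gap, size_t space, int retry) {
  (void)env;
  (void)txn;
  (void)tid;
  if (retry < 0) /* the final "end of hsr-loop" notification seen above */
    return 0;
  fprintf(stderr, "slow reader: pid %u holds txn %" PRIu64 " (gap %u, ~%zu retired bytes), retry %d\n", (unsigned)pid,
          laggard, gap, space, retry);
  return (retry < 3) ? 0 /* wait and retry */ : -1 /* give up */;
}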

View File

@ -5,15 +5,13 @@
#include "internals.h" #include "internals.h"
__hot int __must_check_result node_add_dupfix(MDBX_cursor *mc, size_t indx, __hot int __must_check_result node_add_dupfix(MDBX_cursor *mc, size_t indx, const MDBX_val *key) {
const MDBX_val *key) {
page_t *mp = mc->pg[mc->top]; page_t *mp = mc->pg[mc->top];
MDBX_ANALYSIS_ASSUME(key != nullptr); MDBX_ANALYSIS_ASSUME(key != nullptr);
DKBUF_DEBUG; DKBUF_DEBUG;
DEBUG("add to leaf2-%spage %" PRIaPGNO " index %zi, " DEBUG("add to leaf2-%spage %" PRIaPGNO " index %zi, "
" key size %" PRIuPTR " [%s]", " key size %" PRIuPTR " [%s]",
is_subpage(mp) ? "sub-" : "", mp->pgno, indx, key ? key->iov_len : 0, is_subpage(mp) ? "sub-" : "", mp->pgno, indx, key ? key->iov_len : 0, DKEY_DEBUG(key));
DKEY_DEBUG(key));
cASSERT(mc, key); cASSERT(mc, key);
cASSERT(mc, page_type_compat(mp) == (P_LEAF | P_DUPFIX)); cASSERT(mc, page_type_compat(mp) == (P_LEAF | P_DUPFIX));
@ -45,14 +43,11 @@ __hot int __must_check_result node_add_dupfix(MDBX_cursor *mc, size_t indx,
  return MDBX_SUCCESS;
}

int __must_check_result node_add_branch(MDBX_cursor *mc, size_t indx, const MDBX_val *key, pgno_t pgno) {
  page_t *mp = mc->pg[mc->top];
  DKBUF_DEBUG;
  DEBUG("add to branch-%spage %" PRIaPGNO " index %zi, node-pgno %" PRIaPGNO " key size %" PRIuPTR " [%s]",
        is_subpage(mp) ? "sub-" : "", mp->pgno, indx, pgno, key ? key->iov_len : 0, DKEY_DEBUG(key));

  cASSERT(mc, page_type(mp) == P_BRANCH);
  STATIC_ASSERT(NODESIZE % 2 == 0);
@ -87,17 +82,15 @@ int __must_check_result node_add_branch(MDBX_cursor *mc, size_t indx,
  return MDBX_SUCCESS;
}

__hot int __must_check_result node_add_leaf(MDBX_cursor *mc, size_t indx, const MDBX_val *key, MDBX_val *data,
                                            unsigned flags) {
  MDBX_ANALYSIS_ASSUME(key != nullptr);
  MDBX_ANALYSIS_ASSUME(data != nullptr);
  page_t *mp = mc->pg[mc->top];
  DKBUF_DEBUG;
  DEBUG("add to leaf-%spage %" PRIaPGNO " index %zi, data size %" PRIuPTR " key size %" PRIuPTR " [%s]",
        is_subpage(mp) ? "sub-" : "", mp->pgno, indx, data ? data->iov_len : 0, key ? key->iov_len : 0,
        DKEY_DEBUG(key));
  cASSERT(mc, key != nullptr && data != nullptr);
  cASSERT(mc, page_type_compat(mp) == P_LEAF);
  page_t *largepage = nullptr;
@ -106,19 +99,16 @@ __hot int __must_check_result node_add_leaf(MDBX_cursor *mc, size_t indx,
  if (unlikely(flags & N_BIG)) {
    /* Data already on large/overflow page. */
    STATIC_ASSERT(sizeof(pgno_t) % 2 == 0);
    node_bytes = node_size_len(key->iov_len, 0) + sizeof(pgno_t) + sizeof(indx_t);
    cASSERT(mc, page_room(mp) >= node_bytes);
  } else if (unlikely(node_size(key, data) > mc->txn->env->leaf_nodemax)) {
    /* Put data on large/overflow page. */
    if (unlikely(mc->tree->flags & MDBX_DUPSORT)) {
      ERROR("Unexpected target %s flags 0x%x for large data-item", "dupsort-db", mc->tree->flags);
      return MDBX_PROBLEM;
    }
    if (unlikely(flags & (N_DUP | N_TREE))) {
      ERROR("Unexpected target %s flags 0x%x for large data-item", "node", flags);
      return MDBX_PROBLEM;
    }
    cASSERT(mc, page_room(mp) >= leaf_size(mc->txn->env, key, data));
@ -127,12 +117,10 @@ __hot int __must_check_result node_add_leaf(MDBX_cursor *mc, size_t indx,
    if (unlikely(npr.err != MDBX_SUCCESS))
      return npr.err;
    largepage = npr.page;
    DEBUG("allocated %u large/overflow page(s) %" PRIaPGNO "for %" PRIuPTR " data bytes", largepage->pages,
          largepage->pgno, data->iov_len);
    flags |= N_BIG;
    node_bytes = node_size_len(key->iov_len, 0) + sizeof(pgno_t) + sizeof(indx_t);
    cASSERT(mc, node_bytes == leaf_size(mc->txn->env, key, data));
  } else {
    cASSERT(mc, page_room(mp) >= leaf_size(mc->txn->env, key, data));
@ -186,8 +174,7 @@ __hot void node_del(MDBX_cursor *mc, size_t ksize) {
  const size_t hole = mc->ki[mc->top];
  const size_t nkeys = page_numkeys(mp);

  DEBUG("delete node %zu on %s page %" PRIaPGNO, hole, is_leaf(mp) ? "leaf" : "branch", mp->pgno);
  cASSERT(mc, hole < nkeys);

  if (is_dupfix_leaf(mp)) {
@ -215,9 +202,7 @@ __hot void node_del(MDBX_cursor *mc, size_t ksize) {
    size_t r, w;
    for (r = w = 0; r < nkeys; r++)
      if (r != hole)
        mp->entries[w++] = (mp->entries[r] < hole_offset) ? mp->entries[r] + (indx_t)hole_size : mp->entries[r];

    void *const base = ptr_disp(mp, mp->upper + PAGEHDRSZ);
    memmove(ptr_disp(base, hole_size), base, hole_offset - mp->upper);
@ -236,14 +221,12 @@ __hot void node_del(MDBX_cursor *mc, size_t ksize) {
  }
}

__noinline int node_read_bigdata(MDBX_cursor *mc, const node_t *node, MDBX_val *data, const page_t *mp) {
  cASSERT(mc, node_flags(node) == N_BIG && data->iov_len == node_ds(node));

  pgr_t lp = page_get_large(mc, node_largedata_pgno(node), mp->txnid);
  if (unlikely((lp.err != MDBX_SUCCESS))) {
    DEBUG("read large/overflow page %" PRIaPGNO " failed", node_largedata_pgno(node));
    return lp.err;
  }
@ -254,9 +237,7 @@ __noinline int node_read_bigdata(MDBX_cursor *mc, const node_t *node,
    const size_t dsize = data->iov_len;
    const unsigned npages = largechunk_npages(env, dsize);
    if (unlikely(lp.page->pages < npages))
      return bad_page(lp.page, "too less n-pages %u for bigdata-node (%zu bytes)", lp.page->pages, dsize);
  }
  return MDBX_SUCCESS;
}
@ -265,8 +246,7 @@ node_t *node_shrink(page_t *mp, size_t indx, node_t *node) {
  assert(node == page_node(mp, indx));
  page_t *sp = (page_t *)node_data(node);
  assert(is_subpage(sp) && page_numkeys(sp) > 0);
  const size_t delta = EVEN_FLOOR(page_room(sp) /* avoid the node uneven-sized */);
  if (unlikely(delta) == 0)
    return node;
@ -303,15 +283,13 @@ node_t *node_shrink(page_t *mp, size_t indx, node_t *node) {
  return ptr_disp(node, delta);
}

__hot struct node_search_result node_search(MDBX_cursor *mc, const MDBX_val *key) {
  page_t *mp = mc->pg[mc->top];
  const intptr_t nkeys = page_numkeys(mp);
  DKBUF_DEBUG;

  DEBUG("searching %zu keys in %s %spage %" PRIaPGNO, nkeys, is_leaf(mp) ? "leaf" : "branch",
        is_subpage(mp) ? "sub-" : "", mp->pgno);

  struct node_search_result ret;
  ret.exact = false;
@ -333,8 +311,7 @@ __hot struct node_search_result node_search(MDBX_cursor *mc,
    do {
      i = (low + high) >> 1;
      nodekey.iov_base = page_dupfix_ptr(mp, i, nodekey.iov_len);
      cASSERT(mc, ptr_disp(mp, mc->txn->env->ps) >= ptr_disp(nodekey.iov_base, nodekey.iov_len));
      int cr = cmp(key, &nodekey);
      DEBUG("found leaf index %zu [%s], rc = %i", i, DKEY_DEBUG(&nodekey), cr);
      if (cr > 0)
@ -349,10 +326,8 @@ __hot struct node_search_result node_search(MDBX_cursor *mc,
    /* store the key index */
    mc->ki[mc->top] = (indx_t)i;
    ret.node = (i < nkeys) ? /* fake for DUPFIX */ (node_t *)(intptr_t)-1
                           : /* There is no entry larger or equal to the key. */ nullptr;
    return ret;
  }
@ -367,14 +342,12 @@ __hot struct node_search_result node_search(MDBX_cursor *mc,
    node = page_node(mp, i);
    nodekey.iov_len = node_ks(node);
    nodekey.iov_base = node_key(node);
    cASSERT(mc, ptr_disp(mp, mc->txn->env->ps) >= ptr_disp(nodekey.iov_base, nodekey.iov_len));

    int cr = cmp(key, &nodekey);
    if (is_leaf(mp))
      DEBUG("found leaf index %zu [%s], rc = %i", i, DKEY_DEBUG(&nodekey), cr);
    else
      DEBUG("found branch index %zu [%s -> %" PRIaPGNO "], rc = %i", i, DKEY_DEBUG(&nodekey), node_pgno(node), cr);
    if (cr > 0)
      low = ++i;
    else if (cr < 0)
@ -387,8 +360,6 @@ __hot struct node_search_result node_search(MDBX_cursor *mc,
  /* store the key index */
  mc->ki[mc->top] = (indx_t)i;
  ret.node = (i < nkeys) ? page_node(mp, i) : /* There is no entry larger or equal to the key. */ nullptr;
  return ret;
}

View File

@ -9,8 +9,7 @@
#define NODE_ADD_FLAGS (N_DUP | N_TREE | MDBX_RESERVE | MDBX_APPEND)

/* Get the page number pointed to by a branch node */
MDBX_NOTHROW_PURE_FUNCTION static inline pgno_t node_pgno(const node_t *const __restrict node) {
  pgno_t pgno = UNALIGNED_PEEK_32(node, node_t, child_pgno);
  return pgno;
}
@ -23,8 +22,7 @@ static inline void node_set_pgno(node_t *const __restrict node, pgno_t pgno) {
}

/* Get the size of the data in a leaf node */
MDBX_NOTHROW_PURE_FUNCTION static inline size_t node_ds(const node_t *const __restrict node) {
  return UNALIGNED_PEEK_32(node, node_t, dsize);
}
@ -35,8 +33,7 @@ static inline void node_set_ds(node_t *const __restrict node, size_t size) {
}

/* The size of a key in a node */
MDBX_NOTHROW_PURE_FUNCTION static inline size_t node_ks(const node_t *const __restrict node) {
  return UNALIGNED_PEEK_16(node, node_t, ksize);
}
@ -46,54 +43,42 @@ static inline void node_set_ks(node_t *const __restrict node, size_t size) {
  UNALIGNED_POKE_16(node, node_t, ksize, (uint16_t)size);
}

MDBX_NOTHROW_PURE_FUNCTION static inline uint8_t node_flags(const node_t *const __restrict node) {
  return UNALIGNED_PEEK_8(node, node_t, flags);
}

static inline void node_set_flags(node_t *const __restrict node, uint8_t flags) {
  UNALIGNED_POKE_8(node, node_t, flags, flags);
}

/* Address of the key for the node */
MDBX_NOTHROW_PURE_FUNCTION static inline void *node_key(const node_t *const __restrict node) {
  return ptr_disp(node, NODESIZE);
}

/* Address of the data for a node */
MDBX_NOTHROW_PURE_FUNCTION static inline void *node_data(const node_t *const __restrict node) {
  return ptr_disp(node_key(node), node_ks(node));
}

/* Size of a node in a leaf page with a given key and data.
 * This is node header plus key plus data size. */
MDBX_NOTHROW_CONST_FUNCTION static inline size_t node_size_len(const size_t key_len, const size_t value_len) {
  return NODESIZE + EVEN_CEIL(key_len + value_len);
}

MDBX_NOTHROW_PURE_FUNCTION static inline size_t node_size(const MDBX_val *key, const MDBX_val *value) {
  return node_size_len(key ? key->iov_len : 0, value ? value->iov_len : 0);
}

MDBX_NOTHROW_PURE_FUNCTION static inline pgno_t node_largedata_pgno(const node_t *const __restrict node) {
  assert(node_flags(node) & N_BIG);
  return peek_pgno(node_data(node));
}

MDBX_INTERNAL int __must_check_result node_read_bigdata(MDBX_cursor *mc, const node_t *node, MDBX_val *data,
                                                        const page_t *mp);

static inline int __must_check_result node_read(MDBX_cursor *mc, const node_t *node, MDBX_val *data, const page_t *mp) {
  data->iov_len = node_ds(node);
  data->iov_base = node_data(node);
  if (likely(node_flags(node) != N_BIG))
@ -105,20 +90,12 @@ static inline int __must_check_result node_read(MDBX_cursor *mc,
MDBX_INTERNAL nsr_t node_search(MDBX_cursor *mc, const MDBX_val *key);

MDBX_INTERNAL int __must_check_result node_add_branch(MDBX_cursor *mc, size_t indx, const MDBX_val *key, pgno_t pgno);

MDBX_INTERNAL int __must_check_result node_add_leaf(MDBX_cursor *mc, size_t indx, const MDBX_val *key, MDBX_val *data,
                                                    unsigned flags);

MDBX_INTERNAL int __must_check_result node_add_dupfix(MDBX_cursor *mc, size_t indx, const MDBX_val *key);

MDBX_INTERNAL void node_del(MDBX_cursor *mc, size_t ksize);
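For illustration, a small stand-alone program working through the node-size arithmetic declared above, assuming an 8-byte node header and 2-byte index slots; the authoritative values come from the node_t and page-layout definitions, so the numbers here are only indicative.

#include <stddef.h>
#include <stdio.h>

#define EXAMPLE_NODESIZE 8u                     /* assumed size of the node_t header */
#define EXAMPLE_EVEN_CEIL(n) (((n) + 1u) & ~1u) /* round up to an even number, as EVEN_CEIL() does */

static size_t example_node_size_len(size_t key_len, size_t value_len) {
  return EXAMPLE_NODESIZE + EXAMPLE_EVEN_CEIL(key_len + value_len);
}

int main(void) {
  /* a 5-byte key with a 7-byte value: 8 + even_ceil(12) = 20 bytes of node,
   * plus one 2-byte slot in the page's entries[] array */
  printf("in-page cost = %zu bytes\n", example_node_size_len(5, 7) + 2 /* indx_t slot */);
  return 0;
}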

View File

@ -66,8 +66,7 @@
/** Does a system have battery-backed Real-Time Clock or just a fake. */
#ifndef MDBX_TRUST_RTC
#if defined(__linux__) || defined(__gnu_linux__) || defined(__NetBSD__) || defined(__OpenBSD__)
#define MDBX_TRUST_RTC 0 /* a lot of embedded systems have a fake RTC */
#else
#define MDBX_TRUST_RTC 1
@ -131,15 +130,13 @@
#ifndef MDBX_PNL_PREALLOC_FOR_RADIXSORT
#define MDBX_PNL_PREALLOC_FOR_RADIXSORT 1
#elif !(MDBX_PNL_PREALLOC_FOR_RADIXSORT == 0 || MDBX_PNL_PREALLOC_FOR_RADIXSORT == 1)
#error MDBX_PNL_PREALLOC_FOR_RADIXSORT must be defined as 0 or 1
#endif /* MDBX_PNL_PREALLOC_FOR_RADIXSORT */

#ifndef MDBX_DPL_PREALLOC_FOR_RADIXSORT
#define MDBX_DPL_PREALLOC_FOR_RADIXSORT 1
#elif !(MDBX_DPL_PREALLOC_FOR_RADIXSORT == 0 || MDBX_DPL_PREALLOC_FOR_RADIXSORT == 1)
#error MDBX_DPL_PREALLOC_FOR_RADIXSORT must be defined as 0 or 1
#endif /* MDBX_DPL_PREALLOC_FOR_RADIXSORT */
@ -204,8 +201,7 @@
/** Size of buffer used during copying an environment/database file. */
#ifndef MDBX_ENVCOPY_WRITEBUF
#define MDBX_ENVCOPY_WRITEBUF 1048576u
#elif MDBX_ENVCOPY_WRITEBUF < 65536u || MDBX_ENVCOPY_WRITEBUF > 1073741824u || MDBX_ENVCOPY_WRITEBUF % 65536u
#error MDBX_ENVCOPY_WRITEBUF must be defined in range 65536..1073741824 and be multiple of 65536
#endif /* MDBX_ENVCOPY_WRITEBUF */
@ -224,8 +220,7 @@
#else
#define MDBX_ASSUME_MALLOC_OVERHEAD (sizeof(void *) * 2u)
#endif
#elif MDBX_ASSUME_MALLOC_OVERHEAD < 0 || MDBX_ASSUME_MALLOC_OVERHEAD > 64 || MDBX_ASSUME_MALLOC_OVERHEAD % 4
#error MDBX_ASSUME_MALLOC_OVERHEAD must be defined in range 0..64 and be multiple of 4
#endif /* MDBX_ASSUME_MALLOC_OVERHEAD */
@ -252,15 +247,13 @@
#define MDBX_HAVE_BUILTIN_CPU_SUPPORTS 0
#elif defined(__e2k__)
#define MDBX_HAVE_BUILTIN_CPU_SUPPORTS 0
#elif __has_builtin(__builtin_cpu_supports) || defined(__BUILTIN_CPU_SUPPORTS__) || \
    (defined(__ia32__) && __GNUC_PREREQ(4, 8) && __GLIBC_PREREQ(2, 23))
#define MDBX_HAVE_BUILTIN_CPU_SUPPORTS 1
#else
#define MDBX_HAVE_BUILTIN_CPU_SUPPORTS 0
#endif
#elif !(MDBX_HAVE_BUILTIN_CPU_SUPPORTS == 0 || MDBX_HAVE_BUILTIN_CPU_SUPPORTS == 1)
#error MDBX_HAVE_BUILTIN_CPU_SUPPORTS must be defined as 0 or 1
#endif /* MDBX_HAVE_BUILTIN_CPU_SUPPORTS */
@ -286,19 +279,15 @@
#define MDBX_LOCKING MDBX_LOCKING_WIN32FILES
#else
#ifndef MDBX_LOCKING
#if defined(_POSIX_THREAD_PROCESS_SHARED) && _POSIX_THREAD_PROCESS_SHARED >= 200112L && !defined(__FreeBSD__)

/* Some platforms define the EOWNERDEAD error code even though they
 * don't support Robust Mutexes. If doubt compile with -MDBX_LOCKING=2001. */
#if defined(EOWNERDEAD) && _POSIX_THREAD_PROCESS_SHARED >= 200809L && \
    ((defined(_POSIX_THREAD_ROBUST_PRIO_INHERIT) && _POSIX_THREAD_ROBUST_PRIO_INHERIT > 0) || \
     (defined(_POSIX_THREAD_ROBUST_PRIO_PROTECT) && _POSIX_THREAD_ROBUST_PRIO_PROTECT > 0) || \
     defined(PTHREAD_MUTEX_ROBUST) || defined(PTHREAD_MUTEX_ROBUST_NP)) && \
    (!defined(__GLIBC__) || __GLIBC_PREREQ(2, 10) /* troubles with Robust mutexes before 2.10 */)
#define MDBX_LOCKING MDBX_LOCKING_POSIX2008
#else
#define MDBX_LOCKING MDBX_LOCKING_POSIX2001
@ -316,12 +305,9 @@
/** Advanced: Using POSIX OFD-locks (autodetection by default). */
#ifndef MDBX_USE_OFDLOCKS
#if ((defined(F_OFD_SETLK) && defined(F_OFD_SETLKW) && defined(F_OFD_GETLK)) || \
     (defined(F_OFD_SETLK64) && defined(F_OFD_SETLKW64) && defined(F_OFD_GETLK64))) && \
    !defined(MDBX_SAFE4QEMU) && !defined(__sun) /* OFD-lock are broken on Solaris */
#define MDBX_USE_OFDLOCKS 1
#else
#define MDBX_USE_OFDLOCKS 0
@ -335,8 +321,7 @@
/** Advanced: Using sendfile() syscall (autodetection by default). */
#ifndef MDBX_USE_SENDFILE
#if ((defined(__linux__) || defined(__gnu_linux__)) && !defined(__ANDROID_API__)) || \
    (defined(__ANDROID_API__) && __ANDROID_API__ >= 21)
#define MDBX_USE_SENDFILE 1
#else
@ -360,14 +345,12 @@
//------------------------------------------------------------------------------

#ifndef MDBX_CPU_WRITEBACK_INCOHERENT
#if defined(__ia32__) || defined(__e2k__) || defined(__hppa) || defined(__hppa__) || defined(DOXYGEN)
#define MDBX_CPU_WRITEBACK_INCOHERENT 0
#else
#define MDBX_CPU_WRITEBACK_INCOHERENT 1
#endif
#elif !(MDBX_CPU_WRITEBACK_INCOHERENT == 0 || MDBX_CPU_WRITEBACK_INCOHERENT == 1)
#error MDBX_CPU_WRITEBACK_INCOHERENT must be defined as 0 or 1
#endif /* MDBX_CPU_WRITEBACK_INCOHERENT */
@ -377,31 +360,27 @@
#else
#define MDBX_MMAP_INCOHERENT_FILE_WRITE 0
#endif
#elif !(MDBX_MMAP_INCOHERENT_FILE_WRITE == 0 || MDBX_MMAP_INCOHERENT_FILE_WRITE == 1)
#error MDBX_MMAP_INCOHERENT_FILE_WRITE must be defined as 0 or 1
#endif /* MDBX_MMAP_INCOHERENT_FILE_WRITE */

#ifndef MDBX_MMAP_INCOHERENT_CPU_CACHE
#if defined(__mips) || defined(__mips__) || defined(__mips64) || defined(__mips64__) || defined(_M_MRX000) || \
    defined(_MIPS_) || defined(__MWERKS__) || defined(__sgi)
/* MIPS has cache coherency issues. */
#define MDBX_MMAP_INCOHERENT_CPU_CACHE 1
#else
/* LY: assume no relevant mmap/dcache issues. */
#define MDBX_MMAP_INCOHERENT_CPU_CACHE 0
#endif
#elif !(MDBX_MMAP_INCOHERENT_CPU_CACHE == 0 || MDBX_MMAP_INCOHERENT_CPU_CACHE == 1)
#error MDBX_MMAP_INCOHERENT_CPU_CACHE must be defined as 0 or 1
#endif /* MDBX_MMAP_INCOHERENT_CPU_CACHE */

/** Assume system needs explicit syscall to sync/flush/write modified mapped
 * memory. */
#ifndef MDBX_MMAP_NEEDS_JOLT
#if MDBX_MMAP_INCOHERENT_FILE_WRITE || MDBX_MMAP_INCOHERENT_CPU_CACHE || !(defined(__linux__) || defined(__gnu_linux__))
#define MDBX_MMAP_NEEDS_JOLT 1
#else
#define MDBX_MMAP_NEEDS_JOLT 0
@ -456,8 +435,7 @@
#endif /* MDBX_64BIT_CAS */

#ifndef MDBX_UNALIGNED_OK
#if defined(__ALIGNED__) || defined(__SANITIZE_UNDEFINED__) || defined(ENABLE_UBSAN)
#define MDBX_UNALIGNED_OK 0 /* no unaligned access allowed */
#elif defined(__ARM_FEATURE_UNALIGNED)
#define MDBX_UNALIGNED_OK 4 /* ok unaligned for 32-bit words */
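For illustration, a hypothetical way to pin a couple of the build-time options validated above. In a real build these normally arrive as -D compiler flags when the library itself is compiled; the values below are only examples chosen to satisfy the #error range checks.

#define MDBX_ENVCOPY_WRITEBUF (16u * 65536u) /* 1 MiB copy buffer, a multiple of 65536 as required */
#define MDBX_TRUST_RTC 0                     /* treat the RTC as potentially fake */

#include <stdio.h>

int main(void) {
  printf("MDBX_ENVCOPY_WRITEBUF = %u, MDBX_TRUST_RTC = %d\n", (unsigned)MDBX_ENVCOPY_WRITEBUF, MDBX_TRUST_RTC);
  return 0;
}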

File diff suppressed because it is too large

View File

@ -12,9 +12,8 @@
#if __has_include(<sys/cachectl.h>)
#include <sys/cachectl.h>
#elif defined(__mips) || defined(__mips__) || defined(__mips64) || defined(__mips64__) || defined(_M_MRX000) || \
    defined(_MIPS_) || defined(__MWERKS__) || defined(__sgi)
/* MIPS should have explicit cache control */
#include <sys/cachectl.h>
#endif
@ -28,11 +27,9 @@ MDBX_MAYBE_UNUSED static inline void osal_compiler_barrier(void) {
  __memory_barrier();
#elif defined(__SUNPRO_C) || defined(__sun) || defined(sun)
  __compiler_barrier();
#elif (defined(_HPUX_SOURCE) || defined(__hpux) || defined(__HP_aCC)) && (defined(HP_IA64) || defined(__ia64))
  _Asm_sched_fence(/* LY: no-arg meaning 'all expect ALU', e.g. 0x3D3D */);
#elif defined(_AIX) || defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__) || defined(__powerpc64__)
  __fence();
#else
#error "Could not guess the kind of compiler, please report to us."
@ -60,11 +57,9 @@ MDBX_MAYBE_UNUSED static inline void osal_memory_barrier(void) {
#endif #endif
#elif defined(__SUNPRO_C) || defined(__sun) || defined(sun) #elif defined(__SUNPRO_C) || defined(__sun) || defined(sun)
__machine_rw_barrier(); __machine_rw_barrier();
#elif (defined(_HPUX_SOURCE) || defined(__hpux) || defined(__HP_aCC)) && \ #elif (defined(_HPUX_SOURCE) || defined(__hpux) || defined(__HP_aCC)) && (defined(HP_IA64) || defined(__ia64))
(defined(HP_IA64) || defined(__ia64))
_Asm_mf(); _Asm_mf();
#elif defined(_AIX) || defined(__ppc__) || defined(__powerpc__) || \ #elif defined(_AIX) || defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__) || defined(__powerpc64__)
defined(__ppc64__) || defined(__powerpc64__)
__lwsync(); __lwsync();
#else #else
#error "Could not guess the kind of compiler, please report to us." #error "Could not guess the kind of compiler, please report to us."
@ -101,9 +96,7 @@ typedef CRITICAL_SECTION osal_fastmutex_t;
#if MDBX_WITHOUT_MSVC_CRT #if MDBX_WITHOUT_MSVC_CRT
#ifndef osal_malloc #ifndef osal_malloc
static inline void *osal_malloc(size_t bytes) { static inline void *osal_malloc(size_t bytes) { return HeapAlloc(GetProcessHeap(), 0, bytes); }
return HeapAlloc(GetProcessHeap(), 0, bytes);
}
#endif /* osal_malloc */ #endif /* osal_malloc */
#ifndef osal_calloc #ifndef osal_calloc
@ -114,8 +107,7 @@ static inline void *osal_calloc(size_t nelem, size_t size) {
#ifndef osal_realloc #ifndef osal_realloc
static inline void *osal_realloc(void *ptr, size_t bytes) { static inline void *osal_realloc(void *ptr, size_t bytes) {
return ptr ? HeapReAlloc(GetProcessHeap(), 0, ptr, bytes) return ptr ? HeapReAlloc(GetProcessHeap(), 0, ptr, bytes) : HeapAlloc(GetProcessHeap(), 0, bytes);
: HeapAlloc(GetProcessHeap(), 0, bytes);
} }
#endif /* osal_realloc */ #endif /* osal_realloc */
@ -208,7 +200,7 @@ typedef struct osal_mmap {
#elif defined(__APPLE__) || defined(__MACH__) || defined(_DARWIN_C_SOURCE) #elif defined(__APPLE__) || defined(__MACH__) || defined(_DARWIN_C_SOURCE)
#if defined(MAC_OS_X_VERSION_MIN_REQUIRED) && defined(MAC_OS_VERSION_11_0) && \ #if defined(MAC_OS_X_VERSION_MIN_REQUIRED) && defined(MAC_OS_VERSION_11_0) && \
MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_VERSION_11_0 MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_VERSION_11_0
/* FIXME: add checks for IOS versions, etc */ /* FIXME: add checks for IOS versions, etc */
#define MDBX_HAVE_PWRITEV 1 #define MDBX_HAVE_PWRITEV 1
@ -279,39 +271,29 @@ typedef struct osal_ioring {
MDBX_INTERNAL int osal_ioring_create(osal_ioring_t * MDBX_INTERNAL int osal_ioring_create(osal_ioring_t *
#if defined(_WIN32) || defined(_WIN64) #if defined(_WIN32) || defined(_WIN64)
, ,
bool enable_direct, bool enable_direct, mdbx_filehandle_t overlapped_fd
mdbx_filehandle_t overlapped_fd
#endif /* Windows */ #endif /* Windows */
); );
MDBX_INTERNAL int osal_ioring_resize(osal_ioring_t *, size_t items); MDBX_INTERNAL int osal_ioring_resize(osal_ioring_t *, size_t items);
MDBX_INTERNAL void osal_ioring_destroy(osal_ioring_t *); MDBX_INTERNAL void osal_ioring_destroy(osal_ioring_t *);
MDBX_INTERNAL void osal_ioring_reset(osal_ioring_t *); MDBX_INTERNAL void osal_ioring_reset(osal_ioring_t *);
MDBX_INTERNAL int osal_ioring_add(osal_ioring_t *ctx, const size_t offset, MDBX_INTERNAL int osal_ioring_add(osal_ioring_t *ctx, const size_t offset, void *data, const size_t bytes);
void *data, const size_t bytes);
typedef struct osal_ioring_write_result { typedef struct osal_ioring_write_result {
int err; int err;
unsigned wops; unsigned wops;
} osal_ioring_write_result_t; } osal_ioring_write_result_t;
MDBX_INTERNAL osal_ioring_write_result_t MDBX_INTERNAL osal_ioring_write_result_t osal_ioring_write(osal_ioring_t *ior, mdbx_filehandle_t fd);
osal_ioring_write(osal_ioring_t *ior, mdbx_filehandle_t fd);
MDBX_INTERNAL void osal_ioring_walk(osal_ioring_t *ior, iov_ctx_t *ctx, MDBX_INTERNAL void osal_ioring_walk(osal_ioring_t *ior, iov_ctx_t *ctx,
void (*callback)(iov_ctx_t *ctx, void (*callback)(iov_ctx_t *ctx, size_t offset, void *data, size_t bytes));
size_t offset, void *data,
size_t bytes));
MDBX_MAYBE_UNUSED static inline unsigned MDBX_MAYBE_UNUSED static inline unsigned osal_ioring_left(const osal_ioring_t *ior) { return ior->slots_left; }
osal_ioring_left(const osal_ioring_t *ior) {
return ior->slots_left;
}
MDBX_MAYBE_UNUSED static inline unsigned MDBX_MAYBE_UNUSED static inline unsigned osal_ioring_used(const osal_ioring_t *ior) {
osal_ioring_used(const osal_ioring_t *ior) {
return ior->allocated - ior->slots_left; return ior->allocated - ior->slots_left;
} }
MDBX_MAYBE_UNUSED static inline int MDBX_MAYBE_UNUSED static inline int osal_ioring_prepare(osal_ioring_t *ior, size_t items, size_t bytes) {
osal_ioring_prepare(osal_ioring_t *ior, size_t items, size_t bytes) {
items = (items > 32) ? items : 32; items = (items > 32) ? items : 32;
#if defined(_WIN32) || defined(_WIN64) #if defined(_WIN32) || defined(_WIN64)
if (ior->direct) { if (ior->direct) {
@ -330,13 +312,11 @@ osal_ioring_prepare(osal_ioring_t *ior, size_t items, size_t bytes) {
/*----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*/
/* libc compatibility stuff */ /* libc compatibility stuff */
#if (!defined(__GLIBC__) && __GLIBC_PREREQ(2, 1)) && \ #if (!defined(__GLIBC__) && __GLIBC_PREREQ(2, 1)) && (defined(_GNU_SOURCE) || defined(_BSD_SOURCE))
(defined(_GNU_SOURCE) || defined(_BSD_SOURCE))
#define osal_asprintf asprintf #define osal_asprintf asprintf
#define osal_vasprintf vasprintf #define osal_vasprintf vasprintf
#else #else
MDBX_MAYBE_UNUSED MDBX_INTERNAL MDBX_MAYBE_UNUSED MDBX_INTERNAL MDBX_PRINTF_ARGS(2, 3) int osal_asprintf(char **strp, const char *fmt, ...);
MDBX_PRINTF_ARGS(2, 3) int osal_asprintf(char **strp, const char *fmt, ...);
MDBX_INTERNAL int osal_vasprintf(char **strp, const char *fmt, va_list ap); MDBX_INTERNAL int osal_vasprintf(char **strp, const char *fmt, va_list ap);
#endif #endif
@ -358,14 +338,12 @@ MDBX_MAYBE_UNUSED MDBX_INTERNAL void osal_jitter(bool tiny);
#else #else
#define MAX_WRITE UINT32_C(0x3f000000) #define MAX_WRITE UINT32_C(0x3f000000)
#if defined(F_GETLK64) && defined(F_SETLK64) && defined(F_SETLKW64) && \ #if defined(F_GETLK64) && defined(F_SETLK64) && defined(F_SETLKW64) && !defined(__ANDROID_API__)
!defined(__ANDROID_API__)
#define MDBX_F_SETLK F_SETLK64 #define MDBX_F_SETLK F_SETLK64
#define MDBX_F_SETLKW F_SETLKW64 #define MDBX_F_SETLKW F_SETLKW64
#define MDBX_F_GETLK F_GETLK64 #define MDBX_F_GETLK F_GETLK64
#if (__GLIBC_PREREQ(2, 28) && \ #if (__GLIBC_PREREQ(2, 28) && (defined(__USE_LARGEFILE64) || defined(__LARGEFILE64_SOURCE) || \
(defined(__USE_LARGEFILE64) || defined(__LARGEFILE64_SOURCE) || \ defined(_USE_LARGEFILE64) || defined(_LARGEFILE64_SOURCE))) || \
defined(_USE_LARGEFILE64) || defined(_LARGEFILE64_SOURCE))) || \
defined(fcntl64) defined(fcntl64)
#define MDBX_FCNTL fcntl64 #define MDBX_FCNTL fcntl64
#else #else
@ -383,8 +361,7 @@ MDBX_MAYBE_UNUSED MDBX_INTERNAL void osal_jitter(bool tiny);
#define MDBX_STRUCT_FLOCK struct flock #define MDBX_STRUCT_FLOCK struct flock
#endif /* MDBX_F_SETLK, MDBX_F_SETLKW, MDBX_F_GETLK */ #endif /* MDBX_F_SETLK, MDBX_F_SETLKW, MDBX_F_GETLK */
#if defined(F_OFD_SETLK64) && defined(F_OFD_SETLKW64) && \ #if defined(F_OFD_SETLK64) && defined(F_OFD_SETLKW64) && defined(F_OFD_GETLK64) && !defined(__ANDROID_API__)
defined(F_OFD_GETLK64) && !defined(__ANDROID_API__)
#define MDBX_F_OFD_SETLK F_OFD_SETLK64 #define MDBX_F_OFD_SETLK F_OFD_SETLK64
#define MDBX_F_OFD_SETLKW F_OFD_SETLKW64 #define MDBX_F_OFD_SETLKW F_OFD_SETLKW64
#define MDBX_F_OFD_GETLK F_OFD_GETLK64 #define MDBX_F_OFD_GETLK F_OFD_GETLK64
@ -393,8 +370,7 @@ MDBX_MAYBE_UNUSED MDBX_INTERNAL void osal_jitter(bool tiny);
#define MDBX_F_OFD_SETLKW F_OFD_SETLKW #define MDBX_F_OFD_SETLKW F_OFD_SETLKW
#define MDBX_F_OFD_GETLK F_OFD_GETLK #define MDBX_F_OFD_GETLK F_OFD_GETLK
#ifndef OFF_T_MAX #ifndef OFF_T_MAX
#define OFF_T_MAX \ #define OFF_T_MAX (((sizeof(off_t) > 4) ? INT64_MAX : INT32_MAX) & ~(size_t)0xFffff)
(((sizeof(off_t) > 4) ? INT64_MAX : INT32_MAX) & ~(size_t)0xFffff)
#endif /* OFF_T_MAX */ #endif /* OFF_T_MAX */
#endif /* MDBX_F_OFD_SETLK64, MDBX_F_OFD_SETLKW64, MDBX_F_OFD_GETLK64 */ #endif /* MDBX_F_OFD_SETLK64, MDBX_F_OFD_SETLKW64, MDBX_F_OFD_GETLK64 */
@ -414,8 +390,7 @@ MDBX_MAYBE_UNUSED static inline int osal_get_errno(void) {
} }
#ifndef osal_memalign_alloc #ifndef osal_memalign_alloc
MDBX_INTERNAL int osal_memalign_alloc(size_t alignment, size_t bytes, MDBX_INTERNAL int osal_memalign_alloc(size_t alignment, size_t bytes, void **result);
void **result);
#endif #endif
#ifndef osal_memalign_free #ifndef osal_memalign_free
MDBX_INTERNAL void osal_memalign_free(void *ptr); MDBX_INTERNAL void osal_memalign_free(void *ptr);
@ -433,19 +408,13 @@ MDBX_INTERNAL int osal_fastmutex_acquire(osal_fastmutex_t *fastmutex);
MDBX_INTERNAL int osal_fastmutex_release(osal_fastmutex_t *fastmutex); MDBX_INTERNAL int osal_fastmutex_release(osal_fastmutex_t *fastmutex);
MDBX_INTERNAL int osal_fastmutex_destroy(osal_fastmutex_t *fastmutex); MDBX_INTERNAL int osal_fastmutex_destroy(osal_fastmutex_t *fastmutex);
MDBX_INTERNAL int osal_pwritev(mdbx_filehandle_t fd, struct iovec *iov, MDBX_INTERNAL int osal_pwritev(mdbx_filehandle_t fd, struct iovec *iov, size_t sgvcnt, uint64_t offset);
size_t sgvcnt, uint64_t offset); MDBX_INTERNAL int osal_pread(mdbx_filehandle_t fd, void *buf, size_t count, uint64_t offset);
MDBX_INTERNAL int osal_pread(mdbx_filehandle_t fd, void *buf, size_t count, MDBX_INTERNAL int osal_pwrite(mdbx_filehandle_t fd, const void *buf, size_t count, uint64_t offset);
uint64_t offset); MDBX_INTERNAL int osal_write(mdbx_filehandle_t fd, const void *buf, size_t count);
MDBX_INTERNAL int osal_pwrite(mdbx_filehandle_t fd, const void *buf,
size_t count, uint64_t offset);
MDBX_INTERNAL int osal_write(mdbx_filehandle_t fd, const void *buf,
size_t count);
MDBX_INTERNAL int MDBX_INTERNAL int osal_thread_create(osal_thread_t *thread, THREAD_RESULT(THREAD_CALL *start_routine)(void *),
osal_thread_create(osal_thread_t *thread, void *arg);
THREAD_RESULT(THREAD_CALL *start_routine)(void *),
void *arg);
MDBX_INTERNAL int osal_thread_join(osal_thread_t thread); MDBX_INTERNAL int osal_thread_join(osal_thread_t thread);
enum osal_syncmode_bits { enum osal_syncmode_bits {
@ -456,8 +425,7 @@ enum osal_syncmode_bits {
MDBX_SYNC_IODQ = 8 MDBX_SYNC_IODQ = 8
}; };
MDBX_INTERNAL int osal_fsync(mdbx_filehandle_t fd, MDBX_INTERNAL int osal_fsync(mdbx_filehandle_t fd, const enum osal_syncmode_bits mode_bits);
const enum osal_syncmode_bits mode_bits);
MDBX_INTERNAL int osal_ftruncate(mdbx_filehandle_t fd, uint64_t length); MDBX_INTERNAL int osal_ftruncate(mdbx_filehandle_t fd, uint64_t length);
MDBX_INTERNAL int osal_fseek(mdbx_filehandle_t fd, uint64_t pos); MDBX_INTERNAL int osal_fseek(mdbx_filehandle_t fd, uint64_t pos);
MDBX_INTERNAL int osal_filesize(mdbx_filehandle_t fd, uint64_t *length); MDBX_INTERNAL int osal_filesize(mdbx_filehandle_t fd, uint64_t *length);
@ -483,14 +451,11 @@ MDBX_MAYBE_UNUSED static inline bool osal_isdirsep(pathchar_t c) {
c == '/'; c == '/';
} }
MDBX_INTERNAL bool osal_pathequal(const pathchar_t *l, const pathchar_t *r, MDBX_INTERNAL bool osal_pathequal(const pathchar_t *l, const pathchar_t *r, size_t len);
size_t len);
MDBX_INTERNAL pathchar_t *osal_fileext(const pathchar_t *pathname, size_t len); MDBX_INTERNAL pathchar_t *osal_fileext(const pathchar_t *pathname, size_t len);
MDBX_INTERNAL int osal_fileexists(const pathchar_t *pathname); MDBX_INTERNAL int osal_fileexists(const pathchar_t *pathname);
MDBX_INTERNAL int osal_openfile(const enum osal_openfile_purpose purpose, MDBX_INTERNAL int osal_openfile(const enum osal_openfile_purpose purpose, const MDBX_env *env,
const MDBX_env *env, const pathchar_t *pathname, const pathchar_t *pathname, mdbx_filehandle_t *fd, mdbx_mode_t unix_mode_bits);
mdbx_filehandle_t *fd,
mdbx_mode_t unix_mode_bits);
MDBX_INTERNAL int osal_closefile(mdbx_filehandle_t fd); MDBX_INTERNAL int osal_closefile(mdbx_filehandle_t fd);
MDBX_INTERNAL int osal_removefile(const pathchar_t *pathname); MDBX_INTERNAL int osal_removefile(const pathchar_t *pathname);
MDBX_INTERNAL int osal_removedirectory(const pathchar_t *pathname); MDBX_INTERNAL int osal_removedirectory(const pathchar_t *pathname);
@ -499,26 +464,21 @@ MDBX_INTERNAL int osal_lockfile(mdbx_filehandle_t fd, bool wait);
#define MMAP_OPTION_TRUNCATE 1 #define MMAP_OPTION_TRUNCATE 1
#define MMAP_OPTION_SEMAPHORE 2 #define MMAP_OPTION_SEMAPHORE 2
MDBX_INTERNAL int osal_mmap(const int flags, osal_mmap_t *map, size_t size, MDBX_INTERNAL int osal_mmap(const int flags, osal_mmap_t *map, size_t size, const size_t limit, const unsigned options);
const size_t limit, const unsigned options);
MDBX_INTERNAL int osal_munmap(osal_mmap_t *map); MDBX_INTERNAL int osal_munmap(osal_mmap_t *map);
#define MDBX_MRESIZE_MAY_MOVE 0x00000100 #define MDBX_MRESIZE_MAY_MOVE 0x00000100
#define MDBX_MRESIZE_MAY_UNMAP 0x00000200 #define MDBX_MRESIZE_MAY_UNMAP 0x00000200
MDBX_INTERNAL int osal_mresize(const int flags, osal_mmap_t *map, size_t size, MDBX_INTERNAL int osal_mresize(const int flags, osal_mmap_t *map, size_t size, size_t limit);
size_t limit);
#if defined(_WIN32) || defined(_WIN64) #if defined(_WIN32) || defined(_WIN64)
typedef struct { typedef struct {
unsigned limit, count; unsigned limit, count;
HANDLE handles[31]; HANDLE handles[31];
} mdbx_handle_array_t; } mdbx_handle_array_t;
MDBX_INTERNAL int MDBX_INTERNAL int osal_suspend_threads_before_remap(MDBX_env *env, mdbx_handle_array_t **array);
osal_suspend_threads_before_remap(MDBX_env *env, mdbx_handle_array_t **array);
MDBX_INTERNAL int osal_resume_threads_after_remap(mdbx_handle_array_t *array); MDBX_INTERNAL int osal_resume_threads_after_remap(mdbx_handle_array_t *array);
#endif /* Windows */ #endif /* Windows */
MDBX_INTERNAL int osal_msync(const osal_mmap_t *map, size_t offset, MDBX_INTERNAL int osal_msync(const osal_mmap_t *map, size_t offset, size_t length, enum osal_syncmode_bits mode_bits);
size_t length, enum osal_syncmode_bits mode_bits); MDBX_INTERNAL int osal_check_fs_rdonly(mdbx_filehandle_t handle, const pathchar_t *pathname, int err);
MDBX_INTERNAL int osal_check_fs_rdonly(mdbx_filehandle_t handle,
const pathchar_t *pathname, int err);
MDBX_INTERNAL int osal_check_fs_incore(mdbx_filehandle_t handle); MDBX_INTERNAL int osal_check_fs_incore(mdbx_filehandle_t handle);
MDBX_MAYBE_UNUSED static inline uint32_t osal_getpid(void) { MDBX_MAYBE_UNUSED static inline uint32_t osal_getpid(void) {
@ -549,8 +509,7 @@ MDBX_INTERNAL int osal_check_tid4bionic(void);
static inline int osal_check_tid4bionic(void) { return 0; } static inline int osal_check_tid4bionic(void) { return 0; }
#endif /* __ANDROID_API__ || ANDROID) || BIONIC */ #endif /* __ANDROID_API__ || ANDROID) || BIONIC */
MDBX_MAYBE_UNUSED static inline int MDBX_MAYBE_UNUSED static inline int osal_pthread_mutex_lock(pthread_mutex_t *mutex) {
osal_pthread_mutex_lock(pthread_mutex_t *mutex) {
int err = osal_check_tid4bionic(); int err = osal_check_tid4bionic();
return unlikely(err) ? err : pthread_mutex_lock(mutex); return unlikely(err) ? err : pthread_mutex_lock(mutex);
} }
@ -561,8 +520,7 @@ MDBX_INTERNAL uint64_t osal_cputime(size_t *optional_page_faults);
MDBX_INTERNAL uint64_t osal_16dot16_to_monotime(uint32_t seconds_16dot16); MDBX_INTERNAL uint64_t osal_16dot16_to_monotime(uint32_t seconds_16dot16);
MDBX_INTERNAL uint32_t osal_monotime_to_16dot16(uint64_t monotime); MDBX_INTERNAL uint32_t osal_monotime_to_16dot16(uint64_t monotime);
MDBX_MAYBE_UNUSED static inline uint32_t MDBX_MAYBE_UNUSED static inline uint32_t osal_monotime_to_16dot16_noUnderflow(uint64_t monotime) {
osal_monotime_to_16dot16_noUnderflow(uint64_t monotime) {
uint32_t seconds_16dot16 = osal_monotime_to_16dot16(monotime); uint32_t seconds_16dot16 = osal_monotime_to_16dot16(monotime);
return seconds_16dot16 ? seconds_16dot16 : /* fix underflow */ (monotime > 0); return seconds_16dot16 ? seconds_16dot16 : /* fix underflow */ (monotime > 0);
} }
@ -589,10 +547,8 @@ MDBX_INTERNAL bin128_t osal_guid(const MDBX_env *);
/*----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*/
MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline uint64_t MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline uint64_t osal_bswap64(uint64_t v) {
osal_bswap64(uint64_t v) { #if __GNUC_PREREQ(4, 4) || __CLANG_PREREQ(4, 0) || __has_builtin(__builtin_bswap64)
#if __GNUC_PREREQ(4, 4) || __CLANG_PREREQ(4, 0) || \
__has_builtin(__builtin_bswap64)
return __builtin_bswap64(v); return __builtin_bswap64(v);
#elif defined(_MSC_VER) && !defined(__clang__) #elif defined(_MSC_VER) && !defined(__clang__)
return _byteswap_uint64(v); return _byteswap_uint64(v);
@ -601,19 +557,14 @@ osal_bswap64(uint64_t v) {
#elif defined(bswap_64) #elif defined(bswap_64)
return bswap_64(v); return bswap_64(v);
#else #else
return v << 56 | v >> 56 | ((v << 40) & UINT64_C(0x00ff000000000000)) | return v << 56 | v >> 56 | ((v << 40) & UINT64_C(0x00ff000000000000)) | ((v << 24) & UINT64_C(0x0000ff0000000000)) |
((v << 24) & UINT64_C(0x0000ff0000000000)) | ((v << 8) & UINT64_C(0x000000ff00000000)) | ((v >> 8) & UINT64_C(0x00000000ff000000)) |
((v << 8) & UINT64_C(0x000000ff00000000)) | ((v >> 24) & UINT64_C(0x0000000000ff0000)) | ((v >> 40) & UINT64_C(0x000000000000ff00));
((v >> 8) & UINT64_C(0x00000000ff000000)) |
((v >> 24) & UINT64_C(0x0000000000ff0000)) |
((v >> 40) & UINT64_C(0x000000000000ff00));
#endif #endif
} }
MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline uint32_t MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline uint32_t osal_bswap32(uint32_t v) {
osal_bswap32(uint32_t v) { #if __GNUC_PREREQ(4, 4) || __CLANG_PREREQ(4, 0) || __has_builtin(__builtin_bswap32)
#if __GNUC_PREREQ(4, 4) || __CLANG_PREREQ(4, 0) || \
__has_builtin(__builtin_bswap32)
return __builtin_bswap32(v); return __builtin_bswap32(v);
#elif defined(_MSC_VER) && !defined(__clang__) #elif defined(_MSC_VER) && !defined(__clang__)
return _byteswap_ulong(v); return _byteswap_ulong(v);
@ -622,7 +573,6 @@ osal_bswap32(uint32_t v) {
#elif defined(bswap_32) #elif defined(bswap_32)
return bswap_32(v); return bswap_32(v);
#else #else
return v << 24 | v >> 24 | ((v << 8) & UINT32_C(0x00ff0000)) | return v << 24 | v >> 24 | ((v << 8) & UINT32_C(0x00ff0000)) | ((v >> 8) & UINT32_C(0x0000ff00));
((v >> 8) & UINT32_C(0x0000ff00));
#endif #endif
} }
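The shift-and-mask fallback above is easy to spot-check against a byte-reversal reference. Here is a standalone sketch in plain C, not part of the commit and using no libmdbx headers; all names are local to the example.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same shift/mask scheme as the osal_bswap64 fallback above. */
static uint64_t bswap64_fallback(uint64_t v) {
  return v << 56 | v >> 56 | ((v << 40) & UINT64_C(0x00ff000000000000)) |
         ((v << 24) & UINT64_C(0x0000ff0000000000)) | ((v << 8) & UINT64_C(0x000000ff00000000)) |
         ((v >> 8) & UINT64_C(0x00000000ff000000)) | ((v >> 24) & UINT64_C(0x0000000000ff0000)) |
         ((v >> 40) & UINT64_C(0x000000000000ff00));
}

int main(void) {
  const uint64_t v = UINT64_C(0x0123456789abcdef);
  /* Reference result: reverse the in-memory byte order. */
  uint8_t in[8], out[8];
  memcpy(in, &v, sizeof(v));
  for (int i = 0; i < 8; ++i)
    out[i] = in[7 - i];
  uint64_t expected;
  memcpy(&expected, out, sizeof(expected));
  printf("%s\n", (bswap64_fallback(v) == expected) ? "ok" : "mismatch");
  return 0;
}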


@@ -3,17 +3,14 @@
#include "internals.h"
__cold int MDBX_PRINTF_ARGS(2, 3) bad_page(const page_t *mp, const char *fmt, ...) {
  if (LOG_ENABLED(MDBX_LOG_ERROR)) {
    static const page_t *prev;
    if (prev != mp) {
      char buf4unknown[16];
      prev = mp;
      debug_log(MDBX_LOG_ERROR, "badpage", 0, "corrupted %s-page #%u, mod-txnid %" PRIaTXN "\n",
                pagetype_caption(page_type(mp), buf4unknown), mp->pgno, mp->txnid);
    }
    va_list args;
@@ -24,17 +21,14 @@ __cold int MDBX_PRINTF_ARGS(2, 3)
  return MDBX_CORRUPTED;
}
__cold void MDBX_PRINTF_ARGS(2, 3) poor_page(const page_t *mp, const char *fmt, ...) {
  if (LOG_ENABLED(MDBX_LOG_NOTICE)) {
    static const page_t *prev;
    if (prev != mp) {
      char buf4unknown[16];
      prev = mp;
      debug_log(MDBX_LOG_NOTICE, "poorpage", 0, "suboptimal %s-page #%u, mod-txnid %" PRIaTXN "\n",
                pagetype_caption(page_type(mp), buf4unknown), mp->pgno, mp->txnid);
    }
    va_list args;
@@ -63,21 +57,17 @@ __cold int page_check(const MDBX_cursor *const mc, const page_t *const mp) {
  const ptrdiff_t offset = ptr_dist(mp, env->dxb_mmap.base);
  unsigned flags_mask = P_ILL_BITS;
  unsigned flags_expected = 0;
  if (offset < 0 || offset > (ptrdiff_t)(pgno2bytes(env, mc->txn->geo.first_unallocated) -
                                         ((mp->flags & P_SUBP) ? PAGEHDRSZ + 1 : env->ps))) {
    /* should be dirty page without MDBX_WRITEMAP, or a subpage of. */
    flags_mask -= P_SUBP;
    if ((env->flags & MDBX_WRITEMAP) != 0 || (!is_shadowed(mc->txn, mp) && !(mp->flags & P_SUBP)))
      rc = bad_page(mp, "invalid page-address %p, offset %zi\n", __Wpedantic_format_voidptr(mp), offset);
  } else if (offset & (env->ps - 1))
    flags_expected = P_SUBP;
  if (unlikely((mp->flags & flags_mask) != flags_expected))
    rc = bad_page(mp, "unknown/extra page-flags (have 0x%x, expect 0x%x)\n", mp->flags & flags_mask, flags_expected);
  cASSERT(mc, (mc->checking & z_dupfix) == 0 || (mc->flags & z_inner) != 0);
  const uint8_t type = page_type(mp);
@@ -86,82 +76,62 @@ __cold int page_check(const MDBX_cursor *const mc, const page_t *const mp) {
    return bad_page(mp, "invalid type (%u)\n", type);
  case P_LARGE:
    if (unlikely(mc->flags & z_inner))
      rc = bad_page(mp, "unexpected %s-page for %s (db-flags 0x%x)\n", "large", "nested dupsort tree", mc->tree->flags);
    const pgno_t npages = mp->pages;
    if (unlikely(npages < 1 || npages >= MAX_PAGENO / 2))
      rc = bad_page(mp, "invalid n-pages (%u) for large-page\n", npages);
    if (unlikely(mp->pgno + npages > mc->txn->geo.first_unallocated))
      rc = bad_page(mp, "end of large-page beyond (%u) allocated space (%u next-pgno)\n", mp->pgno + npages,
                    mc->txn->geo.first_unallocated);
    return rc; //-------------------------- end of large/overflow page handling
  case P_LEAF | P_SUBP:
    if (unlikely(mc->tree->height != 1))
      rc =
          bad_page(mp, "unexpected %s-page for %s (db-flags 0x%x)\n", "leaf-sub", "nested dupsort db", mc->tree->flags);
    /* fall through */
    __fallthrough;
  case P_LEAF:
    if (unlikely((mc->checking & z_dupfix) != 0))
      rc = bad_page(mp, "unexpected leaf-page for dupfix subtree (db-lags 0x%x)\n", mc->tree->flags);
    break;
  case P_LEAF | P_DUPFIX | P_SUBP:
    if (unlikely(mc->tree->height != 1))
      rc = bad_page(mp, "unexpected %s-page for %s (db-flags 0x%x)\n", "leaf2-sub", "nested dupsort db",
                    mc->tree->flags);
    /* fall through */
    __fallthrough;
  case P_LEAF | P_DUPFIX:
    if (unlikely((mc->checking & z_dupfix) == 0))
      rc = bad_page(mp, "unexpected leaf2-page for non-dupfix (sub)tree (db-flags 0x%x)\n", mc->tree->flags);
    break;
  case P_BRANCH:
    break;
  }
  if (unlikely(mp->upper < mp->lower || (mp->lower & 1) || PAGEHDRSZ + mp->upper > env->ps))
    rc = bad_page(mp, "invalid page lower(%u)/upper(%u) with limit %zu\n", mp->lower, mp->upper, page_space(env));
  const char *const end_of_page = ptr_disp(mp, env->ps);
  const size_t nkeys = page_numkeys(mp);
  STATIC_ASSERT(P_BRANCH == 1);
  if (unlikely(nkeys <= (uint8_t)(mp->flags & P_BRANCH))) {
    if ((!(mc->flags & z_inner) || mc->tree->items) &&
        (!(mc->checking & z_updating) || !(is_modifable(mc->txn, mp) || (mp->flags & P_SUBP))))
      rc = bad_page(mp, "%s-page nkeys (%zu) < %u\n", is_branch(mp) ? "branch" : "leaf", nkeys, 1 + is_branch(mp));
  }
  const size_t ksize_max = keysize_max(env->ps, 0);
  const size_t leaf2_ksize = mp->dupfix_ksize;
  if (is_dupfix_leaf(mp)) {
    if (unlikely((mc->flags & z_inner) == 0 || (mc->tree->flags & MDBX_DUPFIXED) == 0))
      rc = bad_page(mp, "unexpected leaf2-page (db-flags 0x%x)\n", mc->tree->flags);
    else if (unlikely(leaf2_ksize != mc->tree->dupfix_size))
      rc = bad_page(mp, "invalid leaf2_ksize %zu\n", leaf2_ksize);
    else if (unlikely(((leaf2_ksize & nkeys) ^ mp->upper) & 1))
      rc = bad_page(mp, "invalid page upper (%u) for nkeys %zu with leaf2-length %zu\n", mp->upper, nkeys, leaf2_ksize);
  } else {
    if (unlikely((mp->upper & 1) || PAGEHDRSZ + mp->upper + nkeys * sizeof(node_t) + nkeys - 1 > env->ps))
      rc = bad_page(mp, "invalid page upper (%u) for nkeys %zu with limit %zu\n", mp->upper, nkeys, page_space(env));
  }
  MDBX_val here, prev = {0, 0};
@@ -170,17 +140,14 @@ __cold int page_check(const MDBX_cursor *const mc, const page_t *const mp) {
    if (is_dupfix_leaf(mp)) {
      const char *const key = page_dupfix_ptr(mp, i, mc->tree->dupfix_size);
      if (unlikely(end_of_page < key + leaf2_ksize)) {
        rc = bad_page(mp, "leaf2-item beyond (%zu) page-end\n", key + leaf2_ksize - end_of_page);
        continue;
      }
      if (unlikely(leaf2_ksize != mc->clc->k.lmin)) {
        if (unlikely(leaf2_ksize < mc->clc->k.lmin || leaf2_ksize > mc->clc->k.lmax))
          rc = bad_page(mp, "leaf2-item size (%zu) <> min/max length (%zu/%zu)\n", leaf2_ksize, mc->clc->k.lmin,
                        mc->clc->k.lmax);
        else
          mc->clc->k.lmin = mc->clc->k.lmax = leaf2_ksize;
      }
@@ -188,16 +155,14 @@ __cold int page_check(const MDBX_cursor *const mc, const page_t *const mp) {
        here.iov_base = (void *)key;
        here.iov_len = leaf2_ksize;
        if (prev.iov_base && unlikely(mc->clc->k.cmp(&prev, &here) >= 0))
          rc = bad_page(mp, "leaf2-item #%zu wrong order (%s >= %s)\n", i, DKEY(&prev), DVAL(&here));
        prev = here;
      }
    } else {
      const node_t *const node = page_node(mp, i);
      const char *const node_end = ptr_disp(node, NODESIZE);
      if (unlikely(node_end > end_of_page)) {
        rc = bad_page(mp, "node[%zu] (%zu) beyond page-end\n", i, node_end - end_of_page);
        continue;
      }
      const size_t ksize = node_ks(node);
@@ -205,44 +170,36 @@ __cold int page_check(const MDBX_cursor *const mc, const page_t *const mp) {
        rc = bad_page(mp, "node[%zu] too long key (%zu)\n", i, ksize);
      const char *const key = node_key(node);
      if (unlikely(end_of_page < key + ksize)) {
        rc = bad_page(mp, "node[%zu] key (%zu) beyond page-end\n", i, key + ksize - end_of_page);
        continue;
      }
      if ((is_leaf(mp) || i > 0)) {
        if (unlikely(ksize < mc->clc->k.lmin || ksize > mc->clc->k.lmax))
          rc = bad_page(mp, "node[%zu] key size (%zu) <> min/max key-length (%zu/%zu)\n", i, ksize, mc->clc->k.lmin,
                        mc->clc->k.lmax);
        if ((mc->checking & z_ignord) == 0) {
          here.iov_base = (void *)key;
          here.iov_len = ksize;
          if (prev.iov_base && unlikely(mc->clc->k.cmp(&prev, &here) >= 0))
            rc = bad_page(mp, "node[%zu] key wrong order (%s >= %s)\n", i, DKEY(&prev), DVAL(&here));
          prev = here;
        }
      }
      if (is_branch(mp)) {
        if ((mc->checking & z_updating) == 0 && i == 0 && unlikely(ksize != 0))
          rc = bad_page(mp, "branch-node[%zu] wrong 0-node key-length (%zu)\n", i, ksize);
        const pgno_t ref = node_pgno(node);
        if (unlikely(ref < MIN_PAGENO) || (unlikely(ref >= mc->txn->geo.first_unallocated) &&
                                           (unlikely(ref >= mc->txn->geo.now) || !(mc->checking & z_retiring))))
          rc = bad_page(mp, "branch-node[%zu] wrong pgno (%u)\n", i, ref);
        if (unlikely(node_flags(node)))
          rc = bad_page(mp, "branch-node[%zu] wrong flags (%u)\n", i, node_flags(node));
        continue;
      }
      switch (node_flags(node)) {
      default:
        rc = bad_page(mp, "invalid node[%zu] flags (%u)\n", i, node_flags(node));
        break;
      case N_BIG /* data on large-page */:
      case 0 /* usual */:
@@ -256,46 +213,36 @@ __cold int page_check(const MDBX_cursor *const mc, const page_t *const mp) {
      const char *const data = node_data(node);
      if (node_flags(node) & N_BIG) {
        if (unlikely(end_of_page < data + sizeof(pgno_t))) {
          rc = bad_page(mp, "node-%s(%zu of %zu, %zu bytes) beyond (%zu) page-end\n", "bigdata-pgno", i, nkeys, dsize,
                        data + dsize - end_of_page);
          continue;
        }
        if (unlikely(dsize <= v_clc.lmin || dsize > v_clc.lmax))
          rc = bad_page(mp, "big-node data size (%zu) <> min/max value-length (%zu/%zu)\n", dsize, v_clc.lmin,
                        v_clc.lmax);
        if (unlikely(node_size_len(node_ks(node), dsize) <= mc->txn->env->leaf_nodemax) &&
            mc->tree != &mc->txn->dbs[FREE_DBI])
          poor_page(mp, "too small data (%zu bytes) for bigdata-node", dsize);
        if ((mc->checking & z_retiring) == 0) {
          const pgr_t lp = page_get_large(mc, node_largedata_pgno(node), mp->txnid);
          if (unlikely(lp.err != MDBX_SUCCESS))
            return lp.err;
          cASSERT(mc, page_type(lp.page) == P_LARGE);
          const unsigned npages = largechunk_npages(env, dsize);
          if (unlikely(lp.page->pages != npages)) {
            if (lp.page->pages < npages)
              rc = bad_page(lp.page, "too less n-pages %u for bigdata-node (%zu bytes)", lp.page->pages, dsize);
            else if (mc->tree != &mc->txn->dbs[FREE_DBI])
              poor_page(lp.page, "extra n-pages %u for bigdata-node (%zu bytes)", lp.page->pages, dsize);
          }
        }
        continue;
      }
      if (unlikely(end_of_page < data + dsize)) {
        rc = bad_page(mp, "node-%s(%zu of %zu, %zu bytes) beyond (%zu) page-end\n", "data", i, nkeys, dsize,
                      data + dsize - end_of_page);
        continue;
      }
@@ -305,9 +252,7 @@ __cold int page_check(const MDBX_cursor *const mc, const page_t *const mp) {
        continue;
      case 0 /* usual */:
        if (unlikely(dsize < v_clc.lmin || dsize > v_clc.lmax)) {
          rc = bad_page(mp, "node-data size (%zu) <> min/max value-length (%zu/%zu)\n", dsize, v_clc.lmin, v_clc.lmax);
          continue;
        }
        break;
@@ -319,15 +264,13 @@ __cold int page_check(const MDBX_cursor *const mc, const page_t *const mp) {
        break;
      case N_TREE | N_DUP /* dupsorted sub-tree */:
        if (unlikely(dsize != sizeof(tree_t))) {
          rc = bad_page(mp, "invalid nested-db record size (%zu, expect %zu)\n", dsize, sizeof(tree_t));
          continue;
        }
        break;
      case N_DUP /* short sub-page */:
        if (unlikely(dsize <= PAGEHDRSZ)) {
          rc = bad_page(mp, "invalid nested/sub-page record size (%zu)\n", dsize);
          continue;
        } else {
          const page_t *const sp = (page_t *)data;
@@ -337,28 +280,23 @@ __cold int page_check(const MDBX_cursor *const mc, const page_t *const mp) {
          case P_LEAF | P_DUPFIX | P_SUBP:
            break;
          default:
            rc = bad_page(mp, "invalid nested/sub-page flags (0x%02x)\n", sp->flags);
            continue;
          }
          const char *const end_of_subpage = data + dsize;
          const intptr_t nsubkeys = page_numkeys(sp);
          if (unlikely(nsubkeys == 0) && !(mc->checking & z_updating) && mc->tree->items)
            rc = bad_page(mp, "no keys on a %s-page\n", is_dupfix_leaf(sp) ? "leaf2-sub" : "leaf-sub");
          MDBX_val sub_here, sub_prev = {0, 0};
          for (int ii = 0; ii < nsubkeys; ii++) {
            if (is_dupfix_leaf(sp)) {
              /* DUPFIX pages have no entries[] or node headers */
              const size_t sub_ksize = sp->dupfix_ksize;
              const char *const sub_key = page_dupfix_ptr(sp, ii, mc->tree->dupfix_size);
              if (unlikely(end_of_subpage < sub_key + sub_ksize)) {
                rc = bad_page(mp, "nested-leaf2-key beyond (%zu) nested-page\n", sub_key + sub_ksize - end_of_subpage);
                continue;
              }
@@ -374,24 +312,20 @@ __cold int page_check(const MDBX_cursor *const mc, const page_t *const mp) {
              if ((mc->checking & z_ignord) == 0) {
                sub_here.iov_base = (void *)sub_key;
                sub_here.iov_len = sub_ksize;
                if (sub_prev.iov_base && unlikely(v_clc.cmp(&sub_prev, &sub_here) >= 0))
                  rc = bad_page(mp, "nested-leaf2-key #%u wrong order (%s >= %s)\n", ii, DKEY(&sub_prev),
                                DVAL(&sub_here));
                sub_prev = sub_here;
              }
            } else {
              const node_t *const sub_node = page_node(sp, ii);
              const char *const sub_node_end = ptr_disp(sub_node, NODESIZE);
              if (unlikely(sub_node_end > end_of_subpage)) {
                rc = bad_page(mp, "nested-node beyond (%zu) nested-page\n", end_of_subpage - sub_node_end);
                continue;
              }
              if (unlikely(node_flags(sub_node) != 0))
                rc = bad_page(mp, "nested-node invalid flags (%u)\n", node_flags(sub_node));
              const size_t sub_ksize = node_ks(sub_node);
              const char *const sub_key = node_key(sub_node);
@@ -406,19 +340,15 @@ __cold int page_check(const MDBX_cursor *const mc, const page_t *const mp) {
              if ((mc->checking & z_ignord) == 0) {
                sub_here.iov_base = (void *)sub_key;
                sub_here.iov_len = sub_ksize;
                if (sub_prev.iov_base && unlikely(v_clc.cmp(&sub_prev, &sub_here) >= 0))
                  rc = bad_page(mp, "nested-node-key #%u wrong order (%s >= %s)\n", ii, DKEY(&sub_prev),
                                DVAL(&sub_here));
                sub_prev = sub_here;
              }
              if (unlikely(sub_dsize != 0))
                rc = bad_page(mp, "nested-node non-empty data size (%zu)\n", sub_dsize);
              if (unlikely(end_of_subpage < sub_key + sub_ksize))
                rc = bad_page(mp, "nested-node-key beyond (%zu) nested-page\n", sub_key + sub_ksize - end_of_subpage);
            }
          }
        }
@@ -429,9 +359,7 @@ __cold int page_check(const MDBX_cursor *const mc, const page_t *const mp) {
  return rc;
}
static __always_inline int check_page_header(const uint16_t ILL, const page_t *page, MDBX_txn *const txn,
                                             const txnid_t front) {
  if (unlikely(page->flags & ILL)) {
    if (ILL == P_ILL_BITS || (page->flags & P_ILL_BITS))
@@ -439,59 +367,44 @@ static __always_inline int check_page_header(const uint16_t ILL,
    else if (ILL & P_LARGE) {
      assert((ILL & (P_BRANCH | P_LEAF | P_DUPFIX)) == 0);
      assert(page->flags & (P_BRANCH | P_LEAF | P_DUPFIX));
      return bad_page(page, "unexpected %s instead of %s (%u)\n", "large/overflow", "branch/leaf/leaf2", page->flags);
    } else if (ILL & (P_BRANCH | P_LEAF | P_DUPFIX)) {
      assert((ILL & P_BRANCH) && (ILL & P_LEAF) && (ILL & P_DUPFIX));
      assert(page->flags & (P_BRANCH | P_LEAF | P_DUPFIX));
      return bad_page(page, "unexpected %s instead of %s (%u)\n", "branch/leaf/leaf2", "large/overflow", page->flags);
    } else {
      assert(false);
    }
  }
  if (unlikely(page->txnid > front) && unlikely(page->txnid > txn->front_txnid || front < txn->txnid))
    return bad_page(page, "invalid page' txnid (%" PRIaTXN ") for %s' txnid (%" PRIaTXN ")\n", page->txnid,
                    (front == txn->front_txnid && front != txn->txnid) ? "front-txn" : "parent-page", front);
  if (((ILL & P_LARGE) || !is_largepage(page)) && (ILL & (P_BRANCH | P_LEAF | P_DUPFIX)) == 0) {
    /* Checking the parity of page->upper here either produces false positives
     * or is too expensive in the number of operations. The subtlety is that
     * upper may be odd on DUPFIX pages when there is an odd number of
     * odd-length items. Therefore the parity of page->upper is not verified
     * here, while the corresponding full checks are done in page_check(). */
    if (unlikely(page->upper < page->lower || (page->lower & 1) || PAGEHDRSZ + page->upper > txn->env->ps))
      return bad_page(page, "invalid page' lower(%u)/upper(%u) with limit %zu\n", page->lower, page->upper,
                      page_space(txn->env));
  } else if ((ILL & P_LARGE) == 0) {
    const pgno_t npages = page->pages;
    if (unlikely(npages < 1) || unlikely(npages >= MAX_PAGENO / 2))
      return bad_page(page, "invalid n-pages (%u) for large-page\n", npages);
    if (unlikely(page->pgno + npages > txn->geo.first_unallocated))
      return bad_page(page, "end of large-page beyond (%u) allocated space (%u next-pgno)\n", page->pgno + npages,
                      txn->geo.first_unallocated);
  } else {
    assert(false);
  }
  return MDBX_SUCCESS;
}
__cold static __noinline pgr_t check_page_complete(const uint16_t ILL, page_t *page, const MDBX_cursor *const mc,
                                                   const txnid_t front) {
  pgr_t r = {page, check_page_header(ILL, page, mc->txn, front)};
  if (likely(r.err == MDBX_SUCCESS))
@@ -501,9 +414,7 @@ __cold static __noinline pgr_t check_page_complete(const uint16_t ILL,
  return r;
}
static __always_inline pgr_t page_get_inline(const uint16_t ILL, const MDBX_cursor *const mc, const pgno_t pgno,
                                             const txnid_t front) {
  MDBX_txn *const txn = mc->txn;
  tASSERT(txn, front <= txn->front_txnid);
@@ -527,8 +438,7 @@ static __always_inline pgr_t page_get_inline(const uint16_t ILL,
       * because the dirty list got full. Bring this page
       * back in from the map (but don't unspill it here,
       * leave that unless page_touch happens again). */
      if (unlikely(spiller->flags & MDBX_TXN_SPILLS) && spill_search(spiller, pgno))
        break;
      const size_t i = dpl_search(spiller, pgno);
@@ -543,9 +453,7 @@ static __always_inline pgr_t page_get_inline(const uint16_t ILL,
  }
  if (unlikely(r.page->pgno != pgno)) {
    r.err = bad_page(r.page, "pgno mismatch (%" PRIaPGNO ") != expected (%" PRIaPGNO ")\n", r.page->pgno, pgno);
    goto bailout;
  }
@@ -562,18 +470,14 @@ static __always_inline pgr_t page_get_inline(const uint16_t ILL,
  return r;
}
pgr_t page_get_any(const MDBX_cursor *const mc, const pgno_t pgno, const txnid_t front) {
  return page_get_inline(P_ILL_BITS, mc, pgno, front);
}
__hot pgr_t page_get_three(const MDBX_cursor *const mc, const pgno_t pgno, const txnid_t front) {
  return page_get_inline(P_ILL_BITS | P_LARGE, mc, pgno, front);
}
pgr_t page_get_large(const MDBX_cursor *const mc, const pgno_t pgno, const txnid_t front) {
  return page_get_inline(P_ILL_BITS | P_BRANCH | P_LEAF | P_DUPFIX, mc, pgno, front);
}
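The three page_get_* wrappers above differ only in which page-type bits they pass as "illegal" to page_get_inline. Below is a standalone sketch of that mask-filtering idea, not part of the commit; the MY_* flags are hypothetical, not the libmdbx definitions.

#include <stdint.h>
#include <stdio.h>

enum {
  MY_P_BRANCH = 1u << 0,
  MY_P_LEAF = 1u << 1,
  MY_P_DUPFIX = 1u << 2,
  MY_P_LARGE = 1u << 3,
  MY_P_BROKEN = 1u << 7 /* stand-in for the always-illegal bits */
};

/* Reject a fetched page whose type intersects the caller's illegal mask. */
static int check_fetched_type(uint16_t page_flags, uint16_t illegal_mask) {
  return (page_flags & illegal_mask) ? -1 : 0;
}

int main(void) {
  const uint16_t for_tree_walk = MY_P_BROKEN | MY_P_LARGE; /* analogous to page_get_three */
  const uint16_t for_large_only = MY_P_BROKEN | MY_P_BRANCH | MY_P_LEAF | MY_P_DUPFIX; /* analogous to page_get_large */
  printf("leaf via tree-walk getter: %d\n", check_fetched_type(MY_P_LEAF, for_tree_walk));     /* 0: accepted */
  printf("large via tree-walk getter: %d\n", check_fetched_type(MY_P_LARGE, for_tree_walk));   /* -1: rejected */
  printf("large via large-only getter: %d\n", check_fetched_type(MY_P_LARGE, for_large_only)); /* 0: accepted */
  return 0;
}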


@@ -3,17 +3,14 @@
#include "internals.h"
int iov_init(MDBX_txn *const txn, iov_ctx_t *ctx, size_t items, size_t npages, mdbx_filehandle_t fd,
             bool check_coherence) {
  ctx->env = txn->env;
  ctx->ior = &txn->env->ioring;
  ctx->fd = fd;
  ctx->coherency_timestamp =
      (check_coherence || txn->env->lck->pgops.incoherence.weak) ? 0 : UINT64_MAX /* don't run the coherency check */;
  ctx->err = osal_ioring_prepare(ctx->ior, items, pgno_align2os_bytes(txn->env, npages));
  if (likely(ctx->err == MDBX_SUCCESS)) {
#if MDBX_NEED_WRITTEN_RANGE
    ctx->flush_begin = MAX_PAGENO;
@@ -24,8 +21,7 @@ int iov_init(MDBX_txn *const txn, iov_ctx_t *ctx, size_t items, size_t npages,
  return ctx->err;
}
static void iov_callback4dirtypages(iov_ctx_t *ctx, size_t offset, void *data, size_t bytes) {
  MDBX_env *const env = ctx->env;
  eASSERT(env, (env->flags & MDBX_WRITEMAP) == 0);
@@ -89,19 +85,15 @@ static void iov_callback4dirtypages(iov_ctx_t *ctx, size_t offset, void *data,
#ifndef MDBX_FORCE_CHECK_MMAP_COHERENCY
#define MDBX_FORCE_CHECK_MMAP_COHERENCY 0
#endif /* MDBX_FORCE_CHECK_MMAP_COHERENCY */
  if ((MDBX_FORCE_CHECK_MMAP_COHERENCY || ctx->coherency_timestamp != UINT64_MAX) &&
      unlikely(memcmp(wp, rp, bytes))) {
    ctx->coherency_timestamp = 0;
    env->lck->pgops.incoherence.weak =
        (env->lck->pgops.incoherence.weak >= INT32_MAX) ? INT32_MAX : env->lck->pgops.incoherence.weak + 1;
    WARNING("catch delayed/non-arrived page %" PRIaPGNO " %s", wp->pgno,
            "(workaround for incoherent flaw of unified page/buffer cache)");
    do
      if (coherency_timeout(&ctx->coherency_timestamp, wp->pgno, env) != MDBX_RESULT_TRUE) {
        ctx->err = MDBX_PROBLEM;
        break;
      }
@@ -160,8 +152,7 @@ int iov_page(MDBX_txn *txn, iov_ctx_t *ctx, page_t *dp, size_t npages) {
#if MDBX_AVOID_MSYNC
doit:;
#endif /* MDBX_AVOID_MSYNC */
  int err = osal_ioring_add(ctx->ior, pgno2bytes(env, dp->pgno), dp, pgno2bytes(env, npages));
  if (unlikely(err != MDBX_SUCCESS)) {
    ctx->err = err;
    if (unlikely(err != MDBX_RESULT_TRUE)) {
@@ -171,8 +162,7 @@ int iov_page(MDBX_txn *txn, iov_ctx_t *ctx, page_t *dp, size_t npages) {
      err = iov_write(ctx);
      tASSERT(txn, iov_empty(ctx));
      if (likely(err == MDBX_SUCCESS)) {
        err = osal_ioring_add(ctx->ior, pgno2bytes(env, dp->pgno), dp, pgno2bytes(env, npages));
        if (unlikely(err != MDBX_SUCCESS)) {
          iov_complete(ctx);
          return ctx->err = err;
@@ -188,11 +178,8 @@ int iov_page(MDBX_txn *txn, iov_ctx_t *ctx, page_t *dp, size_t npages) {
  }
#if MDBX_NEED_WRITTEN_RANGE
  ctx->flush_begin = (ctx->flush_begin < dp->pgno) ? ctx->flush_begin : dp->pgno;
  ctx->flush_end = (ctx->flush_end > dp->pgno + (pgno_t)npages) ? ctx->flush_end : dp->pgno + (pgno_t)npages;
#endif /* MDBX_NEED_WRITTEN_RANGE */
  return MDBX_SUCCESS;
}
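When osal_ioring_add reports that the ring is full (the MDBX_RESULT_TRUE branch in iov_page above), the batch is drained with iov_write and the page is re-added once. Here is a simplified standalone sketch of that batch-then-retry pattern, not part of the commit; the ring_* names are hypothetical and are not the libmdbx API.

#include <stddef.h>
#include <stdio.h>

enum { OK = 0, RING_FULL = 1 };

typedef struct ring {
  size_t used, capacity;
} ring_t;

static int ring_add(ring_t *r, size_t item) {
  (void)item;
  if (r->used == r->capacity)
    return RING_FULL; /* caller must flush and retry */
  r->used += 1;
  return OK;
}

static int ring_flush(ring_t *r) {
  r->used = 0; /* pretend the batched writes were submitted here */
  return OK;
}

static int submit(ring_t *r, size_t item) {
  int err = ring_add(r, item);
  if (err == RING_FULL) {
    err = ring_flush(r); /* drain the full batch first */
    if (err == OK)
      err = ring_add(r, item); /* then retry exactly once */
  }
  return err;
}

int main(void) {
  ring_t r = {0, 2};
  for (size_t pgno = 1; pgno <= 5; ++pgno)
    printf("page %zu -> %d\n", pgno, submit(&r, pgno));
  return 0;
}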


@@ -24,15 +24,11 @@ struct iov_ctx {
  uint64_t coherency_timestamp;
};
MDBX_INTERNAL __must_check_result int iov_init(MDBX_txn *const txn, iov_ctx_t *ctx, size_t items, size_t npages,
                                               mdbx_filehandle_t fd, bool check_coherence);
static inline bool iov_empty(const iov_ctx_t *ctx) { return osal_ioring_used(ctx->ior) == 0; }
MDBX_INTERNAL __must_check_result int iov_page(MDBX_txn *txn, iov_ctx_t *ctx, page_t *dp, size_t npages);
MDBX_INTERNAL __must_check_result int iov_write(iov_ctx_t *ctx);


@@ -42,13 +42,11 @@ pgr_t page_new(MDBX_cursor *mc, const unsigned flags) {
}

pgr_t page_new_large(MDBX_cursor *mc, const size_t npages) {
  pgr_t ret = likely(npages == 1) ? gc_alloc_single(mc) : gc_alloc_ex(mc, npages, ALLOC_DEFAULT);
  if (unlikely(ret.err != MDBX_SUCCESS))
    return ret;
  DEBUG("dbi %zu allocated new large-page %" PRIaPGNO ", num %zu", cursor_dbi(mc), ret.page->pgno, npages);
  ret.page->flags = P_LARGE;
  cASSERT(mc, *cursor_dbi_state(mc) & DBI_DIRTY);
  cASSERT(mc, mc->txn->flags & MDBX_TXN_DIRTY);
@@ -62,8 +60,7 @@ pgr_t page_new_large(MDBX_cursor *mc, const size_t npages) {
  return ret;
}

__hot void page_copy(page_t *const dst, const page_t *const src, const size_t size) {
  STATIC_ASSERT(UINT16_MAX > MDBX_MAX_PAGESIZE - PAGEHDRSZ);
  STATIC_ASSERT(MDBX_MIN_PAGESIZE > PAGEHDRSZ + NODESIZE * 4);
  void *copy_dst = dst;
@@ -94,17 +91,14 @@ __hot void page_copy(page_t *const dst, const page_t *const src,
bailout:
  if (src->flags & P_DUPFIX)
    bad_page(src, "%s addr %p, n-keys %zu, ksize %u", "invalid/corrupted source page", __Wpedantic_format_voidptr(src),
             page_numkeys(src), src->dupfix_ksize);
  else
    bad_page(src, "%s addr %p, upper %u", "invalid/corrupted source page", __Wpedantic_format_voidptr(src), src->upper);
  memset(dst, -1, size);
}

__cold pgr_t __must_check_result page_unspill(MDBX_txn *const txn, const page_t *const mp) {
  VERBOSE("unspill page %" PRIaPGNO, mp->pgno);
  tASSERT(txn, (txn->flags & MDBX_WRITEMAP) == 0);
  tASSERT(txn, is_spilled(txn, mp));
@@ -139,13 +133,11 @@ __cold pgr_t __must_check_result page_unspill(MDBX_txn *const txn,
      ret.page->flags |= (scan == txn) ? 0 : P_SPILLED;
      ret.err = MDBX_SUCCESS;
      return ret;
  } while (likely((scan = scan->parent) != nullptr && (scan->flags & MDBX_TXN_SPILLS) != 0));
  ERROR("Page %" PRIaPGNO " mod-txnid %" PRIaTXN " not found in the spill-list(s), current txn %" PRIaTXN
        " front %" PRIaTXN ", root txn %" PRIaTXN " front %" PRIaTXN,
        mp->pgno, mp->txnid, txn->txnid, txn->front_txnid, txn->env->basal_txn->txnid,
        txn->env->basal_txn->front_txnid);
  ret.err = MDBX_PROBLEM;
  ret.page = nullptr;
  return ret;
@@ -157,8 +149,7 @@ __hot int page_touch_modifable(MDBX_txn *txn, const page_t *const mp) {
  tASSERT(txn, (txn->flags & MDBX_WRITEMAP) == 0 || MDBX_AVOID_MSYNC);
  const size_t n = dpl_search(txn, mp->pgno);
  if (MDBX_AVOID_MSYNC && unlikely(txn->tw.dirtylist->items[n].pgno != mp->pgno)) {
    tASSERT(txn, (txn->flags & MDBX_WRITEMAP));
    tASSERT(txn, n > 0 && n <= txn->tw.dirtylist->length + 1);
    VERBOSE("unspill page %" PRIaPGNO, mp->pgno);
@@ -169,18 +160,15 @@ __hot int page_touch_modifable(MDBX_txn *txn, const page_t *const mp) {
  }
  tASSERT(txn, n > 0 && n <= txn->tw.dirtylist->length);
  tASSERT(txn, txn->tw.dirtylist->items[n].pgno == mp->pgno && txn->tw.dirtylist->items[n].ptr == mp);
  if (!MDBX_AVOID_MSYNC || (txn->flags & MDBX_WRITEMAP) == 0) {
    size_t *const ptr = ptr_disp(txn->tw.dirtylist->items[n].ptr, -(ptrdiff_t)sizeof(size_t));
    *ptr = txn->tw.dirtylru;
  }
  return MDBX_SUCCESS;
}

__hot int page_touch_unmodifable(MDBX_txn *txn, MDBX_cursor *mc, const page_t *const mp) {
  tASSERT(txn, !is_modifable(txn, mp) && !is_largepage(mp));
  if (is_subpage(mp)) {
    ((page_t *)mp)->txnid = txn->front_txnid;
@@ -201,8 +189,7 @@ __hot int page_touch_unmodifable(MDBX_txn *txn, MDBX_cursor *mc,
      goto fail;
    const pgno_t pgno = np->pgno;
    DEBUG("touched db %d page %" PRIaPGNO " -> %" PRIaPGNO, cursor_dbi_dbg(mc), mp->pgno, pgno);
    tASSERT(txn, mp->pgno != pgno);
    pnl_append_prereserved(txn->tw.retired_pages, mp->pgno);
    /* Update the parent page, if any, to point to the new page */
@@ -233,17 +220,14 @@ __hot int page_touch_unmodifable(MDBX_txn *txn, MDBX_cursor *mc,
    if (unlikely(!txn->parent)) {
      ERROR("Unexpected not frozen/modifiable/spilled but shadowed %s "
            "page %" PRIaPGNO " mod-txnid %" PRIaTXN ","
            " without parent transaction, current txn %" PRIaTXN " front %" PRIaTXN,
            is_branch(mp) ? "branch" : "leaf", mp->pgno, mp->txnid, mc->txn->txnid, mc->txn->front_txnid);
      rc = MDBX_PROBLEM;
      goto fail;
    }
    DEBUG("clone db %d page %" PRIaPGNO, cursor_dbi_dbg(mc), mp->pgno);
    tASSERT(txn, txn->tw.dirtylist->length <= PAGELIST_LIMIT + MDBX_PNL_GRANULATE);
    /* No - copy it */
    np = page_shadow_alloc(txn, 1);
    if (unlikely(!np)) {
@@ -299,8 +283,7 @@ page_t *page_shadow_alloc(MDBX_txn *txn, size_t num) {
  if (likely(num == 1 && np)) {
    eASSERT(env, env->shadow_reserve_len > 0);
    MDBX_ASAN_UNPOISON_MEMORY_REGION(np, size);
    VALGRIND_MEMPOOL_ALLOC(env, ptr_disp(np, -(ptrdiff_t)sizeof(size_t)), size + sizeof(size_t));
    VALGRIND_MAKE_MEM_DEFINED(&page_next(np), sizeof(page_t *));
    env->shadow_reserve = page_next(np);
    env->shadow_reserve_len -= 1;
@@ -338,8 +321,7 @@ void page_shadow_release(MDBX_env *env, page_t *dp, size_t npages) {
  MDBX_ASAN_UNPOISON_MEMORY_REGION(dp, pgno2bytes(env, npages));
  if (unlikely(env->flags & MDBX_PAGEPERTURB))
    memset(dp, -1, pgno2bytes(env, npages));
  if (likely(npages == 1 && env->shadow_reserve_len < env->options.dp_reserve_limit)) {
    MDBX_ASAN_POISON_MEMORY_REGION(dp, env->ps);
    MDBX_ASAN_UNPOISON_MEMORY_REGION(&page_next(dp), sizeof(page_t *));
    page_next(dp) = env->shadow_reserve;
@@ -354,8 +336,7 @@ void page_shadow_release(MDBX_env *env, page_t *dp, size_t npages) {
  }
}

__cold static void page_kill(MDBX_txn *txn, page_t *mp, pgno_t pgno, size_t npages) {
  MDBX_env *const env = txn->env;
  DEBUG("kill %zu page(s) %" PRIaPGNO, npages, pgno);
  eASSERT(env, pgno >= NUM_METAS && npages);
@@ -391,8 +372,7 @@ static inline bool suitable4loose(const MDBX_txn *txn, pgno_t pgno) {
  return txn->tw.loose_count < txn->env->options.dp_loose_limit &&
         (!MDBX_ENABLE_REFUND ||
          /* skip pages near to the end in favor of compactification */
          txn->geo.first_unallocated > pgno + txn->env->options.dp_loose_limit ||
          txn->geo.first_unallocated <= txn->env->options.dp_loose_limit);
}
@@ -404,8 +384,7 @@ static inline bool suitable4loose(const MDBX_txn *txn, pgno_t pgno) {
 *
 * If the page wasn't dirtied in this txn, just add it
 * to this txn's free list. */
int page_retire_ex(MDBX_cursor *mc, const pgno_t pgno, page_t *mp /* maybe null */,
                   unsigned pageflags /* maybe unknown/zero */) {
  int rc;
  MDBX_txn *const txn = mc->txn;
@@ -423,13 +402,7 @@ int page_retire_ex(MDBX_cursor *mc, const pgno_t pgno,
   * So for flexibility and avoid extra internal dependencies we just
   * fallback to reading if dirty list was not allocated yet. */
  size_t di = 0, si = 0, npages = 1;
  enum page_status { unknown, frozen, spilled, shadowed, modifable } status = unknown;

  if (unlikely(!mp)) {
    if (ASSERT_ENABLED() && pageflags) {
@@ -437,8 +410,7 @@ int page_retire_ex(MDBX_cursor *mc, const pgno_t pgno,
      check = page_get_any(mc, pgno, txn->front_txnid);
      if (unlikely(check.err != MDBX_SUCCESS))
        return check.err;
      tASSERT(txn, ((unsigned)check.page->flags & ~P_SPILLED) == (pageflags & ~P_FROZEN));
      tASSERT(txn, !(pageflags & P_FROZEN) || is_frozen(txn, check.page));
    }
    if (pageflags & P_FROZEN) {
@@ -540,8 +512,7 @@ status_done:
  /* Возврат страниц в нераспределенный "хвост" БД.
   * Содержимое страниц не уничтожается, а для вложенных транзакций граница
   * нераспределенного "хвоста" БД сдвигается только при их коммите. */
  if (MDBX_ENABLE_REFUND && unlikely(pgno + npages == txn->geo.first_unallocated)) {
    const char *kind = nullptr;
    if (status == modifable) {
      /* Страница испачкана в этой транзакции, но до этого могла быть
@@ -589,8 +560,7 @@ status_done:
  if (status == modifable) {
    /* Dirty page from this transaction */
    /* If suitable we can reuse it through loose list */
    if (likely(npages == 1 && suitable4loose(txn, pgno)) && (di || !txn->tw.dirtylist)) {
      DEBUG("loosen dirty page %" PRIaPGNO, pgno);
      if (MDBX_DEBUG != 0 || unlikely(txn->env->flags & MDBX_PAGEPERTURB))
        memset(page_data(mp), -1, txn->env->ps - PAGEHDRSZ);
@@ -600,9 +570,7 @@ status_done:
      txn->tw.loose_pages = mp;
      txn->tw.loose_count++;
#if MDBX_ENABLE_REFUND
      txn->tw.loose_refund_wl = (pgno + 2 > txn->tw.loose_refund_wl) ? pgno + 2 : txn->tw.loose_refund_wl;
#endif /* MDBX_ENABLE_REFUND */
      VALGRIND_MAKE_MEM_NOACCESS(page_data(mp), txn->env->ps - PAGEHDRSZ);
      MDBX_ASAN_POISON_MEMORY_REGION(page_data(mp), txn->env->ps - PAGEHDRSZ);
@@ -617,9 +585,7 @@ status_done:
       * в том числе, позже выгружена и затем снова загружена и изменена.
       * В обоих случаях её нельзя затирать на диске и помечать недоступной
       * в asan и/или valgrind */
      for (MDBX_txn *parent = txn->parent; parent && (parent->flags & MDBX_TXN_SPILLS); parent = parent->parent) {
        if (spill_intersect(parent, pgno, npages))
          goto skip_invalidate;
        if (dpl_intersect(parent, pgno, npages))
@@ -631,11 +597,8 @@ status_done:
#endif
      page_kill(txn, mp, pgno, npages);
      if ((txn->flags & MDBX_WRITEMAP) == 0) {
        VALGRIND_MAKE_MEM_NOACCESS(page_data(pgno2page(txn->env, pgno)), pgno2bytes(txn->env, npages) - PAGEHDRSZ);
        MDBX_ASAN_POISON_MEMORY_REGION(page_data(pgno2page(txn->env, pgno)), pgno2bytes(txn->env, npages) - PAGEHDRSZ);
      }
    }
  skip_invalidate:
@@ -646,9 +609,7 @@ status_done:
  reclaim:
    DEBUG("reclaim %zu %s page %" PRIaPGNO, npages, "dirty", pgno);
    rc = pnl_insert_span(&txn->tw.relist, pgno, npages);
    tASSERT(txn, pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
    tASSERT(txn, dpl_check(txn));
    return rc;
  }
@@ -675,8 +636,7 @@ status_done:
    if (ASSERT_ENABLED()) {
      const page_t *parent_dp = nullptr;
      /* Check parent(s)'s dirty lists. */
      for (MDBX_txn *parent = txn->parent; parent && !parent_dp; parent = parent->parent) {
        tASSERT(txn, !spill_search(parent, pgno));
        parent_dp = debug_dpl_find(parent, pgno);
      }
@@ -697,8 +657,7 @@ status_done:
  goto retire;
}

__hot int __must_check_result page_dirty(MDBX_txn *txn, page_t *mp, size_t npages) {
  tASSERT(txn, (txn->flags & MDBX_TXN_RDONLY) == 0);
  mp->txnid = txn->front_txnid;
  if (!txn->tw.dirtylist) {
@@ -756,37 +715,27 @@ void recalculate_subpage_thresholds(MDBX_env *env) {
  size_t whole = env->leaf_nodemax - NODESIZE;
  env->subpage_limit = (whole * env->options.subpage.limit + 32767) >> 16;
  whole = env->subpage_limit;
  env->subpage_reserve_limit = (whole * env->options.subpage.reserve_limit + 32767) >> 16;
  eASSERT(env, env->leaf_nodemax >= env->subpage_limit + NODESIZE);
  eASSERT(env, env->subpage_limit >= env->subpage_reserve_limit);

  whole = env->leaf_nodemax;
  env->subpage_room_threshold = (whole * env->options.subpage.room_threshold + 32767) >> 16;
  env->subpage_reserve_prereq = (whole * env->options.subpage.reserve_prereq + 32767) >> 16;
  if (env->subpage_room_threshold + env->subpage_reserve_limit > (intptr_t)page_space(env))
    env->subpage_reserve_prereq = page_space(env);
  else if (env->subpage_reserve_prereq < env->subpage_room_threshold + env->subpage_reserve_limit)
    env->subpage_reserve_prereq = env->subpage_room_threshold + env->subpage_reserve_limit;
  eASSERT(env, env->subpage_reserve_prereq > env->subpage_room_threshold + env->subpage_reserve_limit);
}

size_t page_subleaf2_reserve(const MDBX_env *env, size_t host_page_room, size_t subpage_len, size_t item_len) {
  eASSERT(env, (subpage_len & 1) == 0);
  eASSERT(env, env->leaf_nodemax >= env->subpage_limit + NODESIZE);
  size_t reserve = 0;
  for (size_t n = 0; n < 5 && reserve + item_len <= env->subpage_reserve_limit &&
                     EVEN_CEIL(subpage_len + item_len) <= env->subpage_limit &&
                     host_page_room >= env->subpage_reserve_prereq + EVEN_CEIL(subpage_len + item_len);
       ++n) {
    subpage_len += item_len;
    reserve += item_len;
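The threshold math in recalculate_subpage_thresholds() above is plain Q16 fixed point: each option value is a fraction of 65536, and (whole * option + 32767) >> 16 scales "whole" by that fraction with rounding to nearest. A self-contained worked example; the concrete numbers (leaf_nodemax, NODESIZE, the 0.5 fraction) are invented for illustration only:

/* Q16 scaling as used above; values are illustrative assumptions, not libmdbx constants. */
#include <stdio.h>

static size_t q16_scale(size_t whole, unsigned option_q16) {
  return (whole * option_q16 + 32767) >> 16; /* round-to-nearest multiply by option/65536 */
}

int main(void) {
  size_t leaf_nodemax = 4072;         /* hypothetical value for a 4K page */
  size_t whole = leaf_nodemax - 8;    /* NODESIZE assumed to be 8 here    */
  unsigned half_q16 = 32768;          /* 0.5 expressed in Q16             */
  printf("subpage_limit = %zu\n", q16_scale(whole, half_q16)); /* prints 2032 */
  return 0;
}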

@@ -5,9 +5,7 @@
#include "essentials.h"

MDBX_INTERNAL int __must_check_result tree_search_finalize(MDBX_cursor *mc, const MDBX_val *key, int flags);

MDBX_INTERNAL int tree_search_lowest(MDBX_cursor *mc);

enum page_search_flags {
@@ -16,64 +14,47 @@ enum page_search_flags {
  Z_FIRST = 4,
  Z_LAST = 8,
};

MDBX_INTERNAL int __must_check_result tree_search(MDBX_cursor *mc, const MDBX_val *key, int flags);

#define MDBX_SPLIT_REPLACE MDBX_APPENDDUP /* newkey is not new */
MDBX_INTERNAL int __must_check_result page_split(MDBX_cursor *mc, const MDBX_val *const newkey, MDBX_val *const newdata,
                                                 pgno_t newpgno, const unsigned naf);

/*----------------------------------------------------------------------------*/

MDBX_INTERNAL int MDBX_PRINTF_ARGS(2, 3) bad_page(const page_t *mp, const char *fmt, ...);
MDBX_INTERNAL void MDBX_PRINTF_ARGS(2, 3) poor_page(const page_t *mp, const char *fmt, ...);

MDBX_NOTHROW_PURE_FUNCTION static inline bool is_frozen(const MDBX_txn *txn, const page_t *mp) {
  return mp->txnid < txn->txnid;
}

MDBX_NOTHROW_PURE_FUNCTION static inline bool is_spilled(const MDBX_txn *txn, const page_t *mp) {
  return mp->txnid == txn->txnid;
}

MDBX_NOTHROW_PURE_FUNCTION static inline bool is_shadowed(const MDBX_txn *txn, const page_t *mp) {
  return mp->txnid > txn->txnid;
}

MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline bool is_correct(const MDBX_txn *txn, const page_t *mp) {
  return mp->txnid <= txn->front_txnid;
}

MDBX_NOTHROW_PURE_FUNCTION static inline bool is_modifable(const MDBX_txn *txn, const page_t *mp) {
  return mp->txnid == txn->front_txnid;
}

MDBX_INTERNAL int __must_check_result page_check(const MDBX_cursor *const mc, const page_t *const mp);

MDBX_INTERNAL pgr_t page_get_any(const MDBX_cursor *const mc, const pgno_t pgno, const txnid_t front);
MDBX_INTERNAL pgr_t page_get_three(const MDBX_cursor *const mc, const pgno_t pgno, const txnid_t front);
MDBX_INTERNAL pgr_t page_get_large(const MDBX_cursor *const mc, const pgno_t pgno, const txnid_t front);

static inline int __must_check_result page_get(const MDBX_cursor *mc, const pgno_t pgno, page_t **mp,
                                               const txnid_t front) {
  pgr_t ret = page_get_three(mc, pgno, front);
  *mp = ret.page;
@@ -82,21 +63,18 @@ static inline int __must_check_result page_get(const MDBX_cursor *mc,
/*----------------------------------------------------------------------------*/

MDBX_INTERNAL int __must_check_result page_dirty(MDBX_txn *txn, page_t *mp, size_t npages);
MDBX_INTERNAL pgr_t page_new(MDBX_cursor *mc, const unsigned flags);
MDBX_INTERNAL pgr_t page_new_large(MDBX_cursor *mc, const size_t npages);
MDBX_INTERNAL int page_touch_modifable(MDBX_txn *txn, const page_t *const mp);
MDBX_INTERNAL int page_touch_unmodifable(MDBX_txn *txn, MDBX_cursor *mc, const page_t *const mp);

static inline int page_touch(MDBX_cursor *mc) {
  page_t *const mp = mc->pg[mc->top];
  MDBX_txn *txn = mc->txn;
  tASSERT(txn, mc->txn->flags & MDBX_TXN_DIRTY);
  tASSERT(txn, F_ISSET(*cursor_dbi_state(mc), DBI_LINDO | DBI_VALID | DBI_DIRTY));
  tASSERT(txn, !is_largepage(mp));
  if (ASSERT_ENABLED()) {
    if (mc->flags & z_inner) {
@@ -119,40 +97,31 @@ static inline int page_touch(MDBX_cursor *mc) {
  return page_touch_unmodifable(txn, mc, mp);
}

MDBX_INTERNAL void page_copy(page_t *const dst, const page_t *const src, const size_t size);
MDBX_INTERNAL pgr_t __must_check_result page_unspill(MDBX_txn *const txn, const page_t *const mp);

MDBX_INTERNAL page_t *page_shadow_alloc(MDBX_txn *txn, size_t num);
MDBX_INTERNAL void page_shadow_release(MDBX_env *env, page_t *dp, size_t npages);

MDBX_INTERNAL int page_retire_ex(MDBX_cursor *mc, const pgno_t pgno, page_t *mp /* maybe null */,
                                 unsigned pageflags /* maybe unknown/zero */);

static inline int page_retire(MDBX_cursor *mc, page_t *mp) { return page_retire_ex(mc, mp->pgno, mp, mp->flags); }

static inline void page_wash(MDBX_txn *txn, size_t di, page_t *const mp, const size_t npages) {
  tASSERT(txn, (txn->flags & MDBX_TXN_RDONLY) == 0);
  mp->txnid = INVALID_TXNID;
  mp->flags = P_BAD;
  if (txn->tw.dirtylist) {
    tASSERT(txn, (txn->flags & MDBX_WRITEMAP) == 0 || MDBX_AVOID_MSYNC);
    tASSERT(txn, MDBX_AVOID_MSYNC || (di && txn->tw.dirtylist->items[di].ptr == mp));
    if (!MDBX_AVOID_MSYNC || di) {
      dpl_remove_ex(txn, di, npages);
      txn->tw.dirtyroom++;
      tASSERT(txn, txn->tw.dirtyroom + txn->tw.dirtylist->length ==
                       (txn->parent ? txn->parent->tw.dirtyroom : txn->env->options.dp_limit));
      if (!MDBX_AVOID_MSYNC || !(txn->flags & MDBX_WRITEMAP)) {
        page_shadow_release(txn->env, mp, npages);
        return;
@@ -160,20 +129,14 @@ static inline void page_wash(MDBX_txn *txn, size_t di, page_t *const mp,
    }
  } else {
    tASSERT(txn, (txn->flags & MDBX_WRITEMAP) && !MDBX_AVOID_MSYNC && !di);
    txn->tw.writemap_dirty_npages -= (txn->tw.writemap_dirty_npages > npages) ? npages : txn->tw.writemap_dirty_npages;
  }
  VALGRIND_MAKE_MEM_UNDEFINED(mp, PAGEHDRSZ);
  VALGRIND_MAKE_MEM_NOACCESS(page_data(mp), pgno2bytes(txn->env, npages) - PAGEHDRSZ);
  MDBX_ASAN_POISON_MEMORY_REGION(page_data(mp), pgno2bytes(txn->env, npages) - PAGEHDRSZ);
}

MDBX_INTERNAL size_t page_subleaf2_reserve(const MDBX_env *env, size_t host_page_room, size_t subpage_len,
                                           size_t item_len);

#define page_next(mp) (*(page_t **)ptr_disp((mp)->entries, sizeof(void *) - sizeof(uint32_t)))
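The is_frozen()/is_spilled()/is_shadowed()/is_modifable() predicates above classify a page purely by comparing its mod-txnid with the current transaction's txnid and front_txnid. A tiny worked illustration of just those comparisons; the txnid values are invented for the example:

/* Invented txnids purely to illustrate the comparisons used by the predicates above. */
#include <assert.h>
#include <stdint.h>
static void txnid_classification_example(void) {
  const uint64_t page_txnid = 41, txn_txnid = 42, front_txnid = 43;
  assert(page_txnid < txn_txnid);       /* is_frozen() would be true for this page     */
  assert(!(page_txnid == txn_txnid));   /* is_spilled() compares against txn->txnid    */
  assert(!(page_txnid > txn_txnid));    /* is_shadowed() is the opposite comparison    */
  assert(!(page_txnid == front_txnid)); /* is_modifable() compares against front_txnid */
}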

@@ -54,14 +54,11 @@ __hot int tree_search(MDBX_cursor *mc, const MDBX_val *key, int flags) {
  cASSERT(mc, root >= NUM_METAS && root < mc->txn->geo.first_unallocated);
  if (mc->top < 0 || mc->pg[0]->pgno != root) {
    txnid_t pp_txnid = mc->tree->mod_txnid;
    pp_txnid = /* tree->mod_txnid maybe zero in a legacy DB */ pp_txnid ? pp_txnid : mc->txn->txnid;
    if ((mc->txn->flags & MDBX_TXN_RDONLY) == 0) {
      MDBX_txn *scan = mc->txn;
      do
        if ((scan->flags & MDBX_TXN_DIRTY) && (dbi == MAIN_DBI || (scan->dbi_state[dbi] & DBI_DIRTY))) {
          /* После коммита вложенных тразакций может быть mod_txnid > front */
          pp_txnid = scan->front_txnid;
          break;
@@ -75,8 +72,7 @@ __hot int tree_search(MDBX_cursor *mc, const MDBX_val *key, int flags) {
  mc->top = 0;
  mc->ki[0] = (flags & Z_LAST) ? page_numkeys(mc->pg[0]) - 1 : 0;
  DEBUG("db %d root page %" PRIaPGNO " has flags 0x%X", cursor_dbi_dbg(mc), root, mc->pg[0]->flags);
  if (flags & Z_MODIFY) {
    err = page_touch(mc);
@@ -90,8 +86,7 @@ __hot int tree_search(MDBX_cursor *mc, const MDBX_val *key, int flags) {
  return tree_search_finalize(mc, key, flags);
}

__hot __noinline int tree_search_finalize(MDBX_cursor *mc, const MDBX_val *key, int flags) {
  cASSERT(mc, !is_poor(mc));
  DKBUF_DEBUG;
  int err;
@@ -128,16 +123,14 @@ __hot __noinline int tree_search_finalize(MDBX_cursor *mc, const MDBX_val *key,
    }
    if (!MDBX_DISABLE_VALIDATION && unlikely(!check_leaf_type(mc, mp))) {
      ERROR("unexpected leaf-page #%" PRIaPGNO " type 0x%x seen by cursor", mp->pgno, mp->flags);
      err = MDBX_CORRUPTED;
    bailout:
      be_poor(mc);
      return err;
    }
    DEBUG("found leaf page %" PRIaPGNO " for key [%s]", mp->pgno, DKEY_DEBUG(key));
    /* Логически верно, но (в текущем понимании) нет необходимости.
       Однако, стоит ещё по-проверять/по-тестировать.
       Возможно есть сценарий, в котором очистка флагов всё-таки требуется.

@@ -25,14 +25,11 @@ MDBX_INTERNAL void pnl_free(pnl_t pnl) {
MDBX_INTERNAL void pnl_shrink(pnl_t __restrict *__restrict ppnl) {
  assert(pnl_bytes2size(pnl_size2bytes(MDBX_PNL_INITIAL)) >= MDBX_PNL_INITIAL &&
         pnl_bytes2size(pnl_size2bytes(MDBX_PNL_INITIAL)) < MDBX_PNL_INITIAL * 3 / 2);
  assert(MDBX_PNL_GETSIZE(*ppnl) <= PAGELIST_LIMIT && MDBX_PNL_ALLOCLEN(*ppnl) >= MDBX_PNL_GETSIZE(*ppnl));
  MDBX_PNL_SETSIZE(*ppnl, 0);
  if (unlikely(MDBX_PNL_ALLOCLEN(*ppnl) >
               MDBX_PNL_INITIAL * (MDBX_PNL_PREALLOC_FOR_RADIXSORT ? 8 : 4) - MDBX_CACHELINE_SIZE / sizeof(pgno_t))) {
    size_t bytes = pnl_size2bytes(MDBX_PNL_INITIAL * 2);
    pnl_t pnl = osal_realloc(*ppnl - 1, bytes);
    if (likely(pnl)) {
@@ -45,11 +42,9 @@ MDBX_INTERNAL void pnl_shrink(pnl_t __restrict *__restrict ppnl) {
  }
}

MDBX_INTERNAL int pnl_reserve(pnl_t __restrict *__restrict ppnl, const size_t wanna) {
  const size_t allocated = MDBX_PNL_ALLOCLEN(*ppnl);
  assert(MDBX_PNL_GETSIZE(*ppnl) <= PAGELIST_LIMIT && MDBX_PNL_ALLOCLEN(*ppnl) >= MDBX_PNL_GETSIZE(*ppnl));
  if (likely(allocated >= wanna))
    return MDBX_SUCCESS;
@@ -58,9 +53,7 @@ MDBX_INTERNAL int pnl_reserve(pnl_t __restrict *__restrict ppnl,
    return MDBX_TXN_FULL;
  }
  const size_t size = (wanna + wanna - allocated < PAGELIST_LIMIT) ? wanna + wanna - allocated : PAGELIST_LIMIT;
  size_t bytes = pnl_size2bytes(size);
  pnl_t pnl = osal_realloc(*ppnl - 1, bytes);
  if (likely(pnl)) {
@@ -75,8 +68,8 @@ MDBX_INTERNAL int pnl_reserve(pnl_t __restrict *__restrict ppnl,
  return MDBX_ENOMEM;
}

static __always_inline int __must_check_result pnl_append_stepped(unsigned step, __restrict pnl_t *ppnl, pgno_t pgno,
                                                                   size_t n) {
  assert(n > 0);
  int rc = pnl_need(ppnl, n);
  if (unlikely(rc != MDBX_SUCCESS))
@@ -106,18 +99,15 @@ static __always_inline int __must_check_result pnl_append_stepped(
  return MDBX_SUCCESS;
}

__hot MDBX_INTERNAL int __must_check_result spill_append_span(__restrict pnl_t *ppnl, pgno_t pgno, size_t n) {
  return pnl_append_stepped(2, ppnl, pgno << 1, n);
}

__hot MDBX_INTERNAL int __must_check_result pnl_append_span(__restrict pnl_t *ppnl, pgno_t pgno, size_t n) {
  return pnl_append_stepped(1, ppnl, pgno, n);
}

__hot MDBX_INTERNAL int __must_check_result pnl_insert_span(__restrict pnl_t *ppnl, pgno_t pgno, size_t n) {
  assert(n > 0);
  int rc = pnl_need(ppnl, n);
  if (unlikely(rc != MDBX_SUCCESS))
@@ -135,8 +125,7 @@ pnl_insert_span(__restrict pnl_t *ppnl, pgno_t pgno, size_t n) {
  return MDBX_SUCCESS;
}

__hot __noinline MDBX_INTERNAL bool pnl_check(const const_pnl_t pnl, const size_t limit) {
  assert(limit >= MIN_PAGENO - MDBX_ENABLE_REFUND);
  if (likely(MDBX_PNL_GETSIZE(pnl))) {
    if (unlikely(MDBX_PNL_GETSIZE(pnl) > PAGELIST_LIMIT))
@@ -146,8 +135,7 @@ __hot __noinline MDBX_INTERNAL bool pnl_check(const const_pnl_t pnl,
    if (unlikely(MDBX_PNL_MOST(pnl) >= limit))
      return false;
    if ((!MDBX_DISABLE_VALIDATION || AUDIT_ENABLED()) && likely(MDBX_PNL_GETSIZE(pnl) > 1)) {
      const pgno_t *scan = MDBX_PNL_BEGIN(pnl);
      const pgno_t *const end = MDBX_PNL_END(pnl);
      pgno_t prev = *scan++;
@@ -161,10 +149,9 @@ __hot __noinline MDBX_INTERNAL bool pnl_check(const const_pnl_t pnl,
  return true;
}

static __always_inline void pnl_merge_inner(pgno_t *__restrict dst, const pgno_t *__restrict src_a,
                                            const pgno_t *__restrict src_b,
                                            const pgno_t *__restrict const src_b_detent) {
  do {
#if MDBX_HAVE_CMOV
    const bool flag = MDBX_PNL_ORDERED(*src_b, *src_a);
@@ -203,14 +190,11 @@ __hot MDBX_INTERNAL size_t pnl_merge(pnl_t dst, const pnl_t src) {
    total += src_len;
    if (!MDBX_DEBUG && total < (MDBX_HAVE_CMOV ? 21 : 12))
      goto avoid_call_libc_for_short_cases;
    if (dst_len == 0 || MDBX_PNL_ORDERED(MDBX_PNL_LAST(dst), MDBX_PNL_FIRST(src)))
      memcpy(MDBX_PNL_END(dst), MDBX_PNL_BEGIN(src), src_len * sizeof(pgno_t));
    else if (MDBX_PNL_ORDERED(MDBX_PNL_LAST(src), MDBX_PNL_FIRST(dst))) {
      memmove(MDBX_PNL_BEGIN(dst) + src_len, MDBX_PNL_BEGIN(dst), dst_len * sizeof(pgno_t));
      memcpy(MDBX_PNL_BEGIN(dst), MDBX_PNL_BEGIN(src), src_len * sizeof(pgno_t));
    } else {
    avoid_call_libc_for_short_cases:
      dst[0] = /* the detent */ (MDBX_PNL_ASCENDING ? 0 : P_INVALID);
@@ -227,8 +211,7 @@ __hot MDBX_INTERNAL size_t pnl_merge(pnl_t dst, const pnl_t src) {
#else
#define MDBX_PNL_EXTRACT_KEY(ptr) (P_INVALID - *(ptr))
#endif
RADIXSORT_IMPL(pgno, pgno_t, MDBX_PNL_EXTRACT_KEY, MDBX_PNL_PREALLOC_FOR_RADIXSORT, 0)

SORT_IMPL(pgno_sort, false, pgno_t, MDBX_PNL_ORDERED)
@@ -240,8 +223,7 @@ __hot __noinline MDBX_INTERNAL void pnl_sort_nochk(pnl_t pnl) {
SEARCH_IMPL(pgno_bsearch, pgno_t, pgno_t, MDBX_PNL_ORDERED)

__hot __noinline MDBX_INTERNAL size_t pnl_search_nochk(const pnl_t pnl, pgno_t pgno) {
  const pgno_t *begin = MDBX_PNL_BEGIN(pnl);
  const pgno_t *it = pgno_bsearch(begin, MDBX_PNL_GETSIZE(pnl), pgno);
  const pgno_t *end = begin + MDBX_PNL_GETSIZE(pnl);
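pnl_merge() above chooses between three paths: a straight memcpy when the two lists do not interleave, a memmove plus memcpy when src sorts entirely before dst, and the generic pnl_merge_inner() otherwise. The same decision applied to two plain ascending arrays looks like the sketch below; it is a simplification and not the library code (the real routine also supports descending order and operates on a pnl_t in place):

/* Simplified sketch of the three merge paths for plain ascending arrays.
 * dst must have capacity for dst_len + src_len elements. */
#include <stdint.h>
#include <string.h>
typedef uint32_t pgno_t;

static size_t merge_sorted_sketch(pgno_t *dst, size_t dst_len, const pgno_t *src, size_t src_len) {
  if (dst_len == 0 || dst[dst_len - 1] <= src[0]) {
    /* no interleaving: append src after dst */
    memcpy(dst + dst_len, src, src_len * sizeof(pgno_t));
  } else if (src[src_len - 1] <= dst[0]) {
    /* src sorts entirely before dst: shift dst up, then copy src in front */
    memmove(dst + src_len, dst, dst_len * sizeof(pgno_t));
    memcpy(dst, src, src_len * sizeof(pgno_t));
  } else {
    /* interleaved: ordinary merge from the back, analogous to pnl_merge_inner() */
    size_t a = dst_len, b = src_len, w = dst_len + src_len;
    while (b)
      dst[--w] = (a && dst[a - 1] > src[b - 1]) ? dst[--a] : src[--b];
  }
  return dst_len + src_len;
}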

@@ -26,16 +26,15 @@ typedef const pgno_t *const_pnl_t;
#define MDBX_PNL_GRANULATE_LOG2 10
#define MDBX_PNL_GRANULATE (1 << MDBX_PNL_GRANULATE_LOG2)
#define MDBX_PNL_INITIAL (MDBX_PNL_GRANULATE - 2 - MDBX_ASSUME_MALLOC_OVERHEAD / sizeof(pgno_t))

#define MDBX_PNL_ALLOCLEN(pl) ((pl)[-1])
#define MDBX_PNL_GETSIZE(pl) ((size_t)((pl)[0]))
#define MDBX_PNL_SETSIZE(pl, size)          \
  do {                                      \
    const size_t __size = size;             \
    assert(__size < INT_MAX);               \
    (pl)[0] = (pgno_t)__size;               \
  } while (0)
#define MDBX_PNL_FIRST(pl) ((pl)[1])
#define MDBX_PNL_LAST(pl) ((pl)[MDBX_PNL_GETSIZE(pl)])
@@ -62,13 +61,10 @@ MDBX_MAYBE_UNUSED static inline size_t pnl_size2bytes(size_t size) {
  size += size;
#endif /* MDBX_PNL_PREALLOC_FOR_RADIXSORT */
  STATIC_ASSERT(MDBX_ASSUME_MALLOC_OVERHEAD +
                    (PAGELIST_LIMIT * (MDBX_PNL_PREALLOC_FOR_RADIXSORT + 1) + MDBX_PNL_GRANULATE + 3) * sizeof(pgno_t) <
                SIZE_MAX / 4 * 3);
  size_t bytes =
      ceil_powerof2(MDBX_ASSUME_MALLOC_OVERHEAD + sizeof(pgno_t) * (size + 3), MDBX_PNL_GRANULATE * sizeof(pgno_t)) -
      MDBX_ASSUME_MALLOC_OVERHEAD;
  return bytes;
}
@@ -87,21 +83,16 @@ MDBX_INTERNAL pnl_t pnl_alloc(size_t size);
MDBX_INTERNAL void pnl_free(pnl_t pnl);

MDBX_INTERNAL int pnl_reserve(pnl_t __restrict *__restrict ppnl, const size_t wanna);

MDBX_MAYBE_UNUSED static inline int __must_check_result pnl_need(pnl_t __restrict *__restrict ppnl, size_t num) {
  assert(MDBX_PNL_GETSIZE(*ppnl) <= PAGELIST_LIMIT && MDBX_PNL_ALLOCLEN(*ppnl) >= MDBX_PNL_GETSIZE(*ppnl));
  assert(num <= PAGELIST_LIMIT);
  const size_t wanna = MDBX_PNL_GETSIZE(*ppnl) + num;
  return likely(MDBX_PNL_ALLOCLEN(*ppnl) >= wanna) ? MDBX_SUCCESS : pnl_reserve(ppnl, wanna);
}

MDBX_MAYBE_UNUSED static inline void pnl_append_prereserved(__restrict pnl_t pnl, pgno_t pgno) {
  assert(MDBX_PNL_GETSIZE(pnl) < MDBX_PNL_ALLOCLEN(pnl));
  if (AUDIT_ENABLED()) {
    for (size_t i = MDBX_PNL_GETSIZE(pnl); i > 0; --i)
@@ -113,14 +104,11 @@ pnl_append_prereserved(__restrict pnl_t pnl, pgno_t pgno) {
MDBX_INTERNAL void pnl_shrink(pnl_t __restrict *__restrict ppnl);

MDBX_INTERNAL int __must_check_result spill_append_span(__restrict pnl_t *ppnl, pgno_t pgno, size_t n);

MDBX_INTERNAL int __must_check_result pnl_append_span(__restrict pnl_t *ppnl, pgno_t pgno, size_t n);

MDBX_INTERNAL int __must_check_result pnl_insert_span(__restrict pnl_t *ppnl, pgno_t pgno, size_t n);

MDBX_INTERNAL size_t pnl_search_nochk(const pnl_t pnl, pgno_t pgno);
@@ -128,10 +116,8 @@ MDBX_INTERNAL void pnl_sort_nochk(pnl_t pnl);
MDBX_INTERNAL bool pnl_check(const const_pnl_t pnl, const size_t limit);

MDBX_MAYBE_UNUSED static inline bool pnl_check_allocated(const const_pnl_t pnl, const size_t limit) {
  return pnl == nullptr || (MDBX_PNL_ALLOCLEN(pnl) >= MDBX_PNL_GETSIZE(pnl) && pnl_check(pnl, limit));
}

MDBX_MAYBE_UNUSED static inline void pnl_sort(pnl_t pnl, size_t limit4check) {
@@ -140,8 +126,7 @@ MDBX_MAYBE_UNUSED static inline void pnl_sort(pnl_t pnl, size_t limit4check) {
  (void)limit4check;
}

MDBX_MAYBE_UNUSED static inline size_t pnl_search(const pnl_t pnl, pgno_t pgno, size_t limit) {
  assert(pnl_check_allocated(pnl, limit));
  if (MDBX_HAVE_CMOV) {
    /* cmov-ускоренный бинарный поиск может читать (но не использовать) один
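The macros above define the in-memory layout of a pnl_t: the allocated length lives one slot before the pointer (pl[-1]), slot 0 holds the current size, and the page numbers occupy pl[1..size]. A tiny self-contained illustration of that layout, using plain malloc instead of the library's allocator and pnl_size2bytes() sizing:

/* Layout sketch only: mirrors MDBX_PNL_ALLOCLEN/GETSIZE/FIRST/LAST, not the real pnl_alloc(). */
#include <stdint.h>
#include <stdlib.h>
typedef uint32_t pgno_t;
typedef pgno_t *pnl_t;

static pnl_t pnl_alloc_sketch(size_t alloclen) {
  pgno_t *raw = malloc((alloclen + 2) * sizeof(pgno_t));
  if (!raw)
    return NULL;
  pnl_t pl = raw + 1;          /* pl[-1] is the alloc-length cell */
  pl[-1] = (pgno_t)alloclen;   /* MDBX_PNL_ALLOCLEN(pl)           */
  pl[0] = 0;                   /* MDBX_PNL_GETSIZE(pl) == 0       */
  return pl;
}

static void pnl_append_sketch(pnl_t pl, pgno_t pgno) {
  /* caller must have reserved room, as pnl_append_prereserved() asserts */
  pl[0] += 1;
  pl[pl[0]] = pgno;            /* MDBX_PNL_LAST(pl) now returns pgno */
}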

@ -4,8 +4,7 @@
#pragma once #pragma once
/* Undefine the NDEBUG if debugging is enforced by MDBX_DEBUG */ /* Undefine the NDEBUG if debugging is enforced by MDBX_DEBUG */
#if (defined(MDBX_DEBUG) && MDBX_DEBUG > 0) || \ #if (defined(MDBX_DEBUG) && MDBX_DEBUG > 0) || (defined(MDBX_FORCE_ASSERTIONS) && MDBX_FORCE_ASSERTIONS)
(defined(MDBX_FORCE_ASSERTIONS) && MDBX_FORCE_ASSERTIONS)
#undef NDEBUG #undef NDEBUG
#ifndef MDBX_DEBUG #ifndef MDBX_DEBUG
/* Чтобы избежать включения отладки только из-за включения assert-проверок */ /* Чтобы избежать включения отладки только из-за включения assert-проверок */
@ -29,8 +28,7 @@
#endif /* MDBX_DISABLE_GNU_SOURCE */ #endif /* MDBX_DISABLE_GNU_SOURCE */
/* Should be defined before any includes */ /* Should be defined before any includes */
#if !defined(_FILE_OFFSET_BITS) && !defined(__ANDROID_API__) && \ #if !defined(_FILE_OFFSET_BITS) && !defined(__ANDROID_API__) && !defined(ANDROID)
!defined(ANDROID)
#define _FILE_OFFSET_BITS 64 #define _FILE_OFFSET_BITS 64
#endif /* _FILE_OFFSET_BITS */ #endif /* _FILE_OFFSET_BITS */
@ -38,8 +36,7 @@
#define _DARWIN_C_SOURCE #define _DARWIN_C_SOURCE
#endif /* _DARWIN_C_SOURCE */ #endif /* _DARWIN_C_SOURCE */
#if (defined(__MINGW__) || defined(__MINGW32__) || defined(__MINGW64__)) && \ #if (defined(__MINGW__) || defined(__MINGW32__) || defined(__MINGW64__)) && !defined(__USE_MINGW_ANSI_STDIO)
!defined(__USE_MINGW_ANSI_STDIO)
#define __USE_MINGW_ANSI_STDIO 1 #define __USE_MINGW_ANSI_STDIO 1
#endif /* MinGW */ #endif /* MinGW */
@ -56,8 +53,7 @@
#define UNICODE #define UNICODE
#endif /* UNICODE */ #endif /* UNICODE */
#if !defined(_NO_CRT_STDIO_INLINE) && MDBX_BUILD_SHARED_LIBRARY && \ #if !defined(_NO_CRT_STDIO_INLINE) && MDBX_BUILD_SHARED_LIBRARY && !defined(xMDBX_TOOLS) && MDBX_WITHOUT_MSVC_CRT
!defined(xMDBX_TOOLS) && MDBX_WITHOUT_MSVC_CRT
#define _NO_CRT_STDIO_INLINE #define _NO_CRT_STDIO_INLINE
#endif /* _NO_CRT_STDIO_INLINE */ #endif /* _NO_CRT_STDIO_INLINE */
@ -72,8 +68,7 @@
#endif /* NOMINMAX */ #endif /* NOMINMAX */
/* Workaround for modern libstdc++ with CLANG < 4.x */ /* Workaround for modern libstdc++ with CLANG < 4.x */
#if defined(__SIZEOF_INT128__) && !defined(__GLIBCXX_TYPE_INT_N_0) && \ #if defined(__SIZEOF_INT128__) && !defined(__GLIBCXX_TYPE_INT_N_0) && defined(__clang__) && __clang_major__ < 4
defined(__clang__) && __clang_major__ < 4
#define __GLIBCXX_BITSIZE_INT_N_0 128 #define __GLIBCXX_BITSIZE_INT_N_0 128
#define __GLIBCXX_TYPE_INT_N_0 __int128 #define __GLIBCXX_TYPE_INT_N_0 __int128
#endif /* Workaround for modern libstdc++ with CLANG < 4.x */ #endif /* Workaround for modern libstdc++ with CLANG < 4.x */
@ -107,8 +102,7 @@
* and how to and where you can obtain the latest "Visual Studio 2015" build * and how to and where you can obtain the latest "Visual Studio 2015" build
* with all fixes. * with all fixes.
*/ */
#error \ #error "At least \"Microsoft C/C++ Compiler\" version 19.00.24234 (Visual Studio 2015 Update 3) is required."
"At least \"Microsoft C/C++ Compiler\" version 19.00.24234 (Visual Studio 2015 Update 3) is required."
#endif #endif
#if _MSC_VER > 1800 #if _MSC_VER > 1800
#pragma warning(disable : 4464) /* relative include path contains '..' */ #pragma warning(disable : 4464) /* relative include path contains '..' */
@ -117,9 +111,8 @@
#pragma warning(disable : 5045) /* will insert Spectre mitigation... */ #pragma warning(disable : 5045) /* will insert Spectre mitigation... */
#endif #endif
#if _MSC_VER > 1914 #if _MSC_VER > 1914
#pragma warning( \ #pragma warning(disable : 5105) /* winbase.h(9531): warning C5105: macro expansion \
disable : 5105) /* winbase.h(9531): warning C5105: macro expansion \ producing 'defined' has undefined behavior */
producing 'defined' has undefined behavior */
#endif #endif
#if _MSC_VER < 1920 #if _MSC_VER < 1920
/* avoid "error C2219: syntax error: type qualifier must be after '*'" */ /* avoid "error C2219: syntax error: type qualifier must be after '*'" */
@ -127,33 +120,32 @@
#endif #endif
#if _MSC_VER > 1930 #if _MSC_VER > 1930
#pragma warning(disable : 6235) /* <expression> is always a constant */ #pragma warning(disable : 6235) /* <expression> is always a constant */
#pragma warning(disable : 6237) /* <expression> is never evaluated and might \ #pragma warning(disable : 6237) /* <expression> is never evaluated and might \
have side effects */ have side effects */
#endif #endif
#pragma warning(disable : 4710) /* 'xyz': function not inlined */ #pragma warning(disable : 4710) /* 'xyz': function not inlined */
#pragma warning(disable : 4711) /* function 'xyz' selected for automatic \ #pragma warning(disable : 4711) /* function 'xyz' selected for automatic \
inline expansion */ inline expansion */
#pragma warning(disable : 4201) /* nonstandard extension used: nameless \ #pragma warning(disable : 4201) /* nonstandard extension used: nameless \
struct/union */ struct/union */
#pragma warning(disable : 4702) /* unreachable code */ #pragma warning(disable : 4702) /* unreachable code */
#pragma warning(disable : 4706) /* assignment within conditional expression */ #pragma warning(disable : 4706) /* assignment within conditional expression */
#pragma warning(disable : 4127) /* conditional expression is constant */ #pragma warning(disable : 4127) /* conditional expression is constant */
#pragma warning(disable : 4324) /* 'xyz': structure was padded due to \ #pragma warning(disable : 4324) /* 'xyz': structure was padded due to \
alignment specifier */ alignment specifier */
#pragma warning(disable : 4310) /* cast truncates constant value */ #pragma warning(disable : 4310) /* cast truncates constant value */
#pragma warning(disable : 4820) /* bytes padding added after data member for \ #pragma warning(disable : 4820) /* bytes padding added after data member for \
alignment */ alignment */
#pragma warning(disable : 4548) /* expression before comma has no effect; \ #pragma warning(disable : 4548) /* expression before comma has no effect; \
expected expression with side - effect */ expected expression with side - effect */
#pragma warning(disable : 4366) /* the result of the unary '&' operator may be \ #pragma warning(disable : 4366) /* the result of the unary '&' operator may be \
unaligned */ unaligned */
#pragma warning(disable : 4200) /* nonstandard extension used: zero-sized \ #pragma warning(disable : 4200) /* nonstandard extension used: zero-sized \
array in struct/union */ array in struct/union */
#pragma warning(disable : 4204) /* nonstandard extension used: non-constant \ #pragma warning(disable : 4204) /* nonstandard extension used: non-constant \
aggregate initializer */ aggregate initializer */
#pragma warning( \ #pragma warning(disable : 4505) /* unreferenced local function has been removed */
disable : 4505) /* unreferenced local function has been removed */ #endif /* _MSC_VER (warnings) */
#endif /* _MSC_VER (warnings) */
#if defined(__GNUC__) && __GNUC__ < 9 #if defined(__GNUC__) && __GNUC__ < 9
#pragma GCC diagnostic ignored "-Wattributes" #pragma GCC diagnostic ignored "-Wattributes"
@ -166,12 +158,12 @@
#ifdef _MSC_VER #ifdef _MSC_VER
#pragma warning(push, 1) #pragma warning(push, 1)
#pragma warning(disable : 4548) /* expression before comma has no effect; \ #pragma warning(disable : 4548) /* expression before comma has no effect; \
expected expression with side - effect */ expected expression with side - effect */
#pragma warning(disable : 4530) /* C++ exception handler used, but unwind \ #pragma warning(disable : 4530) /* C++ exception handler used, but unwind \
* semantics are not enabled. Specify /EHsc */ * semantics are not enabled. Specify /EHsc */
#pragma warning(disable : 4577) /* 'noexcept' used with no exception handling \ #pragma warning(disable : 4577) /* 'noexcept' used with no exception handling \
* mode specified; termination on exception is \ * mode specified; termination on exception is \
* not guaranteed. Specify /EHsc */ * not guaranteed. Specify /EHsc */
#endif /* _MSC_VER (warnings) */ #endif /* _MSC_VER (warnings) */
@ -232,8 +224,7 @@
#ifndef __GNUC_PREREQ #ifndef __GNUC_PREREQ
#if defined(__GNUC__) && defined(__GNUC_MINOR__) #if defined(__GNUC__) && defined(__GNUC_MINOR__)
#define __GNUC_PREREQ(maj, min) \ #define __GNUC_PREREQ(maj, min) ((__GNUC__ << 16) + __GNUC_MINOR__ >= ((maj) << 16) + (min))
((__GNUC__ << 16) + __GNUC_MINOR__ >= ((maj) << 16) + (min))
#else #else
#define __GNUC_PREREQ(maj, min) (0) #define __GNUC_PREREQ(maj, min) (0)
#endif #endif
@ -241,8 +232,7 @@
#ifndef __CLANG_PREREQ #ifndef __CLANG_PREREQ
#ifdef __clang__ #ifdef __clang__
#define __CLANG_PREREQ(maj, min) \ #define __CLANG_PREREQ(maj, min) ((__clang_major__ << 16) + __clang_minor__ >= ((maj) << 16) + (min))
((__clang_major__ << 16) + __clang_minor__ >= ((maj) << 16) + (min))
#else #else
#define __CLANG_PREREQ(maj, min) (0) #define __CLANG_PREREQ(maj, min) (0)
#endif #endif
@ -250,8 +240,7 @@
#ifndef __GLIBC_PREREQ #ifndef __GLIBC_PREREQ
#if defined(__GLIBC__) && defined(__GLIBC_MINOR__) #if defined(__GLIBC__) && defined(__GLIBC_MINOR__)
#define __GLIBC_PREREQ(maj, min) \ #define __GLIBC_PREREQ(maj, min) ((__GLIBC__ << 16) + __GLIBC_MINOR__ >= ((maj) << 16) + (min))
((__GLIBC__ << 16) + __GLIBC_MINOR__ >= ((maj) << 16) + (min))
#else #else
#define __GLIBC_PREREQ(maj, min) (0) #define __GLIBC_PREREQ(maj, min) (0)
#endif #endif
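Side note on the three *_PREREQ macros above: they all rely on the same trick of packing a major/minor version into one integer as (major << 16) + minor, so "is the toolchain at least X.Y" becomes a single comparison. A minimal standalone sketch (DEMO_* names are invented for illustration, not part of libmdbx):

#include <stdio.h>

#define DEMO_MAJOR 9
#define DEMO_MINOR 4
/* same packing scheme as __GNUC_PREREQ / __CLANG_PREREQ / __GLIBC_PREREQ */
#define DEMO_PACK(maj, min) (((maj) << 16) + (min))
#define DEMO_PREREQ(maj, min) (DEMO_PACK(DEMO_MAJOR, DEMO_MINOR) >= DEMO_PACK(maj, min))

int main(void) {
  /* 9.4 satisfies ">= 7.0" and ">= 9.3", but not ">= 10.0": prints "1 1 0" */
  printf("%d %d %d\n", DEMO_PREREQ(7, 0), DEMO_PREREQ(9, 3), DEMO_PREREQ(10, 0));
  return 0;
}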
@ -261,8 +250,7 @@
/* pre-requirements */ /* pre-requirements */
#if (-6 & 5) || CHAR_BIT != 8 || UINT_MAX < 0xffffffff || ULONG_MAX % 0xFFFF #if (-6 & 5) || CHAR_BIT != 8 || UINT_MAX < 0xffffffff || ULONG_MAX % 0xFFFF
#error \ #error "Sanity checking failed: Two's complement, reasonably sized integer types"
"Sanity checking failed: Two's complement, reasonably sized integer types"
#endif #endif
#ifndef SSIZE_MAX #ifndef SSIZE_MAX
@ -294,8 +282,7 @@
#endif #endif
#ifdef __SANITIZE_THREAD__ #ifdef __SANITIZE_THREAD__
#warning \ #warning "libmdbx don't compatible with ThreadSanitizer, you will get a lot of false-positive issues."
"libmdbx don't compatible with ThreadSanitizer, you will get a lot of false-positive issues."
#endif /* __SANITIZE_THREAD__ */ #endif /* __SANITIZE_THREAD__ */
/*----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*/
@ -327,8 +314,7 @@
#endif #endif
#endif /* __extern_C */ #endif /* __extern_C */
#if !defined(nullptr) && !defined(__cplusplus) || \ #if !defined(nullptr) && !defined(__cplusplus) || (__cplusplus < 201103L && !defined(_MSC_VER))
(__cplusplus < 201103L && !defined(_MSC_VER))
#define nullptr NULL #define nullptr NULL
#endif #endif
@ -340,9 +326,8 @@
#endif #endif
#endif /* Apple OSX & iOS */ #endif /* Apple OSX & iOS */
#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || \ #if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__BSD__) || defined(__bsdi__) || \
defined(__BSD__) || defined(__bsdi__) || defined(__DragonFly__) || \ defined(__DragonFly__) || defined(__APPLE__) || defined(__MACH__)
defined(__APPLE__) || defined(__MACH__)
#include <sys/cdefs.h> #include <sys/cdefs.h>
#include <sys/mount.h> #include <sys/mount.h>
#include <sys/sysctl.h> #include <sys/sysctl.h>
@ -359,8 +344,7 @@
#endif #endif
#else #else
#include <malloc.h> #include <malloc.h>
#if !(defined(__sun) || defined(__SVR4) || defined(__svr4__) || \ #if !(defined(__sun) || defined(__SVR4) || defined(__svr4__) || defined(_WIN32) || defined(_WIN64))
defined(_WIN32) || defined(_WIN64))
#include <mntent.h> #include <mntent.h>
#endif /* !Solaris */ #endif /* !Solaris */
#endif /* !xBSD */ #endif /* !xBSD */
@ -469,43 +453,38 @@ __extern_C key_t ftok(const char *, int);
/*----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*/
/* Byteorder */ /* Byteorder */
#if defined(i386) || defined(__386) || defined(__i386) || defined(__i386__) || \ #if defined(i386) || defined(__386) || defined(__i386) || defined(__i386__) || defined(i486) || defined(__i486) || \
defined(i486) || defined(__i486) || defined(__i486__) || defined(i586) || \ defined(__i486__) || defined(i586) || defined(__i586) || defined(__i586__) || defined(i686) || defined(__i686) || \
defined(__i586) || defined(__i586__) || defined(i686) || \ defined(__i686__) || defined(_M_IX86) || defined(_X86_) || defined(__THW_INTEL__) || defined(__I86__) || \
defined(__i686) || defined(__i686__) || defined(_M_IX86) || \ defined(__INTEL__) || defined(__x86_64) || defined(__x86_64__) || defined(__amd64__) || defined(__amd64) || \
defined(_X86_) || defined(__THW_INTEL__) || defined(__I86__) || \ defined(_M_X64) || defined(_M_AMD64) || defined(__IA32__) || defined(__INTEL__)
defined(__INTEL__) || defined(__x86_64) || defined(__x86_64__) || \
defined(__amd64__) || defined(__amd64) || defined(_M_X64) || \
defined(_M_AMD64) || defined(__IA32__) || defined(__INTEL__)
#ifndef __ia32__ #ifndef __ia32__
/* LY: define neutral __ia32__ for x86 and x86-64 */ /* LY: define neutral __ia32__ for x86 and x86-64 */
#define __ia32__ 1 #define __ia32__ 1
#endif /* __ia32__ */ #endif /* __ia32__ */
#if !defined(__amd64__) && \ #if !defined(__amd64__) && \
(defined(__x86_64) || defined(__x86_64__) || defined(__amd64) || \ (defined(__x86_64) || defined(__x86_64__) || defined(__amd64) || defined(_M_X64) || defined(_M_AMD64))
defined(_M_X64) || defined(_M_AMD64))
/* LY: define trusty __amd64__ for all AMD64/x86-64 arch */ /* LY: define trusty __amd64__ for all AMD64/x86-64 arch */
#define __amd64__ 1 #define __amd64__ 1
#endif /* __amd64__ */ #endif /* __amd64__ */
#endif /* all x86 */ #endif /* all x86 */
#if !defined(__BYTE_ORDER__) || !defined(__ORDER_LITTLE_ENDIAN__) || \ #if !defined(__BYTE_ORDER__) || !defined(__ORDER_LITTLE_ENDIAN__) || !defined(__ORDER_BIG_ENDIAN__)
!defined(__ORDER_BIG_ENDIAN__)
#if defined(__GLIBC__) || defined(__GNU_LIBRARY__) || \ #if defined(__GLIBC__) || defined(__GNU_LIBRARY__) || defined(__ANDROID_API__) || defined(HAVE_ENDIAN_H) || \
defined(__ANDROID_API__) || defined(HAVE_ENDIAN_H) || __has_include(<endian.h>) __has_include(<endian.h>)
#include <endian.h> #include <endian.h>
#elif defined(__APPLE__) || defined(__MACH__) || defined(__OpenBSD__) || \ #elif defined(__APPLE__) || defined(__MACH__) || defined(__OpenBSD__) || defined(HAVE_MACHINE_ENDIAN_H) || \
defined(HAVE_MACHINE_ENDIAN_H) || __has_include(<machine/endian.h>) __has_include(<machine/endian.h>)
#include <machine/endian.h> #include <machine/endian.h>
#elif defined(HAVE_SYS_ISA_DEFS_H) || __has_include(<sys/isa_defs.h>) #elif defined(HAVE_SYS_ISA_DEFS_H) || __has_include(<sys/isa_defs.h>)
#include <sys/isa_defs.h> #include <sys/isa_defs.h>
#elif (defined(HAVE_SYS_TYPES_H) && defined(HAVE_SYS_ENDIAN_H)) || \ #elif (defined(HAVE_SYS_TYPES_H) && defined(HAVE_SYS_ENDIAN_H)) || \
(__has_include(<sys/types.h>) && __has_include(<sys/endian.h>)) (__has_include(<sys/types.h>) && __has_include(<sys/endian.h>))
#include <sys/endian.h> #include <sys/endian.h>
#include <sys/types.h> #include <sys/types.h>
#elif defined(__bsdi__) || defined(__DragonFly__) || defined(__FreeBSD__) || \ #elif defined(__bsdi__) || defined(__DragonFly__) || defined(__FreeBSD__) || defined(__NetBSD__) || \
defined(__NetBSD__) || defined(HAVE_SYS_PARAM_H) || __has_include(<sys/param.h>) defined(HAVE_SYS_PARAM_H) || __has_include(<sys/param.h>)
#include <sys/param.h> #include <sys/param.h>
#endif /* OS */ #endif /* OS */
@ -521,27 +500,19 @@ __extern_C key_t ftok(const char *, int);
#define __ORDER_LITTLE_ENDIAN__ 1234 #define __ORDER_LITTLE_ENDIAN__ 1234
#define __ORDER_BIG_ENDIAN__ 4321 #define __ORDER_BIG_ENDIAN__ 4321
#if defined(__LITTLE_ENDIAN__) || \ #if defined(__LITTLE_ENDIAN__) || (defined(_LITTLE_ENDIAN) && !defined(_BIG_ENDIAN)) || defined(__ARMEL__) || \
(defined(_LITTLE_ENDIAN) && !defined(_BIG_ENDIAN)) || \ defined(__THUMBEL__) || defined(__AARCH64EL__) || defined(__MIPSEL__) || defined(_MIPSEL) || defined(__MIPSEL) || \
defined(__ARMEL__) || defined(__THUMBEL__) || defined(__AARCH64EL__) || \ defined(_M_ARM) || defined(_M_ARM64) || defined(__e2k__) || defined(__elbrus_4c__) || defined(__elbrus_8c__) || \
defined(__MIPSEL__) || defined(_MIPSEL) || defined(__MIPSEL) || \ defined(__bfin__) || defined(__BFIN__) || defined(__ia64__) || defined(_IA64) || defined(__IA64__) || \
defined(_M_ARM) || defined(_M_ARM64) || defined(__e2k__) || \ defined(__ia64) || defined(_M_IA64) || defined(__itanium__) || defined(__ia32__) || defined(__CYGWIN__) || \
defined(__elbrus_4c__) || defined(__elbrus_8c__) || defined(__bfin__) || \ defined(_WIN64) || defined(_WIN32) || defined(__TOS_WIN__) || defined(__WINDOWS__)
defined(__BFIN__) || defined(__ia64__) || defined(_IA64) || \
defined(__IA64__) || defined(__ia64) || defined(_M_IA64) || \
defined(__itanium__) || defined(__ia32__) || defined(__CYGWIN__) || \
defined(_WIN64) || defined(_WIN32) || defined(__TOS_WIN__) || \
defined(__WINDOWS__)
#define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__ #define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__
#elif defined(__BIG_ENDIAN__) || \ #elif defined(__BIG_ENDIAN__) || (defined(_BIG_ENDIAN) && !defined(_LITTLE_ENDIAN)) || defined(__ARMEB__) || \
(defined(_BIG_ENDIAN) && !defined(_LITTLE_ENDIAN)) || \ defined(__THUMBEB__) || defined(__AARCH64EB__) || defined(__MIPSEB__) || defined(_MIPSEB) || defined(__MIPSEB) || \
defined(__ARMEB__) || defined(__THUMBEB__) || defined(__AARCH64EB__) || \ defined(__m68k__) || defined(M68000) || defined(__hppa__) || defined(__hppa) || defined(__HPPA__) || \
defined(__MIPSEB__) || defined(_MIPSEB) || defined(__MIPSEB) || \ defined(__sparc__) || defined(__sparc) || defined(__370__) || defined(__THW_370__) || defined(__s390__) || \
defined(__m68k__) || defined(M68000) || defined(__hppa__) || \ defined(__s390x__) || defined(__SYSC_ZARCH__)
defined(__hppa) || defined(__HPPA__) || defined(__sparc__) || \
defined(__sparc) || defined(__370__) || defined(__THW_370__) || \
defined(__s390__) || defined(__s390x__) || defined(__SYSC_ZARCH__)
#define __BYTE_ORDER__ __ORDER_BIG_ENDIAN__ #define __BYTE_ORDER__ __ORDER_BIG_ENDIAN__
#else #else
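For readers unfamiliar with the convention: the block above only derives __BYTE_ORDER__ from platform macros when the compiler did not predefine it. A quick runtime probe of the same property looks roughly like this (illustrative sketch only, not part of the library):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  const uint32_t probe = 0x01020304u;
  unsigned char bytes[4];
  memcpy(bytes, &probe, sizeof(bytes));
  /* little-endian stores the least significant byte first */
  if (bytes[0] == 0x04)
    puts("little-endian (corresponds to __ORDER_LITTLE_ENDIAN__ == 1234)");
  else if (bytes[0] == 0x01)
    puts("big-endian (corresponds to __ORDER_BIG_ENDIAN__ == 4321)");
  else
    puts("unexpected byte order");
  return 0;
}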
@ -561,17 +532,14 @@ __extern_C key_t ftok(const char *, int);
#define MDBX_HAVE_CMOV 1 #define MDBX_HAVE_CMOV 1
#elif defined(__thumb__) || defined(__thumb) || defined(__TARGET_ARCH_THUMB) #elif defined(__thumb__) || defined(__thumb) || defined(__TARGET_ARCH_THUMB)
#define MDBX_HAVE_CMOV 0 #define MDBX_HAVE_CMOV 0
#elif defined(_M_ARM) || defined(_M_ARM64) || defined(__aarch64__) || \ #elif defined(_M_ARM) || defined(_M_ARM64) || defined(__aarch64__) || defined(__aarch64) || defined(__arm__) || \
defined(__aarch64) || defined(__arm__) || defined(__arm) || \ defined(__arm) || defined(__CC_ARM)
defined(__CC_ARM)
#define MDBX_HAVE_CMOV 1 #define MDBX_HAVE_CMOV 1
#elif (defined(__riscv__) || defined(__riscv64)) && \ #elif (defined(__riscv__) || defined(__riscv64)) && (defined(__riscv_b) || defined(__riscv_bitmanip))
(defined(__riscv_b) || defined(__riscv_bitmanip))
#define MDBX_HAVE_CMOV 1 #define MDBX_HAVE_CMOV 1
#elif defined(i686) || defined(__i686) || defined(__i686__) || \ #elif defined(i686) || defined(__i686) || defined(__i686__) || (defined(_M_IX86) && _M_IX86 > 600) || \
(defined(_M_IX86) && _M_IX86 > 600) || defined(__x86_64) || \ defined(__x86_64) || defined(__x86_64__) || defined(__amd64__) || defined(__amd64) || defined(_M_X64) || \
defined(__x86_64__) || defined(__amd64__) || defined(__amd64) || \ defined(_M_AMD64)
defined(_M_X64) || defined(_M_AMD64)
#define MDBX_HAVE_CMOV 1 #define MDBX_HAVE_CMOV 1
#else #else
#define MDBX_HAVE_CMOV 0 #define MDBX_HAVE_CMOV 0
@ -597,8 +565,7 @@ __extern_C key_t ftok(const char *, int);
#endif #endif
#elif defined(__SUNPRO_C) || defined(__sun) || defined(sun) #elif defined(__SUNPRO_C) || defined(__sun) || defined(sun)
#include <mbarrier.h> #include <mbarrier.h>
#elif (defined(_HPUX_SOURCE) || defined(__hpux) || defined(__HP_aCC)) && \ #elif (defined(_HPUX_SOURCE) || defined(__hpux) || defined(__HP_aCC)) && (defined(HP_IA64) || defined(__ia64))
(defined(HP_IA64) || defined(__ia64))
#include <machine/sys/inline.h> #include <machine/sys/inline.h>
#elif defined(__IBMC__) && defined(__powerpc) #elif defined(__IBMC__) && defined(__powerpc)
#include <atomic.h> #include <atomic.h>
@ -620,29 +587,26 @@ __extern_C key_t ftok(const char *, int);
#endif /* Compiler */ #endif /* Compiler */
#if !defined(__noop) && !defined(_MSC_VER) #if !defined(__noop) && !defined(_MSC_VER)
#define __noop \ #define __noop \
do { \ do { \
} while (0) } while (0)
#endif /* __noop */ #endif /* __noop */
#if defined(__fallthrough) && \ #if defined(__fallthrough) && (defined(__MINGW__) || defined(__MINGW32__) || defined(__MINGW64__))
(defined(__MINGW__) || defined(__MINGW32__) || defined(__MINGW64__))
#undef __fallthrough #undef __fallthrough
#endif /* __fallthrough workaround for MinGW */ #endif /* __fallthrough workaround for MinGW */
#ifndef __fallthrough #ifndef __fallthrough
#if defined(__cplusplus) && (__has_cpp_attribute(fallthrough) && \ #if defined(__cplusplus) && (__has_cpp_attribute(fallthrough) && (!defined(__clang__) || __clang__ > 4)) || \
(!defined(__clang__) || __clang__ > 4)) || \
__cplusplus >= 201703L __cplusplus >= 201703L
#define __fallthrough [[fallthrough]] #define __fallthrough [[fallthrough]]
#elif __GNUC_PREREQ(8, 0) && defined(__cplusplus) && __cplusplus >= 201103L #elif __GNUC_PREREQ(8, 0) && defined(__cplusplus) && __cplusplus >= 201103L
#define __fallthrough [[fallthrough]] #define __fallthrough [[fallthrough]]
#elif __GNUC_PREREQ(7, 0) && \ #elif __GNUC_PREREQ(7, 0) && (!defined(__LCC__) || (__LCC__ == 124 && __LCC_MINOR__ >= 12) || \
(!defined(__LCC__) || (__LCC__ == 124 && __LCC_MINOR__ >= 12) || \ (__LCC__ == 125 && __LCC_MINOR__ >= 5) || (__LCC__ >= 126))
(__LCC__ == 125 && __LCC_MINOR__ >= 5) || (__LCC__ >= 126))
#define __fallthrough __attribute__((__fallthrough__)) #define __fallthrough __attribute__((__fallthrough__))
#elif defined(__clang__) && defined(__cplusplus) && __cplusplus >= 201103L && \ #elif defined(__clang__) && defined(__cplusplus) && __cplusplus >= 201103L && __has_feature(cxx_attributes) && \
__has_feature(cxx_attributes) && __has_warning("-Wimplicit-fallthrough") __has_warning("-Wimplicit-fallthrough")
#define __fallthrough [[clang::fallthrough]] #define __fallthrough [[clang::fallthrough]]
#else #else
#define __fallthrough #define __fallthrough
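The __fallthrough macro resolves to whichever fall-through annotation the toolchain accepts ([[fallthrough]], __attribute__((__fallthrough__)), [[clang::fallthrough]]) or to nothing at all; its only job is to mark intentional case fall-through so -Wimplicit-fallthrough stays quiet. A hedged usage sketch, assuming the macro definition above is in scope (the function itself is invented):

static int classify(int op) {
  int weight = 0;
  switch (op) {
  case 2:
    weight += 10;
    __fallthrough /* intentional: case 2 also accumulates case 1's weight */;
  case 1:
    weight += 1;
    break;
  default:
    weight = -1;
    break;
  }
  return weight;
}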
@ -655,8 +619,8 @@ __extern_C key_t ftok(const char *, int);
#elif defined(_MSC_VER) #elif defined(_MSC_VER)
#define __unreachable() __assume(0) #define __unreachable() __assume(0)
#else #else
#define __unreachable() \ #define __unreachable() \
do { \ do { \
} while (1) } while (1)
#endif #endif
#endif /* __unreachable */ #endif /* __unreachable */
@ -665,9 +629,9 @@ __extern_C key_t ftok(const char *, int);
#if defined(__GNUC__) || defined(__clang__) || __has_builtin(__builtin_prefetch) #if defined(__GNUC__) || defined(__clang__) || __has_builtin(__builtin_prefetch)
#define __prefetch(ptr) __builtin_prefetch(ptr) #define __prefetch(ptr) __builtin_prefetch(ptr)
#else #else
#define __prefetch(ptr) \ #define __prefetch(ptr) \
do { \ do { \
(void)(ptr); \ (void)(ptr); \
} while (0) } while (0)
#endif #endif
#endif /* __prefetch */ #endif /* __prefetch */
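__prefetch wraps __builtin_prefetch where the builtin exists and otherwise degrades to a no-op that still evaluates its argument. The typical pattern is to pull the next node of a pointer-chasing loop into cache one step early; a sketch under that assumption (node_t and sum_list are hypothetical, only the macro comes from the code above):

typedef struct node {
  struct node *next;
  long payload;
} node_t;

static long sum_list(const node_t *head) {
  long sum = 0;
  while (head) {
    if (head->next)
      __prefetch(head->next); /* hint: the next node will be read shortly */
    sum += head->payload;
    head = head->next;
  }
  return sum;
}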
@ -677,8 +641,7 @@ __extern_C key_t ftok(const char *, int);
#endif /* offsetof */ #endif /* offsetof */
#ifndef container_of #ifndef container_of
#define container_of(ptr, type, member) \ #define container_of(ptr, type, member) ((type *)((char *)(ptr) - offsetof(type, member)))
((type *)((char *)(ptr) - offsetof(type, member)))
#endif /* container_of */ #endif /* container_of */
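container_of recovers a pointer to the enclosing struct from a pointer to one of its members by subtracting the member's offsetof. A small self-contained example with the same definition (the wrapper type and values are invented for illustration):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) ((type *)((char *)(ptr) - offsetof(type, member)))

struct wrapper {
  int header;
  double value; /* suppose only a pointer to this member is available */
};

int main(void) {
  struct wrapper w = {42, 3.5};
  double *member = &w.value;
  struct wrapper *back = container_of(member, struct wrapper, value);
  printf("%d %.1f\n", back->header, back->value); /* prints: 42 3.5 */
  return 0;
}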
/*----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*/
@ -750,8 +713,7 @@ __extern_C key_t ftok(const char *, int);
#ifndef __hot #ifndef __hot
#if defined(__OPTIMIZE__) #if defined(__OPTIMIZE__)
#if defined(__clang__) && !__has_attribute(__hot__) && \ #if defined(__clang__) && !__has_attribute(__hot__) && __has_attribute(__section__) && \
__has_attribute(__section__) && \
(defined(__linux__) || defined(__gnu_linux__)) (defined(__linux__) || defined(__gnu_linux__))
/* just put frequently used functions in separate section */ /* just put frequently used functions in separate section */
#define __hot __attribute__((__section__("text.hot"))) __optimize("O3") #define __hot __attribute__((__section__("text.hot"))) __optimize("O3")
@ -767,8 +729,7 @@ __extern_C key_t ftok(const char *, int);
#ifndef __cold #ifndef __cold
#if defined(__OPTIMIZE__) #if defined(__OPTIMIZE__)
#if defined(__clang__) && !__has_attribute(__cold__) && \ #if defined(__clang__) && !__has_attribute(__cold__) && __has_attribute(__section__) && \
__has_attribute(__section__) && \
(defined(__linux__) || defined(__gnu_linux__)) (defined(__linux__) || defined(__gnu_linux__))
/* just put infrequently used functions in separate section */ /* just put infrequently used functions in separate section */
#define __cold __attribute__((__section__("text.unlikely"))) __optimize("Os") #define __cold __attribute__((__section__("text.unlikely"))) __optimize("Os")
@ -791,8 +752,7 @@ __extern_C key_t ftok(const char *, int);
#endif /* __flatten */ #endif /* __flatten */
#ifndef likely #ifndef likely
#if (defined(__GNUC__) || __has_builtin(__builtin_expect)) && \ #if (defined(__GNUC__) || __has_builtin(__builtin_expect)) && !defined(__COVERITY__)
!defined(__COVERITY__)
#define likely(cond) __builtin_expect(!!(cond), 1) #define likely(cond) __builtin_expect(!!(cond), 1)
#else #else
#define likely(x) (!!(x)) #define likely(x) (!!(x))
@ -800,8 +760,7 @@ __extern_C key_t ftok(const char *, int);
#endif /* likely */ #endif /* likely */
#ifndef unlikely #ifndef unlikely
#if (defined(__GNUC__) || __has_builtin(__builtin_expect)) && \ #if (defined(__GNUC__) || __has_builtin(__builtin_expect)) && !defined(__COVERITY__)
!defined(__COVERITY__)
#define unlikely(cond) __builtin_expect(!!(cond), 0) #define unlikely(cond) __builtin_expect(!!(cond), 0)
#else #else
#define unlikely(x) (!!(x)) #define unlikely(x) (!!(x))
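likely()/unlikely() wrap __builtin_expect so the compiler can lay out the expected path without a taken branch; on toolchains without the builtin they collapse to plain !!(cond), so they are purely advisory. A usage sketch assuming the macros above (the function is invented):

static int checked_div(long a, long b, long *out) {
  if (unlikely(b == 0)) /* keep the error path off the hot path */
    return -1;
  *out = a / b;
  return 0;
}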
@ -821,8 +780,7 @@ __extern_C key_t ftok(const char *, int);
#define MDBX_WEAK_IMPORT_ATTRIBUTE WEAK_IMPORT_ATTRIBUTE #define MDBX_WEAK_IMPORT_ATTRIBUTE WEAK_IMPORT_ATTRIBUTE
#elif __has_attribute(__weak__) && __has_attribute(__weak_import__) #elif __has_attribute(__weak__) && __has_attribute(__weak_import__)
#define MDBX_WEAK_IMPORT_ATTRIBUTE __attribute__((__weak__, __weak_import__)) #define MDBX_WEAK_IMPORT_ATTRIBUTE __attribute__((__weak__, __weak_import__))
#elif __has_attribute(__weak__) || \ #elif __has_attribute(__weak__) || (defined(__GNUC__) && __GNUC__ >= 4 && defined(__ELF__))
(defined(__GNUC__) && __GNUC__ >= 4 && defined(__ELF__))
#define MDBX_WEAK_IMPORT_ATTRIBUTE __attribute__((__weak__)) #define MDBX_WEAK_IMPORT_ATTRIBUTE __attribute__((__weak__))
#else #else
#define MDBX_WEAK_IMPORT_ATTRIBUTE #define MDBX_WEAK_IMPORT_ATTRIBUTE
@ -835,9 +793,7 @@ __extern_C key_t ftok(const char *, int);
#ifndef MDBX_EXCLUDE_FOR_GPROF #ifndef MDBX_EXCLUDE_FOR_GPROF
#ifdef ENABLE_GPROF #ifdef ENABLE_GPROF
#define MDBX_EXCLUDE_FOR_GPROF \ #define MDBX_EXCLUDE_FOR_GPROF __attribute__((__no_instrument_function__, __no_profile_instrument_function__))
__attribute__((__no_instrument_function__, \
__no_profile_instrument_function__))
#else #else
#define MDBX_EXCLUDE_FOR_GPROF #define MDBX_EXCLUDE_FOR_GPROF
#endif /* ENABLE_GPROF */ #endif /* ENABLE_GPROF */
@ -846,10 +802,9 @@ __extern_C key_t ftok(const char *, int);
/*----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*/
#ifndef expect_with_probability #ifndef expect_with_probability
#if defined(__builtin_expect_with_probability) || \ #if defined(__builtin_expect_with_probability) || __has_builtin(__builtin_expect_with_probability) || \
__has_builtin(__builtin_expect_with_probability) || __GNUC_PREREQ(9, 0) __GNUC_PREREQ(9, 0)
#define expect_with_probability(expr, value, prob) \ #define expect_with_probability(expr, value, prob) __builtin_expect_with_probability(expr, value, prob)
__builtin_expect_with_probability(expr, value, prob)
#else #else
#define expect_with_probability(expr, value, prob) (expr) #define expect_with_probability(expr, value, prob) (expr)
#endif #endif
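expect_with_probability(expr, value, prob) maps to __builtin_expect_with_probability on GCC 9+/Clang and otherwise evaluates to the bare expression; the probability is a branch-layout hint in [0, 1], never a semantic change. An illustrative use, assuming the macro above (the counting function is invented):

#include <stddef.h>

static unsigned count_small(const unsigned *v, size_t n) {
  unsigned hits = 0;
  for (size_t i = 0; i < n; ++i)
    /* hint that this comparison holds roughly 90% of the time */
    if (expect_with_probability(v[i] < 1000u, 1, 0.9))
      ++hits;
  return hits;
}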
@ -866,11 +821,9 @@ __extern_C key_t ftok(const char *, int);
#if MDBX_GOOFY_MSVC_STATIC_ANALYZER || (defined(_MSC_VER) && _MSC_VER > 1919) #if MDBX_GOOFY_MSVC_STATIC_ANALYZER || (defined(_MSC_VER) && _MSC_VER > 1919)
#define MDBX_ANALYSIS_ASSUME(expr) __analysis_assume(expr) #define MDBX_ANALYSIS_ASSUME(expr) __analysis_assume(expr)
#ifdef _PREFAST_ #ifdef _PREFAST_
#define MDBX_SUPPRESS_GOOFY_MSVC_ANALYZER(warn_id) \ #define MDBX_SUPPRESS_GOOFY_MSVC_ANALYZER(warn_id) __pragma(prefast(suppress : warn_id))
__pragma(prefast(suppress : warn_id))
#else #else
#define MDBX_SUPPRESS_GOOFY_MSVC_ANALYZER(warn_id) \ #define MDBX_SUPPRESS_GOOFY_MSVC_ANALYZER(warn_id) __pragma(warning(suppress : warn_id))
__pragma(warning(suppress : warn_id))
#endif #endif
#else #else
#define MDBX_ANALYSIS_ASSUME(expr) assert(expr) #define MDBX_ANALYSIS_ASSUME(expr) assert(expr)
@ -878,8 +831,7 @@ __extern_C key_t ftok(const char *, int);
#endif /* MDBX_GOOFY_MSVC_STATIC_ANALYZER */ #endif /* MDBX_GOOFY_MSVC_STATIC_ANALYZER */
#ifndef FLEXIBLE_ARRAY_MEMBERS #ifndef FLEXIBLE_ARRAY_MEMBERS
#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || \ #if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || (!defined(__cplusplus) && defined(_MSC_VER))
(!defined(__cplusplus) && defined(_MSC_VER))
#define FLEXIBLE_ARRAY_MEMBERS 1 #define FLEXIBLE_ARRAY_MEMBERS 1
#else #else
#define FLEXIBLE_ARRAY_MEMBERS 0 #define FLEXIBLE_ARRAY_MEMBERS 0
@ -938,8 +890,7 @@ template <typename T, size_t N> char (&__ArraySizeHelper(T (&array)[N]))[N];
#define CONCAT(a, b) a##b #define CONCAT(a, b) a##b
#define XCONCAT(a, b) CONCAT(a, b) #define XCONCAT(a, b) CONCAT(a, b)
#define MDBX_TETRAD(a, b, c, d) \ #define MDBX_TETRAD(a, b, c, d) ((uint32_t)(a) << 24 | (uint32_t)(b) << 16 | (uint32_t)(c) << 8 | (d))
((uint32_t)(a) << 24 | (uint32_t)(b) << 16 | (uint32_t)(c) << 8 | (d))
#define MDBX_STRING_TETRAD(str) MDBX_TETRAD(str[0], str[1], str[2], str[3]) #define MDBX_STRING_TETRAD(str) MDBX_TETRAD(str[0], str[1], str[2], str[3])
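MDBX_TETRAD packs four bytes into one uint32_t tag (first byte in the most significant position) and MDBX_STRING_TETRAD builds that tag from the first four characters of a string literal, which gives cheap comparable magic values. A quick worked example with a standalone copy of the same macros (the "MDBX" literal is just a convenient demo value):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MDBX_TETRAD(a, b, c, d) ((uint32_t)(a) << 24 | (uint32_t)(b) << 16 | (uint32_t)(c) << 8 | (d))
#define MDBX_STRING_TETRAD(str) MDBX_TETRAD(str[0], str[1], str[2], str[3])

int main(void) {
  /* 'M'=0x4D, 'D'=0x44, 'B'=0x42, 'X'=0x58  ->  0x4D444258 */
  printf("0x%08" PRIX32 "\n", MDBX_STRING_TETRAD("MDBX"));
  return 0;
}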
@ -953,14 +904,13 @@ template <typename T, size_t N> char (&__ArraySizeHelper(T (&array)[N]))[N];
#elif defined(_MSC_VER) #elif defined(_MSC_VER)
#include <crtdbg.h> #include <crtdbg.h>
#define STATIC_ASSERT_MSG(expr, msg) _STATIC_ASSERT(expr) #define STATIC_ASSERT_MSG(expr, msg) _STATIC_ASSERT(expr)
#elif (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || \ #elif (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || __has_feature(c_static_assert)
__has_feature(c_static_assert)
#define STATIC_ASSERT_MSG(expr, msg) _Static_assert(expr, msg) #define STATIC_ASSERT_MSG(expr, msg) _Static_assert(expr, msg)
#else #else
#define STATIC_ASSERT_MSG(expr, msg) \ #define STATIC_ASSERT_MSG(expr, msg) \
switch (0) { \ switch (0) { \
case 0: \ case 0: \
case (expr):; \ case (expr):; \
} }
#endif #endif
#endif /* STATIC_ASSERT */ #endif /* STATIC_ASSERT */
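The last fallback above turns a compile-time check into a switch: `case 0:` and `case (expr):` become duplicate case labels whenever the constant expression is zero, so a false assertion fails to compile even without C11 _Static_assert. A minimal standalone illustration of the same trick (DEMO_* names are invented; note this form can only appear in statement context, i.e. inside a function):

#define DEMO_STATIC_ASSERT(expr) switch (0) { case 0: case (expr):; }

static void demo(void) {
  DEMO_STATIC_ASSERT(sizeof(int) >= 2); /* fine: labels are 0 and 1 */
  /* DEMO_STATIC_ASSERT(sizeof(int) > 64);  would not compile: duplicate `case 0` */
}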

View File

@ -8,39 +8,26 @@
/* Internal prototypes */ /* Internal prototypes */
/* audit.c */ /* audit.c */
MDBX_INTERNAL int audit_ex(MDBX_txn *txn, size_t retired_stored, MDBX_INTERNAL int audit_ex(MDBX_txn *txn, size_t retired_stored, bool dont_filter_gc);
bool dont_filter_gc);
/* mvcc-readers.c */ /* mvcc-readers.c */
MDBX_INTERNAL bsr_t mvcc_bind_slot(MDBX_env *env); MDBX_INTERNAL bsr_t mvcc_bind_slot(MDBX_env *env);
MDBX_MAYBE_UNUSED MDBX_INTERNAL pgno_t mvcc_largest_this(MDBX_env *env, MDBX_MAYBE_UNUSED MDBX_INTERNAL pgno_t mvcc_largest_this(MDBX_env *env, pgno_t largest);
pgno_t largest); MDBX_INTERNAL txnid_t mvcc_shapshot_oldest(MDBX_env *const env, const txnid_t steady);
MDBX_INTERNAL txnid_t mvcc_shapshot_oldest(MDBX_env *const env, MDBX_INTERNAL pgno_t mvcc_snapshot_largest(const MDBX_env *env, pgno_t last_used_page);
const txnid_t steady); MDBX_INTERNAL txnid_t mvcc_kick_laggards(MDBX_env *env, const txnid_t straggler);
MDBX_INTERNAL pgno_t mvcc_snapshot_largest(const MDBX_env *env,
pgno_t last_used_page);
MDBX_INTERNAL txnid_t mvcc_kick_laggards(MDBX_env *env,
const txnid_t straggler);
MDBX_INTERNAL int mvcc_cleanup_dead(MDBX_env *env, int rlocked, int *dead); MDBX_INTERNAL int mvcc_cleanup_dead(MDBX_env *env, int rlocked, int *dead);
MDBX_INTERNAL txnid_t mvcc_kick_laggards(MDBX_env *env, const txnid_t laggard); MDBX_INTERNAL txnid_t mvcc_kick_laggards(MDBX_env *env, const txnid_t laggard);
/* dxb.c */ /* dxb.c */
MDBX_INTERNAL int dxb_setup(MDBX_env *env, const int lck_rc, MDBX_INTERNAL int dxb_setup(MDBX_env *env, const int lck_rc, const mdbx_mode_t mode_bits);
const mdbx_mode_t mode_bits); MDBX_INTERNAL int __must_check_result dxb_read_header(MDBX_env *env, meta_t *meta, const int lck_exclusive,
MDBX_INTERNAL int __must_check_result const mdbx_mode_t mode_bits);
dxb_read_header(MDBX_env *env, meta_t *meta, const int lck_exclusive,
const mdbx_mode_t mode_bits);
enum resize_mode { implicit_grow, impilict_shrink, explicit_resize }; enum resize_mode { implicit_grow, impilict_shrink, explicit_resize };
MDBX_INTERNAL int __must_check_result dxb_resize(MDBX_env *const env, MDBX_INTERNAL int __must_check_result dxb_resize(MDBX_env *const env, const pgno_t used_pgno, const pgno_t size_pgno,
const pgno_t used_pgno, pgno_t limit_pgno, const enum resize_mode mode);
const pgno_t size_pgno, MDBX_INTERNAL int dxb_set_readahead(const MDBX_env *env, const pgno_t edge, const bool enable, const bool force_whole);
pgno_t limit_pgno, MDBX_INTERNAL int __must_check_result dxb_sync_locked(MDBX_env *env, unsigned flags, meta_t *const pending,
const enum resize_mode mode);
MDBX_INTERNAL int dxb_set_readahead(const MDBX_env *env, const pgno_t edge,
const bool enable, const bool force_whole);
MDBX_INTERNAL int __must_check_result dxb_sync_locked(MDBX_env *env,
unsigned flags,
meta_t *const pending,
troika_t *const troika); troika_t *const troika);
#if defined(ENABLE_MEMCHECK) || defined(__SANITIZE_ADDRESS__) #if defined(ENABLE_MEMCHECK) || defined(__SANITIZE_ADDRESS__)
MDBX_INTERNAL void dxb_sanitize_tail(MDBX_env *env, MDBX_txn *txn); MDBX_INTERNAL void dxb_sanitize_tail(MDBX_env *env, MDBX_txn *txn);
@ -60,9 +47,8 @@ MDBX_INTERNAL int txn_park(MDBX_txn *txn, bool autounpark);
MDBX_INTERNAL int txn_unpark(MDBX_txn *txn); MDBX_INTERNAL int txn_unpark(MDBX_txn *txn);
MDBX_INTERNAL int txn_check_badbits_parked(const MDBX_txn *txn, int bad_bits); MDBX_INTERNAL int txn_check_badbits_parked(const MDBX_txn *txn, int bad_bits);
#define TXN_END_NAMES \ #define TXN_END_NAMES \
{"committed", "empty-commit", "abort", "reset", \ {"committed", "empty-commit", "abort", "reset", "fail-begin", "fail-beginchild", "ousted", nullptr}
"fail-begin", "fail-beginchild", "ousted", nullptr}
enum { enum {
/* txn_end operation number, for logging */ /* txn_end operation number, for logging */
TXN_END_COMMITTED, TXN_END_COMMITTED,
@ -84,8 +70,7 @@ MDBX_INTERNAL int txn_write(MDBX_txn *txn, iov_ctx_t *ctx);
/* env.c */ /* env.c */
MDBX_INTERNAL int env_open(MDBX_env *env, mdbx_mode_t mode); MDBX_INTERNAL int env_open(MDBX_env *env, mdbx_mode_t mode);
MDBX_INTERNAL int env_info(const MDBX_env *env, const MDBX_txn *txn, MDBX_INTERNAL int env_info(const MDBX_env *env, const MDBX_txn *txn, MDBX_envinfo *out, size_t bytes, troika_t *troika);
MDBX_envinfo *out, size_t bytes, troika_t *troika);
MDBX_INTERNAL int env_sync(MDBX_env *env, bool force, bool nonblock); MDBX_INTERNAL int env_sync(MDBX_env *env, bool force, bool nonblock);
MDBX_INTERNAL int env_close(MDBX_env *env, bool resurrect_after_fork); MDBX_INTERNAL int env_close(MDBX_env *env, bool resurrect_after_fork);
MDBX_INTERNAL bool env_txn0_owned(const MDBX_env *env); MDBX_INTERNAL bool env_txn0_owned(const MDBX_env *env);
@ -97,27 +82,17 @@ MDBX_INTERNAL unsigned env_setup_pagesize(MDBX_env *env, const size_t pagesize);
/* tree.c */ /* tree.c */
MDBX_INTERNAL int tree_drop(MDBX_cursor *mc, const bool may_have_tables); MDBX_INTERNAL int tree_drop(MDBX_cursor *mc, const bool may_have_tables);
MDBX_INTERNAL int __must_check_result tree_rebalance(MDBX_cursor *mc); MDBX_INTERNAL int __must_check_result tree_rebalance(MDBX_cursor *mc);
MDBX_INTERNAL int __must_check_result tree_propagate_key(MDBX_cursor *mc, MDBX_INTERNAL int __must_check_result tree_propagate_key(MDBX_cursor *mc, const MDBX_val *key);
const MDBX_val *key);
MDBX_INTERNAL void recalculate_merge_thresholds(MDBX_env *env); MDBX_INTERNAL void recalculate_merge_thresholds(MDBX_env *env);
MDBX_INTERNAL void recalculate_subpage_thresholds(MDBX_env *env); MDBX_INTERNAL void recalculate_subpage_thresholds(MDBX_env *env);
/* table.c */ /* table.c */
MDBX_INTERNAL int __must_check_result tbl_fetch(MDBX_txn *txn, size_t dbi); MDBX_INTERNAL int __must_check_result tbl_fetch(MDBX_txn *txn, size_t dbi);
MDBX_INTERNAL int __must_check_result tbl_setup(const MDBX_env *env, MDBX_INTERNAL int __must_check_result tbl_setup(const MDBX_env *env, kvx_t *const kvx, const tree_t *const db);
kvx_t *const kvx,
const tree_t *const db);
/* coherency.c */ /* coherency.c */
MDBX_INTERNAL bool coherency_check_meta(const MDBX_env *env, MDBX_INTERNAL bool coherency_check_meta(const MDBX_env *env, const volatile meta_t *meta, bool report);
const volatile meta_t *meta, MDBX_INTERNAL int coherency_fetch_head(MDBX_txn *txn, const meta_ptr_t head, uint64_t *timestamp);
bool report); MDBX_INTERNAL int coherency_check_written(const MDBX_env *env, const txnid_t txnid, const volatile meta_t *meta,
MDBX_INTERNAL int coherency_fetch_head(MDBX_txn *txn, const meta_ptr_t head, const intptr_t pgno, uint64_t *timestamp);
uint64_t *timestamp); MDBX_INTERNAL int coherency_timeout(uint64_t *timestamp, intptr_t pgno, const MDBX_env *env);
MDBX_INTERNAL int coherency_check_written(const MDBX_env *env,
const txnid_t txnid,
const volatile meta_t *meta,
const intptr_t pgno,
uint64_t *timestamp);
MDBX_INTERNAL int coherency_timeout(uint64_t *timestamp, intptr_t pgno,
const MDBX_env *env);

View File

@ -10,20 +10,17 @@ typedef struct diff_result {
} diff_t; } diff_t;
/* calculates: r = x - y */ /* calculates: r = x - y */
__hot static int cursor_diff(const MDBX_cursor *const __restrict x, __hot static int cursor_diff(const MDBX_cursor *const __restrict x, const MDBX_cursor *const __restrict y,
const MDBX_cursor *const __restrict y,
diff_t *const __restrict r) { diff_t *const __restrict r) {
r->diff = 0; r->diff = 0;
r->level = 0; r->level = 0;
r->root_nkeys = 0; r->root_nkeys = 0;
if (unlikely(x->signature != cur_signature_live)) if (unlikely(x->signature != cur_signature_live))
return (x->signature == cur_signature_ready4dispose) ? MDBX_EINVAL return (x->signature == cur_signature_ready4dispose) ? MDBX_EINVAL : MDBX_EBADSIGN;
: MDBX_EBADSIGN;
if (unlikely(y->signature != cur_signature_live)) if (unlikely(y->signature != cur_signature_live))
return (y->signature == cur_signature_ready4dispose) ? MDBX_EINVAL return (y->signature == cur_signature_ready4dispose) ? MDBX_EINVAL : MDBX_EBADSIGN;
: MDBX_EBADSIGN;
int rc = check_txn(x->txn, MDBX_TXN_BLOCKED); int rc = check_txn(x->txn, MDBX_TXN_BLOCKED);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
@ -86,8 +83,7 @@ __hot static int cursor_diff(const MDBX_cursor *const __restrict x,
return MDBX_SUCCESS; return MDBX_SUCCESS;
} }
__hot static ptrdiff_t estimate(const tree_t *tree, __hot static ptrdiff_t estimate(const tree_t *tree, diff_t *const __restrict dr) {
diff_t *const __restrict dr) {
/* root: branch-page => scale = leaf-factor * branch-factor^(N-1) /* root: branch-page => scale = leaf-factor * branch-factor^(N-1)
* level-1: branch-page(s) => scale = leaf-factor * branch-factor^2 * level-1: branch-page(s) => scale = leaf-factor * branch-factor^2
* level-2: branch-page(s) => scale = leaf-factor * branch-factor * level-2: branch-page(s) => scale = leaf-factor * branch-factor
@ -98,8 +94,7 @@ __hot static ptrdiff_t estimate(const tree_t *tree,
if (btree_power < 0) if (btree_power < 0)
return dr->diff; return dr->diff;
ptrdiff_t estimated = ptrdiff_t estimated = (ptrdiff_t)tree->items * dr->diff / (ptrdiff_t)tree->leaf_pages;
(ptrdiff_t)tree->items * dr->diff / (ptrdiff_t)tree->leaf_pages;
if (btree_power == 0) if (btree_power == 0)
return estimated; return estimated;
@ -112,9 +107,7 @@ __hot static ptrdiff_t estimate(const tree_t *tree,
total(branch_entries) = leaf_pages + branch_pages - 1 (root page) */ total(branch_entries) = leaf_pages + branch_pages - 1 (root page) */
const size_t log2_fixedpoint = sizeof(size_t) - 1; const size_t log2_fixedpoint = sizeof(size_t) - 1;
const size_t half = UINT64_C(1) << (log2_fixedpoint - 1); const size_t half = UINT64_C(1) << (log2_fixedpoint - 1);
const size_t factor = const size_t factor = ((tree->leaf_pages + tree->branch_pages - 1) << log2_fixedpoint) / tree->branch_pages;
((tree->leaf_pages + tree->branch_pages - 1) << log2_fixedpoint) /
tree->branch_pages;
while (1) { while (1) {
switch ((size_t)btree_power) { switch ((size_t)btree_power) {
default: { default: {
@ -149,11 +142,8 @@ __hot static ptrdiff_t estimate(const tree_t *tree,
} }
} }
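To unpack the arithmetic in estimate() above: the leaf-level share is items * diff / leaf_pages, and each higher b-tree level scales the result by an average fan-out kept in fixed point, factor = ((leaf_pages + branch_pages - 1) << log2_fixedpoint) / branch_pages, since total branch entries equal leaf_pages + branch_pages - 1 (the root). A tiny numeric sketch of that fixed-point step, with an invented tree shape:

#include <stddef.h>
#include <stdio.h>

int main(void) {
  /* hypothetical shape: 1000 leaf pages under 11 branch pages */
  const size_t leaf_pages = 1000, branch_pages = 11;
  const size_t log2_fixedpoint = sizeof(size_t) - 1; /* same shift as in estimate() */
  const size_t factor = ((leaf_pages + branch_pages - 1) << log2_fixedpoint) / branch_pages;
  /* average fan-out ~= 91.81 entries per branch page, held without floating point */
  printf("fan-out = %zu.%02zu\n", factor >> log2_fixedpoint,
         ((factor & ((1u << log2_fixedpoint) - 1)) * 100) >> log2_fixedpoint);
  return 0;
}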
__hot int mdbx_estimate_distance(const MDBX_cursor *first, __hot int mdbx_estimate_distance(const MDBX_cursor *first, const MDBX_cursor *last, ptrdiff_t *distance_items) {
const MDBX_cursor *last, if (unlikely(first == nullptr || last == nullptr || distance_items == nullptr))
ptrdiff_t *distance_items) {
if (unlikely(first == nullptr || last == nullptr ||
distance_items == nullptr))
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
*distance_items = 0; *distance_items = 0;
@ -177,17 +167,14 @@ __hot int mdbx_estimate_distance(const MDBX_cursor *first,
return MDBX_SUCCESS; return MDBX_SUCCESS;
} }
__hot int mdbx_estimate_move(const MDBX_cursor *cursor, MDBX_val *key, __hot int mdbx_estimate_move(const MDBX_cursor *cursor, MDBX_val *key, MDBX_val *data, MDBX_cursor_op move_op,
MDBX_val *data, MDBX_cursor_op move_op,
ptrdiff_t *distance_items) { ptrdiff_t *distance_items) {
if (unlikely(cursor == nullptr || distance_items == nullptr || if (unlikely(cursor == nullptr || distance_items == nullptr || move_op == MDBX_GET_CURRENT ||
move_op == MDBX_GET_CURRENT || move_op == MDBX_GET_MULTIPLE)) move_op == MDBX_GET_MULTIPLE))
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
if (unlikely(cursor->signature != cur_signature_live)) if (unlikely(cursor->signature != cur_signature_live))
return LOG_IFERR((cursor->signature == cur_signature_ready4dispose) return LOG_IFERR((cursor->signature == cur_signature_ready4dispose) ? MDBX_EINVAL : MDBX_EBADSIGN);
? MDBX_EINVAL
: MDBX_EBADSIGN);
int rc = check_txn(cursor->txn, MDBX_TXN_BLOCKED); int rc = check_txn(cursor->txn, MDBX_TXN_BLOCKED);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
@ -209,8 +196,7 @@ __hot int mdbx_estimate_move(const MDBX_cursor *cursor, MDBX_val *key,
MDBX_val stub_data; MDBX_val stub_data;
if (data == nullptr) { if (data == nullptr) {
const unsigned mask = const unsigned mask = 1 << MDBX_GET_BOTH | 1 << MDBX_GET_BOTH_RANGE | 1 << MDBX_SET_KEY;
1 << MDBX_GET_BOTH | 1 << MDBX_GET_BOTH_RANGE | 1 << MDBX_SET_KEY;
if (unlikely(mask & (1 << move_op))) if (unlikely(mask & (1 << move_op)))
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
stub_data.iov_base = nullptr; stub_data.iov_base = nullptr;
@ -220,9 +206,8 @@ __hot int mdbx_estimate_move(const MDBX_cursor *cursor, MDBX_val *key,
MDBX_val stub_key; MDBX_val stub_key;
if (key == nullptr) { if (key == nullptr) {
const unsigned mask = 1 << MDBX_GET_BOTH | 1 << MDBX_GET_BOTH_RANGE | const unsigned mask =
1 << MDBX_SET_KEY | 1 << MDBX_SET | 1 << MDBX_GET_BOTH | 1 << MDBX_GET_BOTH_RANGE | 1 << MDBX_SET_KEY | 1 << MDBX_SET | 1 << MDBX_SET_RANGE;
1 << MDBX_SET_RANGE;
if (unlikely(mask & (1 << move_op))) if (unlikely(mask & (1 << move_op)))
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
stub_key.iov_base = nullptr; stub_key.iov_base = nullptr;
@ -232,8 +217,7 @@ __hot int mdbx_estimate_move(const MDBX_cursor *cursor, MDBX_val *key,
next.outer.signature = cur_signature_live; next.outer.signature = cur_signature_live;
rc = cursor_ops(&next.outer, key, data, move_op); rc = cursor_ops(&next.outer, key, data, move_op);
if (unlikely(rc != MDBX_SUCCESS && if (unlikely(rc != MDBX_SUCCESS && (rc != MDBX_NOTFOUND || !is_pointed(&next.outer))))
(rc != MDBX_NOTFOUND || !is_pointed(&next.outer))))
return LOG_IFERR(rc); return LOG_IFERR(rc);
if (move_op == MDBX_LAST) { if (move_op == MDBX_LAST) {
@ -243,11 +227,8 @@ __hot int mdbx_estimate_move(const MDBX_cursor *cursor, MDBX_val *key,
return mdbx_estimate_distance(cursor, &next.outer, distance_items); return mdbx_estimate_distance(cursor, &next.outer, distance_items);
} }
__hot int mdbx_estimate_range(const MDBX_txn *txn, MDBX_dbi dbi, __hot int mdbx_estimate_range(const MDBX_txn *txn, MDBX_dbi dbi, const MDBX_val *begin_key, const MDBX_val *begin_data,
const MDBX_val *begin_key, const MDBX_val *end_key, const MDBX_val *end_data, ptrdiff_t *size_items) {
const MDBX_val *begin_data,
const MDBX_val *end_key, const MDBX_val *end_data,
ptrdiff_t *size_items) {
int rc = check_txn(txn, MDBX_TXN_BLOCKED); int rc = check_txn(txn, MDBX_TXN_BLOCKED);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
return LOG_IFERR(rc); return LOG_IFERR(rc);
@ -255,8 +236,7 @@ __hot int mdbx_estimate_range(const MDBX_txn *txn, MDBX_dbi dbi,
if (unlikely(!size_items)) if (unlikely(!size_items))
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
if (unlikely(begin_data && if (unlikely(begin_data && (begin_key == nullptr || begin_key == MDBX_EPSILON)))
(begin_key == nullptr || begin_key == MDBX_EPSILON)))
return LOG_IFERR(MDBX_EINVAL); return LOG_IFERR(MDBX_EINVAL);
if (unlikely(end_data && (end_key == nullptr || end_key == MDBX_EPSILON))) if (unlikely(end_data && (end_key == nullptr || end_key == MDBX_EPSILON)))
@ -285,20 +265,14 @@ __hot int mdbx_estimate_range(const MDBX_txn *txn, MDBX_dbi dbi,
rc = outer_first(&begin.outer, nullptr, nullptr); rc = outer_first(&begin.outer, nullptr, nullptr);
if (unlikely(end_key == MDBX_EPSILON)) { if (unlikely(end_key == MDBX_EPSILON)) {
/* LY: FIRST..+epsilon case */ /* LY: FIRST..+epsilon case */
return LOG_IFERR( return LOG_IFERR((rc == MDBX_SUCCESS) ? mdbx_cursor_count(&begin.outer, (size_t *)size_items) : rc);
(rc == MDBX_SUCCESS)
? mdbx_cursor_count(&begin.outer, (size_t *)size_items)
: rc);
} }
} else { } else {
if (unlikely(begin_key == MDBX_EPSILON)) { if (unlikely(begin_key == MDBX_EPSILON)) {
if (end_key == nullptr) { if (end_key == nullptr) {
/* LY: -epsilon..LAST case */ /* LY: -epsilon..LAST case */
rc = outer_last(&begin.outer, nullptr, nullptr); rc = outer_last(&begin.outer, nullptr, nullptr);
return LOG_IFERR( return LOG_IFERR((rc == MDBX_SUCCESS) ? mdbx_cursor_count(&begin.outer, (size_t *)size_items) : rc);
(rc == MDBX_SUCCESS)
? mdbx_cursor_count(&begin.outer, (size_t *)size_items)
: rc);
} }
/* LY: -epsilon..value case */ /* LY: -epsilon..value case */
assert(end_key != MDBX_EPSILON); assert(end_key != MDBX_EPSILON);
@ -309,22 +283,19 @@ __hot int mdbx_estimate_range(const MDBX_txn *txn, MDBX_dbi dbi,
end_key = begin_key; end_key = begin_key;
} }
if (end_key && !begin_data && !end_data && if (end_key && !begin_data && !end_data &&
(begin_key == end_key || (begin_key == end_key || begin.outer.clc->k.cmp(begin_key, end_key) == 0)) {
begin.outer.clc->k.cmp(begin_key, end_key) == 0)) {
/* LY: single key case */ /* LY: single key case */
rc = cursor_seek(&begin.outer, (MDBX_val *)begin_key, nullptr, MDBX_SET) rc = cursor_seek(&begin.outer, (MDBX_val *)begin_key, nullptr, MDBX_SET).err;
.err;
if (unlikely(rc != MDBX_SUCCESS)) { if (unlikely(rc != MDBX_SUCCESS)) {
*size_items = 0; *size_items = 0;
return LOG_IFERR((rc == MDBX_NOTFOUND) ? MDBX_SUCCESS : rc); return LOG_IFERR((rc == MDBX_NOTFOUND) ? MDBX_SUCCESS : rc);
} }
*size_items = 1; *size_items = 1;
if (inner_pointed(&begin.outer)) if (inner_pointed(&begin.outer))
*size_items = *size_items = (sizeof(*size_items) >= sizeof(begin.inner.nested_tree.items) ||
(sizeof(*size_items) >= sizeof(begin.inner.nested_tree.items) || begin.inner.nested_tree.items <= PTRDIFF_MAX)
begin.inner.nested_tree.items <= PTRDIFF_MAX) ? (size_t)begin.inner.nested_tree.items
? (size_t)begin.inner.nested_tree.items : PTRDIFF_MAX;
: PTRDIFF_MAX;
return MDBX_SUCCESS; return MDBX_SUCCESS;
} else { } else {
@ -332,9 +303,7 @@ __hot int mdbx_estimate_range(const MDBX_txn *txn, MDBX_dbi dbi,
MDBX_val proxy_data = {nullptr, 0}; MDBX_val proxy_data = {nullptr, 0};
if (begin_data) if (begin_data)
proxy_data = *begin_data; proxy_data = *begin_data;
rc = LOG_IFERR(cursor_seek(&begin.outer, &proxy_key, &proxy_data, rc = LOG_IFERR(cursor_seek(&begin.outer, &proxy_key, &proxy_data, MDBX_SET_LOWERBOUND).err);
MDBX_SET_LOWERBOUND)
.err);
} }
} }
@ -356,8 +325,7 @@ __hot int mdbx_estimate_range(const MDBX_txn *txn, MDBX_dbi dbi,
MDBX_val proxy_data = {nullptr, 0}; MDBX_val proxy_data = {nullptr, 0};
if (end_data) if (end_data)
proxy_data = *end_data; proxy_data = *end_data;
rc = cursor_seek(&end.outer, &proxy_key, &proxy_data, MDBX_SET_LOWERBOUND) rc = cursor_seek(&end.outer, &proxy_key, &proxy_data, MDBX_SET_LOWERBOUND).err;
.err;
} }
if (unlikely(rc != MDBX_SUCCESS)) { if (unlikely(rc != MDBX_SUCCESS)) {
if (rc != MDBX_NOTFOUND || !is_pointed(&end.outer)) if (rc != MDBX_NOTFOUND || !is_pointed(&end.outer))
@ -367,10 +335,9 @@ __hot int mdbx_estimate_range(const MDBX_txn *txn, MDBX_dbi dbi,
rc = mdbx_estimate_distance(&begin.outer, &end.outer, size_items); rc = mdbx_estimate_distance(&begin.outer, &end.outer, size_items);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
return LOG_IFERR(rc); return LOG_IFERR(rc);
assert(*size_items >= -(ptrdiff_t)begin.outer.tree->items && assert(*size_items >= -(ptrdiff_t)begin.outer.tree->items && *size_items <= (ptrdiff_t)begin.outer.tree->items);
*size_items <= (ptrdiff_t)begin.outer.tree->items);
#if 0 /* LY: Was decided to returns as-is (i.e. negative) the estimation \ #if 0 /* LY: Was decided to returns as-is (i.e. negative) the estimation \
* results for an inverted ranges. */ * results for an inverted ranges. */
/* Commit 8ddfd1f34ad7cf7a3c4aa75d2e248ca7e639ed63 /* Commit 8ddfd1f34ad7cf7a3c4aa75d2e248ca7e639ed63
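For context, mdbx_estimate_range is the public entry point over the machinery above: it positions two cursors on the range boundaries and reports the estimated number of items between them (negative for an inverted range). A hedged usage sketch, assuming an env/dbi opened elsewhere and with error handling trimmed:

/* Sketch only: assumes <mdbx.h> and <stdio.h> are included. */
static void show_estimate(MDBX_env *env, MDBX_dbi dbi) {
  MDBX_txn *txn = NULL;
  if (mdbx_txn_begin(env, NULL, MDBX_TXN_RDONLY, &txn) != MDBX_SUCCESS)
    return;
  MDBX_val lo = {"aaa", 3}, hi = {"zzz", 3};
  ptrdiff_t items = 0;
  if (mdbx_estimate_range(txn, dbi, &lo, NULL, &hi, NULL, &items) == MDBX_SUCCESS)
    printf("estimated items between the two keys: %td\n", items);
  mdbx_txn_abort(txn);
}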

View File

@ -8,8 +8,7 @@ static void refund_reclaimed(MDBX_txn *txn) {
/* Scanning in descend order */ /* Scanning in descend order */
pgno_t first_unallocated = txn->geo.first_unallocated; pgno_t first_unallocated = txn->geo.first_unallocated;
const pnl_t pnl = txn->tw.relist; const pnl_t pnl = txn->tw.relist;
tASSERT(txn, tASSERT(txn, MDBX_PNL_GETSIZE(pnl) && MDBX_PNL_MOST(pnl) == first_unallocated - 1);
MDBX_PNL_GETSIZE(pnl) && MDBX_PNL_MOST(pnl) == first_unallocated - 1);
#if MDBX_PNL_ASCENDING #if MDBX_PNL_ASCENDING
size_t i = MDBX_PNL_GETSIZE(pnl); size_t i = MDBX_PNL_GETSIZE(pnl);
tASSERT(txn, pnl[i] == first_unallocated - 1); tASSERT(txn, pnl[i] == first_unallocated - 1);
@ -26,12 +25,10 @@ static void refund_reclaimed(MDBX_txn *txn) {
for (size_t move = 0; move < len; ++move) for (size_t move = 0; move < len; ++move)
pnl[1 + move] = pnl[i + move]; pnl[1 + move] = pnl[i + move];
#endif #endif
VERBOSE("refunded %" PRIaPGNO " pages: %" PRIaPGNO " -> %" PRIaPGNO, VERBOSE("refunded %" PRIaPGNO " pages: %" PRIaPGNO " -> %" PRIaPGNO, txn->geo.first_unallocated - first_unallocated,
txn->geo.first_unallocated - first_unallocated,
txn->geo.first_unallocated, first_unallocated); txn->geo.first_unallocated, first_unallocated);
txn->geo.first_unallocated = first_unallocated; txn->geo.first_unallocated = first_unallocated;
tASSERT(txn, tASSERT(txn, pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated - 1));
pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated - 1));
} }
static void refund_loose(MDBX_txn *txn) { static void refund_loose(MDBX_txn *txn) {
@ -58,18 +55,14 @@ static void refund_loose(MDBX_txn *txn) {
} }
/* Collect loose-pages which may be refunded. */ /* Collect loose-pages which may be refunded. */
tASSERT(txn, tASSERT(txn, txn->geo.first_unallocated >= MIN_PAGENO + txn->tw.loose_count);
txn->geo.first_unallocated >= MIN_PAGENO + txn->tw.loose_count);
pgno_t most = MIN_PAGENO; pgno_t most = MIN_PAGENO;
size_t w = 0; size_t w = 0;
for (const page_t *lp = txn->tw.loose_pages; lp; lp = page_next(lp)) { for (const page_t *lp = txn->tw.loose_pages; lp; lp = page_next(lp)) {
tASSERT(txn, lp->flags == P_LOOSE); tASSERT(txn, lp->flags == P_LOOSE);
tASSERT(txn, txn->geo.first_unallocated > lp->pgno); tASSERT(txn, txn->geo.first_unallocated > lp->pgno);
if (likely(txn->geo.first_unallocated - txn->tw.loose_count <= if (likely(txn->geo.first_unallocated - txn->tw.loose_count <= lp->pgno)) {
lp->pgno)) { tASSERT(txn, w < ((suitable == onstack) ? pnl_bytes2size(sizeof(onstack)) : MDBX_PNL_ALLOCLEN(suitable)));
tASSERT(txn,
w < ((suitable == onstack) ? pnl_bytes2size(sizeof(onstack))
: MDBX_PNL_ALLOCLEN(suitable)));
suitable[++w] = lp->pgno; suitable[++w] = lp->pgno;
most = (lp->pgno > most) ? lp->pgno : most; most = (lp->pgno > most) ? lp->pgno : most;
} }
@ -84,10 +77,8 @@ static void refund_loose(MDBX_txn *txn) {
/* Scanning in descend order */ /* Scanning in descend order */
const intptr_t step = MDBX_PNL_ASCENDING ? -1 : 1; const intptr_t step = MDBX_PNL_ASCENDING ? -1 : 1;
const intptr_t begin = const intptr_t begin = MDBX_PNL_ASCENDING ? MDBX_PNL_GETSIZE(suitable) : 1;
MDBX_PNL_ASCENDING ? MDBX_PNL_GETSIZE(suitable) : 1; const intptr_t end = MDBX_PNL_ASCENDING ? 0 : MDBX_PNL_GETSIZE(suitable) + 1;
const intptr_t end =
MDBX_PNL_ASCENDING ? 0 : MDBX_PNL_GETSIZE(suitable) + 1;
tASSERT(txn, suitable[begin] >= suitable[end - step]); tASSERT(txn, suitable[begin] >= suitable[end - step]);
tASSERT(txn, most == suitable[begin]); tASSERT(txn, most == suitable[begin]);
@ -97,8 +88,7 @@ static void refund_loose(MDBX_txn *txn) {
most -= 1; most -= 1;
} }
const size_t refunded = txn->geo.first_unallocated - most; const size_t refunded = txn->geo.first_unallocated - most;
DEBUG("refund-suitable %zu pages %" PRIaPGNO " -> %" PRIaPGNO, refunded, DEBUG("refund-suitable %zu pages %" PRIaPGNO " -> %" PRIaPGNO, refunded, most, txn->geo.first_unallocated);
most, txn->geo.first_unallocated);
txn->geo.first_unallocated = most; txn->geo.first_unallocated = most;
txn->tw.loose_count -= refunded; txn->tw.loose_count -= refunded;
if (dl) { if (dl) {
@ -126,22 +116,19 @@ static void refund_loose(MDBX_txn *txn) {
} }
dpl_setlen(dl, w); dpl_setlen(dl, w);
tASSERT(txn, txn->tw.dirtyroom + txn->tw.dirtylist->length == tASSERT(txn, txn->tw.dirtyroom + txn->tw.dirtylist->length ==
(txn->parent ? txn->parent->tw.dirtyroom (txn->parent ? txn->parent->tw.dirtyroom : txn->env->options.dp_limit));
: txn->env->options.dp_limit));
} }
goto unlink_loose; goto unlink_loose;
} }
} else { } else {
/* Dirtylist is mostly sorted, just refund loose pages at the end. */ /* Dirtylist is mostly sorted, just refund loose pages at the end. */
dpl_sort(txn); dpl_sort(txn);
tASSERT(txn, tASSERT(txn, dl->length < 2 || dl->items[1].pgno < dl->items[dl->length].pgno);
dl->length < 2 || dl->items[1].pgno < dl->items[dl->length].pgno);
tASSERT(txn, dl->sorted == dl->length); tASSERT(txn, dl->sorted == dl->length);
/* Scan dirtylist tail-forward and cutoff suitable pages. */ /* Scan dirtylist tail-forward and cutoff suitable pages. */
size_t n; size_t n;
for (n = dl->length; dl->items[n].pgno == txn->geo.first_unallocated - 1 && for (n = dl->length; dl->items[n].pgno == txn->geo.first_unallocated - 1 && dl->items[n].ptr->flags == P_LOOSE;
dl->items[n].ptr->flags == P_LOOSE;
--n) { --n) {
tASSERT(txn, n > 0); tASSERT(txn, n > 0);
page_t *dp = dl->items[n].ptr; page_t *dp = dl->items[n].ptr;
@ -158,8 +145,7 @@ static void refund_loose(MDBX_txn *txn) {
txn->tw.dirtyroom += refunded; txn->tw.dirtyroom += refunded;
dl->pages_including_loose -= refunded; dl->pages_including_loose -= refunded;
tASSERT(txn, txn->tw.dirtyroom + txn->tw.dirtylist->length == tASSERT(txn, txn->tw.dirtyroom + txn->tw.dirtylist->length ==
(txn->parent ? txn->parent->tw.dirtyroom (txn->parent ? txn->parent->tw.dirtyroom : txn->env->options.dp_limit));
: txn->env->options.dp_limit));
/* Filter-out loose chain & dispose refunded pages. */ /* Filter-out loose chain & dispose refunded pages. */
unlink_loose: unlink_loose:
@ -188,18 +174,15 @@ static void refund_loose(MDBX_txn *txn) {
bool txn_refund(MDBX_txn *txn) { bool txn_refund(MDBX_txn *txn) {
const pgno_t before = txn->geo.first_unallocated; const pgno_t before = txn->geo.first_unallocated;
if (txn->tw.loose_pages && if (txn->tw.loose_pages && txn->tw.loose_refund_wl > txn->geo.first_unallocated)
txn->tw.loose_refund_wl > txn->geo.first_unallocated)
refund_loose(txn); refund_loose(txn);
while (true) { while (true) {
if (MDBX_PNL_GETSIZE(txn->tw.relist) == 0 || if (MDBX_PNL_GETSIZE(txn->tw.relist) == 0 || MDBX_PNL_MOST(txn->tw.relist) != txn->geo.first_unallocated - 1)
MDBX_PNL_MOST(txn->tw.relist) != txn->geo.first_unallocated - 1)
break; break;
refund_reclaimed(txn); refund_reclaimed(txn);
if (!txn->tw.loose_pages || if (!txn->tw.loose_pages || txn->tw.loose_refund_wl <= txn->geo.first_unallocated)
txn->tw.loose_refund_wl <= txn->geo.first_unallocated)
break; break;
const pgno_t memo = txn->geo.first_unallocated; const pgno_t memo = txn->geo.first_unallocated;
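Both refund paths in this file serve the same goal: when the pages just below geo.first_unallocated turn out to be reclaimed (relist) or loose dirty pages, the transaction winds the end-of-used-space pointer back instead of carrying those pages in the free list. A toy model of the relist case with plain numbers (not the real pnl_t machinery):

#include <stdio.h>

int main(void) {
  unsigned first_unallocated = 100;
  /* reclaimed page numbers, descending; the leading run is contiguous with 99 */
  const unsigned relist[] = {99, 98, 97, 90};
  const unsigned count = sizeof(relist) / sizeof(relist[0]);
  unsigned i = 0;
  while (i < count && relist[i] == first_unallocated - 1) {
    --first_unallocated; /* page is refunded back to the unallocated tail */
    ++i;
  }
  /* pages 97..99 are refunded; 90 is not contiguous, so it stays listed */
  printf("first_unallocated is now %u, %u pages refunded\n", first_unallocated, i);
  return 0;
}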

View File

@ -14,21 +14,21 @@
* Thanks to John M. Gamble for the http://pages.ripco.net/~jgamble/nw.html */ * Thanks to John M. Gamble for the http://pages.ripco.net/~jgamble/nw.html */
#if MDBX_HAVE_CMOV #if MDBX_HAVE_CMOV
#define SORT_CMP_SWAP(TYPE, CMP, a, b) \ #define SORT_CMP_SWAP(TYPE, CMP, a, b) \
do { \ do { \
const TYPE swap_tmp = (a); \ const TYPE swap_tmp = (a); \
const bool swap_cmp = expect_with_probability(CMP(swap_tmp, b), 0, .5); \ const bool swap_cmp = expect_with_probability(CMP(swap_tmp, b), 0, .5); \
(a) = swap_cmp ? swap_tmp : b; \ (a) = swap_cmp ? swap_tmp : b; \
(b) = swap_cmp ? b : swap_tmp; \ (b) = swap_cmp ? b : swap_tmp; \
} while (0) } while (0)
#else #else
#define SORT_CMP_SWAP(TYPE, CMP, a, b) \ #define SORT_CMP_SWAP(TYPE, CMP, a, b) \
do \ do \
if (expect_with_probability(!CMP(a, b), 0, .5)) { \ if (expect_with_probability(!CMP(a, b), 0, .5)) { \
const TYPE swap_tmp = (a); \ const TYPE swap_tmp = (a); \
(a) = (b); \ (a) = (b); \
(b) = swap_tmp; \ (b) = swap_tmp; \
} \ } \
while (0) while (0)
#endif #endif
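The MDBX_HAVE_CMOV variant of SORT_CMP_SWAP above is written as two unconditional selects so cmov-capable targets can compile it branch-free, while the fallback is an ordinary guarded swap. A minimal standalone demonstration of the same compare-and-swap shape on ints (illustrative only, not the library macro):

#include <stdio.h>

/* branch-friendly form: two conditional selects instead of an `if` */
static void cmp_swap(int *a, int *b) {
  const int tmp = *a;
  const int keep = (tmp <= *b); /* already in order? */
  *a = keep ? tmp : *b;
  *b = keep ? *b : tmp;
}

int main(void) {
  int x = 7, y = 3;
  cmp_swap(&x, &y);
  printf("%d %d\n", x, y); /* prints: 3 7 */
  return 0;
}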
@ -42,11 +42,11 @@
// [[1,2]] // [[1,2]]
// [[0,2]] // [[0,2]]
// [[0,1]] // [[0,1]]
#define SORT_NETWORK_3(TYPE, CMP, begin) \ #define SORT_NETWORK_3(TYPE, CMP, begin) \
do { \ do { \
SORT_CMP_SWAP(TYPE, CMP, begin[1], begin[2]); \ SORT_CMP_SWAP(TYPE, CMP, begin[1], begin[2]); \
SORT_CMP_SWAP(TYPE, CMP, begin[0], begin[2]); \ SORT_CMP_SWAP(TYPE, CMP, begin[0], begin[2]); \
SORT_CMP_SWAP(TYPE, CMP, begin[0], begin[1]); \ SORT_CMP_SWAP(TYPE, CMP, begin[0], begin[1]); \
} while (0) } while (0)
// 5 comparators, 3 parallel operations // 5 comparators, 3 parallel operations
@ -61,13 +61,13 @@
// [[0,1],[2,3]] // [[0,1],[2,3]]
// [[0,2],[1,3]] // [[0,2],[1,3]]
// [[1,2]] // [[1,2]]
#define SORT_NETWORK_4(TYPE, CMP, begin) \ #define SORT_NETWORK_4(TYPE, CMP, begin) \
do { \ do { \
SORT_CMP_SWAP(TYPE, CMP, begin[0], begin[1]); \ SORT_CMP_SWAP(TYPE, CMP, begin[0], begin[1]); \
SORT_CMP_SWAP(TYPE, CMP, begin[2], begin[3]); \ SORT_CMP_SWAP(TYPE, CMP, begin[2], begin[3]); \
SORT_CMP_SWAP(TYPE, CMP, begin[0], begin[2]); \ SORT_CMP_SWAP(TYPE, CMP, begin[0], begin[2]); \
SORT_CMP_SWAP(TYPE, CMP, begin[1], begin[3]); \ SORT_CMP_SWAP(TYPE, CMP, begin[1], begin[3]); \
SORT_CMP_SWAP(TYPE, CMP, begin[1], begin[2]); \ SORT_CMP_SWAP(TYPE, CMP, begin[1], begin[2]); \
} while (0) } while (0)
// 9 comparators, 5 parallel operations // 9 comparators, 5 parallel operations
@ -86,17 +86,17 @@
// [[2,4],[0,1]] // [[2,4],[0,1]]
// [[2,3],[1,4]] // [[2,3],[1,4]]
// [[1,2],[3,4]] // [[1,2],[3,4]]
#define SORT_NETWORK_5(TYPE, CMP, begin) \ #define SORT_NETWORK_5(TYPE, CMP, begin) \
do { \ do { \
SORT_CMP_SWAP(TYPE, CMP, begin[0], begin[4]); \ SORT_CMP_SWAP(TYPE, CMP, begin[0], begin[4]); \
SORT_CMP_SWAP(TYPE, CMP, begin[1], begin[3]); \ SORT_CMP_SWAP(TYPE, CMP, begin[1], begin[3]); \
SORT_CMP_SWAP(TYPE, CMP, begin[0], begin[2]); \ SORT_CMP_SWAP(TYPE, CMP, begin[0], begin[2]); \
SORT_CMP_SWAP(TYPE, CMP, begin[2], begin[4]); \ SORT_CMP_SWAP(TYPE, CMP, begin[2], begin[4]); \
SORT_CMP_SWAP(TYPE, CMP, begin[0], begin[1]); \ SORT_CMP_SWAP(TYPE, CMP, begin[0], begin[1]); \
SORT_CMP_SWAP(TYPE, CMP, begin[2], begin[3]); \ SORT_CMP_SWAP(TYPE, CMP, begin[2], begin[3]); \
SORT_CMP_SWAP(TYPE, CMP, begin[1], begin[4]); \ SORT_CMP_SWAP(TYPE, CMP, begin[1], begin[4]); \
SORT_CMP_SWAP(TYPE, CMP, begin[1], begin[2]); \ SORT_CMP_SWAP(TYPE, CMP, begin[1], begin[2]); \
SORT_CMP_SWAP(TYPE, CMP, begin[3], begin[4]); \ SORT_CMP_SWAP(TYPE, CMP, begin[3], begin[4]); \
} while (0) } while (0)
// 12 comparators, 6 parallel operations // 12 comparators, 6 parallel operations
@ -118,20 +118,20 @@
// [[0,3],[1,4]] // [[0,3],[1,4]]
// [[2,4],[1,3]] // [[2,4],[1,3]]
// [[2,3]] // [[2,3]]
#define SORT_NETWORK_6(TYPE, CMP, begin) \ #define SORT_NETWORK_6(TYPE, CMP, begin) \
do { \ do { \
SORT_CMP_SWAP(TYPE, CMP, begin[1], begin[2]); \ SORT_CMP_SWAP(TYPE, CMP, begin[1], begin[2]); \
SORT_CMP_SWAP(TYPE, CMP, begin[4], begin[5]); \ SORT_CMP_SWAP(TYPE, CMP, begin[4], begin[5]); \
SORT_CMP_SWAP(TYPE, CMP, begin[0], begin[2]); \ SORT_CMP_SWAP(TYPE, CMP, begin[0], begin[2]); \
SORT_CMP_SWAP(TYPE, CMP, begin[3], begin[5]); \ SORT_CMP_SWAP(TYPE, CMP, begin[3], begin[5]); \
SORT_CMP_SWAP(TYPE, CMP, begin[0], begin[1]); \ SORT_CMP_SWAP(TYPE, CMP, begin[0], begin[1]); \
SORT_CMP_SWAP(TYPE, CMP, begin[3], begin[4]); \ SORT_CMP_SWAP(TYPE, CMP, begin[3], begin[4]); \
SORT_CMP_SWAP(TYPE, CMP, begin[2], begin[5]); \ SORT_CMP_SWAP(TYPE, CMP, begin[2], begin[5]); \
SORT_CMP_SWAP(TYPE, CMP, begin[0], begin[3]); \ SORT_CMP_SWAP(TYPE, CMP, begin[0], begin[3]); \
SORT_CMP_SWAP(TYPE, CMP, begin[1], begin[4]); \ SORT_CMP_SWAP(TYPE, CMP, begin[1], begin[4]); \
    SORT_CMP_SWAP(TYPE, CMP, begin[2], begin[4]); \
    SORT_CMP_SWAP(TYPE, CMP, begin[1], begin[3]); \
    SORT_CMP_SWAP(TYPE, CMP, begin[2], begin[3]); \
  } while (0)

// 16 comparators, 6 parallel operations

@@ -155,24 +155,24 @@
// [[2,3],[4,5]]
// [[1,4],[3,6]]
// [[1,2],[3,4],[5,6]]
#define SORT_NETWORK_7(TYPE, CMP, begin) \
  do { \
    SORT_CMP_SWAP(TYPE, CMP, begin[0], begin[4]); \
    SORT_CMP_SWAP(TYPE, CMP, begin[1], begin[5]); \
    SORT_CMP_SWAP(TYPE, CMP, begin[2], begin[6]); \
    SORT_CMP_SWAP(TYPE, CMP, begin[0], begin[2]); \
    SORT_CMP_SWAP(TYPE, CMP, begin[1], begin[3]); \
    SORT_CMP_SWAP(TYPE, CMP, begin[4], begin[6]); \
    SORT_CMP_SWAP(TYPE, CMP, begin[2], begin[4]); \
    SORT_CMP_SWAP(TYPE, CMP, begin[3], begin[5]); \
    SORT_CMP_SWAP(TYPE, CMP, begin[0], begin[1]); \
    SORT_CMP_SWAP(TYPE, CMP, begin[2], begin[3]); \
    SORT_CMP_SWAP(TYPE, CMP, begin[4], begin[5]); \
    SORT_CMP_SWAP(TYPE, CMP, begin[1], begin[4]); \
    SORT_CMP_SWAP(TYPE, CMP, begin[3], begin[6]); \
    SORT_CMP_SWAP(TYPE, CMP, begin[1], begin[2]); \
    SORT_CMP_SWAP(TYPE, CMP, begin[3], begin[4]); \
    SORT_CMP_SWAP(TYPE, CMP, begin[5], begin[6]); \
  } while (0)

// 19 comparators, 6 parallel operations

@@ -198,237 +198,236 @@
// [[2,3],[4,5]]
// [[1,4],[3,6]]
// [[1,2],[3,4],[5,6]]
#define SORT_NETWORK_8(TYPE, CMP, begin) \
  do { \
    SORT_CMP_SWAP(TYPE, CMP, begin[0], begin[4]); \
    SORT_CMP_SWAP(TYPE, CMP, begin[1], begin[5]); \
    SORT_CMP_SWAP(TYPE, CMP, begin[2], begin[6]); \
    SORT_CMP_SWAP(TYPE, CMP, begin[3], begin[7]); \
    SORT_CMP_SWAP(TYPE, CMP, begin[0], begin[2]); \
    SORT_CMP_SWAP(TYPE, CMP, begin[1], begin[3]); \
    SORT_CMP_SWAP(TYPE, CMP, begin[4], begin[6]); \
    SORT_CMP_SWAP(TYPE, CMP, begin[5], begin[7]); \
    SORT_CMP_SWAP(TYPE, CMP, begin[2], begin[4]); \
    SORT_CMP_SWAP(TYPE, CMP, begin[3], begin[5]); \
    SORT_CMP_SWAP(TYPE, CMP, begin[0], begin[1]); \
    SORT_CMP_SWAP(TYPE, CMP, begin[6], begin[7]); \
    SORT_CMP_SWAP(TYPE, CMP, begin[2], begin[3]); \
    SORT_CMP_SWAP(TYPE, CMP, begin[4], begin[5]); \
    SORT_CMP_SWAP(TYPE, CMP, begin[1], begin[4]); \
    SORT_CMP_SWAP(TYPE, CMP, begin[3], begin[6]); \
    SORT_CMP_SWAP(TYPE, CMP, begin[1], begin[2]); \
    SORT_CMP_SWAP(TYPE, CMP, begin[3], begin[4]); \
    SORT_CMP_SWAP(TYPE, CMP, begin[5], begin[6]); \
  } while (0)
#define SORT_INNER(TYPE, CMP, begin, end, len) \
  switch (len) { \
  default: \
    assert(false); \
    __unreachable(); \
  case 0: \
  case 1: \
    break; \
  case 2: \
    SORT_CMP_SWAP(TYPE, CMP, begin[0], begin[1]); \
    break; \
  case 3: \
    SORT_NETWORK_3(TYPE, CMP, begin); \
    break; \
  case 4: \
    SORT_NETWORK_4(TYPE, CMP, begin); \
    break; \
  case 5: \
    SORT_NETWORK_5(TYPE, CMP, begin); \
    break; \
  case 6: \
    SORT_NETWORK_6(TYPE, CMP, begin); \
    break; \
  case 7: \
    SORT_NETWORK_7(TYPE, CMP, begin); \
    break; \
  case 8: \
    SORT_NETWORK_8(TYPE, CMP, begin); \
    break; \
  }
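/* Illustration (editor's sketch, not from the libmdbx sources): the SORT_NETWORK_* macros
 * above apply a fixed sequence of compare-swap "comparators" whose positions do not depend
 * on the data, which is what allows several of them to proceed in parallel. A minimal,
 * self-contained C program using the classic 5-comparator network for four elements; the
 * names swap_if_greater and sort4_network are inventions of this sketch. */
#include <stdio.h>

static void swap_if_greater(int *a, int *b) {
  /* one comparator: after the call, *a <= *b */
  if (*a > *b) {
    const int t = *a;
    *a = *b;
    *b = t;
  }
}

static void sort4_network(int v[4]) {
  swap_if_greater(&v[0], &v[1]); /* layer 1: independent pairs */
  swap_if_greater(&v[2], &v[3]);
  swap_if_greater(&v[0], &v[2]); /* layer 2 */
  swap_if_greater(&v[1], &v[3]);
  swap_if_greater(&v[1], &v[2]); /* layer 3 */
}

int main(void) {
  int v[4] = {42, 7, 19, 3};
  sort4_network(v);
  printf("%d %d %d %d\n", v[0], v[1], v[2], v[3]); /* prints: 3 7 19 42 */
  return 0;
}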
#define SORT_SWAP(TYPE, a, b) \
  do { \
    const TYPE swap_tmp = (a); \
    (a) = (b); \
    (b) = swap_tmp; \
  } while (0)

#define SORT_PUSH(low, high) \
  do { \
    top->lo = (low); \
    top->hi = (high); \
    ++top; \
  } while (0)

#define SORT_POP(low, high) \
  do { \
    --top; \
    low = top->lo; \
    high = top->hi; \
  } while (0)

#define SORT_IMPL(NAME, EXPECT_LOW_CARDINALITY_OR_PRESORTED, TYPE, CMP) \
  \
  static inline bool NAME##_is_sorted(const TYPE *first, const TYPE *last) { \
    while (++first <= last) \
      if (expect_with_probability(CMP(first[0], first[-1]), 1, .1)) \
        return false; \
    return true; \
  } \
  \
  typedef struct { \
    TYPE *lo, *hi; \
  } NAME##_stack; \
  \
  __hot static void NAME(TYPE *const __restrict begin, TYPE *const __restrict end) { \
    NAME##_stack stack[sizeof(size_t) * CHAR_BIT], *__restrict top = stack; \
    \
    TYPE *__restrict hi = end - 1; \
    TYPE *__restrict lo = begin; \
    while (true) { \
      const ptrdiff_t len = hi - lo; \
      if (len < 8) { \
        SORT_INNER(TYPE, CMP, lo, hi + 1, len + 1); \
        if (unlikely(top == stack)) \
          break; \
        SORT_POP(lo, hi); \
        continue; \
      } \
      \
      TYPE *__restrict mid = lo + (len >> 1); \
      SORT_CMP_SWAP(TYPE, CMP, *lo, *mid); \
      SORT_CMP_SWAP(TYPE, CMP, *mid, *hi); \
      SORT_CMP_SWAP(TYPE, CMP, *lo, *mid); \
      \
      TYPE *right = hi - 1; \
      TYPE *left = lo + 1; \
      while (1) { \
        while (expect_with_probability(CMP(*left, *mid), 0, .5)) \
          ++left; \
        while (expect_with_probability(CMP(*mid, *right), 0, .5)) \
          --right; \
        if (unlikely(left > right)) { \
          if (EXPECT_LOW_CARDINALITY_OR_PRESORTED) { \
            if (NAME##_is_sorted(lo, right)) \
              lo = right + 1; \
            if (NAME##_is_sorted(left, hi)) \
              hi = left; \
          } \
          break; \
        } \
        SORT_SWAP(TYPE, *left, *right); \
        mid = (mid == left) ? right : (mid == right) ? left : mid; \
        ++left; \
        --right; \
      } \
      \
      if (right - lo > hi - left) { \
        SORT_PUSH(lo, right); \
        lo = left; \
      } else { \
        SORT_PUSH(left, hi); \
        hi = right; \
      } \
    } \
    \
    if (AUDIT_ENABLED()) { \
      for (TYPE *scan = begin + 1; scan < end; ++scan) \
        assert(CMP(scan[-1], scan[0])); \
    } \
  }
/*------------------------------------------------------------------------------
 * LY: radix sort for large chunks */

#define RADIXSORT_IMPL(NAME, TYPE, EXTRACT_KEY, BUFFER_PREALLOCATED, END_GAP) \
  \
  __hot static bool NAME##_radixsort(TYPE *const begin, const size_t length) { \
    TYPE *tmp; \
    if (BUFFER_PREALLOCATED) { \
      tmp = begin + length + END_GAP; \
      /* memset(tmp, 0xDeadBeef, sizeof(TYPE) * length); */ \
    } else { \
      tmp = osal_malloc(sizeof(TYPE) * length); \
      if (unlikely(!tmp)) \
        return false; \
    } \
    \
    size_t key_shift = 0, key_diff_mask; \
    do { \
      struct { \
        pgno_t a[256], b[256]; \
      } counters; \
      memset(&counters, 0, sizeof(counters)); \
      \
      key_diff_mask = 0; \
      size_t prev_key = EXTRACT_KEY(begin) >> key_shift; \
      TYPE *r = begin, *end = begin + length; \
      do { \
        const size_t key = EXTRACT_KEY(r) >> key_shift; \
        counters.a[key & 255]++; \
        counters.b[(key >> 8) & 255]++; \
        key_diff_mask |= prev_key ^ key; \
        prev_key = key; \
      } while (++r != end); \
      \
      pgno_t ta = 0, tb = 0; \
      for (size_t i = 0; i < 256; ++i) { \
        const pgno_t ia = counters.a[i]; \
        counters.a[i] = ta; \
        ta += ia; \
        const pgno_t ib = counters.b[i]; \
        counters.b[i] = tb; \
        tb += ib; \
      } \
      \
      r = begin; \
      do { \
        const size_t key = EXTRACT_KEY(r) >> key_shift; \
        tmp[counters.a[key & 255]++] = *r; \
      } while (++r != end); \
      \
      if (unlikely(key_diff_mask < 256)) { \
        memcpy(begin, tmp, ptr_dist(end, begin)); \
        break; \
      } \
      end = (r = tmp) + length; \
      do { \
        const size_t key = EXTRACT_KEY(r) >> key_shift; \
        begin[counters.b[(key >> 8) & 255]++] = *r; \
      } while (++r != end); \
      \
      key_shift += 16; \
    } while (key_diff_mask >> 16); \
    \
    if (!(BUFFER_PREALLOCATED)) \
      osal_free(tmp); \
    return true; \
  }
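/* Illustration (editor's sketch, not from the libmdbx sources): RADIXSORT_IMPL above is an
 * LSD (least-significant-digit-first) radix sort that counts two 8-bit digits per pass and
 * stops early once the remaining key bits never differ (key_diff_mask). The self-contained
 * sketch below shows the same LSD idea in its simplest form, one 8-bit digit per pass over
 * uint32_t keys; radix_u32 and its fixed 64-element scratch buffer are inventions of this
 * example. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void radix_u32(uint32_t *a, size_t n) {
  uint32_t tmp[64]; /* scratch buffer; this sketch assumes n <= 64 */
  for (unsigned shift = 0; shift < 32; shift += 8) {
    size_t count[256] = {0};
    for (size_t i = 0; i < n; ++i) /* histogram of the current byte */
      count[(a[i] >> shift) & 255]++;
    size_t total = 0;
    for (size_t d = 0; d < 256; ++d) { /* exclusive prefix sums give start offsets */
      const size_t c = count[d];
      count[d] = total;
      total += c;
    }
    for (size_t i = 0; i < n; ++i) /* stable scatter by the current byte */
      tmp[count[(a[i] >> shift) & 255]++] = a[i];
    memcpy(a, tmp, n * sizeof(uint32_t));
  }
}

int main(void) {
  uint32_t a[] = {0xCAFE, 7, 0xBEEF00, 42, 7};
  radix_u32(a, sizeof(a) / sizeof(a[0]));
  for (size_t i = 0; i < sizeof(a) / sizeof(a[0]); ++i)
    printf("%u\n", (unsigned)a[i]);
  return 0;
}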
/*------------------------------------------------------------------------------
 * LY: Binary search */

#if defined(__clang__) && __clang_major__ > 4 && defined(__ia32__)
#define WORKAROUND_FOR_CLANG_OPTIMIZER_BUG(size, flag) \
  do \
    __asm __volatile("" \
                     : "+r"(size) \
                     : "r" /* the `b` constraint is more suitable here, but it \
                              causes CLANG to allocate and push/pop one more \
                              register, so `r` is used to avoid this. */ \
                     (flag)); \
  while (0)
#else
#define WORKAROUND_FOR_CLANG_OPTIMIZER_BUG(size, flag) \
  do { \
    /* nope for non-clang or non-x86 */; \
  } while (0)
#endif /* Workaround for CLANG */
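/* Illustration (editor's sketch, not from the libmdbx sources): the workaround above is a
 * variant of the common GCC/Clang "optimization barrier" idiom - an empty asm statement
 * whose operand list makes the compiler treat a value as read and possibly modified, while
 * emitting no instructions. A generic standalone form (GCC/Clang only); opt_barrier_size is
 * a name invented for this sketch. */
#include <stddef.h>

static inline void opt_barrier_size(size_t *size) {
  /* "+r" forces *size into a register and marks it as both input and output, so the
   * optimizer cannot carry assumptions about its value across this point. */
  __asm__ __volatile__("" : "+r"(*size));
}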
@@ -4,33 +4,25 @@
#include "internals.h"

void spill_remove(MDBX_txn *txn, size_t idx, size_t npages) {
  tASSERT(txn, idx > 0 && idx <= MDBX_PNL_GETSIZE(txn->tw.spilled.list) && txn->tw.spilled.least_removed > 0);
  txn->tw.spilled.least_removed = (idx < txn->tw.spilled.least_removed) ? idx : txn->tw.spilled.least_removed;
  txn->tw.spilled.list[idx] |= 1;
  MDBX_PNL_SETSIZE(txn->tw.spilled.list,
                   MDBX_PNL_GETSIZE(txn->tw.spilled.list) - (idx == MDBX_PNL_GETSIZE(txn->tw.spilled.list)));

  while (unlikely(npages > 1)) {
    const pgno_t pgno = (txn->tw.spilled.list[idx] >> 1) + 1;
    if (MDBX_PNL_ASCENDING) {
      if (++idx > MDBX_PNL_GETSIZE(txn->tw.spilled.list) || (txn->tw.spilled.list[idx] >> 1) != pgno)
        return;
    } else {
      if (--idx < 1 || (txn->tw.spilled.list[idx] >> 1) != pgno)
        return;
      txn->tw.spilled.least_removed = (idx < txn->tw.spilled.least_removed) ? idx : txn->tw.spilled.least_removed;
    }
    txn->tw.spilled.list[idx] |= 1;
    MDBX_PNL_SETSIZE(txn->tw.spilled.list,
                     MDBX_PNL_GETSIZE(txn->tw.spilled.list) - (idx == MDBX_PNL_GETSIZE(txn->tw.spilled.list)));
    --npages;
  }
}

@@ -57,8 +49,7 @@ pnl_t spill_purge(MDBX_txn *txn) {
/*----------------------------------------------------------------------------*/

static int spill_page(MDBX_txn *txn, iov_ctx_t *ctx, page_t *dp, const size_t npages) {
  tASSERT(txn, !(txn->flags & MDBX_WRITEMAP));
#if MDBX_ENABLE_PGOP_STAT
  txn->env->lck->pgops.spill.weak += npages;

@@ -72,8 +63,7 @@ static int spill_page(MDBX_txn *txn, iov_ctx_t *ctx, page_t *dp,
/* Set unspillable LRU-label for dirty pages watched by txn.
 * Returns the number of pages marked as unspillable. */
static size_t spill_cursor_keep(const MDBX_txn *const txn, const MDBX_cursor *mc) {
  tASSERT(txn, (txn->flags & (MDBX_TXN_RDONLY | MDBX_WRITEMAP)) == 0);
  size_t keep = 0;
  while (!is_poor(mc)) {
@@ -87,8 +77,7 @@ static size_t spill_cursor_keep(const MDBX_txn *const txn,
    size_t const n = dpl_search(txn, mp->pgno);
    if (txn->tw.dirtylist->items[n].pgno == mp->pgno &&
        /* do not count twice */ dpl_age(txn, n)) {
      size_t *const ptr = ptr_disp(txn->tw.dirtylist->items[n].ptr, -(ptrdiff_t)sizeof(size_t));
      *ptr = txn->tw.dirtylru;
      tASSERT(txn, dpl_age(txn, n) == 0);
      ++keep;

@@ -112,8 +101,7 @@ static size_t spill_txn_keep(MDBX_txn *txn, MDBX_cursor *m0) {
  size_t keep = m0 ? spill_cursor_keep(txn, m0) : 0;
  TXN_FOREACH_DBI_ALL(txn, dbi) {
    if (F_ISSET(txn->dbi_state[dbi], DBI_DIRTY | DBI_VALID) && txn->dbs[dbi].root != P_INVALID)
      for (MDBX_cursor *mc = txn->cursors[dbi]; mc; mc = mc->next)
        if (mc != m0)
          keep += spill_cursor_keep(txn, mc);
@@ -126,8 +114,7 @@ static size_t spill_txn_keep(MDBX_txn *txn, MDBX_cursor *m0) {
 * 0 = should be spilled;
 * ...
 * > 255 = must not be spilled. */
MDBX_NOTHROW_PURE_FUNCTION static unsigned spill_prio(const MDBX_txn *txn, const size_t i, const uint32_t reciprocal) {
  dpl_t *const dl = txn->tw.dirtylist;
  const uint32_t age = dpl_age(txn, i);
  const size_t npages = dpl_npages(dl, i);

@@ -139,8 +126,7 @@ spill_prio(const MDBX_txn *txn, const size_t i, const uint32_t reciprocal) {
  page_t *const dp = dl->items[i].ptr;
  if (dp->flags & (P_LOOSE | P_SPILLED)) {
    DEBUG("skip %s %zu page %" PRIaPGNO, (dp->flags & P_LOOSE) ? "loose" : "parent-spilled", npages, pgno);
    return 256;
  }

@@ -175,67 +161,49 @@ spill_prio(const MDBX_txn *txn, const size_t i, const uint32_t reciprocal) {
  return prio = (unsigned)factor;
}
static size_t spill_gate(const MDBX_env *env, intptr_t part, const size_t total) {
  const intptr_t spill_min = env->options.spill_min_denominator
                                 ? (total + env->options.spill_min_denominator - 1) / env->options.spill_min_denominator
                                 : 1;
  const intptr_t spill_max =
      total - (env->options.spill_max_denominator ? total / env->options.spill_max_denominator : 0);
  part = (part < spill_max) ? part : spill_max;
  part = (part > spill_min) ? part : spill_min;
  eASSERT(env, part >= 0 && (size_t)part <= total);
  return (size_t)part;
}
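/* Illustration (editor's sketch, not from the libmdbx sources): spill_gate() clamps the
 * requested spill amount between a floor of ceil(total / spill_min_denominator) and a
 * ceiling of total - total / spill_max_denominator. A standalone sketch of the same
 * clamping with hypothetical denominators 8 and 4, i.e. spill at least 1/8 of the dirty
 * pages but keep at least 1/4 of them: */
#include <stdio.h>

static size_t gate(long part, size_t total, unsigned min_den, unsigned max_den) {
  const long lo = min_den ? ((long)total + min_den - 1) / min_den : 1; /* ceil(total/min_den) */
  const long hi = (long)total - (max_den ? (long)total / max_den : 0);
  if (part > hi)
    part = hi;
  if (part < lo)
    part = lo;
  return (size_t)part;
}

int main(void) {
  printf("%zu\n", gate(3, 1000, 8, 4));   /* request too small -> raised to 125 */
  printf("%zu\n", gate(900, 1000, 8, 4)); /* request too large -> capped at 750 */
  printf("%zu\n", gate(300, 1000, 8, 4)); /* already in range  -> 300 */
  return 0;
}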
__cold int spill_slowpath(MDBX_txn *const txn, MDBX_cursor *const m0, const intptr_t wanna_spill_entries,
                          const intptr_t wanna_spill_npages, const size_t need) {
  tASSERT(txn, (txn->flags & MDBX_TXN_RDONLY) == 0);
  int rc = MDBX_SUCCESS;
  if (unlikely(txn->tw.loose_count >=
               (txn->tw.dirtylist ? txn->tw.dirtylist->pages_including_loose : txn->tw.writemap_dirty_npages)))
    goto done;
  const size_t dirty_entries = txn->tw.dirtylist ? (txn->tw.dirtylist->length - txn->tw.loose_count) : 1;
  const size_t dirty_npages =
      (txn->tw.dirtylist ? txn->tw.dirtylist->pages_including_loose : txn->tw.writemap_dirty_npages) -
      txn->tw.loose_count;
  const size_t need_spill_entries = spill_gate(txn->env, wanna_spill_entries, dirty_entries);
  const size_t need_spill_npages = spill_gate(txn->env, wanna_spill_npages, dirty_npages);
  const size_t need_spill = (need_spill_entries > need_spill_npages) ? need_spill_entries : need_spill_npages;
  if (!need_spill)
    goto done;
  if (txn->flags & MDBX_WRITEMAP) {
    NOTICE("%s-spilling %zu dirty-entries, %zu dirty-npages", "msync", dirty_entries, dirty_npages);
    const MDBX_env *env = txn->env;
    tASSERT(txn, txn->tw.spilled.list == nullptr);
    rc = osal_msync(&txn->env->dxb_mmap, 0, pgno_align2os_bytes(env, txn->geo.first_unallocated), MDBX_SYNC_KICK);
    if (unlikely(rc != MDBX_SUCCESS))
      goto bailout;
#if MDBX_AVOID_MSYNC
    MDBX_ANALYSIS_ASSUME(txn->tw.dirtylist != nullptr);
    tASSERT(txn, dpl_check(txn));
    env->lck->unsynced_pages.weak += txn->tw.dirtylist->pages_including_loose - txn->tw.loose_count;
    dpl_clear(txn->tw.dirtylist);
    txn->tw.dirtyroom = env->options.dp_limit - txn->tw.loose_count;
    for (page_t *lp = txn->tw.loose_pages; lp != nullptr; lp = page_next(lp)) {

@@ -256,12 +224,10 @@ __cold int spill_slowpath(MDBX_txn *const txn, MDBX_cursor *const m0,
    goto done;
  }

  NOTICE("%s-spilling %zu dirty-entries, %zu dirty-npages", "write", need_spill_entries, need_spill_npages);
  MDBX_ANALYSIS_ASSUME(txn->tw.dirtylist != nullptr);
  tASSERT(txn, txn->tw.dirtylist->length - txn->tw.loose_count >= 1);
  tASSERT(txn, txn->tw.dirtylist->pages_including_loose - txn->tw.loose_count >= need_spill_npages);
  if (!txn->tw.spilled.list) {
    txn->tw.spilled.least_removed = INT_MAX;
    txn->tw.spilled.list = pnl_alloc(need_spill);

@@ -338,10 +304,8 @@ __cold int spill_slowpath(MDBX_txn *const txn, MDBX_cursor *const m0,
  for (size_t i = 1; i <= dl->length; ++i) {
    const unsigned prio = spill_prio(txn, i, reciprocal);
    size_t *const ptr = ptr_disp(dl->items[i].ptr, -(ptrdiff_t)sizeof(size_t));
    TRACE("page %" PRIaPGNO ", lru %zu, is_multi %c, npages %u, age %u of %u, prio %u", dl->items[i].pgno, *ptr,
          (dl->items[i].npages > 1) ? 'Y' : 'N', dpl_npages(dl, i), dpl_age(txn, i), age_max, prio);
    if (prio < 256) {
      radix_entries[prio] += 1;
      spillable_entries += 1;
@@ -354,20 +318,16 @@ __cold int spill_slowpath(MDBX_txn *const txn, MDBX_cursor *const m0,
  tASSERT(txn, spillable_npages >= spillable_entries);
  pgno_t spilled_entries = 0, spilled_npages = 0;
  if (likely(spillable_entries > 0)) {
    size_t prio2spill = 0, prio2adjacent = 128, amount_entries = radix_entries[0], amount_npages = radix_npages[0];
    for (size_t i = 1; i < 256; i++) {
      if (amount_entries < need_spill_entries || amount_npages < need_spill_npages) {
        prio2spill = i;
        prio2adjacent = i + (257 - i) / 2;
        amount_entries += radix_entries[i];
        amount_npages += radix_npages[i];
      } else if (amount_entries + amount_entries < spillable_entries + need_spill_entries
                 /* EQUIVALENTLY: amount - need_spill < spillable - amount */
                 || amount_npages + amount_npages < spillable_npages + need_spill_npages) {
        prio2adjacent = i;
        amount_entries += radix_entries[i];
        amount_npages += radix_npages[i];
@@ -377,44 +337,38 @@ __cold int spill_slowpath(MDBX_txn *const txn, MDBX_cursor *const m0,
    VERBOSE("prio2spill %zu, prio2adjacent %zu, spillable %zu/%zu,"
            " wanna-spill %zu/%zu, amount %zu/%zu",
            prio2spill, prio2adjacent, spillable_entries, spillable_npages, need_spill_entries, need_spill_npages,
            amount_entries, amount_npages);
    tASSERT(txn, prio2spill < prio2adjacent && prio2adjacent <= 256);

    iov_ctx_t ctx;
    rc = iov_init(txn, &ctx, amount_entries, amount_npages,
#if defined(_WIN32) || defined(_WIN64)
                  txn->env->ioring.overlapped_fd ? txn->env->ioring.overlapped_fd :
#endif
                                                 txn->env->lazy_fd,
                  true);
    if (unlikely(rc != MDBX_SUCCESS))
      goto bailout;

    size_t r = 0, w = 0;
    pgno_t last = 0;
    while (r < dl->length && (spilled_entries < need_spill_entries || spilled_npages < need_spill_npages)) {
      dl->items[++w] = dl->items[++r];
      unsigned prio = spill_prio(txn, w, reciprocal);
      if (prio > prio2spill && (prio >= prio2adjacent || last != dl->items[w].pgno))
        continue;
      const size_t e = w;
      last = dpl_endpgno(dl, w);
      while (--w && dpl_endpgno(dl, w) == dl->items[w + 1].pgno && spill_prio(txn, w, reciprocal) < prio2adjacent)
        ;
      for (size_t i = w; ++i <= e;) {
        const unsigned npages = dpl_npages(dl, i);
        prio = spill_prio(txn, i, reciprocal);
        DEBUG("%sspill[%zu] %u page %" PRIaPGNO " (age %d, prio %u)", (prio > prio2spill) ? "co-" : "", i, npages,
              dl->items[i].pgno, dpl_age(txn, i), prio);
        tASSERT(txn, prio < 256);
        ++spilled_entries;
        spilled_npages += npages;

@@ -424,8 +378,7 @@ __cold int spill_slowpath(MDBX_txn *const txn, MDBX_cursor *const m0,
      }
    }

    VERBOSE("spilled entries %u, spilled npages %u", spilled_entries, spilled_npages);
    tASSERT(txn, spillable_entries == 0 || spilled_entries > 0);
    tASSERT(txn, spilled_npages >= spilled_entries);

@@ -449,16 +402,14 @@ __cold int spill_slowpath(MDBX_txn *const txn, MDBX_cursor *const m0,
    txn->env->lck->unsynced_pages.weak += spilled_npages;
    pnl_sort(txn->tw.spilled.list, (size_t)txn->geo.first_unallocated << 1);
    txn->flags |= MDBX_TXN_SPILLS;
    NOTICE("spilled %u dirty-entries, %u dirty-npages, now have %zu dirty-room", spilled_entries, spilled_npages,
           txn->tw.dirtyroom);
  } else {
    tASSERT(txn, rc == MDBX_SUCCESS);
    for (size_t i = 1; i <= dl->length; ++i) {
      page_t *dp = dl->items[i].ptr;
      VERBOSE("unspillable[%zu]: pgno %u, npages %u, flags 0x%04X, age %u, prio %u", i, dp->pgno, dpl_npages(dl, i),
              dp->flags, dpl_age(txn, i), spill_prio(txn, i, reciprocal));
    }
  }

@@ -468,17 +419,13 @@ __cold int spill_slowpath(MDBX_txn *const txn, MDBX_cursor *const m0,
          "needed %zu, spillable %zu; "
          "spilled %u dirty-entries, now have %zu dirty-room",
          dl->length + spilled_entries, dl->length,
          (txn->parent && txn->parent->tw.dirtylist) ? (intptr_t)txn->parent->tw.dirtylist->length : -1,
          txn->tw.loose_count, need, spillable_entries, spilled_entries, txn->tw.dirtyroom);
  ENSURE(txn->env, txn->tw.loose_count + txn->tw.dirtyroom > need / 2);
#endif /* xMDBX_DEBUG_SPILLING */

done:
  return likely(txn->tw.dirtyroom + txn->tw.loose_count > ((need > CURSOR_STACK_SIZE) ? CURSOR_STACK_SIZE : need))
             ? MDBX_SUCCESS
             : MDBX_TXN_FULL;
}
@@ -7,10 +7,8 @@
MDBX_INTERNAL void spill_remove(MDBX_txn *txn, size_t idx, size_t npages);
MDBX_INTERNAL pnl_t spill_purge(MDBX_txn *txn);

MDBX_INTERNAL int spill_slowpath(MDBX_txn *const txn, MDBX_cursor *const m0, const intptr_t wanna_spill_entries,
                                 const intptr_t wanna_spill_npages, const size_t need);

/*----------------------------------------------------------------------------*/

static inline size_t spill_search(const MDBX_txn *txn, pgno_t pgno) {

@@ -23,8 +21,7 @@ static inline size_t spill_search(const MDBX_txn *txn, pgno_t pgno) {
  return (n <= MDBX_PNL_GETSIZE(pnl) && pnl[n] == pgno) ? n : 0;
}

static inline bool spill_intersect(const MDBX_txn *txn, pgno_t pgno, size_t npages) {
  const pnl_t pnl = txn->tw.spilled.list;
  if (likely(!pnl))
    return false;

@@ -32,23 +29,18 @@ static inline bool spill_intersect(const MDBX_txn *txn, pgno_t pgno,
  if (LOG_ENABLED(MDBX_LOG_EXTRA)) {
    DEBUG_EXTRA("PNL len %zu [", len);
    for (size_t i = 1; i <= len; ++i)
      DEBUG_EXTRA_PRINT(" %li", (pnl[i] & 1) ? -(long)(pnl[i] >> 1) : (long)(pnl[i] >> 1));
    DEBUG_EXTRA_PRINT("%s\n", "]");
  }
  const pgno_t spilled_range_begin = pgno << 1;
  const pgno_t spilled_range_last = ((pgno + (pgno_t)npages) << 1) - 1;
#if MDBX_PNL_ASCENDING
  const size_t n = pnl_search(pnl, spilled_range_begin, (size_t)(MAX_PAGENO + 1) << 1);
  tASSERT(txn, n && (n == MDBX_PNL_GETSIZE(pnl) + 1 || spilled_range_begin <= pnl[n]));
  const bool rc = n <= MDBX_PNL_GETSIZE(pnl) && pnl[n] <= spilled_range_last;
#else
  const size_t n = pnl_search(pnl, spilled_range_last, (size_t)MAX_PAGENO + MAX_PAGENO + 1);
  tASSERT(txn, n && (n == MDBX_PNL_GETSIZE(pnl) + 1 || spilled_range_last >= pnl[n]));
  const bool rc = n <= MDBX_PNL_GETSIZE(pnl) && pnl[n] >= spilled_range_begin;
#endif
  if (ASSERT_ENABLED()) {

@@ -60,17 +52,13 @@ static inline bool spill_intersect(const MDBX_txn *txn, pgno_t pgno,
  return rc;
}

static inline int txn_spill(MDBX_txn *const txn, MDBX_cursor *const m0, const size_t need) {
  tASSERT(txn, (txn->flags & MDBX_TXN_RDONLY) == 0);
  tASSERT(txn, !m0 || cursor_is_tracked(m0));
  const intptr_t wanna_spill_entries = txn->tw.dirtylist ? (need - txn->tw.dirtyroom - txn->tw.loose_count) : 0;
  const intptr_t wanna_spill_npages =
      need + (txn->tw.dirtylist ? txn->tw.dirtylist->pages_including_loose : txn->tw.writemap_dirty_npages) -
      txn->tw.loose_count - txn->env->options.dp_limit;
  /* production mode */
@@ -19,11 +19,8 @@ int tbl_setup(const MDBX_env *env, kvx_t *const kvx, const tree_t *const db) {
  kvx->clc.v.lmax = env_valsize_max(env, db->flags);
  if ((db->flags & (MDBX_DUPFIXED | MDBX_INTEGERDUP)) != 0 && db->dupfix_size) {
    if (!MDBX_DISABLE_VALIDATION && unlikely(db->dupfix_size < kvx->clc.v.lmin || db->dupfix_size > kvx->clc.v.lmax)) {
      ERROR("db.dupfix_size (%u) <> min/max value-length (%zu/%zu)", db->dupfix_size, kvx->clc.v.lmin, kvx->clc.v.lmax);
      return MDBX_CORRUPTED;
    }
    kvx->clc.v.lmin = kvx->clc.v.lmax = db->dupfix_size;

@@ -41,10 +38,8 @@ int tbl_fetch(MDBX_txn *txn, size_t dbi) {
  rc = tree_search(&couple.outer, &kvx->name, 0);
  if (unlikely(rc != MDBX_SUCCESS)) {
  bailout:
    NOTICE("dbi %zu refs to inaccessible table `%*s` for txn %" PRIaTXN " (err %d)", dbi, (int)kvx->name.iov_len,
           (const char *)kvx->name.iov_base, txn->txnid, rc);
    return (rc == MDBX_NOTFOUND) ? MDBX_BAD_DBI : rc;
  }

@@ -55,21 +50,18 @@ int tbl_fetch(MDBX_txn *txn, size_t dbi) {
    goto bailout;
  }
  if (unlikely((node_flags(nsr.node) & (N_DUP | N_TREE)) != N_TREE)) {
    NOTICE("dbi %zu refs to not a named table `%*s` for txn %" PRIaTXN " (%s)", dbi, (int)kvx->name.iov_len,
           (const char *)kvx->name.iov_base, txn->txnid, "wrong flags");
    return MDBX_INCOMPATIBLE; /* not a named DB */
  }
  rc = node_read(&couple.outer, nsr.node, &data, couple.outer.pg[couple.outer.top]);
  if (unlikely(rc != MDBX_SUCCESS))
    return rc;
  if (unlikely(data.iov_len != sizeof(tree_t))) {
    NOTICE("dbi %zu refs to not a named table `%*s` for txn %" PRIaTXN " (%s)", dbi, (int)kvx->name.iov_len,
           (const char *)kvx->name.iov_base, txn->txnid, "wrong rec-size");
    return MDBX_INCOMPATIBLE; /* not a named DB */
  }

@@ -80,8 +72,8 @@ int tbl_fetch(MDBX_txn *txn, size_t dbi) {
  if (unlikely((db->flags & DB_PERSISTENT_FLAGS) != flags)) {
    NOTICE("dbi %zu refs to the re-created table `%*s` for txn %" PRIaTXN
           " with different flags (present 0x%X != wanna 0x%X)",
           dbi, (int)kvx->name.iov_len, (const char *)kvx->name.iov_base, txn->txnid, db->flags & DB_PERSISTENT_FLAGS,
           flags);
    return MDBX_INCOMPATIBLE;
  }

@@ -90,8 +82,7 @@ int tbl_fetch(MDBX_txn *txn, size_t dbi) {
  const txnid_t pp_txnid = couple.outer.pg[couple.outer.top]->txnid;
  tASSERT(txn, txn->front_txnid >= pp_txnid);
  if (unlikely(db->mod_txnid > pp_txnid)) {
    ERROR("db.mod_txnid (%" PRIaTXN ") > page-txnid (%" PRIaTXN ")", db->mod_txnid, pp_txnid);
    return MDBX_CORRUPTED;
  }
#endif /* !MDBX_DISABLE_VALIDATION */
src/tls.c
@@ -29,21 +29,17 @@ static int uniq_peek(const osal_mmap_t *pending, osal_mmap_t *scan) {
    bait = 0 /* hush MSVC warning */;
    rc = osal_msync(scan, 0, sizeof(lck_t), MDBX_SYNC_DATA);
    if (rc == MDBX_SUCCESS)
      rc = osal_pread(pending->fd, &bait, sizeof(scan_lck->bait_uniqueness), offsetof(lck_t, bait_uniqueness));
  }
  if (likely(rc == MDBX_SUCCESS) && bait == atomic_load64(&scan_lck->bait_uniqueness, mo_AcquireRelease))
    rc = MDBX_RESULT_TRUE;

  TRACE("uniq-peek: %s, bait 0x%016" PRIx64 ",%s rc %d", pending_lck ? "mem" : "file", bait,
        (rc == MDBX_RESULT_TRUE) ? " found," : (rc ? " FAILED," : ""), rc);
  return rc;
}

static int uniq_poke(const osal_mmap_t *pending, osal_mmap_t *scan, uint64_t *abra) {
  if (*abra == 0) {
    const uintptr_t tid = osal_thread_self();
    uintptr_t uit = 0;

@@ -51,9 +47,7 @@ static int uniq_poke(const osal_mmap_t *pending, osal_mmap_t *scan,
    *abra = rrxmrrxmsx_0(osal_monotime() + UINT64_C(5873865991930747) * uit);
  }
  const uint64_t cadabra =
      rrxmrrxmsx_0(*abra + UINT64_C(7680760450171793) * (unsigned)osal_getpid()) << 24 | *abra >> 40;
  lck_t *const scan_lck = scan->lck;
  atomic_store64(&scan_lck->bait_uniqueness, cadabra, mo_AcquireRelease);
  *abra = *abra * UINT64_C(6364136223846793005) + 1;

@@ -67,14 +61,12 @@ __cold int rthc_uniq_check(const osal_mmap_t *pending, MDBX_env **found) {
    MDBX_env *const scan = rthc_table[i].env;
    if (!scan->lck_mmap.lck || &scan->lck_mmap == pending)
      continue;
    int err = atomic_load64(&scan->lck_mmap.lck->bait_uniqueness, mo_AcquireRelease)
                  ? uniq_peek(pending, &scan->lck_mmap)
                  : uniq_poke(pending, &scan->lck_mmap, &salt);
    if (err == MDBX_ENODATA) {
      uint64_t length = 0;
      if (likely(osal_filesize(pending->fd, &length) == MDBX_SUCCESS && length == 0)) {
        /* LY: skip checking since LCK-file is empty, i.e. just created. */
        DEBUG("%s", "unique (new/empty lck)");
        return MDBX_SUCCESS;

@@ -114,8 +106,7 @@ static osal_thread_key_t rthc_key;
static mdbx_atomic_uint32_t rthc_pending;

static inline uint64_t rthc_signature(const void *addr, uint8_t kind) {
  uint64_t salt = osal_thread_self() * UINT64_C(0xA2F0EEC059629A17) ^ UINT64_C(0x01E07C6FDB596497) * (uintptr_t)(addr);
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  return salt << 8 | kind;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
@@ -128,45 +119,36 @@ static inline uint64_t rthc_signature(const void *addr, uint8_t kind) {
#define MDBX_THREAD_RTHC_REGISTERED(addr) rthc_signature(addr, 0x0D)
#define MDBX_THREAD_RTHC_COUNTED(addr) rthc_signature(addr, 0xC0)

static __thread uint64_t rthc_thread_state
#if __has_attribute(tls_model) && (defined(__PIC__) || defined(__pic__) || MDBX_BUILD_SHARED_LIBRARY)
    __attribute__((tls_model("local-dynamic")))
#endif
    ;

#if defined(__APPLE__) && defined(__SANITIZE_ADDRESS__) && !defined(MDBX_ATTRIBUTE_NO_SANITIZE_ADDRESS)
/* Avoid an ASAN trap due to the target TLS-variable being freed by Darwin's tlv_free() */
#define MDBX_ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((__no_sanitize_address__, __noinline__))
#else
#define MDBX_ATTRIBUTE_NO_SANITIZE_ADDRESS inline
#endif

MDBX_ATTRIBUTE_NO_SANITIZE_ADDRESS static uint64_t rthc_read(const void *rthc) { return *(volatile uint64_t *)rthc; }

MDBX_ATTRIBUTE_NO_SANITIZE_ADDRESS static uint64_t rthc_compare_and_clean(const void *rthc, const uint64_t signature) {
#if MDBX_64BIT_CAS
  return atomic_cas64((mdbx_atomic_uint64_t *)rthc, signature, 0);
#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  return atomic_cas32((mdbx_atomic_uint32_t *)rthc, (uint32_t)signature, 0);
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  return atomic_cas32((mdbx_atomic_uint32_t *)rthc, (uint32_t)(signature >> 32), 0);
#else
#error "FIXME: Unsupported byte order"
#endif
}

static inline int rthc_atexit(void (*dtor)(void *), void *obj, void *dso_symbol) {
#ifndef MDBX_HAVE_CXA_THREAD_ATEXIT_IMPL
#if defined(LIBCXXABI_HAS_CXA_THREAD_ATEXIT_IMPL) || defined(HAVE___CXA_THREAD_ATEXIT_IMPL) || \
    __GLIBC_PREREQ(2, 18) || defined(BIONIC)
#define MDBX_HAVE_CXA_THREAD_ATEXIT_IMPL 1
#else
#define MDBX_HAVE_CXA_THREAD_ATEXIT_IMPL 0
@@ -174,11 +156,9 @@ static inline int rthc_atexit(void (*dtor)(void *), void *obj,
#endif /* MDBX_HAVE_CXA_THREAD_ATEXIT_IMPL */

#ifndef MDBX_HAVE_CXA_THREAD_ATEXIT
#if defined(LIBCXXABI_HAS_CXA_THREAD_ATEXIT) || defined(HAVE___CXA_THREAD_ATEXIT)
#define MDBX_HAVE_CXA_THREAD_ATEXIT 1
#elif !MDBX_HAVE_CXA_THREAD_ATEXIT_IMPL && (defined(__linux__) || defined(__gnu_linux__))
#define MDBX_HAVE_CXA_THREAD_ATEXIT 1
#else
#define MDBX_HAVE_CXA_THREAD_ATEXIT 0

@@ -190,13 +170,11 @@ static inline int rthc_atexit(void (*dtor)(void *), void *obj,
#define __cxa_thread_atexit __cxa_thread_atexit_impl
#endif
#if MDBX_HAVE_CXA_THREAD_ATEXIT || defined(__cxa_thread_atexit)
  extern int __cxa_thread_atexit(void (*dtor)(void *), void *obj, void *dso_symbol) MDBX_WEAK_IMPORT_ATTRIBUTE;
  if (&__cxa_thread_atexit)
    rc = __cxa_thread_atexit(dtor, obj, dso_symbol);
#elif defined(__APPLE__) || defined(_DARWIN_C_SOURCE)
  extern void _tlv_atexit(void (*termfunc)(void *objAddr), void *objAddr) MDBX_WEAK_IMPORT_ATTRIBUTE;
  if (&_tlv_atexit) {
    (void)dso_symbol;
    _tlv_atexit(dtor, obj);

@@ -250,8 +228,7 @@ static inline int thread_key_create(osal_thread_key_t *key) {
#else
  rc = pthread_key_create(key, nullptr);
#endif
  TRACE("&key = %p, value %" PRIuPTR ", rc %d", __Wpedantic_format_voidptr(key), (uintptr_t)*key, rc);
  return rc;
}

@@ -259,21 +236,17 @@ void thread_rthc_set(osal_thread_key_t key, const void *value) {
#if defined(_WIN32) || defined(_WIN64)
  ENSURE(nullptr, TlsSetValue(key, (void *)value));
#else
  const uint64_t sign_registered = MDBX_THREAD_RTHC_REGISTERED(&rthc_thread_state);
  const uint64_t sign_counted = MDBX_THREAD_RTHC_COUNTED(&rthc_thread_state);
  if (value && unlikely(rthc_thread_state != sign_registered && rthc_thread_state != sign_counted)) {
    rthc_thread_state = sign_registered;
    TRACE("thread registered 0x%" PRIxPTR, osal_thread_self());
    if (rthc_atexit(rthc_thread_dtor, &rthc_thread_state, (void *)&mdbx_version /* dso_anchor */)) {
      ENSURE(nullptr, pthread_setspecific(rthc_key, &rthc_thread_state) == 0);
      rthc_thread_state = sign_counted;
      const unsigned count_before = atomic_add32(&rthc_pending, 1);
      ENSURE(nullptr, count_before < INT_MAX);
      NOTICE("fallback to pthreads' tsd, key %" PRIuPTR ", count %u", (uintptr_t)rthc_key, count_before);
      (void)count_before;
    }
  }

@@ -286,11 +259,9 @@ __cold void rthc_thread_dtor(void *rthc) {
  rthc_lock();
  const uint32_t current_pid = osal_getpid();
#if defined(_WIN32) || defined(_WIN64)
  TRACE(">> pid %d, thread 0x%" PRIxPTR ", module %p", current_pid, osal_thread_self(), rthc);
#else
  TRACE(">> pid %d, thread 0x%" PRIxPTR ", rthc %p", current_pid, osal_thread_self(), rthc);
#endif

  for (size_t i = 0; i < rthc_count; ++i) {

@@ -306,22 +277,18 @@ __cold void rthc_thread_dtor(void *rthc) {
      continue;
#if !defined(_WIN32) && !defined(_WIN64)
    if (pthread_setspecific(env->me_txkey, nullptr) != 0) {
      TRACE("== thread 0x%" PRIxPTR ", rthc %p: ignore race with tsd-key deletion", osal_thread_self(),
            __Wpedantic_format_voidptr(reader));
      continue /* ignore race with tsd-key deletion by mdbx_env_close() */;
    }
#endif

    TRACE("== thread 0x%" PRIxPTR ", rthc %p, [%zi], %p ... %p (%+i), rtch-pid %i, "
          "current-pid %i",
          osal_thread_self(), __Wpedantic_format_voidptr(reader), i, __Wpedantic_format_voidptr(begin),
          __Wpedantic_format_voidptr(end), (int)(reader - begin), reader->pid.weak, current_pid);
    if (atomic_load32(&reader->pid, mo_Relaxed) == current_pid) {
      TRACE("==== thread 0x%" PRIxPTR ", rthc %p, cleanup", osal_thread_self(), __Wpedantic_format_voidptr(reader));
      (void)atomic_cas32(&reader->pid, current_pid, 0);
      atomic_store32(&env->lck->rdt_refresh_flag, true, mo_Relaxed);
    }

@@ -334,26 +301,20 @@ __cold void rthc_thread_dtor(void *rthc) {
  const uint64_t sign_registered = MDBX_THREAD_RTHC_REGISTERED(rthc);
  const uint64_t sign_counted = MDBX_THREAD_RTHC_COUNTED(rthc);
  const uint64_t state = rthc_read(rthc);
  if (state == sign_registered && rthc_compare_and_clean(rthc, sign_registered)) {
    TRACE("== thread 0x%" PRIxPTR ", rthc %p, pid %d, self-status %s (0x%08" PRIx64 ")", osal_thread_self(), rthc,
          osal_getpid(), "registered", state);
  } else if (state == sign_counted && rthc_compare_and_clean(rthc, sign_counted)) {
    TRACE("== thread 0x%" PRIxPTR ", rthc %p, pid %d, self-status %s (0x%08" PRIx64 ")", osal_thread_self(), rthc,
          osal_getpid(), "counted", state);
    ENSURE(nullptr, atomic_sub32(&rthc_pending, 1) > 0);
  } else {
    WARNING("thread 0x%" PRIxPTR ", rthc %p, pid %d, self-status %s (0x%08" PRIx64 ")", osal_thread_self(), rthc,
            osal_getpid(), "wrong", state);
  }

  if (atomic_load32(&rthc_pending, mo_AcquireRelease) == 0) {
    TRACE("== thread 0x%" PRIxPTR ", rthc %p, pid %d, wake", osal_thread_self(), rthc, osal_getpid());
    ENSURE(nullptr, pthread_cond_broadcast(&rthc_cond) == 0);
  }

@@ -367,8 +328,7 @@ __cold void rthc_thread_dtor(void *rthc) {
}

__cold int rthc_register(MDBX_env *const env) {
  TRACE(">> env %p, rthc_count %u, rthc_limit %u", __Wpedantic_format_voidptr(env), rthc_count, rthc_limit);

  int rc = MDBX_SUCCESS;
  for (size_t i = 0; i < rthc_count; ++i)

@@ -380,8 +340,7 @@ __cold int rthc_register(MDBX_env *const env) {
  env->me_txkey = 0;
  if (unlikely(rthc_count == rthc_limit)) {
    rthc_entry_t *new_table =
        osal_realloc((rthc_table == rthc_table_static) ? nullptr : rthc_table, sizeof(rthc_entry_t) * rthc_limit * 2);
    if (unlikely(new_table == nullptr)) {
      rc = MDBX_ENOMEM;
      goto bailout;

@@ -400,14 +359,12 @@ __cold int rthc_register(MDBX_env *const env) {
  }

  rthc_table[rthc_count].env = env;
  TRACE("== [%i] = env %p, key %" PRIuPTR, rthc_count, __Wpedantic_format_voidptr(env), (uintptr_t)env->me_txkey);
  ++rthc_count;

bailout:
  TRACE("<< env %p, key %" PRIuPTR ", rthc_count %u, rthc_limit %u, rc %d", __Wpedantic_format_voidptr(env),
        (uintptr_t)env->me_txkey, rthc_count, rthc_limit, rc);
  return rc;
}

@@ -418,10 +375,8 @@ __cold static int rthc_drown(MDBX_env *const env) {
  if (likely(env->lck_mmap.lck && current_pid == env->pid)) {
    reader_slot_t *const begin = &env->lck_mmap.lck->rdt[0];
    reader_slot_t *const end = &env->lck_mmap.lck->rdt[env->max_readers];
    TRACE("== %s env %p pid %d, readers %p ...%p, current-pid %d", (current_pid == env->pid) ? "cleanup" : "skip",
          __Wpedantic_format_voidptr(env), env->pid, __Wpedantic_format_voidptr(begin), __Wpedantic_format_voidptr(end),
          current_pid);
    bool cleaned = false;
    for (reader_slot_t *r = begin; r < end; ++r) {

@@ -434,8 +389,7 @@ __cold static int rthc_drown(MDBX_env *const env) {
    if (cleaned)
      atomic_store32(&env->lck_mmap.lck->rdt_refresh_flag, true, mo_Relaxed);
    rc = rthc_uniq_check(&env->lck_mmap, &inprocess_neighbor);
    if (!inprocess_neighbor && env->registered_reader_pid && env->lck_mmap.fd != INVALID_HANDLE_VALUE) {
      int err = lck_rpid_clear(env);
      rc = rc ? rc : err;
    }

@@ -446,9 +400,8 @@ __cold static int rthc_drown(MDBX_env *const env) {
}

__cold int rthc_remove(MDBX_env *const env) {
  TRACE(">>> env %p, key %zu, rthc_count %u, rthc_limit %u", __Wpedantic_format_voidptr(env), (uintptr_t)env->me_txkey,
        rthc_count, rthc_limit);

  int rc = MDBX_SUCCESS;
  if (likely(env->pid))

@@ -469,9 +422,8 @@ __cold int rthc_remove(MDBX_env *const env) {
    }
  }

  TRACE("<<< %p, key %zu, rthc_count %u, rthc_limit %u", __Wpedantic_format_voidptr(env), (uintptr_t)env->me_txkey,
        rthc_count, rthc_limit);
  return rc;
}

@@ -508,8 +460,8 @@ __cold void rthc_ctor(void) {
#else
  ENSURE(nullptr, pthread_atfork(nullptr, nullptr, rthc_afterfork) == 0);
ENSURE(nullptr, pthread_key_create(&rthc_key, rthc_thread_dtor) == 0); ENSURE(nullptr, pthread_key_create(&rthc_key, rthc_thread_dtor) == 0);
TRACE("pid %d, &mdbx_rthc_key = %p, value 0x%x", osal_getpid(), TRACE("pid %d, &mdbx_rthc_key = %p, value 0x%x", osal_getpid(), __Wpedantic_format_voidptr(&rthc_key),
__Wpedantic_format_voidptr(&rthc_key), (unsigned)rthc_key); (unsigned)rthc_key);
#endif #endif
} }
@ -517,33 +469,23 @@ __cold void rthc_dtor(const uint32_t current_pid) {
rthc_lock(); rthc_lock();
#if !defined(_WIN32) && !defined(_WIN64) #if !defined(_WIN32) && !defined(_WIN64)
uint64_t *rthc = pthread_getspecific(rthc_key); uint64_t *rthc = pthread_getspecific(rthc_key);
TRACE("== thread 0x%" PRIxPTR ", rthc %p, pid %d, self-status 0x%08" PRIx64 TRACE("== thread 0x%" PRIxPTR ", rthc %p, pid %d, self-status 0x%08" PRIx64 ", left %d", osal_thread_self(),
", left %d", __Wpedantic_format_voidptr(rthc), current_pid, rthc ? rthc_read(rthc) : ~UINT64_C(0),
osal_thread_self(), __Wpedantic_format_voidptr(rthc), current_pid,
rthc ? rthc_read(rthc) : ~UINT64_C(0),
atomic_load32(&rthc_pending, mo_Relaxed)); atomic_load32(&rthc_pending, mo_Relaxed));
if (rthc) { if (rthc) {
const uint64_t sign_registered = MDBX_THREAD_RTHC_REGISTERED(rthc); const uint64_t sign_registered = MDBX_THREAD_RTHC_REGISTERED(rthc);
const uint64_t sign_counted = MDBX_THREAD_RTHC_COUNTED(rthc); const uint64_t sign_counted = MDBX_THREAD_RTHC_COUNTED(rthc);
const uint64_t state = rthc_read(rthc); const uint64_t state = rthc_read(rthc);
if (state == sign_registered && if (state == sign_registered && rthc_compare_and_clean(rthc, sign_registered)) {
rthc_compare_and_clean(rthc, sign_registered)) { TRACE("== thread 0x%" PRIxPTR ", rthc %p, pid %d, self-status %s (0x%08" PRIx64 ")", osal_thread_self(),
TRACE("== thread 0x%" PRIxPTR __Wpedantic_format_voidptr(rthc), current_pid, "registered", state);
", rthc %p, pid %d, self-status %s (0x%08" PRIx64 ")", } else if (state == sign_counted && rthc_compare_and_clean(rthc, sign_counted)) {
osal_thread_self(), __Wpedantic_format_voidptr(rthc), current_pid, TRACE("== thread 0x%" PRIxPTR ", rthc %p, pid %d, self-status %s (0x%08" PRIx64 ")", osal_thread_self(),
"registered", state); __Wpedantic_format_voidptr(rthc), current_pid, "counted", state);
} else if (state == sign_counted &&
rthc_compare_and_clean(rthc, sign_counted)) {
TRACE("== thread 0x%" PRIxPTR
", rthc %p, pid %d, self-status %s (0x%08" PRIx64 ")",
osal_thread_self(), __Wpedantic_format_voidptr(rthc), current_pid,
"counted", state);
ENSURE(nullptr, atomic_sub32(&rthc_pending, 1) > 0); ENSURE(nullptr, atomic_sub32(&rthc_pending, 1) > 0);
} else { } else {
WARNING("thread 0x%" PRIxPTR WARNING("thread 0x%" PRIxPTR ", rthc %p, pid %d, self-status %s (0x%08" PRIx64 ")", osal_thread_self(),
", rthc %p, pid %d, self-status %s (0x%08" PRIx64 ")", __Wpedantic_format_voidptr(rthc), current_pid, "wrong", state);
osal_thread_self(), __Wpedantic_format_voidptr(rthc), current_pid,
"wrong", state);
} }
} }
@ -558,8 +500,7 @@ __cold void rthc_dtor(const uint32_t current_pid) {
abstime.tv_sec += 600; abstime.tv_sec += 600;
#endif #endif
for (unsigned left; for (unsigned left; (left = atomic_load32(&rthc_pending, mo_AcquireRelease)) > 0;) {
(left = atomic_load32(&rthc_pending, mo_AcquireRelease)) > 0;) {
NOTICE("tls-cleanup: pid %d, pending %u, wait for...", current_pid, left); NOTICE("tls-cleanup: pid %d, pending %u, wait for...", current_pid, left);
const int rc = pthread_cond_timedwait(&rthc_cond, &rthc_mutex, &abstime); const int rc = pthread_cond_timedwait(&rthc_cond, &rthc_mutex, &abstime);
if (rc && rc != EINTR) if (rc && rc != EINTR)
@ -581,9 +522,8 @@ __cold void rthc_dtor(const uint32_t current_pid) {
for (reader_slot_t *reader = begin; reader < end; ++reader) { for (reader_slot_t *reader = begin; reader < end; ++reader) {
TRACE("== [%zi] = key %" PRIuPTR ", %p ... %p, rthc %p (%+i), " TRACE("== [%zi] = key %" PRIuPTR ", %p ... %p, rthc %p (%+i), "
"rthc-pid %i, current-pid %i", "rthc-pid %i, current-pid %i",
i, (uintptr_t)env->me_txkey, __Wpedantic_format_voidptr(begin), i, (uintptr_t)env->me_txkey, __Wpedantic_format_voidptr(begin), __Wpedantic_format_voidptr(end),
__Wpedantic_format_voidptr(end), __Wpedantic_format_voidptr(reader), __Wpedantic_format_voidptr(reader), (int)(reader - begin), reader->pid.weak, current_pid);
(int)(reader - begin), reader->pid.weak, current_pid);
if (atomic_load32(&reader->pid, mo_Relaxed) == current_pid) { if (atomic_load32(&reader->pid, mo_Relaxed) == current_pid) {
(void)atomic_cas32(&reader->pid, current_pid, 0); (void)atomic_cas32(&reader->pid, current_pid, 0);
TRACE("== cleanup %p", __Wpedantic_format_voidptr(reader)); TRACE("== cleanup %p", __Wpedantic_format_voidptr(reader));

@ -30,8 +30,7 @@ static BOOL WINAPI ConsoleBreakHandlerRoutine(DWORD dwCtrlType) {
static uint64_t GetMilliseconds(void) { static uint64_t GetMilliseconds(void) {
LARGE_INTEGER Counter, Frequency; LARGE_INTEGER Counter, Frequency;
return (QueryPerformanceFrequency(&Frequency) && return (QueryPerformanceFrequency(&Frequency) && QueryPerformanceCounter(&Counter))
QueryPerformanceCounter(&Counter))
? Counter.QuadPart * 1000ul / Frequency.QuadPart ? Counter.QuadPart * 1000ul / Frequency.QuadPart
: 0; : 0;
} }
@ -93,9 +92,8 @@ static void lf_flush(void) {
} }
static bool silently(enum MDBX_chk_severity severity) { static bool silently(enum MDBX_chk_severity severity) {
int cutoff = int cutoff = chk.scope ? chk.scope->verbosity >> MDBX_chk_severity_prio_shift
chk.scope ? chk.scope->verbosity >> MDBX_chk_severity_prio_shift : verbose + (MDBX_chk_result >> MDBX_chk_severity_prio_shift);
: verbose + (MDBX_chk_result >> MDBX_chk_severity_prio_shift);
int prio = (severity >> MDBX_chk_severity_prio_shift); int prio = (severity >> MDBX_chk_severity_prio_shift);
if (chk.scope && chk.scope->stage == MDBX_chk_tables && verbose < 2) if (chk.scope && chk.scope->stage == MDBX_chk_tables && verbose < 2)
prio += 1; prio += 1;
@ -125,11 +123,9 @@ static FILE *prefix(enum MDBX_chk_severity severity) {
" ////// " // F +2 " ////// " // F +2
}; };
const bool nl = const bool nl = line_struct.scope_depth != chk.scope_nesting ||
line_struct.scope_depth != chk.scope_nesting || (line_struct.severity != severity && (line_struct.severity != MDBX_chk_processing ||
(line_struct.severity != severity && severity < MDBX_chk_result || severity > MDBX_chk_resolution));
(line_struct.severity != MDBX_chk_processing ||
severity < MDBX_chk_result || severity > MDBX_chk_resolution));
if (nl) if (nl)
lf(); lf();
if (severity < MDBX_chk_warning) if (severity < MDBX_chk_warning)
@ -157,8 +153,7 @@ static void suffix(size_t cookie, const char *str) {
} }
} }
static size_t MDBX_PRINTF_ARGS(2, 3) static size_t MDBX_PRINTF_ARGS(2, 3) print(enum MDBX_chk_severity severity, const char *msg, ...) {
print(enum MDBX_chk_severity severity, const char *msg, ...) {
FILE *out = prefix(severity); FILE *out = prefix(severity);
if (out) { if (out) {
va_list args; va_list args;
@ -171,8 +166,7 @@ static size_t MDBX_PRINTF_ARGS(2, 3)
return 0; return 0;
} }
static FILE *MDBX_PRINTF_ARGS(2, 3) static FILE *MDBX_PRINTF_ARGS(2, 3) print_ln(enum MDBX_chk_severity severity, const char *msg, ...) {
print_ln(enum MDBX_chk_severity severity, const char *msg, ...) {
FILE *out = prefix(severity); FILE *out = prefix(severity);
if (out) { if (out) {
va_list args; va_list args;
@ -185,15 +179,12 @@ static FILE *MDBX_PRINTF_ARGS(2, 3)
return out; return out;
} }
static void logger(MDBX_log_level_t level, const char *function, int line, static void logger(MDBX_log_level_t level, const char *function, int line, const char *fmt, va_list args) {
const char *fmt, va_list args) {
if (level <= MDBX_LOG_ERROR) if (level <= MDBX_LOG_ERROR)
mdbx_env_chk_encount_problem(&chk); mdbx_env_chk_encount_problem(&chk);
const unsigned kind = (level > MDBX_LOG_NOTICE) const unsigned kind =
? level - MDBX_LOG_NOTICE + (level > MDBX_LOG_NOTICE) ? level - MDBX_LOG_NOTICE + (MDBX_chk_extra & MDBX_chk_severity_kind_mask) : level;
(MDBX_chk_extra & MDBX_chk_severity_kind_mask)
: level;
const unsigned prio = kind << MDBX_chk_severity_prio_shift; const unsigned prio = kind << MDBX_chk_severity_prio_shift;
enum MDBX_chk_severity severity = prio + kind; enum MDBX_chk_severity severity = prio + kind;
FILE *out = prefix(severity); FILE *out = prefix(severity);
@ -204,8 +195,8 @@ static void logger(MDBX_log_level_t level, const char *function, int line,
if (have_lf) if (have_lf)
for (size_t i = 0; i < line_struct.scope_depth; ++i) for (size_t i = 0; i < line_struct.scope_depth; ++i)
fputs(" ", out); fputs(" ", out);
fprintf(out, have_lf ? " %s(), %u" : " (%s:%u)", fprintf(out, have_lf ? " %s(), %u" : " (%s:%u)", function + (strncmp(function, "mdbx_", 5) ? 0 : 5),
function + (strncmp(function, "mdbx_", 5) ? 0 : 5), line); line);
lf(); lf();
} else if (have_lf) { } else if (have_lf) {
line_struct.empty = true; line_struct.empty = true;
@ -249,8 +240,8 @@ static bool check_break(MDBX_chk_context_t *ctx) {
return true; return true;
} }
static int scope_push(MDBX_chk_context_t *ctx, MDBX_chk_scope_t *scope, static int scope_push(MDBX_chk_context_t *ctx, MDBX_chk_scope_t *scope, MDBX_chk_scope_t *inner, const char *fmt,
MDBX_chk_scope_t *inner, const char *fmt, va_list args) { va_list args) {
(void)scope; (void)scope;
if (fmt && *fmt) { if (fmt && *fmt) {
FILE *out = prefix(MDBX_chk_processing); FILE *out = prefix(MDBX_chk_processing);
@ -264,22 +255,19 @@ static int scope_push(MDBX_chk_context_t *ctx, MDBX_chk_scope_t *scope,
return MDBX_SUCCESS; return MDBX_SUCCESS;
} }
static void scope_pop(MDBX_chk_context_t *ctx, MDBX_chk_scope_t *scope, static void scope_pop(MDBX_chk_context_t *ctx, MDBX_chk_scope_t *scope, MDBX_chk_scope_t *inner) {
MDBX_chk_scope_t *inner) {
(void)ctx; (void)ctx;
(void)scope; (void)scope;
suffix(inner->usr_o.number, inner->subtotal_issues ? "error(s)" : "done"); suffix(inner->usr_o.number, inner->subtotal_issues ? "error(s)" : "done");
flush(); flush();
} }
static MDBX_chk_user_table_cookie_t *table_filter(MDBX_chk_context_t *ctx, static MDBX_chk_user_table_cookie_t *table_filter(MDBX_chk_context_t *ctx, const MDBX_val *name,
const MDBX_val *name,
MDBX_db_flags_t flags) { MDBX_db_flags_t flags) {
(void)ctx; (void)ctx;
(void)flags; (void)flags;
return (!only_table.iov_base || return (!only_table.iov_base ||
(only_table.iov_len == name->iov_len && (only_table.iov_len == name->iov_len && memcmp(only_table.iov_base, name->iov_base, name->iov_len) == 0))
memcmp(only_table.iov_base, name->iov_base, name->iov_len) == 0))
? (void *)(intptr_t)-1 ? (void *)(intptr_t)-1
: nullptr; : nullptr;
} }
@ -293,8 +281,7 @@ static int stage_begin(MDBX_chk_context_t *ctx, enum MDBX_chk_stage stage) {
} }
static int conclude(MDBX_chk_context_t *ctx); static int conclude(MDBX_chk_context_t *ctx);
static int stage_end(MDBX_chk_context_t *ctx, enum MDBX_chk_stage stage, static int stage_end(MDBX_chk_context_t *ctx, enum MDBX_chk_stage stage, int err) {
int err) {
if (stage == MDBX_chk_conclude && !err) if (stage == MDBX_chk_conclude && !err)
err = conclude(ctx); err = conclude(ctx);
suffix(anchor_lineno, err ? "error(s)" : "done"); suffix(anchor_lineno, err ? "error(s)" : "done");
@ -303,14 +290,12 @@ static int stage_end(MDBX_chk_context_t *ctx, enum MDBX_chk_stage stage,
return err; return err;
} }
static MDBX_chk_line_t *print_begin(MDBX_chk_context_t *ctx, static MDBX_chk_line_t *print_begin(MDBX_chk_context_t *ctx, enum MDBX_chk_severity severity) {
enum MDBX_chk_severity severity) {
(void)ctx; (void)ctx;
if (silently(severity)) if (silently(severity))
return nullptr; return nullptr;
if (line_struct.ctx) { if (line_struct.ctx) {
if (line_struct.severity == MDBX_chk_processing && if (line_struct.severity == MDBX_chk_processing && severity >= MDBX_chk_result && severity <= MDBX_chk_resolution &&
severity >= MDBX_chk_result && severity <= MDBX_chk_resolution &&
line_output) line_output)
fputc(' ', line_output); fputc(' ', line_output);
else else
@ -356,39 +341,36 @@ static const MDBX_chk_callbacks_t cb = {.check_break = check_break,
.print_format = print_format}; .print_format = print_format};
static void usage(char *prog) { static void usage(char *prog) {
fprintf( fprintf(stderr,
stderr, "usage: %s "
"usage: %s " "[-V] [-v] [-q] [-c] [-0|1|2] [-w] [-d] [-i] [-s table] [-u|U] dbpath\n"
"[-V] [-v] [-q] [-c] [-0|1|2] [-w] [-d] [-i] [-s table] [-u|U] dbpath\n" " -V\t\tprint version and exit\n"
" -V\t\tprint version and exit\n" " -v\t\tmore verbose, could be repeated upto 9 times for extra details\n"
" -v\t\tmore verbose, could be repeated upto 9 times for extra details\n" " -q\t\tbe quiet\n"
" -q\t\tbe quiet\n" " -c\t\tforce cooperative mode (don't try exclusive)\n"
" -c\t\tforce cooperative mode (don't try exclusive)\n" " -w\t\twrite-mode checking\n"
" -w\t\twrite-mode checking\n" " -d\t\tdisable page-by-page traversal of B-tree\n"
" -d\t\tdisable page-by-page traversal of B-tree\n" " -i\t\tignore wrong order errors (for custom comparators case)\n"
" -i\t\tignore wrong order errors (for custom comparators case)\n" " -s table\tprocess a specific subdatabase only\n"
" -s table\tprocess a specific subdatabase only\n" " -u\t\twarmup database before checking\n"
" -u\t\twarmup database before checking\n" " -U\t\twarmup and try lock database pages in memory before checking\n"
" -U\t\twarmup and try lock database pages in memory before checking\n" " -0|1|2\tforce using specific meta-page 0, or 2 for checking\n"
" -0|1|2\tforce using specific meta-page 0, or 2 for checking\n" " -t\t\tturn to a specified meta-page on successful check\n"
" -t\t\tturn to a specified meta-page on successful check\n" " -T\t\tturn to a specified meta-page EVEN ON UNSUCCESSFUL CHECK!\n",
" -T\t\tturn to a specified meta-page EVEN ON UNSUCCESSFUL CHECK!\n", prog);
prog);
exit(EXIT_INTERRUPTED); exit(EXIT_INTERRUPTED);
} }
static int conclude(MDBX_chk_context_t *ctx) { static int conclude(MDBX_chk_context_t *ctx) {
int err = MDBX_SUCCESS; int err = MDBX_SUCCESS;
if (ctx->result.total_problems == 1 && ctx->result.problems_meta == 1 && if (ctx->result.total_problems == 1 && ctx->result.problems_meta == 1 &&
(chk_flags & (chk_flags & (MDBX_CHK_SKIP_BTREE_TRAVERSAL | MDBX_CHK_SKIP_KV_TRAVERSAL)) == 0 &&
(MDBX_CHK_SKIP_BTREE_TRAVERSAL | MDBX_CHK_SKIP_KV_TRAVERSAL)) == 0 && (env_flags & MDBX_RDONLY) == 0 && !only_table.iov_base && stuck_meta < 0 &&
(env_flags & MDBX_RDONLY) == 0 && !only_table.iov_base && ctx->result.steady_txnid < ctx->result.recent_txnid) {
stuck_meta < 0 && ctx->result.steady_txnid < ctx->result.recent_txnid) { const size_t step_lineno = print(MDBX_chk_resolution,
const size_t step_lineno = "Perform sync-to-disk for make steady checkpoint"
print(MDBX_chk_resolution, " at txn-id #%" PRIi64 "...",
"Perform sync-to-disk for make steady checkpoint" ctx->result.recent_txnid);
" at txn-id #%" PRIi64 "...",
ctx->result.recent_txnid);
flush(); flush();
err = error_fn("walk_pages", mdbx_env_sync_ex(ctx->env, true, false)); err = error_fn("walk_pages", mdbx_env_sync_ex(ctx->env, true, false));
if (err == MDBX_SUCCESS) { if (err == MDBX_SUCCESS) {
@ -398,19 +380,13 @@ static int conclude(MDBX_chk_context_t *ctx) {
} }
} }
if (turn_meta && stuck_meta >= 0 && if (turn_meta && stuck_meta >= 0 && (chk_flags & (MDBX_CHK_SKIP_BTREE_TRAVERSAL | MDBX_CHK_SKIP_KV_TRAVERSAL)) == 0 &&
(chk_flags & !only_table.iov_base && (env_flags & (MDBX_RDONLY | MDBX_EXCLUSIVE)) == MDBX_EXCLUSIVE) {
(MDBX_CHK_SKIP_BTREE_TRAVERSAL | MDBX_CHK_SKIP_KV_TRAVERSAL)) == 0 && const bool successful_check = (err | ctx->result.total_problems | ctx->result.problems_meta) == 0;
!only_table.iov_base &&
(env_flags & (MDBX_RDONLY | MDBX_EXCLUSIVE)) == MDBX_EXCLUSIVE) {
const bool successful_check =
(err | ctx->result.total_problems | ctx->result.problems_meta) == 0;
if (successful_check || force_turn_meta) { if (successful_check || force_turn_meta) {
const size_t step_lineno = print( const size_t step_lineno =
MDBX_chk_resolution, print(MDBX_chk_resolution, "Performing turn to the specified meta-page (%d) due to %s!", stuck_meta,
"Performing turn to the specified meta-page (%d) due to %s!", successful_check ? "successful check" : "the -T option was given");
stuck_meta,
successful_check ? "successful check" : "the -T option was given");
flush(); flush();
err = mdbx_env_turn_for_recovery(ctx->env, stuck_meta); err = mdbx_env_turn_for_recovery(ctx->env, stuck_meta);
if (err != MDBX_SUCCESS) if (err != MDBX_SUCCESS)
@ -475,12 +451,9 @@ int main(int argc, char *argv[]) {
" - build: %s for %s by %s\n" " - build: %s for %s by %s\n"
" - flags: %s\n" " - flags: %s\n"
" - options: %s\n", " - options: %s\n",
mdbx_version.major, mdbx_version.minor, mdbx_version.patch, mdbx_version.major, mdbx_version.minor, mdbx_version.patch, mdbx_version.tweak, mdbx_version.git.describe,
mdbx_version.tweak, mdbx_version.git.describe, mdbx_version.git.datetime, mdbx_version.git.commit, mdbx_version.git.tree, mdbx_sourcery_anchor,
mdbx_version.git.datetime, mdbx_version.git.commit, mdbx_build.datetime, mdbx_build.target, mdbx_build.compiler, mdbx_build.flags, mdbx_build.options);
mdbx_version.git.tree, mdbx_sourcery_anchor, mdbx_build.datetime,
mdbx_build.target, mdbx_build.compiler, mdbx_build.flags,
mdbx_build.options);
return EXIT_SUCCESS; return EXIT_SUCCESS;
case 'v': case 'v':
if (verbose >= 9 && 0) if (verbose >= 9 && 0)
@ -546,8 +519,7 @@ int main(int argc, char *argv[]) {
break; break;
case 'U': case 'U':
warmup = true; warmup = true;
warmup_flags = warmup_flags = MDBX_warmup_force | MDBX_warmup_touchlimit | MDBX_warmup_lock;
MDBX_warmup_force | MDBX_warmup_touchlimit | MDBX_warmup_lock;
break; break;
default: default:
usage(prog); usage(prog);
@ -566,21 +538,17 @@ int main(int argc, char *argv[]) {
} }
if (turn_meta) { if (turn_meta) {
if (stuck_meta < 0) { if (stuck_meta < 0) {
error_fmt( error_fmt("meta-page must be specified (by -0, -1 or -2 options) to turn to "
"meta-page must be specified (by -0, -1 or -2 options) to turn to " "it.");
"it.");
rc = EXIT_INTERRUPTED; rc = EXIT_INTERRUPTED;
} }
if (env_flags & MDBX_RDONLY) { if (env_flags & MDBX_RDONLY) {
error_fmt( error_fmt("write-mode must be enabled to turn to the specified meta-page.");
"write-mode must be enabled to turn to the specified meta-page.");
rc = EXIT_INTERRUPTED; rc = EXIT_INTERRUPTED;
} }
if (only_table.iov_base || (chk_flags & (MDBX_CHK_SKIP_BTREE_TRAVERSAL | if (only_table.iov_base || (chk_flags & (MDBX_CHK_SKIP_BTREE_TRAVERSAL | MDBX_CHK_SKIP_KV_TRAVERSAL))) {
MDBX_CHK_SKIP_KV_TRAVERSAL))) { error_fmt("whole database checking with b-tree traversal are required to turn "
error_fmt( "to the specified meta-page.");
"whole database checking with b-tree traversal are required to turn "
"to the specified meta-page.");
rc = EXIT_INTERRUPTED; rc = EXIT_INTERRUPTED;
} }
} }
@ -604,20 +572,15 @@ int main(int argc, char *argv[]) {
print(MDBX_chk_result, print(MDBX_chk_result,
"mdbx_chk %s (%s, T-%s)\nRunning for %s in 'read-%s' mode with " "mdbx_chk %s (%s, T-%s)\nRunning for %s in 'read-%s' mode with "
"verbosity level %u (%s)...", "verbosity level %u (%s)...",
mdbx_version.git.describe, mdbx_version.git.datetime, mdbx_version.git.describe, mdbx_version.git.datetime, mdbx_version.git.tree, envname,
mdbx_version.git.tree, envname,
(env_flags & MDBX_RDONLY) ? "only" : "write", verbose, (env_flags & MDBX_RDONLY) ? "only" : "write", verbose,
(verbose > 8) (verbose > 8)
? (MDBX_DEBUG ? "extra details for debugging" ? (MDBX_DEBUG ? "extra details for debugging" : "same as 8 for non-debug builds with MDBX_DEBUG=0")
: "same as 8 for non-debug builds with MDBX_DEBUG=0")
: "of 0..9"); : "of 0..9");
lf_flush(); lf_flush();
mdbx_setup_debug((verbose + MDBX_LOG_WARN < MDBX_LOG_TRACE) mdbx_setup_debug(
? (MDBX_log_level_t)(verbose + MDBX_LOG_WARN) (verbose + MDBX_LOG_WARN < MDBX_LOG_TRACE) ? (MDBX_log_level_t)(verbose + MDBX_LOG_WARN) : MDBX_LOG_TRACE,
: MDBX_LOG_TRACE, MDBX_DBG_DUMP | MDBX_DBG_ASSERT | MDBX_DBG_AUDIT | MDBX_DBG_LEGACY_OVERLAP | MDBX_DBG_DONT_UPGRADE, logger);
MDBX_DBG_DUMP | MDBX_DBG_ASSERT | MDBX_DBG_AUDIT |
MDBX_DBG_LEGACY_OVERLAP | MDBX_DBG_DONT_UPGRADE,
logger);
rc = mdbx_env_create(&env); rc = mdbx_env_create(&env);
if (rc) { if (rc) {
@ -632,18 +595,16 @@ int main(int argc, char *argv[]) {
} }
if (stuck_meta >= 0) { if (stuck_meta >= 0) {
rc = mdbx_env_open_for_recovery(env, envname, stuck_meta, rc = mdbx_env_open_for_recovery(env, envname, stuck_meta, (env_flags & MDBX_RDONLY) ? false : true);
(env_flags & MDBX_RDONLY) ? false : true);
} else { } else {
rc = mdbx_env_open(env, envname, env_flags, 0); rc = mdbx_env_open(env, envname, env_flags, 0);
if ((env_flags & MDBX_EXCLUSIVE) && if ((env_flags & MDBX_EXCLUSIVE) && (rc == MDBX_BUSY ||
(rc == MDBX_BUSY ||
#if defined(_WIN32) || defined(_WIN64) #if defined(_WIN32) || defined(_WIN64)
rc == ERROR_LOCK_VIOLATION || rc == ERROR_SHARING_VIOLATION rc == ERROR_LOCK_VIOLATION || rc == ERROR_SHARING_VIOLATION
#else #else
rc == EBUSY || rc == EAGAIN rc == EBUSY || rc == EAGAIN
#endif #endif
)) { )) {
env_flags &= ~MDBX_EXCLUSIVE; env_flags &= ~MDBX_EXCLUSIVE;
rc = mdbx_env_open(env, envname, env_flags | MDBX_ACCEDE, 0); rc = mdbx_env_open(env, envname, env_flags | MDBX_ACCEDE, 0);
} }
@ -652,13 +613,10 @@ int main(int argc, char *argv[]) {
if (rc) { if (rc) {
error_fn("mdbx_env_open", rc); error_fn("mdbx_env_open", rc);
if (rc == MDBX_WANNA_RECOVERY && (env_flags & MDBX_RDONLY)) if (rc == MDBX_WANNA_RECOVERY && (env_flags & MDBX_RDONLY))
print_ln(MDBX_chk_result, print_ln(MDBX_chk_result, "Please run %s in the read-write mode (with '-w' option).", prog);
"Please run %s in the read-write mode (with '-w' option).",
prog);
goto bailout; goto bailout;
} }
print_ln(MDBX_chk_verbose, "%s mode", print_ln(MDBX_chk_verbose, "%s mode", (env_flags & MDBX_EXCLUSIVE) ? "monopolistic" : "cooperative");
(env_flags & MDBX_EXCLUSIVE) ? "monopolistic" : "cooperative");
if (warmup) { if (warmup) {
anchor_lineno = print(MDBX_chk_verbose, "warming up..."); anchor_lineno = print(MDBX_chk_verbose, "warming up...");
@ -671,9 +629,7 @@ int main(int argc, char *argv[]) {
suffix(anchor_lineno, rc ? "timeout" : "done"); suffix(anchor_lineno, rc ? "timeout" : "done");
} }
rc = mdbx_env_chk(env, &cb, &chk, chk_flags, rc = mdbx_env_chk(env, &cb, &chk, chk_flags, MDBX_chk_result + (verbose << MDBX_chk_severity_prio_shift), 0);
MDBX_chk_result + (verbose << MDBX_chk_severity_prio_shift),
0);
if (rc) { if (rc) {
if (chk.result.total_problems == 0) if (chk.result.total_problems == 0)
error_fn("mdbx_env_chk", rc); error_fn("mdbx_env_chk", rc);
@ -683,8 +639,7 @@ int main(int argc, char *argv[]) {
bailout: bailout:
if (env) { if (env) {
const bool dont_sync = rc != 0 || chk.result.total_problems || const bool dont_sync = rc != 0 || chk.result.total_problems || (chk_flags & MDBX_CHK_READWRITE) == 0;
(chk_flags & MDBX_CHK_READWRITE) == 0;
mdbx_env_close_ex(env, dont_sync); mdbx_env_close_ex(env, dont_sync);
} }
flush(); flush();
@ -702,21 +657,17 @@ bailout:
error_fn("clock_gettime", errno); error_fn("clock_gettime", errno);
return EXIT_FAILURE_SYS; return EXIT_FAILURE_SYS;
} }
elapsed = timestamp_finish.tv_sec - timestamp_start.tv_sec + elapsed =
(timestamp_finish.tv_nsec - timestamp_start.tv_nsec) * 1e-9; timestamp_finish.tv_sec - timestamp_start.tv_sec + (timestamp_finish.tv_nsec - timestamp_start.tv_nsec) * 1e-9;
#endif /* !WINDOWS */ #endif /* !WINDOWS */
if (chk.result.total_problems) { if (chk.result.total_problems) {
print_ln(MDBX_chk_result, print_ln(MDBX_chk_result, "Total %" PRIuSIZE " error%s detected, elapsed %.3f seconds.", chk.result.total_problems,
"Total %" PRIuSIZE " error%s detected, elapsed %.3f seconds.",
chk.result.total_problems,
(chk.result.total_problems > 1) ? "s are" : " is", elapsed); (chk.result.total_problems > 1) ? "s are" : " is", elapsed);
if (chk.result.problems_meta || chk.result.problems_kv || if (chk.result.problems_meta || chk.result.problems_kv || chk.result.problems_gc)
chk.result.problems_gc)
return EXIT_FAILURE_CHECK_MAJOR; return EXIT_FAILURE_CHECK_MAJOR;
return EXIT_FAILURE_CHECK_MINOR; return EXIT_FAILURE_CHECK_MINOR;
} }
print_ln(MDBX_chk_result, "No error is detected, elapsed %.3f seconds.", print_ln(MDBX_chk_result, "No error is detected, elapsed %.3f seconds.", elapsed);
elapsed);
return EXIT_SUCCESS; return EXIT_SUCCESS;
} }

@ -37,20 +37,19 @@ static void signal_handler(int sig) {
#endif /* !WINDOWS */ #endif /* !WINDOWS */
static void usage(const char *prog) { static void usage(const char *prog) {
fprintf( fprintf(stderr,
stderr, "usage: %s [-V] [-q] [-c] [-d] [-p] [-u|U] src_path [dest_path]\n"
"usage: %s [-V] [-q] [-c] [-d] [-p] [-u|U] src_path [dest_path]\n" " -V\t\tprint version and exit\n"
" -V\t\tprint version and exit\n" " -q\t\tbe quiet\n"
" -q\t\tbe quiet\n" " -c\t\tenable compactification (skip unused pages)\n"
" -c\t\tenable compactification (skip unused pages)\n" " -d\t\tenforce copy to be a dynamic size DB\n"
" -d\t\tenforce copy to be a dynamic size DB\n" " -p\t\tusing transaction parking/ousting during copying MVCC-snapshot\n"
" -p\t\tusing transaction parking/ousting during copying MVCC-snapshot\n" " \t\tto avoid stopping recycling and overflowing the DB\n"
" \t\tto avoid stopping recycling and overflowing the DB\n" " -u\t\twarmup database before copying\n"
" -u\t\twarmup database before copying\n" " -U\t\twarmup and try lock database pages in memory before copying\n"
" -U\t\twarmup and try lock database pages in memory before copying\n" " src_path\tsource database\n"
" src_path\tsource database\n" " dest_path\tdestination (stdout if not specified)\n",
" dest_path\tdestination (stdout if not specified)\n", prog);
prog);
exit(EXIT_FAILURE); exit(EXIT_FAILURE);
} }
@ -79,10 +78,8 @@ int main(int argc, char *argv[]) {
warmup = true; warmup = true;
else if (argv[1][1] == 'U' && argv[1][2] == '\0') { else if (argv[1][1] == 'U' && argv[1][2] == '\0') {
warmup = true; warmup = true;
warmup_flags = warmup_flags = MDBX_warmup_force | MDBX_warmup_touchlimit | MDBX_warmup_lock;
MDBX_warmup_force | MDBX_warmup_touchlimit | MDBX_warmup_lock; } else if ((argv[1][1] == 'h' && argv[1][2] == '\0') || strcmp(argv[1], "--help") == 0)
} else if ((argv[1][1] == 'h' && argv[1][2] == '\0') ||
strcmp(argv[1], "--help") == 0)
usage(progname); usage(progname);
else if (argv[1][1] == 'V' && argv[1][2] == '\0') { else if (argv[1][1] == 'V' && argv[1][2] == '\0') {
printf("mdbx_copy version %d.%d.%d.%d\n" printf("mdbx_copy version %d.%d.%d.%d\n"
@ -91,12 +88,9 @@ int main(int argc, char *argv[]) {
" - build: %s for %s by %s\n" " - build: %s for %s by %s\n"
" - flags: %s\n" " - flags: %s\n"
" - options: %s\n", " - options: %s\n",
mdbx_version.major, mdbx_version.minor, mdbx_version.patch, mdbx_version.major, mdbx_version.minor, mdbx_version.patch, mdbx_version.tweak, mdbx_version.git.describe,
mdbx_version.tweak, mdbx_version.git.describe, mdbx_version.git.datetime, mdbx_version.git.commit, mdbx_version.git.tree, mdbx_sourcery_anchor,
mdbx_version.git.datetime, mdbx_version.git.commit, mdbx_build.datetime, mdbx_build.target, mdbx_build.compiler, mdbx_build.flags, mdbx_build.options);
mdbx_version.git.tree, mdbx_sourcery_anchor, mdbx_build.datetime,
mdbx_build.target, mdbx_build.compiler, mdbx_build.flags,
mdbx_build.options);
return EXIT_SUCCESS; return EXIT_SUCCESS;
} else } else
argc = 0; argc = 0;
@ -119,10 +113,9 @@ int main(int argc, char *argv[]) {
#endif /* !WINDOWS */ #endif /* !WINDOWS */
if (!quiet) { if (!quiet) {
fprintf((argc == 2) ? stderr : stdout, fprintf((argc == 2) ? stderr : stdout, "mdbx_copy %s (%s, T-%s)\nRunning for copy %s to %s...\n",
"mdbx_copy %s (%s, T-%s)\nRunning for copy %s to %s...\n", mdbx_version.git.describe, mdbx_version.git.datetime, mdbx_version.git.tree, argv[1],
mdbx_version.git.describe, mdbx_version.git.datetime, (argc == 2) ? "stdout" : argv[2]);
mdbx_version.git.tree, argv[1], (argc == 2) ? "stdout" : argv[2]);
fflush(nullptr); fflush(nullptr);
} }
@ -150,8 +143,7 @@ int main(int argc, char *argv[]) {
rc = mdbx_env_copy(env, argv[2], cpflags); rc = mdbx_env_copy(env, argv[2], cpflags);
} }
if (rc) if (rc)
fprintf(stderr, "%s: %s failed, error %d (%s)\n", progname, act, rc, fprintf(stderr, "%s: %s failed, error %d (%s)\n", progname, act, rc, mdbx_strerror(rc));
mdbx_strerror(rc));
mdbx_env_close(env); mdbx_env_close(env);
return rc ? EXIT_FAILURE : EXIT_SUCCESS; return rc ? EXIT_FAILURE : EXIT_SUCCESS;

@ -54,8 +54,7 @@ static void usage(void) {
static void error(const char *func, int rc) { static void error(const char *func, int rc) {
if (!quiet) if (!quiet)
fprintf(stderr, "%s: %s() error %d %s\n", prog, func, rc, fprintf(stderr, "%s: %s() error %d %s\n", prog, func, rc, mdbx_strerror(rc));
mdbx_strerror(rc));
} }
int main(int argc, char *argv[]) { int main(int argc, char *argv[]) {
@ -86,12 +85,9 @@ int main(int argc, char *argv[]) {
" - build: %s for %s by %s\n" " - build: %s for %s by %s\n"
" - flags: %s\n" " - flags: %s\n"
" - options: %s\n", " - options: %s\n",
mdbx_version.major, mdbx_version.minor, mdbx_version.patch, mdbx_version.major, mdbx_version.minor, mdbx_version.patch, mdbx_version.tweak, mdbx_version.git.describe,
mdbx_version.tweak, mdbx_version.git.describe, mdbx_version.git.datetime, mdbx_version.git.commit, mdbx_version.git.tree, mdbx_sourcery_anchor,
mdbx_version.git.datetime, mdbx_version.git.commit, mdbx_build.datetime, mdbx_build.target, mdbx_build.compiler, mdbx_build.flags, mdbx_build.options);
mdbx_version.git.tree, mdbx_sourcery_anchor, mdbx_build.datetime,
mdbx_build.target, mdbx_build.compiler, mdbx_build.flags,
mdbx_build.options);
return EXIT_SUCCESS; return EXIT_SUCCESS;
case 'q': case 'q':
quiet = true; quiet = true;
@ -127,8 +123,7 @@ int main(int argc, char *argv[]) {
envname = argv[optind]; envname = argv[optind];
if (!quiet) { if (!quiet) {
printf("mdbx_drop %s (%s, T-%s)\nRunning for %s/%s...\n", printf("mdbx_drop %s (%s, T-%s)\nRunning for %s/%s...\n", mdbx_version.git.describe, mdbx_version.git.datetime,
mdbx_version.git.describe, mdbx_version.git.datetime,
mdbx_version.git.tree, envname, subname ? subname : "@MAIN"); mdbx_version.git.tree, envname, subname ? subname : "@MAIN");
fflush(nullptr); fflush(nullptr);
} }

@ -95,8 +95,7 @@ bool quiet = false, rescue = false;
const char *prog; const char *prog;
static void error(const char *func, int rc) { static void error(const char *func, int rc) {
if (!quiet) if (!quiet)
fprintf(stderr, "%s: %s() error %d %s\n", prog, func, rc, fprintf(stderr, "%s: %s() error %d %s\n", prog, func, rc, mdbx_strerror(rc));
mdbx_strerror(rc));
} }
/* Dump in BDB-compatible format */ /* Dump in BDB-compatible format */
@ -126,10 +125,8 @@ static int dump_tbl(MDBX_txn *txn, MDBX_dbi dbi, char *name) {
if (mode & GLOBAL) { if (mode & GLOBAL) {
mode -= GLOBAL; mode -= GLOBAL;
if (info.mi_geo.upper != info.mi_geo.lower) if (info.mi_geo.upper != info.mi_geo.lower)
printf("geometry=l%" PRIu64 ",c%" PRIu64 ",u%" PRIu64 ",s%" PRIu64 printf("geometry=l%" PRIu64 ",c%" PRIu64 ",u%" PRIu64 ",s%" PRIu64 ",g%" PRIu64 "\n", info.mi_geo.lower,
",g%" PRIu64 "\n", info.mi_geo.current, info.mi_geo.upper, info.mi_geo.shrink, info.mi_geo.grow);
info.mi_geo.lower, info.mi_geo.current, info.mi_geo.upper,
info.mi_geo.shrink, info.mi_geo.grow);
printf("mapsize=%" PRIu64 "\n", info.mi_geo.upper); printf("mapsize=%" PRIu64 "\n", info.mi_geo.upper);
printf("maxreaders=%u\n", info.mi_maxreaders); printf("maxreaders=%u\n", info.mi_maxreaders);
@ -140,8 +137,7 @@ static int dump_tbl(MDBX_txn *txn, MDBX_dbi dbi, char *name) {
return rc; return rc;
} }
if (canary.v) if (canary.v)
printf("canary=v%" PRIu64 ",x%" PRIu64 ",y%" PRIu64 ",z%" PRIu64 "\n", printf("canary=v%" PRIu64 ",x%" PRIu64 ",y%" PRIu64 ",z%" PRIu64 "\n", canary.v, canary.x, canary.y, canary.z);
canary.v, canary.x, canary.y, canary.z);
} }
printf("format=%s\n", mode & PRINT ? "print" : "bytevalue"); printf("format=%s\n", mode & PRINT ? "print" : "bytevalue");
if (name) if (name)
@ -153,10 +149,7 @@ static int dump_tbl(MDBX_txn *txn, MDBX_dbi dbi, char *name) {
else if (!name) else if (!name)
printf("txnid=%" PRIaTXN "\n", mdbx_txn_id(txn)); */ printf("txnid=%" PRIaTXN "\n", mdbx_txn_id(txn)); */
printf("duplicates=%d\n", (flags & (MDBX_DUPSORT | MDBX_DUPFIXED | printf("duplicates=%d\n", (flags & (MDBX_DUPSORT | MDBX_DUPFIXED | MDBX_INTEGERDUP | MDBX_REVERSEDUP)) ? 1 : 0);
MDBX_INTEGERDUP | MDBX_REVERSEDUP))
? 1
: 0);
for (int i = 0; dbflags[i].bit; i++) for (int i = 0; dbflags[i].bit; i++)
if (flags & dbflags[i].bit) if (flags & dbflags[i].bit)
printf("%s=1\n", dbflags[i].name); printf("%s=1\n", dbflags[i].name);
@ -187,8 +180,7 @@ static int dump_tbl(MDBX_txn *txn, MDBX_dbi dbi, char *name) {
} }
} }
while ((rc = mdbx_cursor_get(cursor, &key, &data, MDBX_NEXT)) == while ((rc = mdbx_cursor_get(cursor, &key, &data, MDBX_NEXT)) == MDBX_SUCCESS) {
MDBX_SUCCESS) {
if (user_break) { if (user_break) {
rc = MDBX_EINTR; rc = MDBX_EINTR;
break; break;
@ -212,31 +204,27 @@ static int dump_tbl(MDBX_txn *txn, MDBX_dbi dbi, char *name) {
} }
static void usage(void) { static void usage(void) {
fprintf( fprintf(stderr,
stderr, "usage: %s "
"usage: %s " "[-V] [-q] [-f file] [-l] [-p] [-r] [-a|-s table] [-u|U] "
"[-V] [-q] [-f file] [-l] [-p] [-r] [-a|-s table] [-u|U] " "dbpath\n"
"dbpath\n" " -V\t\tprint version and exit\n"
" -V\t\tprint version and exit\n" " -q\t\tbe quiet\n"
" -q\t\tbe quiet\n" " -f\t\twrite to file instead of stdout\n"
" -f\t\twrite to file instead of stdout\n" " -l\t\tlist tables and exit\n"
" -l\t\tlist tables and exit\n" " -p\t\tuse printable characters\n"
" -p\t\tuse printable characters\n" " -r\t\trescue mode (ignore errors to dump corrupted DB)\n"
" -r\t\trescue mode (ignore errors to dump corrupted DB)\n" " -a\t\tdump main DB and all tables\n"
" -a\t\tdump main DB and all tables\n" " -s name\tdump only the specified named table\n"
" -s name\tdump only the specified named table\n" " -u\t\twarmup database before dumping\n"
" -u\t\twarmup database before dumping\n" " -U\t\twarmup and try lock database pages in memory before dumping\n"
" -U\t\twarmup and try lock database pages in memory before dumping\n" " \t\tby default dump only the main DB\n",
" \t\tby default dump only the main DB\n", prog);
prog);
exit(EXIT_FAILURE); exit(EXIT_FAILURE);
} }
static int equal_or_greater(const MDBX_val *a, const MDBX_val *b) { static int equal_or_greater(const MDBX_val *a, const MDBX_val *b) {
return (a->iov_len == b->iov_len && return (a->iov_len == b->iov_len && memcmp(a->iov_base, b->iov_base, a->iov_len) == 0) ? 0 : 1;
memcmp(a->iov_base, b->iov_base, a->iov_len) == 0)
? 0
: 1;
} }
int main(int argc, char *argv[]) { int main(int argc, char *argv[]) {
@ -274,12 +262,9 @@ int main(int argc, char *argv[]) {
" - build: %s for %s by %s\n" " - build: %s for %s by %s\n"
" - flags: %s\n" " - flags: %s\n"
" - options: %s\n", " - options: %s\n",
mdbx_version.major, mdbx_version.minor, mdbx_version.patch, mdbx_version.major, mdbx_version.minor, mdbx_version.patch, mdbx_version.tweak, mdbx_version.git.describe,
mdbx_version.tweak, mdbx_version.git.describe, mdbx_version.git.datetime, mdbx_version.git.commit, mdbx_version.git.tree, mdbx_sourcery_anchor,
mdbx_version.git.datetime, mdbx_version.git.commit, mdbx_build.datetime, mdbx_build.target, mdbx_build.compiler, mdbx_build.flags, mdbx_build.options);
mdbx_version.git.tree, mdbx_sourcery_anchor, mdbx_build.datetime,
mdbx_build.target, mdbx_build.compiler, mdbx_build.flags,
mdbx_build.options);
return EXIT_SUCCESS; return EXIT_SUCCESS;
case 'l': case 'l':
list = true; list = true;
@ -292,8 +277,7 @@ int main(int argc, char *argv[]) {
break; break;
case 'f': case 'f':
if (freopen(optarg, "w", stdout) == nullptr) { if (freopen(optarg, "w", stdout) == nullptr) {
fprintf(stderr, "%s: %s: reopen: %s\n", prog, optarg, fprintf(stderr, "%s: %s: reopen: %s\n", prog, optarg, mdbx_strerror(errno));
mdbx_strerror(errno));
exit(EXIT_FAILURE); exit(EXIT_FAILURE);
} }
break; break;
@ -318,8 +302,7 @@ int main(int argc, char *argv[]) {
break; break;
case 'U': case 'U':
warmup = true; warmup = true;
warmup_flags = warmup_flags = MDBX_warmup_force | MDBX_warmup_touchlimit | MDBX_warmup_lock;
MDBX_warmup_force | MDBX_warmup_touchlimit | MDBX_warmup_lock;
break; break;
default: default:
usage(); usage();
@ -344,9 +327,8 @@ int main(int argc, char *argv[]) {
envname = argv[optind]; envname = argv[optind];
if (!quiet) { if (!quiet) {
fprintf(stderr, "mdbx_dump %s (%s, T-%s)\nRunning for %s...\n", fprintf(stderr, "mdbx_dump %s (%s, T-%s)\nRunning for %s...\n", mdbx_version.git.describe,
mdbx_version.git.describe, mdbx_version.git.datetime, mdbx_version.git.datetime, mdbx_version.git.tree, envname);
mdbx_version.git.tree, envname);
fflush(nullptr); fflush(nullptr);
} }
@ -364,11 +346,8 @@ int main(int argc, char *argv[]) {
} }
} }
err = mdbx_env_open( err = mdbx_env_open(env, envname, envflags | (rescue ? MDBX_RDONLY | MDBX_EXCLUSIVE | MDBX_VALIDATION : MDBX_RDONLY),
env, envname, 0);
envflags | (rescue ? MDBX_RDONLY | MDBX_EXCLUSIVE | MDBX_VALIDATION
: MDBX_RDONLY),
0);
if (unlikely(err != MDBX_SUCCESS)) { if (unlikely(err != MDBX_SUCCESS)) {
error("mdbx_env_open", err); error("mdbx_env_open", err);
goto env_close; goto env_close;
@ -414,8 +393,7 @@ int main(int argc, char *argv[]) {
bool have_raw = false; bool have_raw = false;
int count = 0; int count = 0;
MDBX_val key; MDBX_val key;
while (MDBX_SUCCESS == while (MDBX_SUCCESS == (err = mdbx_cursor_get(cursor, &key, nullptr, MDBX_NEXT_NODUP))) {
(err = mdbx_cursor_get(cursor, &key, nullptr, MDBX_NEXT_NODUP))) {
if (user_break) { if (user_break) {
err = MDBX_EINTR; err = MDBX_EINTR;
break; break;
@ -434,8 +412,7 @@ int main(int argc, char *argv[]) {
subname[key.iov_len] = '\0'; subname[key.iov_len] = '\0';
MDBX_dbi sub_dbi; MDBX_dbi sub_dbi;
err = mdbx_dbi_open_ex(txn, subname, MDBX_DB_ACCEDE, &sub_dbi, err = mdbx_dbi_open_ex(txn, subname, MDBX_DB_ACCEDE, &sub_dbi, rescue ? equal_or_greater : nullptr,
rescue ? equal_or_greater : nullptr,
rescue ? equal_or_greater : nullptr); rescue ? equal_or_greater : nullptr);
if (unlikely(err != MDBX_SUCCESS)) { if (unlikely(err != MDBX_SUCCESS)) {
if (err == MDBX_INCOMPATIBLE) { if (err == MDBX_INCOMPATIBLE) {
@ -455,8 +432,7 @@ int main(int argc, char *argv[]) {
if (!rescue) if (!rescue)
break; break;
if (!quiet) if (!quiet)
fprintf(stderr, "%s: %s: ignore %s for `%s` and continue\n", prog, fprintf(stderr, "%s: %s: ignore %s for `%s` and continue\n", prog, envname, mdbx_strerror(err), subname);
envname, mdbx_strerror(err), subname);
/* Here is a hack for rescue mode, don't do that: /* Here is a hack for rescue mode, don't do that:
* - we should restart transaction in case error due * - we should restart transaction in case error due
* database corruption; * database corruption;
@ -491,8 +467,7 @@ int main(int argc, char *argv[]) {
err = dump_tbl(txn, MAIN_DBI, nullptr); err = dump_tbl(txn, MAIN_DBI, nullptr);
else if (!count) { else if (!count) {
if (!quiet) if (!quiet)
fprintf(stderr, "%s: %s does not contain multiple databases\n", prog, fprintf(stderr, "%s: %s does not contain multiple databases\n", prog, envname);
envname);
err = MDBX_NOTFOUND; err = MDBX_NOTFOUND;
} }
} else { } else {

@ -44,11 +44,10 @@ static size_t lineno;
static void error(const char *func, int rc) { static void error(const char *func, int rc) {
if (!quiet) { if (!quiet) {
if (lineno) if (lineno)
fprintf(stderr, "%s: at input line %" PRIiSIZE ": %s() error %d, %s\n", fprintf(stderr, "%s: at input line %" PRIiSIZE ": %s() error %d, %s\n", prog, lineno, func, rc,
prog, lineno, func, rc, mdbx_strerror(rc));
else
fprintf(stderr, "%s: %s() error %d %s\n", prog, func, rc,
mdbx_strerror(rc)); mdbx_strerror(rc));
else
fprintf(stderr, "%s: %s() error %d %s\n", prog, func, rc, mdbx_strerror(rc));
} }
} }
@ -60,9 +59,7 @@ static char *valstr(char *line, const char *item) {
if (line[len] > ' ') if (line[len] > ' ')
return nullptr; return nullptr;
if (!quiet) if (!quiet)
fprintf(stderr, fprintf(stderr, "%s: line %" PRIiSIZE ": unexpected line format for '%s'\n", prog, lineno, item);
"%s: line %" PRIiSIZE ": unexpected line format for '%s'\n", prog,
lineno, item);
exit(EXIT_FAILURE); exit(EXIT_FAILURE);
} }
char *ptr = strchr(line, '\n'); char *ptr = strchr(line, '\n');
@ -80,9 +77,7 @@ static bool valnum(char *line, const char *item, uint64_t *value) {
*value = strtoull(str, &end, 0); *value = strtoull(str, &end, 0);
if (end && *end) { if (end && *end) {
if (!quiet) if (!quiet)
fprintf(stderr, fprintf(stderr, "%s: line %" PRIiSIZE ": unexpected number format for '%s'\n", prog, lineno, item);
"%s: line %" PRIiSIZE ": unexpected number format for '%s'\n",
prog, lineno, item);
exit(EXIT_FAILURE); exit(EXIT_FAILURE);
} }
return true; return true;
@ -95,8 +90,7 @@ static bool valbool(char *line, const char *item, bool *value) {
if (u64 > 1) { if (u64 > 1) {
if (!quiet) if (!quiet)
fprintf(stderr, "%s: line %" PRIiSIZE ": unexpected value for '%s'\n", fprintf(stderr, "%s: line %" PRIiSIZE ": unexpected value for '%s'\n", prog, lineno, item);
prog, lineno, item);
exit(EXIT_FAILURE); exit(EXIT_FAILURE);
} }
*value = u64 != 0; *value = u64 != 0;
@ -129,11 +123,10 @@ typedef struct flagbit {
#define S(s) STRLENOF(s), s #define S(s) STRLENOF(s), s
flagbit dbflags[] = { flagbit dbflags[] = {{MDBX_REVERSEKEY, S("reversekey")}, {MDBX_DUPSORT, S("duplicates")},
{MDBX_REVERSEKEY, S("reversekey")}, {MDBX_DUPSORT, S("duplicates")}, {MDBX_DUPSORT, S("dupsort")}, {MDBX_INTEGERKEY, S("integerkey")},
{MDBX_DUPSORT, S("dupsort")}, {MDBX_INTEGERKEY, S("integerkey")}, {MDBX_DUPFIXED, S("dupfix")}, {MDBX_INTEGERDUP, S("integerdup")},
{MDBX_DUPFIXED, S("dupfix")}, {MDBX_INTEGERDUP, S("integerdup")}, {MDBX_REVERSEDUP, S("reversedup")}, {0, 0, nullptr}};
{MDBX_REVERSEDUP, S("reversedup")}, {0, 0, nullptr}};
static int readhdr(void) { static int readhdr(void) {
/* reset parameters */ /* reset parameters */
@ -158,10 +151,8 @@ static int readhdr(void) {
if (valnum(dbuf.iov_base, "VERSION", &u64)) { if (valnum(dbuf.iov_base, "VERSION", &u64)) {
if (u64 != 3) { if (u64 != 3) {
if (!quiet) if (!quiet)
fprintf(stderr, fprintf(stderr, "%s: line %" PRIiSIZE ": unsupported value %" PRIu64 " for %s\n", prog, lineno, u64,
"%s: line %" PRIiSIZE ": unsupported value %" PRIu64 "VERSION");
" for %s\n",
prog, lineno, u64, "VERSION");
exit(EXIT_FAILURE); exit(EXIT_FAILURE);
} }
continue; continue;
@ -170,16 +161,12 @@ static int readhdr(void) {
if (valnum(dbuf.iov_base, "db_pagesize", &u64)) { if (valnum(dbuf.iov_base, "db_pagesize", &u64)) {
if (!(mode & GLOBAL) && envinfo.mi_dxb_pagesize != u64) { if (!(mode & GLOBAL) && envinfo.mi_dxb_pagesize != u64) {
if (!quiet) if (!quiet)
fprintf(stderr, fprintf(stderr, "%s: line %" PRIiSIZE ": ignore value %" PRIu64 " for '%s' in non-global context\n", prog,
"%s: line %" PRIiSIZE ": ignore value %" PRIu64 lineno, u64, "db_pagesize");
" for '%s' in non-global context\n",
prog, lineno, u64, "db_pagesize");
} else if (u64 < MDBX_MIN_PAGESIZE || u64 > MDBX_MAX_PAGESIZE) { } else if (u64 < MDBX_MIN_PAGESIZE || u64 > MDBX_MAX_PAGESIZE) {
if (!quiet) if (!quiet)
fprintf(stderr, fprintf(stderr, "%s: line %" PRIiSIZE ": ignore unsupported value %" PRIu64 " for %s\n", prog, lineno, u64,
"%s: line %" PRIiSIZE ": ignore unsupported value %" PRIu64 "db_pagesize");
" for %s\n",
prog, lineno, u64, "db_pagesize");
} else } else
envinfo.mi_dxb_pagesize = (uint32_t)u64; envinfo.mi_dxb_pagesize = (uint32_t)u64;
continue; continue;
@ -196,9 +183,7 @@ static int readhdr(void) {
continue; continue;
} }
if (!quiet) if (!quiet)
fprintf(stderr, fprintf(stderr, "%s: line %" PRIiSIZE ": unsupported value '%s' for %s\n", prog, lineno, str, "format");
"%s: line %" PRIiSIZE ": unsupported value '%s' for %s\n", prog,
lineno, str, "format");
exit(EXIT_FAILURE); exit(EXIT_FAILURE);
} }
@ -220,9 +205,7 @@ static int readhdr(void) {
if (str) { if (str) {
if (strcmp(str, "btree") != 0) { if (strcmp(str, "btree") != 0) {
if (!quiet) if (!quiet)
fprintf(stderr, fprintf(stderr, "%s: line %" PRIiSIZE ": unsupported value '%s' for %s\n", prog, lineno, str, "type");
"%s: line %" PRIiSIZE ": unsupported value '%s' for %s\n",
prog, lineno, str, "type");
free(subname); free(subname);
exit(EXIT_FAILURE); exit(EXIT_FAILURE);
} }
@ -232,10 +215,8 @@ static int readhdr(void) {
if (valnum(dbuf.iov_base, "mapaddr", &u64)) { if (valnum(dbuf.iov_base, "mapaddr", &u64)) {
if (u64) { if (u64) {
if (!quiet) if (!quiet)
fprintf(stderr, fprintf(stderr, "%s: line %" PRIiSIZE ": ignore unsupported value 0x%" PRIx64 " for %s\n", prog, lineno, u64,
"%s: line %" PRIiSIZE ": ignore unsupported value 0x%" PRIx64 "mapaddr");
" for %s\n",
prog, lineno, u64, "mapaddr");
} }
continue; continue;
} }
@ -243,16 +224,12 @@ static int readhdr(void) {
if (valnum(dbuf.iov_base, "mapsize", &u64)) { if (valnum(dbuf.iov_base, "mapsize", &u64)) {
if (!(mode & GLOBAL)) { if (!(mode & GLOBAL)) {
if (!quiet) if (!quiet)
fprintf(stderr, fprintf(stderr, "%s: line %" PRIiSIZE ": ignore value %" PRIu64 " for '%s' in non-global context\n", prog,
"%s: line %" PRIiSIZE ": ignore value %" PRIu64 lineno, u64, "mapsize");
" for '%s' in non-global context\n",
prog, lineno, u64, "mapsize");
} else if (u64 < MIN_MAPSIZE || u64 > MAX_MAPSIZE64) { } else if (u64 < MIN_MAPSIZE || u64 > MAX_MAPSIZE64) {
if (!quiet) if (!quiet)
fprintf(stderr, fprintf(stderr, "%s: line %" PRIiSIZE ": ignore unsupported value 0x%" PRIx64 " for %s\n", prog, lineno, u64,
"%s: line %" PRIiSIZE ": ignore unsupported value 0x%" PRIx64 "mapsize");
" for %s\n",
prog, lineno, u64, "mapsize");
} else } else
envinfo.mi_mapsize = (size_t)u64; envinfo.mi_mapsize = (size_t)u64;
continue; continue;
@ -261,16 +238,12 @@ static int readhdr(void) {
if (valnum(dbuf.iov_base, "maxreaders", &u64)) { if (valnum(dbuf.iov_base, "maxreaders", &u64)) {
if (!(mode & GLOBAL)) { if (!(mode & GLOBAL)) {
if (!quiet) if (!quiet)
fprintf(stderr, fprintf(stderr, "%s: line %" PRIiSIZE ": ignore value %" PRIu64 " for '%s' in non-global context\n", prog,
"%s: line %" PRIiSIZE ": ignore value %" PRIu64 lineno, u64, "maxreaders");
" for '%s' in non-global context\n",
prog, lineno, u64, "maxreaders");
} else if (u64 < 1 || u64 > MDBX_READERS_LIMIT) { } else if (u64 < 1 || u64 > MDBX_READERS_LIMIT) {
if (!quiet) if (!quiet)
fprintf(stderr, fprintf(stderr, "%s: line %" PRIiSIZE ": ignore unsupported value 0x%" PRIx64 " for %s\n", prog, lineno, u64,
"%s: line %" PRIiSIZE ": ignore unsupported value 0x%" PRIx64 "maxreaders");
" for %s\n",
prog, lineno, u64, "maxreaders");
} else } else
envinfo.mi_maxreaders = (int)u64; envinfo.mi_maxreaders = (int)u64;
continue; continue;
@ -279,10 +252,8 @@ static int readhdr(void) {
if (valnum(dbuf.iov_base, "txnid", &u64)) { if (valnum(dbuf.iov_base, "txnid", &u64)) {
if (u64 < MIN_TXNID || u64 > MAX_TXNID) { if (u64 < MIN_TXNID || u64 > MAX_TXNID) {
if (!quiet) if (!quiet)
fprintf(stderr, fprintf(stderr, "%s: line %" PRIiSIZE ": ignore unsupported value 0x%" PRIx64 " for %s\n", prog, lineno, u64,
"%s: line %" PRIiSIZE ": ignore unsupported value 0x%" PRIx64 "txnid");
" for %s\n",
prog, lineno, u64, "txnid");
} else } else
txnid = u64; txnid = u64;
continue; continue;
@ -301,16 +272,11 @@ static int readhdr(void) {
"%s: line %" PRIiSIZE ": ignore values %s" "%s: line %" PRIiSIZE ": ignore values %s"
" for '%s' in non-global context\n", " for '%s' in non-global context\n",
prog, lineno, str, "geometry"); prog, lineno, str, "geometry");
} else if (sscanf(str, } else if (sscanf(str, "l%" PRIu64 ",c%" PRIu64 ",u%" PRIu64 ",s%" PRIu64 ",g%" PRIu64, &envinfo.mi_geo.lower,
"l%" PRIu64 ",c%" PRIu64 ",u%" PRIu64 ",s%" PRIu64 &envinfo.mi_geo.current, &envinfo.mi_geo.upper, &envinfo.mi_geo.shrink,
",g%" PRIu64,
&envinfo.mi_geo.lower, &envinfo.mi_geo.current,
&envinfo.mi_geo.upper, &envinfo.mi_geo.shrink,
&envinfo.mi_geo.grow) != 5) { &envinfo.mi_geo.grow) != 5) {
if (!quiet) if (!quiet)
fprintf(stderr, fprintf(stderr, "%s: line %" PRIiSIZE ": unexpected line format for '%s'\n", prog, lineno, "geometry");
"%s: line %" PRIiSIZE ": unexpected line format for '%s'\n",
prog, lineno, "geometry");
exit(EXIT_FAILURE); exit(EXIT_FAILURE);
} }
continue; continue;
@ -324,12 +290,10 @@ static int readhdr(void) {
"%s: line %" PRIiSIZE ": ignore values %s" "%s: line %" PRIiSIZE ": ignore values %s"
" for '%s' in non-global context\n", " for '%s' in non-global context\n",
prog, lineno, str, "canary"); prog, lineno, str, "canary");
} else if (sscanf(str, "v%" PRIu64 ",x%" PRIu64 ",y%" PRIu64 ",z%" PRIu64, } else if (sscanf(str, "v%" PRIu64 ",x%" PRIu64 ",y%" PRIu64 ",z%" PRIu64, &canary.v, &canary.x, &canary.y,
&canary.v, &canary.x, &canary.y, &canary.z) != 4) { &canary.z) != 4) {
if (!quiet) if (!quiet)
fprintf(stderr, fprintf(stderr, "%s: line %" PRIiSIZE ": unexpected line format for '%s'\n", prog, lineno, "canary");
"%s: line %" PRIiSIZE ": unexpected line format for '%s'\n",
prog, lineno, "canary");
exit(EXIT_FAILURE); exit(EXIT_FAILURE);
} }
continue; continue;
@ -353,9 +317,8 @@ static int readhdr(void) {
} }
if (!quiet) if (!quiet)
fprintf(stderr, fprintf(stderr, "%s: line %" PRIiSIZE ": unrecognized keyword ignored: %s\n", prog, lineno,
"%s: line %" PRIiSIZE ": unrecognized keyword ignored: %s\n", (char *)dbuf.iov_base);
prog, lineno, (char *)dbuf.iov_base);
next:; next:;
} }
return EOF; return EOF;
@ -363,8 +326,7 @@ static int readhdr(void) {
static int badend(void) { static int badend(void) {
if (!quiet) if (!quiet)
fprintf(stderr, "%s: line %" PRIiSIZE ": unexpected end of input\n", prog, fprintf(stderr, "%s: line %" PRIiSIZE ": unexpected end of input\n", prog, lineno);
lineno);
return errno ? errno : MDBX_ENODATA; return errno ? errno : MDBX_ENODATA;
} }
@ -416,9 +378,7 @@ __hot static int readline(MDBX_val *out, MDBX_val *buf) {
buf->iov_base = osal_realloc(buf->iov_base, buf->iov_len * 2); buf->iov_base = osal_realloc(buf->iov_base, buf->iov_len * 2);
if (!buf->iov_base) { if (!buf->iov_base) {
if (!quiet) if (!quiet)
fprintf(stderr, fprintf(stderr, "%s: line %" PRIiSIZE ": out of memory, line too long\n", prog, lineno);
"%s: line %" PRIiSIZE ": out of memory, line too long\n", prog,
lineno);
return MDBX_ENOMEM; return MDBX_ENOMEM;
} }
c1 = buf->iov_base; c1 = buf->iov_base;
@ -490,10 +450,7 @@ static void usage(void) {
} }
static int equal_or_greater(const MDBX_val *a, const MDBX_val *b) { static int equal_or_greater(const MDBX_val *a, const MDBX_val *b) {
return (a->iov_len == b->iov_len && return (a->iov_len == b->iov_len && memcmp(a->iov_base, b->iov_base, a->iov_len) == 0) ? 0 : 1;
memcmp(a->iov_base, b->iov_base, a->iov_len) == 0)
? 0
: 1;
} }
int main(int argc, char *argv[]) { int main(int argc, char *argv[]) {
@ -530,12 +487,9 @@ int main(int argc, char *argv[]) {
" - build: %s for %s by %s\n" " - build: %s for %s by %s\n"
" - flags: %s\n" " - flags: %s\n"
" - options: %s\n", " - options: %s\n",
mdbx_version.major, mdbx_version.minor, mdbx_version.patch, mdbx_version.major, mdbx_version.minor, mdbx_version.patch, mdbx_version.tweak, mdbx_version.git.describe,
mdbx_version.tweak, mdbx_version.git.describe, mdbx_version.git.datetime, mdbx_version.git.commit, mdbx_version.git.tree, mdbx_sourcery_anchor,
mdbx_version.git.datetime, mdbx_version.git.commit, mdbx_build.datetime, mdbx_build.target, mdbx_build.compiler, mdbx_build.flags, mdbx_build.options);
mdbx_version.git.tree, mdbx_sourcery_anchor, mdbx_build.datetime,
mdbx_build.target, mdbx_build.compiler, mdbx_build.flags,
mdbx_build.options);
return EXIT_SUCCESS; return EXIT_SUCCESS;
case 'a': case 'a':
putflags |= MDBX_APPEND; putflags |= MDBX_APPEND;
@ -543,8 +497,7 @@ int main(int argc, char *argv[]) {
case 'f': case 'f':
if (freopen(optarg, "r", stdin) == nullptr) { if (freopen(optarg, "r", stdin) == nullptr) {
if (!quiet) if (!quiet)
fprintf(stderr, "%s: %s: open: %s\n", prog, optarg, fprintf(stderr, "%s: %s: open: %s\n", prog, optarg, mdbx_strerror(errno));
mdbx_strerror(errno));
exit(EXIT_FAILURE); exit(EXIT_FAILURE);
} }
break; break;
@ -592,8 +545,7 @@ int main(int argc, char *argv[]) {
envname = argv[optind]; envname = argv[optind];
if (!quiet) if (!quiet)
printf("mdbx_load %s (%s, T-%s)\nRunning for %s...\n", printf("mdbx_load %s (%s, T-%s)\nRunning for %s...\n", mdbx_version.git.describe, mdbx_version.git.datetime,
mdbx_version.git.describe, mdbx_version.git.datetime,
mdbx_version.git.tree, envname); mdbx_version.git.tree, envname);
fflush(nullptr); fflush(nullptr);
@ -638,25 +590,22 @@ int main(int argc, char *argv[]) {
if (envinfo.mi_geo.current | envinfo.mi_mapsize) { if (envinfo.mi_geo.current | envinfo.mi_mapsize) {
if (envinfo.mi_geo.current) { if (envinfo.mi_geo.current) {
err = mdbx_env_set_geometry( err = mdbx_env_set_geometry(env, (intptr_t)envinfo.mi_geo.lower, (intptr_t)envinfo.mi_geo.current,
env, (intptr_t)envinfo.mi_geo.lower, (intptr_t)envinfo.mi_geo.current, (intptr_t)envinfo.mi_geo.upper, (intptr_t)envinfo.mi_geo.shrink,
(intptr_t)envinfo.mi_geo.upper, (intptr_t)envinfo.mi_geo.shrink, (intptr_t)envinfo.mi_geo.grow,
(intptr_t)envinfo.mi_geo.grow, envinfo.mi_dxb_pagesize ? (intptr_t)envinfo.mi_dxb_pagesize : -1);
envinfo.mi_dxb_pagesize ? (intptr_t)envinfo.mi_dxb_pagesize : -1);
} else { } else {
if (envinfo.mi_mapsize > MAX_MAPSIZE) { if (envinfo.mi_mapsize > MAX_MAPSIZE) {
if (!quiet) if (!quiet)
fprintf( fprintf(stderr,
stderr, "Database size is too large for current system (mapsize=%" PRIu64
"Database size is too large for current system (mapsize=%" PRIu64 " is great than system-limit %zu)\n",
" is great than system-limit %zu)\n", envinfo.mi_mapsize, (size_t)MAX_MAPSIZE);
envinfo.mi_mapsize, (size_t)MAX_MAPSIZE);
goto bailout; goto bailout;
} }
err = mdbx_env_set_geometry( err = mdbx_env_set_geometry(env, (intptr_t)envinfo.mi_mapsize, (intptr_t)envinfo.mi_mapsize,
env, (intptr_t)envinfo.mi_mapsize, (intptr_t)envinfo.mi_mapsize, (intptr_t)envinfo.mi_mapsize, 0, 0,
(intptr_t)envinfo.mi_mapsize, 0, 0, envinfo.mi_dxb_pagesize ? (intptr_t)envinfo.mi_dxb_pagesize : -1);
envinfo.mi_dxb_pagesize ? (intptr_t)envinfo.mi_dxb_pagesize : -1);
} }
if (unlikely(err != MDBX_SUCCESS)) { if (unlikely(err != MDBX_SUCCESS)) {
error("mdbx_env_set_geometry", err); error("mdbx_env_set_geometry", err);
@ -673,8 +622,7 @@ int main(int argc, char *argv[]) {
kbuf.iov_len = mdbx_env_get_maxvalsize_ex(env, 0) + (size_t)1; kbuf.iov_len = mdbx_env_get_maxvalsize_ex(env, 0) + (size_t)1;
if (kbuf.iov_len >= INTPTR_MAX / 2) { if (kbuf.iov_len >= INTPTR_MAX / 2) {
if (!quiet) if (!quiet)
fprintf(stderr, "mdbx_env_get_maxkeysize() failed, returns %zu\n", fprintf(stderr, "mdbx_env_get_maxkeysize() failed, returns %zu\n", kbuf.iov_len);
kbuf.iov_len);
goto bailout; goto bailout;
} }
@ -709,10 +657,9 @@ int main(int argc, char *argv[]) {
} }
const char *const dbi_name = subname ? subname : "@MAIN"; const char *const dbi_name = subname ? subname : "@MAIN";
err = err = mdbx_dbi_open_ex(txn, subname, dbi_flags | MDBX_CREATE, &dbi,
mdbx_dbi_open_ex(txn, subname, dbi_flags | MDBX_CREATE, &dbi, (putflags & MDBX_APPEND) ? equal_or_greater : nullptr,
(putflags & MDBX_APPEND) ? equal_or_greater : nullptr, (putflags & MDBX_APPEND) ? equal_or_greater : nullptr);
(putflags & MDBX_APPEND) ? equal_or_greater : nullptr);
if (unlikely(err != MDBX_SUCCESS)) { if (unlikely(err != MDBX_SUCCESS)) {
error("mdbx_dbi_open_ex", err); error("mdbx_dbi_open_ex", err);
goto bailout; goto bailout;
@ -726,9 +673,7 @@ int main(int argc, char *argv[]) {
} }
if (present_sequence > sequence) { if (present_sequence > sequence) {
if (!quiet) if (!quiet)
fprintf(stderr, fprintf(stderr, "present sequence for '%s' value (%" PRIu64 ") is greater than loaded (%" PRIu64 ")\n",
"present sequence for '%s' value (%" PRIu64
") is greater than loaded (%" PRIu64 ")\n",
dbi_name, present_sequence, sequence); dbi_name, present_sequence, sequence);
err = MDBX_RESULT_TRUE; err = MDBX_RESULT_TRUE;
goto bailout; goto bailout;
@ -750,8 +695,7 @@ int main(int argc, char *argv[]) {
} }
if (putflags & MDBX_APPEND) if (putflags & MDBX_APPEND)
putflags = (dbi_flags & MDBX_DUPSORT) ? putflags | MDBX_APPENDDUP putflags = (dbi_flags & MDBX_DUPSORT) ? putflags | MDBX_APPENDDUP : putflags & ~MDBX_APPENDDUP;
: putflags & ~MDBX_APPENDDUP;
err = mdbx_cursor_open(txn, dbi, &mc); err = mdbx_cursor_open(txn, dbi, &mc);
if (unlikely(err != MDBX_SUCCESS)) { if (unlikely(err != MDBX_SUCCESS)) {
@ -770,8 +714,7 @@ int main(int argc, char *argv[]) {
err = readline(&data, &dbuf); err = readline(&data, &dbuf);
if (err) { if (err) {
if (!quiet) if (!quiet)
fprintf(stderr, "%s: line %" PRIiSIZE ": failed to read key value\n", fprintf(stderr, "%s: line %" PRIiSIZE ": failed to read key value\n", prog, lineno);
prog, lineno);
goto bailout; goto bailout;
} }
@ -780,8 +723,7 @@ int main(int argc, char *argv[]) {
continue; continue;
if (err == MDBX_BAD_VALSIZE && rescue) { if (err == MDBX_BAD_VALSIZE && rescue) {
if (!quiet) if (!quiet)
fprintf(stderr, "%s: skip line %" PRIiSIZE ": due %s\n", prog, lineno, fprintf(stderr, "%s: skip line %" PRIiSIZE ": due %s\n", prog, lineno, mdbx_strerror(err));
mdbx_strerror(err));
continue; continue;
} }
if (unlikely(err != MDBX_SUCCESS)) { if (unlikely(err != MDBX_SUCCESS)) {

@ -61,27 +61,24 @@ static void usage(const char *prog) {
exit(EXIT_FAILURE); exit(EXIT_FAILURE);
} }
static int reader_list_func(void *ctx, int num, int slot, mdbx_pid_t pid, static int reader_list_func(void *ctx, int num, int slot, mdbx_pid_t pid, mdbx_tid_t thread, uint64_t txnid,
mdbx_tid_t thread, uint64_t txnid, uint64_t lag, uint64_t lag, size_t bytes_used, size_t bytes_retained) {
size_t bytes_used, size_t bytes_retained) {
(void)ctx; (void)ctx;
if (num == 1) if (num == 1)
printf("Reader Table\n" printf("Reader Table\n"
" #\tslot\t%6s %*s %20s %10s %13s %13s\n", " #\tslot\t%6s %*s %20s %10s %13s %13s\n",
"pid", (int)sizeof(size_t) * 2, "thread", "txnid", "lag", "used", "pid", (int)sizeof(size_t) * 2, "thread", "txnid", "lag", "used", "retained");
"retained");
if (thread < (mdbx_tid_t)((intptr_t)MDBX_TID_TXN_OUSTED)) if (thread < (mdbx_tid_t)((intptr_t)MDBX_TID_TXN_OUSTED))
printf(" %3d)\t[%d]\t%6" PRIdSIZE " %*" PRIxPTR, num, slot, (size_t)pid, printf(" %3d)\t[%d]\t%6" PRIdSIZE " %*" PRIxPTR, num, slot, (size_t)pid, (int)sizeof(size_t) * 2,
(int)sizeof(size_t) * 2, (uintptr_t)thread); (uintptr_t)thread);
else else
printf(" %3d)\t[%d]\t%6" PRIdSIZE " %sed", num, slot, (size_t)pid, printf(" %3d)\t[%d]\t%6" PRIdSIZE " %sed", num, slot, (size_t)pid,
(thread == (mdbx_tid_t)((uintptr_t)MDBX_TID_TXN_PARKED)) ? "park" (thread == (mdbx_tid_t)((uintptr_t)MDBX_TID_TXN_PARKED)) ? "park" : "oust");
: "oust");
if (txnid) if (txnid)
printf(" %20" PRIu64 " %10" PRIu64 " %12.1fM %12.1fM\n", txnid, lag, printf(" %20" PRIu64 " %10" PRIu64 " %12.1fM %12.1fM\n", txnid, lag, bytes_used / 1048576.0,
bytes_used / 1048576.0, bytes_retained / 1048576.0); bytes_retained / 1048576.0);
else else
printf(" %20s %10s %13s %13s\n", "-", "0", "0", "0"); printf(" %20s %10s %13s %13s\n", "-", "0", "0", "0");
@ -92,8 +89,7 @@ const char *prog;
bool quiet = false; bool quiet = false;
static void error(const char *func, int rc) { static void error(const char *func, int rc) {
if (!quiet) if (!quiet)
fprintf(stderr, "%s: %s() error %d %s\n", prog, func, rc, fprintf(stderr, "%s: %s() error %d %s\n", prog, func, rc, mdbx_strerror(rc));
mdbx_strerror(rc));
} }
int main(int argc, char *argv[]) { int main(int argc, char *argv[]) {
@ -129,12 +125,9 @@ int main(int argc, char *argv[]) {
" - build: %s for %s by %s\n" " - build: %s for %s by %s\n"
" - flags: %s\n" " - flags: %s\n"
" - options: %s\n", " - options: %s\n",
mdbx_version.major, mdbx_version.minor, mdbx_version.patch, mdbx_version.major, mdbx_version.minor, mdbx_version.patch, mdbx_version.tweak, mdbx_version.git.describe,
mdbx_version.tweak, mdbx_version.git.describe, mdbx_version.git.datetime, mdbx_version.git.commit, mdbx_version.git.tree, mdbx_sourcery_anchor,
mdbx_version.git.datetime, mdbx_version.git.commit, mdbx_build.datetime, mdbx_build.target, mdbx_build.compiler, mdbx_build.flags, mdbx_build.options);
mdbx_version.git.tree, mdbx_sourcery_anchor, mdbx_build.datetime,
mdbx_build.target, mdbx_build.compiler, mdbx_build.flags,
mdbx_build.options);
return EXIT_SUCCESS; return EXIT_SUCCESS;
case 'q': case 'q':
quiet = true; quiet = true;
@ -187,8 +180,7 @@ int main(int argc, char *argv[]) {
envname = argv[optind]; envname = argv[optind];
envname = argv[optind]; envname = argv[optind];
if (!quiet) { if (!quiet) {
printf("mdbx_stat %s (%s, T-%s)\nRunning for %s...\n", printf("mdbx_stat %s (%s, T-%s)\nRunning for %s...\n", mdbx_version.git.describe, mdbx_version.git.datetime,
mdbx_version.git.describe, mdbx_version.git.datetime,
mdbx_version.git.tree, envname); mdbx_version.git.tree, envname);
fflush(nullptr); fflush(nullptr);
} }
@ -232,39 +224,27 @@ int main(int argc, char *argv[]) {
if (pgop) { if (pgop) {
printf("Page Operations (for current session):\n"); printf("Page Operations (for current session):\n");
printf(" New: %8" PRIu64 "\t// quantity of a new pages added\n", printf(" New: %8" PRIu64 "\t// quantity of a new pages added\n", mei.mi_pgop_stat.newly);
mei.mi_pgop_stat.newly); printf(" CoW: %8" PRIu64 "\t// quantity of pages copied for altering\n", mei.mi_pgop_stat.cow);
printf(" CoW: %8" PRIu64
"\t// quantity of pages copied for altering\n",
mei.mi_pgop_stat.cow);
printf(" Clone: %8" PRIu64 "\t// quantity of parent's dirty pages " printf(" Clone: %8" PRIu64 "\t// quantity of parent's dirty pages "
"clones for nested transactions\n", "clones for nested transactions\n",
mei.mi_pgop_stat.clone); mei.mi_pgop_stat.clone);
printf(" Split: %8" PRIu64 printf(" Split: %8" PRIu64 "\t// page splits during insertions or updates\n", mei.mi_pgop_stat.split);
"\t// page splits during insertions or updates\n", printf(" Merge: %8" PRIu64 "\t// page merges during deletions or updates\n", mei.mi_pgop_stat.merge);
mei.mi_pgop_stat.split);
printf(" Merge: %8" PRIu64
"\t// page merges during deletions or updates\n",
mei.mi_pgop_stat.merge);
printf(" Spill: %8" PRIu64 "\t// quantity of spilled/ousted `dirty` " printf(" Spill: %8" PRIu64 "\t// quantity of spilled/ousted `dirty` "
"pages during large transactions\n", "pages during large transactions\n",
mei.mi_pgop_stat.spill); mei.mi_pgop_stat.spill);
printf(" Unspill: %8" PRIu64 "\t// quantity of unspilled/redone `dirty` " printf(" Unspill: %8" PRIu64 "\t// quantity of unspilled/redone `dirty` "
"pages during large transactions\n", "pages during large transactions\n",
mei.mi_pgop_stat.unspill); mei.mi_pgop_stat.unspill);
printf(" WOP: %8" PRIu64 printf(" WOP: %8" PRIu64 "\t// number of explicit write operations (not a pages) to a disk\n",
"\t// number of explicit write operations (not a pages) to a disk\n",
mei.mi_pgop_stat.wops); mei.mi_pgop_stat.wops);
printf(" PreFault: %8" PRIu64 printf(" PreFault: %8" PRIu64 "\t// number of prefault write operations (not a pages)\n",
"\t// number of prefault write operations (not a pages)\n",
mei.mi_pgop_stat.prefault); mei.mi_pgop_stat.prefault);
printf(" mInCore: %8" PRIu64 "\t// number of mincore() calls\n", printf(" mInCore: %8" PRIu64 "\t// number of mincore() calls\n", mei.mi_pgop_stat.mincore);
mei.mi_pgop_stat.mincore); printf(" mSync: %8" PRIu64 "\t// number of explicit msync-to-disk operations (not a pages)\n",
printf(" mSync: %8" PRIu64
"\t// number of explicit msync-to-disk operations (not a pages)\n",
mei.mi_pgop_stat.msync); mei.mi_pgop_stat.msync);
printf(" fSync: %8" PRIu64 printf(" fSync: %8" PRIu64 "\t// number of explicit fsync-to-disk operations (not a pages)\n",
"\t// number of explicit fsync-to-disk operations (not a pages)\n",
mei.mi_pgop_stat.fsync); mei.mi_pgop_stat.fsync);
} }
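The counters printed above can also be read programmatically via mdbx_env_info_ex(); below is a minimal sketch, where the "./testdb" path is a placeholder and the mi_pgop_stat field names are the ones used in the hunk above:

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include "mdbx.h"

int main(void) {
  MDBX_env *env = NULL;
  int rc = mdbx_env_create(&env);
  if (rc == MDBX_SUCCESS)
    /* open read-only, like mdbx_stat does; "./testdb" is a placeholder path */
    rc = mdbx_env_open(env, "./testdb", MDBX_RDONLY, 0);
  if (rc == MDBX_SUCCESS) {
    MDBX_envinfo info;
    rc = mdbx_env_info_ex(env, /* txn */ NULL, &info, sizeof(info));
    if (rc == MDBX_SUCCESS)
      /* a few of the page-operation counters shown by mdbx_stat above */
      printf("CoW: %" PRIu64 ", Split: %" PRIu64 ", Merge: %" PRIu64 "\n",
             info.mi_pgop_stat.cow, info.mi_pgop_stat.split, info.mi_pgop_stat.merge);
  }
  if (rc != MDBX_SUCCESS)
    fprintf(stderr, "mdbx: %s\n", mdbx_strerror(rc));
  if (env)
    mdbx_env_close(env);
  return rc == MDBX_SUCCESS ? EXIT_SUCCESS : EXIT_FAILURE;
}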
@ -272,18 +252,15 @@ int main(int argc, char *argv[]) {
printf("Environment Info\n"); printf("Environment Info\n");
printf(" Pagesize: %u\n", mei.mi_dxb_pagesize); printf(" Pagesize: %u\n", mei.mi_dxb_pagesize);
if (mei.mi_geo.lower != mei.mi_geo.upper) { if (mei.mi_geo.lower != mei.mi_geo.upper) {
printf(" Dynamic datafile: %" PRIu64 "..%" PRIu64 " bytes (+%" PRIu64 printf(" Dynamic datafile: %" PRIu64 "..%" PRIu64 " bytes (+%" PRIu64 "/-%" PRIu64 "), %" PRIu64 "..%" PRIu64
"/-%" PRIu64 "), %" PRIu64 "..%" PRIu64 " pages (+%" PRIu64 " pages (+%" PRIu64 "/-%" PRIu64 ")\n",
"/-%" PRIu64 ")\n", mei.mi_geo.lower, mei.mi_geo.upper, mei.mi_geo.grow, mei.mi_geo.shrink,
mei.mi_geo.lower, mei.mi_geo.upper, mei.mi_geo.grow, mei.mi_geo.lower / mei.mi_dxb_pagesize, mei.mi_geo.upper / mei.mi_dxb_pagesize,
mei.mi_geo.shrink, mei.mi_geo.lower / mei.mi_dxb_pagesize, mei.mi_geo.grow / mei.mi_dxb_pagesize, mei.mi_geo.shrink / mei.mi_dxb_pagesize);
mei.mi_geo.upper / mei.mi_dxb_pagesize, printf(" Current mapsize: %" PRIu64 " bytes, %" PRIu64 " pages \n", mei.mi_mapsize,
mei.mi_geo.grow / mei.mi_dxb_pagesize, mei.mi_mapsize / mei.mi_dxb_pagesize);
mei.mi_geo.shrink / mei.mi_dxb_pagesize); printf(" Current datafile: %" PRIu64 " bytes, %" PRIu64 " pages\n", mei.mi_geo.current,
printf(" Current mapsize: %" PRIu64 " bytes, %" PRIu64 " pages \n", mei.mi_geo.current / mei.mi_dxb_pagesize);
mei.mi_mapsize, mei.mi_mapsize / mei.mi_dxb_pagesize);
printf(" Current datafile: %" PRIu64 " bytes, %" PRIu64 " pages\n",
mei.mi_geo.current, mei.mi_geo.current / mei.mi_dxb_pagesize);
#if defined(_WIN32) || defined(_WIN64) #if defined(_WIN32) || defined(_WIN64)
if (mei.mi_geo.shrink && mei.mi_geo.current != mei.mi_geo.upper) if (mei.mi_geo.shrink && mei.mi_geo.current != mei.mi_geo.upper)
printf(" WARNING: Due Windows system limitations a " printf(" WARNING: Due Windows system limitations a "
@ -293,12 +270,11 @@ int main(int argc, char *argv[]) {
"until it will be closed or reopened in read-write mode.\n"); "until it will be closed or reopened in read-write mode.\n");
#endif #endif
} else { } else {
printf(" Fixed datafile: %" PRIu64 " bytes, %" PRIu64 " pages\n", printf(" Fixed datafile: %" PRIu64 " bytes, %" PRIu64 " pages\n", mei.mi_geo.current,
mei.mi_geo.current, mei.mi_geo.current / mei.mi_dxb_pagesize); mei.mi_geo.current / mei.mi_dxb_pagesize);
} }
printf(" Last transaction ID: %" PRIu64 "\n", mei.mi_recent_txnid); printf(" Last transaction ID: %" PRIu64 "\n", mei.mi_recent_txnid);
printf(" Latter reader transaction ID: %" PRIu64 " (%" PRIi64 ")\n", printf(" Latter reader transaction ID: %" PRIu64 " (%" PRIi64 ")\n", mei.mi_latter_reader_txnid,
mei.mi_latter_reader_txnid,
mei.mi_latter_reader_txnid - mei.mi_recent_txnid); mei.mi_latter_reader_txnid - mei.mi_recent_txnid);
printf(" Max readers: %u\n", mei.mi_maxreaders); printf(" Max readers: %u\n", mei.mi_maxreaders);
printf(" Number of reader slots uses: %u\n", mei.mi_numreaders); printf(" Number of reader slots uses: %u\n", mei.mi_numreaders);
@ -352,8 +328,7 @@ int main(int argc, char *argv[]) {
pgno_t pages = 0, *iptr; pgno_t pages = 0, *iptr;
pgno_t reclaimable = 0; pgno_t reclaimable = 0;
MDBX_val key, data; MDBX_val key, data;
while (MDBX_SUCCESS == while (MDBX_SUCCESS == (rc = mdbx_cursor_get(cursor, &key, &data, MDBX_NEXT))) {
(rc = mdbx_cursor_get(cursor, &key, &data, MDBX_NEXT))) {
if (user_break) { if (user_break) {
rc = MDBX_EINTR; rc = MDBX_EINTR;
break; break;
@ -367,29 +342,23 @@ int main(int argc, char *argv[]) {
if (freinfo > 1) { if (freinfo > 1) {
char *bad = ""; char *bad = "";
pgno_t prev = pgno_t prev = MDBX_PNL_ASCENDING ? NUM_METAS - 1 : (pgno_t)mei.mi_last_pgno + 1;
MDBX_PNL_ASCENDING ? NUM_METAS - 1 : (pgno_t)mei.mi_last_pgno + 1;
pgno_t span = 1; pgno_t span = 1;
for (unsigned i = 0; i < number; ++i) { for (unsigned i = 0; i < number; ++i) {
pgno_t pg = iptr[i]; pgno_t pg = iptr[i];
if (MDBX_PNL_DISORDERED(prev, pg)) if (MDBX_PNL_DISORDERED(prev, pg))
bad = " [bad sequence]"; bad = " [bad sequence]";
prev = pg; prev = pg;
while (i + span < number && while (i + span < number && iptr[i + span] == (MDBX_PNL_ASCENDING ? pgno_add(pg, span) : pgno_sub(pg, span)))
iptr[i + span] == (MDBX_PNL_ASCENDING ? pgno_add(pg, span)
: pgno_sub(pg, span)))
++span; ++span;
} }
printf(" Transaction %" PRIaTXN ", %" PRIaPGNO printf(" Transaction %" PRIaTXN ", %" PRIaPGNO " pages, maxspan %" PRIaPGNO "%s\n", *(txnid_t *)key.iov_base,
" pages, maxspan %" PRIaPGNO "%s\n", number, span, bad);
*(txnid_t *)key.iov_base, number, span, bad);
if (freinfo > 2) { if (freinfo > 2) {
for (unsigned i = 0; i < number; i += span) { for (unsigned i = 0; i < number; i += span) {
const pgno_t pg = iptr[i]; const pgno_t pg = iptr[i];
for (span = 1; for (span = 1;
i + span < number && i + span < number && iptr[i + span] == (MDBX_PNL_ASCENDING ? pgno_add(pg, span) : pgno_sub(pg, span));
iptr[i + span] == (MDBX_PNL_ASCENDING ? pgno_add(pg, span)
: pgno_sub(pg, span));
++span) ++span)
; ;
if (span > 1) if (span > 1)
@ -443,8 +412,7 @@ int main(int argc, char *argv[]) {
value = reclaimable; value = reclaimable;
printf(" Reclaimable: %" PRIu64 " %.1f%%\n", value, value / percent); printf(" Reclaimable: %" PRIu64 " %.1f%%\n", value, value / percent);
value = mei.mi_mapsize / mei.mi_dxb_pagesize - (mei.mi_last_pgno + 1) + value = mei.mi_mapsize / mei.mi_dxb_pagesize - (mei.mi_last_pgno + 1) + reclaimable;
reclaimable;
printf(" Available: %" PRIu64 " %.1f%%\n", value, value / percent); printf(" Available: %" PRIu64 " %.1f%%\n", value, value / percent);
} else } else
printf(" GC: %" PRIaPGNO " pages\n", pages); printf(" GC: %" PRIaPGNO " pages\n", pages);
@ -474,8 +442,7 @@ int main(int argc, char *argv[]) {
} }
MDBX_val key; MDBX_val key;
while (MDBX_SUCCESS == while (MDBX_SUCCESS == (rc = mdbx_cursor_get(cursor, &key, nullptr, MDBX_NEXT_NODUP))) {
(rc = mdbx_cursor_get(cursor, &key, nullptr, MDBX_NEXT_NODUP))) {
MDBX_dbi xdbi; MDBX_dbi xdbi;
if (memchr(key.iov_base, '\0', key.iov_len)) if (memchr(key.iov_base, '\0', key.iov_len))
continue; continue;

@ -11,12 +11,12 @@
#ifdef _MSC_VER #ifdef _MSC_VER
#pragma warning(push, 1) #pragma warning(push, 1)
#pragma warning(disable : 4548) /* expression before comma has no effect; \ #pragma warning(disable : 4548) /* expression before comma has no effect; \
expected expression with side - effect */ expected expression with side - effect */
#pragma warning(disable : 4530) /* C++ exception handler used, but unwind \ #pragma warning(disable : 4530) /* C++ exception handler used, but unwind \
* semantics are not enabled. Specify /EHsc */ * semantics are not enabled. Specify /EHsc */
#pragma warning(disable : 4577) /* 'noexcept' used with no exception handling \ #pragma warning(disable : 4577) /* 'noexcept' used with no exception handling \
* mode specified; termination on exception is \ * mode specified; termination on exception is \
* not guaranteed. Specify /EHsc */ * not guaranteed. Specify /EHsc */
#if !defined(_CRT_SECURE_NO_WARNINGS) #if !defined(_CRT_SECURE_NO_WARNINGS)
#define _CRT_SECURE_NO_WARNINGS #define _CRT_SECURE_NO_WARNINGS
@ -70,8 +70,7 @@ int getopt(int argc, char *const argv[], const char *opts) {
if (argv[optind][sp + 1] != '\0') if (argv[optind][sp + 1] != '\0')
optarg = &argv[optind++][sp + 1]; optarg = &argv[optind++][sp + 1];
else if (++optind >= argc) { else if (++optind >= argc) {
fprintf(stderr, "%s: %s -- %c\n", argv[0], "option requires an argument", fprintf(stderr, "%s: %s -- %c\n", argv[0], "option requires an argument", c);
c);
sp = 1; sp = 1;
return '?'; return '?';
} else } else

@ -5,8 +5,7 @@
#include "internals.h" #include "internals.h"
static MDBX_cursor *cursor_clone(const MDBX_cursor *csrc, static MDBX_cursor *cursor_clone(const MDBX_cursor *csrc, cursor_couple_t *couple) {
cursor_couple_t *couple) {
cASSERT(csrc, csrc->txn->txnid >= csrc->txn->env->lck->cached_oldest.weak); cASSERT(csrc, csrc->txn->txnid >= csrc->txn->env->lck->cached_oldest.weak);
couple->outer.next = nullptr; couple->outer.next = nullptr;
couple->outer.backup = nullptr; couple->outer.backup = nullptr;
@ -40,13 +39,10 @@ static MDBX_cursor *cursor_clone(const MDBX_cursor *csrc,
void recalculate_merge_thresholds(MDBX_env *env) { void recalculate_merge_thresholds(MDBX_env *env) {
const size_t bytes = page_space(env); const size_t bytes = page_space(env);
env->merge_threshold = env->merge_threshold = (uint16_t)(bytes - (bytes * env->options.merge_threshold_16dot16_percent >> 16));
(uint16_t)(bytes -
(bytes * env->options.merge_threshold_16dot16_percent >> 16));
env->merge_threshold_gc = env->merge_threshold_gc =
(uint16_t)(bytes - ((env->options.merge_threshold_16dot16_percent > 19005) (uint16_t)(bytes - ((env->options.merge_threshold_16dot16_percent > 19005) ? bytes / 3 /* 33 % */
? bytes / 3 /* 33 % */ : bytes / 4 /* 25 % */));
: bytes / 4 /* 25 % */));
} }
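The 16.16 fixed-point arithmetic re-wrapped above is easy to sanity-check in isolation: 65536 corresponds to 100 % (so the 19005 in the hunk is ≈ 29 %), and the threshold is the page space minus that share of it. A minimal stand-alone sketch, assuming exactly those semantics:

#include <stdint.h>
#include <stdio.h>

/* page_space_bytes stands in for page_space(env); fraction_16dot16 for
   options.merge_threshold_16dot16_percent (65536 == 100 %, assumed). */
static unsigned merge_threshold_sketch(size_t page_space_bytes, uint32_t fraction_16dot16) {
  return (unsigned)(page_space_bytes - (page_space_bytes * fraction_16dot16 >> 16));
}

int main(void) {
  /* A 25 % setting on a 4080-byte payload area gives 4080 - 1020 = 3060,
     i.e. pages with more than 3060 bytes of free room become merge candidates. */
  printf("%u\n", merge_threshold_sketch(4080, 65536 / 4));
  return 0;
}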
int tree_drop(MDBX_cursor *mc, const bool may_have_tables) { int tree_drop(MDBX_cursor *mc, const bool may_have_tables) {
@ -60,9 +56,8 @@ int tree_drop(MDBX_cursor *mc, const bool may_have_tables) {
if (!(may_have_tables | mc->tree->large_pages)) if (!(may_have_tables | mc->tree->large_pages))
cursor_pop(mc); cursor_pop(mc);
rc = pnl_need(&txn->tw.retired_pages, (size_t)mc->tree->branch_pages + rc = pnl_need(&txn->tw.retired_pages,
(size_t)mc->tree->leaf_pages + (size_t)mc->tree->branch_pages + (size_t)mc->tree->leaf_pages + (size_t)mc->tree->large_pages);
(size_t)mc->tree->large_pages);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
goto bailout; goto bailout;
@ -100,9 +95,7 @@ int tree_drop(MDBX_cursor *mc, const bool may_have_tables) {
cASSERT(mc, mc->top + 1 < mc->tree->height); cASSERT(mc, mc->top + 1 < mc->tree->height);
mc->checking |= z_retiring; mc->checking |= z_retiring;
const unsigned pagetype = (is_frozen(txn, mp) ? P_FROZEN : 0) + const unsigned pagetype = (is_frozen(txn, mp) ? P_FROZEN : 0) +
((mc->top + 2 == mc->tree->height) ((mc->top + 2 == mc->tree->height) ? (mc->checking & (P_LEAF | P_DUPFIX)) : P_BRANCH);
? (mc->checking & (P_LEAF | P_DUPFIX))
: P_BRANCH);
for (size_t i = 0; i < nkeys; i++) { for (size_t i = 0; i < nkeys; i++) {
node_t *node = page_node(mp, i); node_t *node = page_node(mp, i);
tASSERT(txn, (node_flags(node) & (N_BIG | N_TREE | N_DUP)) == 0); tASSERT(txn, (node_flags(node) & (N_BIG | N_TREE | N_DUP)) == 0);
@ -153,8 +146,7 @@ static int node_move(MDBX_cursor *csrc, MDBX_cursor *cdst, bool fromleft) {
cASSERT(csrc, csrc->top == cdst->top); cASSERT(csrc, csrc->top == cdst->top);
if (unlikely(page_type(psrc) != page_type(pdst))) { if (unlikely(page_type(psrc) != page_type(pdst))) {
bailout: bailout:
ERROR("Wrong or mismatch pages's types (src %d, dst %d) to move node", ERROR("Wrong or mismatch pages's types (src %d, dst %d) to move node", page_type(psrc), page_type(pdst));
page_type(psrc), page_type(pdst));
csrc->txn->flags |= MDBX_TXN_ERROR; csrc->txn->flags |= MDBX_TXN_ERROR;
return MDBX_PROBLEM; return MDBX_PROBLEM;
} }
@ -225,8 +217,7 @@ static int node_move(MDBX_cursor *csrc, MDBX_cursor *cdst, bool fromleft) {
mn->top = top; mn->top = top;
mn->ki[mn->top] = 0; mn->ki[mn->top] = 0;
const intptr_t delta = EVEN_CEIL(key.iov_len) - const intptr_t delta = EVEN_CEIL(key.iov_len) - EVEN_CEIL(node_ks(page_node(mn->pg[mn->top], 0)));
EVEN_CEIL(node_ks(page_node(mn->pg[mn->top], 0)));
const intptr_t needed = branch_size(cdst->txn->env, &key4move) + delta; const intptr_t needed = branch_size(cdst->txn->env, &key4move) + delta;
const intptr_t have = page_room(pdst); const intptr_t have = page_room(pdst);
if (unlikely(needed > have)) if (unlikely(needed > have))
@ -255,10 +246,8 @@ static int node_move(MDBX_cursor *csrc, MDBX_cursor *cdst, bool fromleft) {
pdst = cdst->pg[cdst->top]; pdst = cdst->pg[cdst->top];
} }
DEBUG("moving %s-node %u [%s] on page %" PRIaPGNO DEBUG("moving %s-node %u [%s] on page %" PRIaPGNO " to node %u on page %" PRIaPGNO, "branch", csrc->ki[csrc->top],
" to node %u on page %" PRIaPGNO, DKEY_DEBUG(&key4move), psrc->pgno, cdst->ki[cdst->top], pdst->pgno);
"branch", csrc->ki[csrc->top], DKEY_DEBUG(&key4move), psrc->pgno,
cdst->ki[cdst->top], pdst->pgno);
/* Add the node to the destination page. */ /* Add the node to the destination page. */
rc = node_add_branch(cdst, cdst->ki[cdst->top], &key4move, srcpg); rc = node_add_branch(cdst, cdst->ki[cdst->top], &key4move, srcpg);
} break; } break;
@ -275,13 +264,10 @@ static int node_move(MDBX_cursor *csrc, MDBX_cursor *cdst, bool fromleft) {
data.iov_base = node_data(srcnode); data.iov_base = node_data(srcnode);
key4move.iov_len = node_ks(srcnode); key4move.iov_len = node_ks(srcnode);
key4move.iov_base = node_key(srcnode); key4move.iov_base = node_key(srcnode);
DEBUG("moving %s-node %u [%s] on page %" PRIaPGNO DEBUG("moving %s-node %u [%s] on page %" PRIaPGNO " to node %u on page %" PRIaPGNO, "leaf", csrc->ki[csrc->top],
" to node %u on page %" PRIaPGNO, DKEY_DEBUG(&key4move), psrc->pgno, cdst->ki[cdst->top], pdst->pgno);
"leaf", csrc->ki[csrc->top], DKEY_DEBUG(&key4move), psrc->pgno,
cdst->ki[cdst->top], pdst->pgno);
/* Add the node to the destination page. */ /* Add the node to the destination page. */
rc = node_add_leaf(cdst, cdst->ki[cdst->top], &key4move, &data, rc = node_add_leaf(cdst, cdst->ki[cdst->top], &key4move, &data, node_flags(srcnode));
node_flags(srcnode));
} break; } break;
case P_LEAF | P_DUPFIX: { case P_LEAF | P_DUPFIX: {
@ -290,12 +276,9 @@ static int node_move(MDBX_cursor *csrc, MDBX_cursor *cdst, bool fromleft) {
return rc; return rc;
psrc = csrc->pg[csrc->top]; psrc = csrc->pg[csrc->top];
pdst = cdst->pg[cdst->top]; pdst = cdst->pg[cdst->top];
key4move = key4move = page_dupfix_key(psrc, csrc->ki[csrc->top], csrc->tree->dupfix_size);
page_dupfix_key(psrc, csrc->ki[csrc->top], csrc->tree->dupfix_size); DEBUG("moving %s-node %u [%s] on page %" PRIaPGNO " to node %u on page %" PRIaPGNO, "leaf2", csrc->ki[csrc->top],
DEBUG("moving %s-node %u [%s] on page %" PRIaPGNO DKEY_DEBUG(&key4move), psrc->pgno, cdst->ki[cdst->top], pdst->pgno);
" to node %u on page %" PRIaPGNO,
"leaf2", csrc->ki[csrc->top], DKEY_DEBUG(&key4move), psrc->pgno,
cdst->ki[cdst->top], pdst->pgno);
/* Add the node to the destination page. */ /* Add the node to the destination page. */
rc = node_add_dupfix(cdst, cdst->ki[cdst->top], &key4move); rc = node_add_dupfix(cdst, cdst->ki[cdst->top], &key4move);
} break; } break;
@ -329,13 +312,11 @@ static int node_move(MDBX_cursor *csrc, MDBX_cursor *cdst, bool fromleft) {
if (!is_related(csrc, m3)) if (!is_related(csrc, m3))
continue; continue;
if (m3 != cdst && m3->pg[csrc->top] == pdst && if (m3 != cdst && m3->pg[csrc->top] == pdst && m3->ki[csrc->top] >= cdst->ki[csrc->top]) {
m3->ki[csrc->top] >= cdst->ki[csrc->top]) {
m3->ki[csrc->top] += 1; m3->ki[csrc->top] += 1;
} }
if (/* m3 != csrc && */ m3->pg[csrc->top] == psrc && if (/* m3 != csrc && */ m3->pg[csrc->top] == psrc && m3->ki[csrc->top] == csrc->ki[csrc->top]) {
m3->ki[csrc->top] == csrc->ki[csrc->top]) {
m3->pg[csrc->top] = pdst; m3->pg[csrc->top] = pdst;
m3->ki[csrc->top] = cdst->ki[cdst->top]; m3->ki[csrc->top] = cdst->ki[cdst->top];
cASSERT(csrc, csrc->top > 0); cASSERT(csrc, csrc->top > 0);
@ -387,8 +368,7 @@ static int node_move(MDBX_cursor *csrc, MDBX_cursor *cdst, bool fromleft) {
key.iov_len = node_ks(srcnode); key.iov_len = node_ks(srcnode);
key.iov_base = node_key(srcnode); key.iov_base = node_key(srcnode);
} }
DEBUG("update separator for source page %" PRIaPGNO " to [%s]", DEBUG("update separator for source page %" PRIaPGNO " to [%s]", psrc->pgno, DKEY_DEBUG(&key));
psrc->pgno, DKEY_DEBUG(&key));
cursor_couple_t couple; cursor_couple_t couple;
MDBX_cursor *const mn = cursor_clone(csrc, &couple); MDBX_cursor *const mn = cursor_clone(csrc, &couple);
@ -423,8 +403,7 @@ static int node_move(MDBX_cursor *csrc, MDBX_cursor *cdst, bool fromleft) {
key.iov_len = node_ks(srcnode); key.iov_len = node_ks(srcnode);
key.iov_base = node_key(srcnode); key.iov_base = node_key(srcnode);
} }
DEBUG("update separator for destination page %" PRIaPGNO " to [%s]", DEBUG("update separator for destination page %" PRIaPGNO " to [%s]", pdst->pgno, DKEY_DEBUG(&key));
pdst->pgno, DKEY_DEBUG(&key));
cursor_couple_t couple; cursor_couple_t couple;
MDBX_cursor *const mn = cursor_clone(cdst, &couple); MDBX_cursor *const mn = cursor_clone(cdst, &couple);
cASSERT(cdst, mn->top > 0); cASSERT(cdst, mn->top > 0);
@ -465,12 +444,10 @@ static int page_merge(MDBX_cursor *csrc, MDBX_cursor *cdst) {
cASSERT(csrc, csrc->clc == cdst->clc && csrc->tree == cdst->tree); cASSERT(csrc, csrc->clc == cdst->clc && csrc->tree == cdst->tree);
cASSERT(csrc, csrc->top > 0); /* can't merge root page */ cASSERT(csrc, csrc->top > 0); /* can't merge root page */
cASSERT(cdst, cdst->top > 0); cASSERT(cdst, cdst->top > 0);
cASSERT(cdst, cdst->top + 1 < cdst->tree->height || cASSERT(cdst, cdst->top + 1 < cdst->tree->height || is_leaf(cdst->pg[cdst->tree->height - 1]));
is_leaf(cdst->pg[cdst->tree->height - 1])); cASSERT(csrc, csrc->top + 1 < csrc->tree->height || is_leaf(csrc->pg[csrc->tree->height - 1]));
cASSERT(csrc, csrc->top + 1 < csrc->tree->height || cASSERT(cdst,
is_leaf(csrc->pg[csrc->tree->height - 1])); csrc->txn->env->options.prefer_waf_insteadof_balance || page_room(pdst) >= page_used(cdst->txn->env, psrc));
cASSERT(cdst, csrc->txn->env->options.prefer_waf_insteadof_balance ||
page_room(pdst) >= page_used(cdst->txn->env, psrc));
const int pagetype = page_type(psrc); const int pagetype = page_type(psrc);
/* Move all nodes from src to dst */ /* Move all nodes from src to dst */
@ -560,10 +537,8 @@ static int page_merge(MDBX_cursor *csrc, MDBX_cursor *cdst) {
} }
pdst = cdst->pg[cdst->top]; pdst = cdst->pg[cdst->top];
DEBUG("dst page %" PRIaPGNO " now has %zu keys (%u.%u%% filled)", DEBUG("dst page %" PRIaPGNO " now has %zu keys (%u.%u%% filled)", pdst->pgno, page_numkeys(pdst),
pdst->pgno, page_numkeys(pdst), page_fill_percentum_x10(cdst->txn->env, pdst) / 10, page_fill_percentum_x10(cdst->txn->env, pdst) % 10);
page_fill_percentum_x10(cdst->txn->env, pdst) / 10,
page_fill_percentum_x10(cdst->txn->env, pdst) % 10);
cASSERT(csrc, psrc == csrc->pg[csrc->top]); cASSERT(csrc, psrc == csrc->pg[csrc->top]);
cASSERT(cdst, pdst == cdst->pg[cdst->top]); cASSERT(cdst, pdst == cdst->pg[cdst->top]);
@ -598,11 +573,8 @@ static int page_merge(MDBX_cursor *csrc, MDBX_cursor *cdst) {
m3->pg[csrc->top] = pdst; m3->pg[csrc->top] = pdst;
m3->ki[csrc->top] += (indx_t)dst_nkeys; m3->ki[csrc->top] += (indx_t)dst_nkeys;
m3->ki[csrc->top - 1] = cdst->ki[csrc->top - 1]; m3->ki[csrc->top - 1] = cdst->ki[csrc->top - 1];
} else if (m3->pg[csrc->top - 1] == csrc->pg[csrc->top - 1] && } else if (m3->pg[csrc->top - 1] == csrc->pg[csrc->top - 1] && m3->ki[csrc->top - 1] > csrc->ki[csrc->top - 1]) {
m3->ki[csrc->top - 1] > csrc->ki[csrc->top - 1]) { cASSERT(m3, m3->ki[csrc->top - 1] > 0 && m3->ki[csrc->top - 1] <= page_numkeys(m3->pg[csrc->top - 1]));
cASSERT(m3, m3->ki[csrc->top - 1] > 0 &&
m3->ki[csrc->top - 1] <=
page_numkeys(m3->pg[csrc->top - 1]));
m3->ki[csrc->top - 1] -= 1; m3->ki[csrc->top - 1] -= 1;
} }
@ -641,8 +613,7 @@ static int page_merge(MDBX_cursor *csrc, MDBX_cursor *cdst) {
if (is_leaf(cdst->pg[cdst->top])) { if (is_leaf(cdst->pg[cdst->top])) {
/* LY: don't touch cursor if top-page is a LEAF */ /* LY: don't touch cursor if top-page is a LEAF */
cASSERT(cdst, is_leaf(cdst->pg[cdst->top]) || cASSERT(cdst, is_leaf(cdst->pg[cdst->top]) || page_type(cdst->pg[cdst->top]) == pagetype);
page_type(cdst->pg[cdst->top]) == pagetype);
return MDBX_SUCCESS; return MDBX_SUCCESS;
} }
@ -656,8 +627,7 @@ static int page_merge(MDBX_cursor *csrc, MDBX_cursor *cdst) {
if (top_page == cdst->pg[cdst->top]) { if (top_page == cdst->pg[cdst->top]) {
/* LY: don't touch cursor if prev top-page already on the top */ /* LY: don't touch cursor if prev top-page already on the top */
cASSERT(cdst, cdst->ki[cdst->top] == top_indx); cASSERT(cdst, cdst->ki[cdst->top] == top_indx);
cASSERT(cdst, is_leaf(cdst->pg[cdst->top]) || cASSERT(cdst, is_leaf(cdst->pg[cdst->top]) || page_type(cdst->pg[cdst->top]) == pagetype);
page_type(cdst->pg[cdst->top]) == pagetype);
return MDBX_SUCCESS; return MDBX_SUCCESS;
} }
@ -671,18 +641,15 @@ static int page_merge(MDBX_cursor *csrc, MDBX_cursor *cdst) {
cASSERT(cdst, cdst->ki[new_top] == top_indx); cASSERT(cdst, cdst->ki[new_top] == top_indx);
/* LY: restore cursor stack */ /* LY: restore cursor stack */
cdst->top = (int8_t)new_top; cdst->top = (int8_t)new_top;
cASSERT(cdst, cdst->top + 1 < cdst->tree->height || cASSERT(cdst, cdst->top + 1 < cdst->tree->height || is_leaf(cdst->pg[cdst->tree->height - 1]));
is_leaf(cdst->pg[cdst->tree->height - 1])); cASSERT(cdst, is_leaf(cdst->pg[cdst->top]) || page_type(cdst->pg[cdst->top]) == pagetype);
cASSERT(cdst, is_leaf(cdst->pg[cdst->top]) ||
page_type(cdst->pg[cdst->top]) == pagetype);
return MDBX_SUCCESS; return MDBX_SUCCESS;
} }
page_t *const stub_page = (page_t *)(~(uintptr_t)top_page); page_t *const stub_page = (page_t *)(~(uintptr_t)top_page);
const indx_t stub_indx = top_indx; const indx_t stub_indx = top_indx;
if (save_height > cdst->tree->height && if (save_height > cdst->tree->height && ((cdst->pg[save_top] == top_page && cdst->ki[save_top] == top_indx) ||
((cdst->pg[save_top] == top_page && cdst->ki[save_top] == top_indx) || (cdst->pg[save_top] == stub_page && cdst->ki[save_top] == stub_indx))) {
(cdst->pg[save_top] == stub_page && cdst->ki[save_top] == stub_indx))) {
/* LY: restore cursor stack */ /* LY: restore cursor stack */
cdst->pg[new_top] = top_page; cdst->pg[new_top] = top_page;
cdst->ki[new_top] = top_indx; cdst->ki[new_top] = top_indx;
@ -691,10 +658,8 @@ static int page_merge(MDBX_cursor *csrc, MDBX_cursor *cdst) {
cdst->ki[new_top + 1] = INT16_MAX; cdst->ki[new_top + 1] = INT16_MAX;
#endif #endif
cdst->top = (int8_t)new_top; cdst->top = (int8_t)new_top;
cASSERT(cdst, cdst->top + 1 < cdst->tree->height || cASSERT(cdst, cdst->top + 1 < cdst->tree->height || is_leaf(cdst->pg[cdst->tree->height - 1]));
is_leaf(cdst->pg[cdst->tree->height - 1])); cASSERT(cdst, is_leaf(cdst->pg[cdst->top]) || page_type(cdst->pg[cdst->top]) == pagetype);
cASSERT(cdst, is_leaf(cdst->pg[cdst->top]) ||
page_type(cdst->pg[cdst->top]) == pagetype);
return MDBX_SUCCESS; return MDBX_SUCCESS;
} }
@ -707,8 +672,7 @@ bailout:
int tree_rebalance(MDBX_cursor *mc) { int tree_rebalance(MDBX_cursor *mc) {
cASSERT(mc, cursor_is_tracked(mc)); cASSERT(mc, cursor_is_tracked(mc));
cASSERT(mc, mc->top >= 0); cASSERT(mc, mc->top >= 0);
cASSERT(mc, mc->top + 1 < mc->tree->height || cASSERT(mc, mc->top + 1 < mc->tree->height || is_leaf(mc->pg[mc->tree->height - 1]));
is_leaf(mc->pg[mc->tree->height - 1]));
const page_t *const tp = mc->pg[mc->top]; const page_t *const tp = mc->pg[mc->top];
const uint8_t pagetype = page_type(tp); const uint8_t pagetype = page_type(tp);
@ -716,29 +680,22 @@ int tree_rebalance(MDBX_cursor *mc) {
const size_t minkeys = (pagetype & P_BRANCH) + (size_t)1; const size_t minkeys = (pagetype & P_BRANCH) + (size_t)1;
/* Pages emptier than this are candidates for merging. */ /* Pages emptier than this are candidates for merging. */
size_t room_threshold = likely(mc->tree != &mc->txn->dbs[FREE_DBI]) size_t room_threshold =
? mc->txn->env->merge_threshold likely(mc->tree != &mc->txn->dbs[FREE_DBI]) ? mc->txn->env->merge_threshold : mc->txn->env->merge_threshold_gc;
: mc->txn->env->merge_threshold_gc;
const size_t numkeys = page_numkeys(tp); const size_t numkeys = page_numkeys(tp);
const size_t room = page_room(tp); const size_t room = page_room(tp);
DEBUG("rebalancing %s page %" PRIaPGNO DEBUG("rebalancing %s page %" PRIaPGNO " (has %zu keys, fill %u.%u%%, used %zu, room %zu bytes)",
" (has %zu keys, fill %u.%u%%, used %zu, room %zu bytes)", is_leaf(tp) ? "leaf" : "branch", tp->pgno, numkeys, page_fill_percentum_x10(mc->txn->env, tp) / 10,
is_leaf(tp) ? "leaf" : "branch", tp->pgno, numkeys, page_fill_percentum_x10(mc->txn->env, tp) % 10, page_used(mc->txn->env, tp), room);
page_fill_percentum_x10(mc->txn->env, tp) / 10,
page_fill_percentum_x10(mc->txn->env, tp) % 10,
page_used(mc->txn->env, tp), room);
cASSERT(mc, is_modifable(mc->txn, tp)); cASSERT(mc, is_modifable(mc->txn, tp));
if (unlikely(numkeys < minkeys)) { if (unlikely(numkeys < minkeys)) {
DEBUG("page %" PRIaPGNO " must be merged due keys < %zu threshold", DEBUG("page %" PRIaPGNO " must be merged due keys < %zu threshold", tp->pgno, minkeys);
tp->pgno, minkeys);
} else if (unlikely(room > room_threshold)) { } else if (unlikely(room > room_threshold)) {
DEBUG("page %" PRIaPGNO " should be merged due room %zu > %zu threshold", DEBUG("page %" PRIaPGNO " should be merged due room %zu > %zu threshold", tp->pgno, room, room_threshold);
tp->pgno, room, room_threshold);
} else { } else {
DEBUG("no need to rebalance page %" PRIaPGNO ", room %zu < %zu threshold", DEBUG("no need to rebalance page %" PRIaPGNO ", room %zu < %zu threshold", tp->pgno, room, room_threshold);
tp->pgno, room, room_threshold);
cASSERT(mc, mc->tree->items > 0); cASSERT(mc, mc->tree->items > 0);
return MDBX_SUCCESS; return MDBX_SUCCESS;
} }
@ -752,11 +709,9 @@ int tree_rebalance(MDBX_cursor *mc) {
DEBUG("%s", "tree is completely empty"); DEBUG("%s", "tree is completely empty");
cASSERT(mc, is_leaf(mp)); cASSERT(mc, is_leaf(mp));
cASSERT(mc, (*cursor_dbi_state(mc) & DBI_DIRTY) != 0); cASSERT(mc, (*cursor_dbi_state(mc) & DBI_DIRTY) != 0);
cASSERT(mc, mc->tree->branch_pages == 0 && mc->tree->large_pages == 0 && cASSERT(mc, mc->tree->branch_pages == 0 && mc->tree->large_pages == 0 && mc->tree->leaf_pages == 1);
mc->tree->leaf_pages == 1);
/* Adjust cursors pointing to mp */ /* Adjust cursors pointing to mp */
for (MDBX_cursor *m2 = mc->txn->cursors[cursor_dbi(mc)]; m2; for (MDBX_cursor *m2 = mc->txn->cursors[cursor_dbi(mc)]; m2; m2 = m2->next) {
m2 = m2->next) {
MDBX_cursor *m3 = (mc->flags & z_inner) ? &m2->subcur->cursor : m2; MDBX_cursor *m3 = (mc->flags & z_inner) ? &m2->subcur->cursor : m2;
if (!is_poor(m3) && m3->pg[0] == mp) { if (!is_poor(m3) && m3->pg[0] == mp) {
be_poor(m3); be_poor(m3);
@ -790,8 +745,7 @@ int tree_rebalance(MDBX_cursor *mc) {
} }
/* Adjust other cursors pointing to mp */ /* Adjust other cursors pointing to mp */
for (MDBX_cursor *m2 = mc->txn->cursors[cursor_dbi(mc)]; m2; for (MDBX_cursor *m2 = mc->txn->cursors[cursor_dbi(mc)]; m2; m2 = m2->next) {
m2 = m2->next) {
MDBX_cursor *m3 = (mc->flags & z_inner) ? &m2->subcur->cursor : m2; MDBX_cursor *m3 = (mc->flags & z_inner) ? &m2->subcur->cursor : m2;
if (is_related(mc, m3) && m3->pg[0] == mp) { if (is_related(mc, m3) && m3->pg[0] == mp) {
for (intptr_t i = 0; i < mc->tree->height; i++) { for (intptr_t i = 0; i < mc->tree->height; i++) {
@ -801,14 +755,11 @@ int tree_rebalance(MDBX_cursor *mc) {
m3->top -= 1; m3->top -= 1;
} }
} }
cASSERT(mc, is_leaf(mc->pg[mc->top]) || cASSERT(mc, is_leaf(mc->pg[mc->top]) || page_type(mc->pg[mc->top]) == pagetype);
page_type(mc->pg[mc->top]) == pagetype); cASSERT(mc, mc->top + 1 < mc->tree->height || is_leaf(mc->pg[mc->tree->height - 1]));
cASSERT(mc, mc->top + 1 < mc->tree->height ||
is_leaf(mc->pg[mc->tree->height - 1]));
return page_retire(mc, mp); return page_retire(mc, mp);
} }
DEBUG("root page %" PRIaPGNO " doesn't need rebalancing (flags 0x%x)", DEBUG("root page %" PRIaPGNO " doesn't need rebalancing (flags 0x%x)", mp->pgno, mp->flags);
mp->pgno, mp->flags);
return MDBX_SUCCESS; return MDBX_SUCCESS;
} }
@ -829,17 +780,14 @@ int tree_rebalance(MDBX_cursor *mc) {
page_t *left = nullptr, *right = nullptr; page_t *left = nullptr, *right = nullptr;
if (mn->ki[pre_top] > 0) { if (mn->ki[pre_top] > 0) {
rc = rc = page_get(mn, node_pgno(page_node(mn->pg[pre_top], mn->ki[pre_top] - 1)), &left, mc->pg[mc->top]->txnid);
page_get(mn, node_pgno(page_node(mn->pg[pre_top], mn->ki[pre_top] - 1)),
&left, mc->pg[mc->top]->txnid);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
return rc; return rc;
cASSERT(mc, page_type(left) == page_type(mc->pg[mc->top])); cASSERT(mc, page_type(left) == page_type(mc->pg[mc->top]));
} }
if (mn->ki[pre_top] + (size_t)1 < page_numkeys(mn->pg[pre_top])) { if (mn->ki[pre_top] + (size_t)1 < page_numkeys(mn->pg[pre_top])) {
rc = page_get( rc = page_get(mn, node_pgno(page_node(mn->pg[pre_top], mn->ki[pre_top] + (size_t)1)), &right,
mn, node_pgno(page_node(mn->pg[pre_top], mn->ki[pre_top] + (size_t)1)), mc->pg[mc->top]->txnid);
&right, mc->pg[mc->top]->txnid);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
return rc; return rc;
cASSERT(mc, page_type(right) == page_type(mc->pg[mc->top])); cASSERT(mc, page_type(right) == page_type(mc->pg[mc->top]));
@ -857,8 +805,7 @@ int tree_rebalance(MDBX_cursor *mc) {
bool involve = !(left && right); bool involve = !(left && right);
retry: retry:
cASSERT(mc, mc->top > 0); cASSERT(mc, mc->top > 0);
if (left_room > room_threshold && left_room >= right_room && if (left_room > room_threshold && left_room >= right_room && (is_modifable(mc->txn, left) || involve)) {
(is_modifable(mc->txn, left) || involve)) {
/* try merge with left */ /* try merge with left */
cASSERT(mc, left_nkeys >= minkeys); cASSERT(mc, left_nkeys >= minkeys);
mn->pg[mn->top] = left; mn->pg[mn->top] = left;
@ -878,8 +825,7 @@ retry:
return rc; return rc;
} }
} }
if (right_room > room_threshold && if (right_room > room_threshold && (is_modifable(mc->txn, right) || involve)) {
(is_modifable(mc->txn, right) || involve)) {
/* try merge with right */ /* try merge with right */
cASSERT(mc, right_nkeys >= minkeys); cASSERT(mc, right_nkeys >= minkeys);
mn->pg[mn->top] = right; mn->pg[mn->top] = right;
@ -897,8 +843,7 @@ retry:
} }
} }
if (left_nkeys > minkeys && if (left_nkeys > minkeys && (right_nkeys <= left_nkeys || right_room >= left_room) &&
(right_nkeys <= left_nkeys || right_room >= left_room) &&
(is_modifable(mc->txn, left) || involve)) { (is_modifable(mc->txn, left) || involve)) {
/* try move from left */ /* try move from left */
mn->pg[mn->top] = left; mn->pg[mn->top] = left;
@ -939,16 +884,13 @@ retry:
return MDBX_SUCCESS; return MDBX_SUCCESS;
} }
if (mc->txn->env->options.prefer_waf_insteadof_balance && if (mc->txn->env->options.prefer_waf_insteadof_balance && likely(room_threshold > 0)) {
likely(room_threshold > 0)) {
room_threshold = 0; room_threshold = 0;
goto retry; goto retry;
} }
if (likely(!involve) && if (likely(!involve) &&
(likely(mc->tree != &mc->txn->dbs[FREE_DBI]) || mc->txn->tw.loose_pages || (likely(mc->tree != &mc->txn->dbs[FREE_DBI]) || mc->txn->tw.loose_pages || MDBX_PNL_GETSIZE(mc->txn->tw.relist) ||
MDBX_PNL_GETSIZE(mc->txn->tw.relist) || (mc->flags & z_gcu_preparation) || (mc->txn->flags & txn_gc_drained) || room_threshold)) {
(mc->flags & z_gcu_preparation) || (mc->txn->flags & txn_gc_drained) ||
room_threshold)) {
involve = true; involve = true;
goto retry; goto retry;
} }
@ -957,17 +899,14 @@ retry:
goto retry; goto retry;
} }
ERROR("Unable to merge/rebalance %s page %" PRIaPGNO ERROR("Unable to merge/rebalance %s page %" PRIaPGNO " (has %zu keys, fill %u.%u%%, used %zu, room %zu bytes)",
" (has %zu keys, fill %u.%u%%, used %zu, room %zu bytes)", is_leaf(tp) ? "leaf" : "branch", tp->pgno, numkeys, page_fill_percentum_x10(mc->txn->env, tp) / 10,
is_leaf(tp) ? "leaf" : "branch", tp->pgno, numkeys, page_fill_percentum_x10(mc->txn->env, tp) % 10, page_used(mc->txn->env, tp), room);
page_fill_percentum_x10(mc->txn->env, tp) / 10,
page_fill_percentum_x10(mc->txn->env, tp) % 10,
page_used(mc->txn->env, tp), room);
return MDBX_PROBLEM; return MDBX_PROBLEM;
} }
int page_split(MDBX_cursor *mc, const MDBX_val *const newkey, int page_split(MDBX_cursor *mc, const MDBX_val *const newkey, MDBX_val *const newdata, pgno_t newpgno,
MDBX_val *const newdata, pgno_t newpgno, const unsigned naf) { const unsigned naf) {
unsigned flags; unsigned flags;
int rc = MDBX_SUCCESS, foliage = 0; int rc = MDBX_SUCCESS, foliage = 0;
MDBX_env *const env = mc->txn->env; MDBX_env *const env = mc->txn->env;
@ -988,11 +927,8 @@ int page_split(MDBX_cursor *mc, const MDBX_val *const newkey,
STATIC_ASSERT(P_BRANCH == 1); STATIC_ASSERT(P_BRANCH == 1);
const size_t minkeys = (mp->flags & P_BRANCH) + (size_t)1; const size_t minkeys = (mp->flags & P_BRANCH) + (size_t)1;
DEBUG(">> splitting %s-page %" PRIaPGNO DEBUG(">> splitting %s-page %" PRIaPGNO " and adding %zu+%zu [%s] at %i, nkeys %zi", is_leaf(mp) ? "leaf" : "branch",
" and adding %zu+%zu [%s] at %i, nkeys %zi", mp->pgno, newkey->iov_len, newdata ? newdata->iov_len : 0, DKEY_DEBUG(newkey), mc->ki[mc->top], nkeys);
is_leaf(mp) ? "leaf" : "branch", mp->pgno, newkey->iov_len,
newdata ? newdata->iov_len : 0, DKEY_DEBUG(newkey), mc->ki[mc->top],
nkeys);
cASSERT(mc, nkeys + 1 >= minkeys * 2); cASSERT(mc, nkeys + 1 >= minkeys * 2);
/* Create a new sibling page. */ /* Create a new sibling page. */
@ -1057,10 +993,8 @@ int page_split(MDBX_cursor *mc, const MDBX_val *const newkey,
mn->ki[mn->top] = 0; mn->ki[mn->top] = 0;
mn->ki[prev_top] = mc->ki[prev_top] + 1; mn->ki[prev_top] = mc->ki[prev_top] + 1;
size_t split_indx = size_t split_indx = (newindx < nkeys) ? /* split at the middle */ (nkeys + 1) >> 1
(newindx < nkeys) : /* split at the end (i.e. like append-mode ) */ nkeys - minkeys + 1;
? /* split at the middle */ (nkeys + 1) >> 1
: /* split at the end (i.e. like append-mode ) */ nkeys - minkeys + 1;
eASSERT(env, split_indx >= minkeys && split_indx <= nkeys - minkeys + 1); eASSERT(env, split_indx >= minkeys && split_indx <= nkeys - minkeys + 1);
cASSERT(mc, !is_branch(mp) || newindx > 0); cASSERT(mc, !is_branch(mp) || newindx > 0);
@ -1094,11 +1028,9 @@ int page_split(MDBX_cursor *mc, const MDBX_val *const newkey,
if (foliage) { if (foliage) {
TRACE("pure-left: foliage %u, top %i, ptop %zu, split_indx %zi, " TRACE("pure-left: foliage %u, top %i, ptop %zu, split_indx %zi, "
"minkeys %zi, sepkey %s, parent-room %zu, need4split %zu", "minkeys %zi, sepkey %s, parent-room %zu, need4split %zu",
foliage, mc->top, prev_top, split_indx, minkeys, foliage, mc->top, prev_top, split_indx, minkeys, DKEY_DEBUG(&sepkey), page_room(mc->pg[prev_top]),
DKEY_DEBUG(&sepkey), page_room(mc->pg[prev_top]),
branch_size(env, &sepkey)); branch_size(env, &sepkey));
TRACE("pure-left: newkey %s, newdata %s, newindx %zu", TRACE("pure-left: newkey %s, newdata %s, newindx %zu", DKEY_DEBUG(newkey), DVAL_DEBUG(newdata), newindx);
DKEY_DEBUG(newkey), DVAL_DEBUG(newdata), newindx);
} }
} }
} }
@ -1112,8 +1044,7 @@ int page_split(MDBX_cursor *mc, const MDBX_val *const newkey,
sepkey = *newkey; sepkey = *newkey;
} else if (unlikely(pure_left)) { } else if (unlikely(pure_left)) {
/* newindx == split_indx == 0 */ /* newindx == split_indx == 0 */
TRACE("pure-left: no-split, but add new pure page at the %s", TRACE("pure-left: no-split, but add new pure page at the %s", "left/before");
"left/before");
cASSERT(mc, newindx == 0 && split_indx == 0 && minkeys == 1); cASSERT(mc, newindx == 0 && split_indx == 0 && minkeys == 1);
TRACE("pure-left: old-first-key is %s", DKEY_DEBUG(&sepkey)); TRACE("pure-left: old-first-key is %s", DKEY_DEBUG(&sepkey));
} else { } else {
@ -1139,8 +1070,7 @@ int page_split(MDBX_cursor *mc, const MDBX_val *const newkey,
void *const ins = page_dupfix_ptr(mp, mc->ki[mc->top], ksize); void *const ins = page_dupfix_ptr(mp, mc->ki[mc->top], ksize);
memcpy(sister->entries, split, rsize); memcpy(sister->entries, split, rsize);
sepkey.iov_base = sister->entries; sepkey.iov_base = sister->entries;
memmove(ptr_disp(ins, ksize), ins, memmove(ptr_disp(ins, ksize), ins, (split_indx - mc->ki[mc->top]) * ksize);
(split_indx - mc->ki[mc->top]) * ksize);
memcpy(ins, newkey->iov_base, ksize); memcpy(ins, newkey->iov_base, ksize);
cASSERT(mc, UINT16_MAX - mp->lower >= (int)sizeof(indx_t)); cASSERT(mc, UINT16_MAX - mp->lower >= (int)sizeof(indx_t));
mp->lower += sizeof(indx_t); mp->lower += sizeof(indx_t);
@ -1151,16 +1081,14 @@ int page_split(MDBX_cursor *mc, const MDBX_val *const newkey,
memcpy(sister->entries, split, distance * ksize); memcpy(sister->entries, split, distance * ksize);
void *const ins = page_dupfix_ptr(sister, distance, ksize); void *const ins = page_dupfix_ptr(sister, distance, ksize);
memcpy(ins, newkey->iov_base, ksize); memcpy(ins, newkey->iov_base, ksize);
memcpy(ptr_disp(ins, ksize), ptr_disp(split, distance * ksize), memcpy(ptr_disp(ins, ksize), ptr_disp(split, distance * ksize), rsize - distance * ksize);
rsize - distance * ksize);
cASSERT(mc, UINT16_MAX - sister->lower >= (int)sizeof(indx_t)); cASSERT(mc, UINT16_MAX - sister->lower >= (int)sizeof(indx_t));
sister->lower += sizeof(indx_t); sister->lower += sizeof(indx_t);
cASSERT(mc, sister->upper >= ksize - sizeof(indx_t)); cASSERT(mc, sister->upper >= ksize - sizeof(indx_t));
sister->upper -= (indx_t)(ksize - sizeof(indx_t)); sister->upper -= (indx_t)(ksize - sizeof(indx_t));
cASSERT(mc, distance <= (int)UINT16_MAX); cASSERT(mc, distance <= (int)UINT16_MAX);
mc->ki[mc->top] = (indx_t)distance; mc->ki[mc->top] = (indx_t)distance;
cASSERT(mc, cASSERT(mc, (((ksize & page_numkeys(sister)) ^ sister->upper) & 1) == 0);
(((ksize & page_numkeys(sister)) ^ sister->upper) & 1) == 0);
} }
if (AUDIT_ENABLED()) { if (AUDIT_ENABLED()) {
@ -1180,8 +1108,7 @@ int page_split(MDBX_cursor *mc, const MDBX_val *const newkey,
} }
const size_t max_space = page_space(env); const size_t max_space = page_space(env);
const size_t new_size = is_leaf(mp) ? leaf_size(env, newkey, newdata) const size_t new_size = is_leaf(mp) ? leaf_size(env, newkey, newdata) : branch_size(env, newkey);
: branch_size(env, newkey);
/* prepare to insert */ /* prepare to insert */
size_t i = 0; size_t i = 0;
@ -1218,8 +1145,7 @@ int page_split(MDBX_cursor *mc, const MDBX_val *const newkey,
split_indx += mp->flags & P_BRANCH; split_indx += mp->flags & P_BRANCH;
} }
eASSERT(env, split_indx >= minkeys && split_indx <= nkeys + 1 - minkeys); eASSERT(env, split_indx >= minkeys && split_indx <= nkeys + 1 - minkeys);
const size_t dim_nodes = const size_t dim_nodes = (newindx >= split_indx) ? split_indx : nkeys - split_indx;
(newindx >= split_indx) ? split_indx : nkeys - split_indx;
const size_t dim_used = (sizeof(indx_t) + NODESIZE + 1) * dim_nodes; const size_t dim_used = (sizeof(indx_t) + NODESIZE + 1) * dim_nodes;
if (new_size >= dim_used) { if (new_size >= dim_used) {
/* Search for best acceptable split point */ /* Search for best acceptable split point */
@ -1239,15 +1165,13 @@ int page_split(MDBX_cursor *mc, const MDBX_val *const newkey,
node_t *node = ptr_disp(mp, tmp_ki_copy->entries[i] + PAGEHDRSZ); node_t *node = ptr_disp(mp, tmp_ki_copy->entries[i] + PAGEHDRSZ);
size = NODESIZE + node_ks(node) + sizeof(indx_t); size = NODESIZE + node_ks(node) + sizeof(indx_t);
if (is_leaf(mp)) if (is_leaf(mp))
size += size += (node_flags(node) & N_BIG) ? sizeof(pgno_t) : node_ds(node);
(node_flags(node) & N_BIG) ? sizeof(pgno_t) : node_ds(node);
size = EVEN_CEIL(size); size = EVEN_CEIL(size);
} }
before += size; before += size;
after -= size; after -= size;
TRACE("step %zu, size %zu, before %zu, after %zu, max %zu", i, size, TRACE("step %zu, size %zu, before %zu, after %zu, max %zu", i, size, before, after, max_space);
before, after, max_space);
if (before <= max_space && after <= max_space) { if (before <= max_space && after <= max_space) {
const size_t split = i + (dir > 0); const size_t split = i + (dir > 0);
@ -1271,8 +1195,7 @@ int page_split(MDBX_cursor *mc, const MDBX_val *const newkey,
sepkey = *newkey; sepkey = *newkey;
if (split_indx != newindx) { if (split_indx != newindx) {
node_t *node = node_t *node = ptr_disp(mp, tmp_ki_copy->entries[split_indx] + PAGEHDRSZ);
ptr_disp(mp, tmp_ki_copy->entries[split_indx] + PAGEHDRSZ);
sepkey.iov_len = node_ks(node); sepkey.iov_len = node_ks(node);
sepkey.iov_base = node_key(node); sepkey.iov_base = node_key(node);
} }
@ -1308,8 +1231,7 @@ int page_split(MDBX_cursor *mc, const MDBX_val *const newkey,
/* Right page might now have changed parent. /* Right page might now have changed parent.
* Check if left page also changed parent. */ * Check if left page also changed parent. */
if (mn->pg[prev_top] != mc->pg[prev_top] && if (mn->pg[prev_top] != mc->pg[prev_top] && mc->ki[prev_top] >= page_numkeys(mc->pg[prev_top])) {
mc->ki[prev_top] >= page_numkeys(mc->pg[prev_top])) {
for (intptr_t i = 0; i < prev_top; i++) { for (intptr_t i = 0; i < prev_top; i++) {
mc->pg[i] = mn->pg[i]; mc->pg[i] = mn->pg[i];
mc->ki[i] = mn->ki[i]; mc->ki[i] = mn->ki[i];
@ -1334,14 +1256,11 @@ int page_split(MDBX_cursor *mc, const MDBX_val *const newkey,
page_t *ptop_page = mc->pg[prev_top]; page_t *ptop_page = mc->pg[prev_top];
TRACE("pure-left: adding to parent page %u node[%u] left-leaf page #%u key " TRACE("pure-left: adding to parent page %u node[%u] left-leaf page #%u key "
"%s", "%s",
ptop_page->pgno, mc->ki[prev_top], sister->pgno, ptop_page->pgno, mc->ki[prev_top], sister->pgno, DKEY(mc->ki[prev_top] ? newkey : nullptr));
DKEY(mc->ki[prev_top] ? newkey : nullptr));
assert(mc->top == prev_top + 1); assert(mc->top == prev_top + 1);
mc->top = (uint8_t)prev_top; mc->top = (uint8_t)prev_top;
rc = node_add_branch(mc, mc->ki[prev_top], rc = node_add_branch(mc, mc->ki[prev_top], mc->ki[prev_top] ? newkey : nullptr, sister->pgno);
mc->ki[prev_top] ? newkey : nullptr, sister->pgno); cASSERT(mc, mp == mc->pg[prev_top + 1] && newindx == mc->ki[prev_top + 1] && prev_top == mc->top);
cASSERT(mc, mp == mc->pg[prev_top + 1] && newindx == mc->ki[prev_top + 1] &&
prev_top == mc->top);
if (likely(rc == MDBX_SUCCESS) && mc->ki[prev_top] == 0) { if (likely(rc == MDBX_SUCCESS) && mc->ki[prev_top] == 0) {
node_t *node = page_node(mc->pg[prev_top], 1); node_t *node = page_node(mc->pg[prev_top], 1);
@ -1351,12 +1270,10 @@ int page_split(MDBX_cursor *mc, const MDBX_val *const newkey,
mc->ki[prev_top] = 1; mc->ki[prev_top] = 1;
rc = tree_propagate_key(mc, &sepkey); rc = tree_propagate_key(mc, &sepkey);
cASSERT(mc, mc->top == prev_top && mc->ki[prev_top] == 1); cASSERT(mc, mc->top == prev_top && mc->ki[prev_top] == 1);
cASSERT(mc, cASSERT(mc, mp == mc->pg[prev_top + 1] && newindx == mc->ki[prev_top + 1]);
mp == mc->pg[prev_top + 1] && newindx == mc->ki[prev_top + 1]);
mc->ki[prev_top] = 0; mc->ki[prev_top] = 0;
} else { } else {
TRACE("pure-left: no-need-update prev-first key on parent %s", TRACE("pure-left: no-need-update prev-first key on parent %s", DKEY(&sepkey));
DKEY(&sepkey));
} }
mc->top++; mc->top++;
@ -1367,8 +1284,7 @@ int page_split(MDBX_cursor *mc, const MDBX_val *const newkey,
cASSERT(mc, node_pgno(node) == mp->pgno && mc->pg[prev_top] == ptop_page); cASSERT(mc, node_pgno(node) == mp->pgno && mc->pg[prev_top] == ptop_page);
} else { } else {
mn->top -= 1; mn->top -= 1;
TRACE("add-to-parent the right-entry[%u] for new sibling-page", TRACE("add-to-parent the right-entry[%u] for new sibling-page", mn->ki[prev_top]);
mn->ki[prev_top]);
rc = node_add_branch(mn, mn->ki[prev_top], &sepkey, sister->pgno); rc = node_add_branch(mn, mn->ki[prev_top], &sepkey, sister->pgno);
mn->top += 1; mn->top += 1;
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
@ -1403,8 +1319,8 @@ int page_split(MDBX_cursor *mc, const MDBX_val *const newkey,
sepkey = get_key(page_node(mc->pg[mc->top - i], mc->ki[mc->top - i])); sepkey = get_key(page_node(mc->pg[mc->top - i], mc->ki[mc->top - i]));
if (mc->clc->k.cmp(newkey, &sepkey) < 0) { if (mc->clc->k.cmp(newkey, &sepkey) < 0) {
mc->top -= (int8_t)i; mc->top -= (int8_t)i;
DEBUG("pure-left: update new-first on parent [%i] page %u key %s", DEBUG("pure-left: update new-first on parent [%i] page %u key %s", mc->ki[mc->top], mc->pg[mc->top]->pgno,
mc->ki[mc->top], mc->pg[mc->top]->pgno, DKEY(newkey)); DKEY(newkey));
rc = tree_propagate_key(mc, newkey); rc = tree_propagate_key(mc, newkey);
mc->top += (int8_t)i; mc->top += (int8_t)i;
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
@ -1474,16 +1390,14 @@ int page_split(MDBX_cursor *mc, const MDBX_val *const newkey,
} }
} while (ii != split_indx); } while (ii != split_indx);
TRACE("ii %zu, nkeys %zu, n %zu, pgno #%u", ii, nkeys, n, TRACE("ii %zu, nkeys %zu, n %zu, pgno #%u", ii, nkeys, n, mc->pg[mc->top]->pgno);
mc->pg[mc->top]->pgno);
nkeys = page_numkeys(tmp_ki_copy); nkeys = page_numkeys(tmp_ki_copy);
for (size_t i = 0; i < nkeys; i++) for (size_t i = 0; i < nkeys; i++)
mp->entries[i] = tmp_ki_copy->entries[i]; mp->entries[i] = tmp_ki_copy->entries[i];
mp->lower = tmp_ki_copy->lower; mp->lower = tmp_ki_copy->lower;
mp->upper = tmp_ki_copy->upper; mp->upper = tmp_ki_copy->upper;
memcpy(page_node(mp, nkeys - 1), page_node(tmp_ki_copy, nkeys - 1), memcpy(page_node(mp, nkeys - 1), page_node(tmp_ki_copy, nkeys - 1), env->ps - tmp_ki_copy->upper - PAGEHDRSZ);
env->ps - tmp_ki_copy->upper - PAGEHDRSZ);
/* reset back to original page */ /* reset back to original page */
if (newindx < split_indx) { if (newindx < split_indx) {
@ -1492,8 +1406,7 @@ int page_split(MDBX_cursor *mc, const MDBX_val *const newkey,
mc->pg[mc->top] = sister; mc->pg[mc->top] = sister;
mc->ki[prev_top]++; mc->ki[prev_top]++;
/* Make sure ki is still valid. */ /* Make sure ki is still valid. */
if (mn->pg[prev_top] != mc->pg[prev_top] && if (mn->pg[prev_top] != mc->pg[prev_top] && mc->ki[prev_top] >= page_numkeys(mc->pg[prev_top])) {
mc->ki[prev_top] >= page_numkeys(mc->pg[prev_top])) {
for (intptr_t i = 0; i <= prev_top; i++) { for (intptr_t i = 0; i <= prev_top; i++) {
mc->pg[i] = mn->pg[i]; mc->pg[i] = mn->pg[i];
mc->ki[i] = mn->ki[i]; mc->ki[i] = mn->ki[i];
@ -1504,8 +1417,7 @@ int page_split(MDBX_cursor *mc, const MDBX_val *const newkey,
mc->pg[mc->top] = sister; mc->pg[mc->top] = sister;
mc->ki[prev_top]++; mc->ki[prev_top]++;
/* Make sure ki is still valid. */ /* Make sure ki is still valid. */
if (mn->pg[prev_top] != mc->pg[prev_top] && if (mn->pg[prev_top] != mc->pg[prev_top] && mc->ki[prev_top] >= page_numkeys(mc->pg[prev_top])) {
mc->ki[prev_top] >= page_numkeys(mc->pg[prev_top])) {
for (intptr_t i = 0; i <= prev_top; i++) { for (intptr_t i = 0; i <= prev_top; i++) {
mc->pg[i] = mn->pg[i]; mc->pg[i] = mn->pg[i];
mc->ki[i] = mn->ki[i]; mc->ki[i] = mn->ki[i];
@ -1545,16 +1457,14 @@ int page_split(MDBX_cursor *mc, const MDBX_val *const newkey,
m3->pg[i] = mn->pg[i]; m3->pg[i] = mn->pg[i];
} }
} }
} else if (!did_split_parent && m3->top >= prev_top && } else if (!did_split_parent && m3->top >= prev_top && m3->pg[prev_top] == mc->pg[prev_top] &&
m3->pg[prev_top] == mc->pg[prev_top] &&
m3->ki[prev_top] >= mc->ki[prev_top]) { m3->ki[prev_top] >= mc->ki[prev_top]) {
m3->ki[prev_top]++; /* also for the `pure-left` case */ m3->ki[prev_top]++; /* also for the `pure-left` case */
} }
if (inner_pointed(m3) && is_leaf(mp)) if (inner_pointed(m3) && is_leaf(mp))
cursor_inner_refresh(m3, m3->pg[mc->top], m3->ki[mc->top]); cursor_inner_refresh(m3, m3->pg[mc->top], m3->ki[mc->top]);
} }
TRACE("mp #%u left: %zd, sister #%u left: %zd", mp->pgno, page_room(mp), TRACE("mp #%u left: %zd, sister #%u left: %zd", mp->pgno, page_room(mp), sister->pgno, page_room(sister));
sister->pgno, page_room(sister));
done: done:
if (tmp_ki_copy) if (tmp_ki_copy)
@ -1596,8 +1506,8 @@ int tree_propagate_key(MDBX_cursor *mc, const MDBX_val *key) {
MDBX_val k2; MDBX_val k2;
k2.iov_base = node_key(node); k2.iov_base = node_key(node);
k2.iov_len = node_ks(node); k2.iov_len = node_ks(node);
DEBUG("update key %zi (offset %zu) [%s] to [%s] on page %" PRIaPGNO, indx, DEBUG("update key %zi (offset %zu) [%s] to [%s] on page %" PRIaPGNO, indx, ptr, DVAL_DEBUG(&k2), DKEY_DEBUG(key),
ptr, DVAL_DEBUG(&k2), DKEY_DEBUG(key), mp->pgno); mp->pgno);
#endif /* MDBX_DEBUG */ #endif /* MDBX_DEBUG */
/* Sizes must be 2-byte aligned. */ /* Sizes must be 2-byte aligned. */

View File

@ -6,8 +6,7 @@
static inline size_t txl_size2bytes(const size_t size) { static inline size_t txl_size2bytes(const size_t size) {
assert(size > 0 && size <= txl_max * 2); assert(size > 0 && size <= txl_max * 2);
size_t bytes = size_t bytes =
ceil_powerof2(MDBX_ASSUME_MALLOC_OVERHEAD + sizeof(txnid_t) * (size + 2), ceil_powerof2(MDBX_ASSUME_MALLOC_OVERHEAD + sizeof(txnid_t) * (size + 2), txl_granulate * sizeof(txnid_t)) -
txl_granulate * sizeof(txnid_t)) -
MDBX_ASSUME_MALLOC_OVERHEAD; MDBX_ASSUME_MALLOC_OVERHEAD;
return bytes; return bytes;
} }
@ -38,11 +37,9 @@ MDBX_INTERNAL void txl_free(txl_t txl) {
osal_free(txl - 1); osal_free(txl - 1);
} }
MDBX_INTERNAL int txl_reserve(txl_t __restrict *__restrict ptxl, MDBX_INTERNAL int txl_reserve(txl_t __restrict *__restrict ptxl, const size_t wanna) {
const size_t wanna) {
const size_t allocated = (size_t)MDBX_PNL_ALLOCLEN(*ptxl); const size_t allocated = (size_t)MDBX_PNL_ALLOCLEN(*ptxl);
assert(MDBX_PNL_GETSIZE(*ptxl) <= txl_max && assert(MDBX_PNL_GETSIZE(*ptxl) <= txl_max && MDBX_PNL_ALLOCLEN(*ptxl) >= MDBX_PNL_GETSIZE(*ptxl));
MDBX_PNL_ALLOCLEN(*ptxl) >= MDBX_PNL_GETSIZE(*ptxl));
if (likely(allocated >= wanna)) if (likely(allocated >= wanna))
return MDBX_SUCCESS; return MDBX_SUCCESS;
@ -51,9 +48,7 @@ MDBX_INTERNAL int txl_reserve(txl_t __restrict *__restrict ptxl,
return MDBX_TXN_FULL; return MDBX_TXN_FULL;
} }
-const size_t size = (wanna + wanna - allocated < txl_max)
-                        ? wanna + wanna - allocated
-                        : txl_max;
+const size_t size = (wanna + wanna - allocated < txl_max) ? wanna + wanna - allocated : txl_max;
size_t bytes = txl_size2bytes(size); size_t bytes = txl_size2bytes(size);
txl_t txl = osal_realloc(*ptxl - 1, bytes); txl_t txl = osal_realloc(*ptxl - 1, bytes);
if (likely(txl)) { if (likely(txl)) {
@ -68,14 +63,11 @@ MDBX_INTERNAL int txl_reserve(txl_t __restrict *__restrict ptxl,
return MDBX_ENOMEM; return MDBX_ENOMEM;
} }
-static __always_inline int __must_check_result
-txl_need(txl_t __restrict *__restrict ptxl, size_t num) {
-  assert(MDBX_PNL_GETSIZE(*ptxl) <= txl_max &&
-         MDBX_PNL_ALLOCLEN(*ptxl) >= MDBX_PNL_GETSIZE(*ptxl));
+static __always_inline int __must_check_result txl_need(txl_t __restrict *__restrict ptxl, size_t num) {
+  assert(MDBX_PNL_GETSIZE(*ptxl) <= txl_max && MDBX_PNL_ALLOCLEN(*ptxl) >= MDBX_PNL_GETSIZE(*ptxl));
  assert(num <= PAGELIST_LIMIT);
  const size_t wanna = (size_t)MDBX_PNL_GETSIZE(*ptxl) + num;
-  return likely(MDBX_PNL_ALLOCLEN(*ptxl) >= wanna) ? MDBX_SUCCESS
-                                                   : txl_reserve(ptxl, wanna);
+  return likely(MDBX_PNL_ALLOCLEN(*ptxl) >= wanna) ? MDBX_SUCCESS : txl_reserve(ptxl, wanna);
} }
static __always_inline void txl_xappend(txl_t __restrict txl, txnid_t id) { static __always_inline void txl_xappend(txl_t __restrict txl, txnid_t id) {
@ -86,12 +78,9 @@ static __always_inline void txl_xappend(txl_t __restrict txl, txnid_t id) {
#define TXNID_SORT_CMP(first, last) ((first) > (last)) #define TXNID_SORT_CMP(first, last) ((first) > (last))
SORT_IMPL(txnid_sort, false, txnid_t, TXNID_SORT_CMP) SORT_IMPL(txnid_sort, false, txnid_t, TXNID_SORT_CMP)
MDBX_INTERNAL void txl_sort(txl_t txl) { MDBX_INTERNAL void txl_sort(txl_t txl) { txnid_sort(MDBX_PNL_BEGIN(txl), MDBX_PNL_END(txl)); }
txnid_sort(MDBX_PNL_BEGIN(txl), MDBX_PNL_END(txl));
}
MDBX_INTERNAL int __must_check_result txl_append(txl_t __restrict *ptxl, MDBX_INTERNAL int __must_check_result txl_append(txl_t __restrict *ptxl, txnid_t id) {
txnid_t id) {
if (unlikely(MDBX_PNL_GETSIZE(*ptxl) == MDBX_PNL_ALLOCLEN(*ptxl))) { if (unlikely(MDBX_PNL_GETSIZE(*ptxl) == MDBX_PNL_ALLOCLEN(*ptxl))) {
int rc = txl_need(ptxl, txl_granulate); int rc = txl_need(ptxl, txl_granulate);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
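The txl_* helpers above size the transaction-ID list in granules of txl_granulate entries while budgeting for malloc bookkeeping. Below is a minimal standalone sketch of that sizing arithmetic; ASSUMED_MALLOC_OVERHEAD and GRANULATE are local stand-ins for the library's constants, not the real definitions.

/* Standalone sketch (not the library code): round the requested capacity up to the
 * allocation granule while leaving room for an assumed malloc bookkeeping overhead. */
#include <stdint.h>
#include <stdio.h>

#define ASSUMED_MALLOC_OVERHEAD (4 * sizeof(void *)) /* stand-in for MDBX_ASSUME_MALLOC_OVERHEAD */
#define GRANULATE 32                                 /* stand-in for txl_granulate */

typedef uint64_t txnid_t;

static size_t ceil_powerof2(size_t value, size_t granularity) {
  /* granularity must be a power of two */
  return (value + granularity - 1) & ~(granularity - 1);
}

static size_t txl_size2bytes_sketch(size_t size) {
  /* the "+ 2" mirrors the formula above (room for the list's header cells) */
  return ceil_powerof2(ASSUMED_MALLOC_OVERHEAD + sizeof(txnid_t) * (size + 2),
                       GRANULATE * sizeof(txnid_t)) -
         ASSUMED_MALLOC_OVERHEAD;
}

int main(void) {
  for (size_t want = 1; want <= 1024; want *= 4)
    printf("want %4zu ids -> allocate %6zu bytes\n", want, txl_size2bytes_sketch(want));
  return 0;
}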

View File

@ -11,8 +11,7 @@ typedef const txnid_t *const_txl_t;
enum txl_rules { enum txl_rules {
txl_granulate = 32, txl_granulate = 32,
txl_initial = txl_initial = txl_granulate - 2 - MDBX_ASSUME_MALLOC_OVERHEAD / sizeof(txnid_t),
txl_granulate - 2 - MDBX_ASSUME_MALLOC_OVERHEAD / sizeof(txnid_t),
txl_max = (1u << 26) - 2 - MDBX_ASSUME_MALLOC_OVERHEAD / sizeof(txnid_t) txl_max = (1u << 26) - 2 - MDBX_ASSUME_MALLOC_OVERHEAD / sizeof(txnid_t)
}; };
@ -20,7 +19,6 @@ MDBX_INTERNAL txl_t txl_alloc(void);
MDBX_INTERNAL void txl_free(txl_t txl); MDBX_INTERNAL void txl_free(txl_t txl);
MDBX_INTERNAL int __must_check_result txl_append(txl_t __restrict *ptxl, MDBX_INTERNAL int __must_check_result txl_append(txl_t __restrict *ptxl, txnid_t id);
txnid_t id);
MDBX_INTERNAL void txl_sort(txl_t txl); MDBX_INTERNAL void txl_sort(txl_t txl);

539 src/txn.c: file diff suppressed because it is too large.

View File

@ -6,22 +6,17 @@
/*------------------------------------------------------------------------------ /*------------------------------------------------------------------------------
* Unaligned access */ * Unaligned access */
MDBX_NOTHROW_CONST_FUNCTION MDBX_MAYBE_UNUSED static inline size_t MDBX_NOTHROW_CONST_FUNCTION MDBX_MAYBE_UNUSED static inline size_t field_alignment(size_t alignment_baseline,
field_alignment(size_t alignment_baseline, size_t field_offset) { size_t field_offset) {
size_t merge = alignment_baseline | (size_t)field_offset; size_t merge = alignment_baseline | (size_t)field_offset;
return merge & -(int)merge; return merge & -(int)merge;
} }
/* read-thunk for UB-sanitizer */
-MDBX_NOTHROW_PURE_FUNCTION static inline uint8_t
-peek_u8(const uint8_t *__restrict ptr) {
-  return *ptr;
-}
+MDBX_NOTHROW_PURE_FUNCTION static inline uint8_t peek_u8(const uint8_t *__restrict ptr) { return *ptr; }
/* write-thunk for UB-sanitizer */
-static inline void poke_u8(uint8_t *__restrict ptr, const uint8_t v) {
-  *ptr = v;
-}
+static inline void poke_u8(uint8_t *__restrict ptr, const uint8_t v) { *ptr = v; }
static inline void *bcopy_2(void *__restrict dst, const void *__restrict src) { static inline void *bcopy_2(void *__restrict dst, const void *__restrict src) {
uint8_t *__restrict d = (uint8_t *)dst; uint8_t *__restrict d = (uint8_t *)dst;
@ -31,8 +26,7 @@ static inline void *bcopy_2(void *__restrict dst, const void *__restrict src) {
return d; return d;
} }
static inline void *bcopy_4(void *const __restrict dst, static inline void *bcopy_4(void *const __restrict dst, const void *const __restrict src) {
const void *const __restrict src) {
uint8_t *__restrict d = (uint8_t *)dst; uint8_t *__restrict d = (uint8_t *)dst;
const uint8_t *__restrict s = (uint8_t *)src; const uint8_t *__restrict s = (uint8_t *)src;
d[0] = s[0]; d[0] = s[0];
@ -42,8 +36,7 @@ static inline void *bcopy_4(void *const __restrict dst,
return d; return d;
} }
static inline void *bcopy_8(void *const __restrict dst, static inline void *bcopy_8(void *const __restrict dst, const void *const __restrict src) {
const void *const __restrict src) {
uint8_t *__restrict d = (uint8_t *)dst; uint8_t *__restrict d = (uint8_t *)dst;
const uint8_t *__restrict s = (uint8_t *)src; const uint8_t *__restrict s = (uint8_t *)src;
d[0] = s[0]; d[0] = s[0];
@ -57,14 +50,13 @@ static inline void *bcopy_8(void *const __restrict dst,
return d; return d;
} }
-MDBX_NOTHROW_PURE_FUNCTION static inline uint16_t
-unaligned_peek_u16(const size_t expected_alignment, const void *const ptr) {
+MDBX_NOTHROW_PURE_FUNCTION static inline uint16_t unaligned_peek_u16(const size_t expected_alignment,
+                                                                     const void *const ptr) {
assert((uintptr_t)ptr % expected_alignment == 0); assert((uintptr_t)ptr % expected_alignment == 0);
if (MDBX_UNALIGNED_OK >= 2 || (expected_alignment % sizeof(uint16_t)) == 0) if (MDBX_UNALIGNED_OK >= 2 || (expected_alignment % sizeof(uint16_t)) == 0)
return *(const uint16_t *)ptr; return *(const uint16_t *)ptr;
else { else {
#if defined(__unaligned) || defined(_M_ARM) || defined(_M_ARM64) || \ #if defined(__unaligned) || defined(_M_ARM) || defined(_M_ARM64) || defined(_M_X64) || defined(_M_IA64)
defined(_M_X64) || defined(_M_IA64)
return *(const __unaligned uint16_t *)ptr; return *(const __unaligned uint16_t *)ptr;
#else #else
uint16_t v; uint16_t v;
@ -74,15 +66,12 @@ unaligned_peek_u16(const size_t expected_alignment, const void *const ptr) {
} }
} }
static inline void unaligned_poke_u16(const size_t expected_alignment, static inline void unaligned_poke_u16(const size_t expected_alignment, void *const __restrict ptr, const uint16_t v) {
void *const __restrict ptr,
const uint16_t v) {
assert((uintptr_t)ptr % expected_alignment == 0); assert((uintptr_t)ptr % expected_alignment == 0);
if (MDBX_UNALIGNED_OK >= 2 || (expected_alignment % sizeof(v)) == 0) if (MDBX_UNALIGNED_OK >= 2 || (expected_alignment % sizeof(v)) == 0)
*(uint16_t *)ptr = v; *(uint16_t *)ptr = v;
else { else {
#if defined(__unaligned) || defined(_M_ARM) || defined(_M_ARM64) || \ #if defined(__unaligned) || defined(_M_ARM) || defined(_M_ARM64) || defined(_M_X64) || defined(_M_IA64)
defined(_M_X64) || defined(_M_IA64)
*((uint16_t __unaligned *)ptr) = v; *((uint16_t __unaligned *)ptr) = v;
#else #else
bcopy_2((uint8_t *)ptr, (const uint8_t *)&v); bcopy_2((uint8_t *)ptr, (const uint8_t *)&v);
@ -90,21 +79,17 @@ static inline void unaligned_poke_u16(const size_t expected_alignment,
} }
} }
MDBX_NOTHROW_PURE_FUNCTION static inline uint32_t MDBX_NOTHROW_PURE_FUNCTION static inline uint32_t unaligned_peek_u32(const size_t expected_alignment,
unaligned_peek_u32(const size_t expected_alignment, const void *const __restrict ptr) {
const void *const __restrict ptr) {
assert((uintptr_t)ptr % expected_alignment == 0); assert((uintptr_t)ptr % expected_alignment == 0);
if (MDBX_UNALIGNED_OK >= 4 || (expected_alignment % sizeof(uint32_t)) == 0) if (MDBX_UNALIGNED_OK >= 4 || (expected_alignment % sizeof(uint32_t)) == 0)
return *(const uint32_t *)ptr; return *(const uint32_t *)ptr;
else if ((expected_alignment % sizeof(uint16_t)) == 0) { else if ((expected_alignment % sizeof(uint16_t)) == 0) {
-const uint16_t lo =
-    ((const uint16_t *)ptr)[__BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__];
-const uint16_t hi =
-    ((const uint16_t *)ptr)[__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__];
+const uint16_t lo = ((const uint16_t *)ptr)[__BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__];
+const uint16_t hi = ((const uint16_t *)ptr)[__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__];
return lo | (uint32_t)hi << 16; return lo | (uint32_t)hi << 16;
} else { } else {
#if defined(__unaligned) || defined(_M_ARM) || defined(_M_ARM64) || \ #if defined(__unaligned) || defined(_M_ARM) || defined(_M_ARM64) || defined(_M_X64) || defined(_M_IA64)
defined(_M_X64) || defined(_M_IA64)
return *(const __unaligned uint32_t *)ptr; return *(const __unaligned uint32_t *)ptr;
#else #else
uint32_t v; uint32_t v;
@ -114,19 +99,15 @@ unaligned_peek_u32(const size_t expected_alignment,
} }
} }
static inline void unaligned_poke_u32(const size_t expected_alignment, static inline void unaligned_poke_u32(const size_t expected_alignment, void *const __restrict ptr, const uint32_t v) {
void *const __restrict ptr,
const uint32_t v) {
assert((uintptr_t)ptr % expected_alignment == 0); assert((uintptr_t)ptr % expected_alignment == 0);
if (MDBX_UNALIGNED_OK >= 4 || (expected_alignment % sizeof(v)) == 0) if (MDBX_UNALIGNED_OK >= 4 || (expected_alignment % sizeof(v)) == 0)
*(uint32_t *)ptr = v; *(uint32_t *)ptr = v;
else if ((expected_alignment % sizeof(uint16_t)) == 0) { else if ((expected_alignment % sizeof(uint16_t)) == 0) {
((uint16_t *)ptr)[__BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__] = (uint16_t)v; ((uint16_t *)ptr)[__BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__] = (uint16_t)v;
((uint16_t *)ptr)[__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__] = ((uint16_t *)ptr)[__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__] = (uint16_t)(v >> 16);
(uint16_t)(v >> 16);
} else { } else {
#if defined(__unaligned) || defined(_M_ARM) || defined(_M_ARM64) || \ #if defined(__unaligned) || defined(_M_ARM) || defined(_M_ARM64) || defined(_M_X64) || defined(_M_IA64)
defined(_M_X64) || defined(_M_IA64)
*((uint32_t __unaligned *)ptr) = v; *((uint32_t __unaligned *)ptr) = v;
#else #else
bcopy_4((uint8_t *)ptr, (const uint8_t *)&v); bcopy_4((uint8_t *)ptr, (const uint8_t *)&v);
@ -134,21 +115,17 @@ static inline void unaligned_poke_u32(const size_t expected_alignment,
} }
} }
MDBX_NOTHROW_PURE_FUNCTION static inline uint64_t MDBX_NOTHROW_PURE_FUNCTION static inline uint64_t unaligned_peek_u64(const size_t expected_alignment,
unaligned_peek_u64(const size_t expected_alignment, const void *const __restrict ptr) {
const void *const __restrict ptr) {
assert((uintptr_t)ptr % expected_alignment == 0); assert((uintptr_t)ptr % expected_alignment == 0);
if (MDBX_UNALIGNED_OK >= 8 || (expected_alignment % sizeof(uint64_t)) == 0) if (MDBX_UNALIGNED_OK >= 8 || (expected_alignment % sizeof(uint64_t)) == 0)
return *(const uint64_t *)ptr; return *(const uint64_t *)ptr;
else if ((expected_alignment % sizeof(uint32_t)) == 0) { else if ((expected_alignment % sizeof(uint32_t)) == 0) {
const uint32_t lo = const uint32_t lo = ((const uint32_t *)ptr)[__BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__];
((const uint32_t *)ptr)[__BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__]; const uint32_t hi = ((const uint32_t *)ptr)[__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__];
const uint32_t hi =
((const uint32_t *)ptr)[__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__];
return lo | (uint64_t)hi << 32; return lo | (uint64_t)hi << 32;
} else { } else {
#if defined(__unaligned) || defined(_M_ARM) || defined(_M_ARM64) || \ #if defined(__unaligned) || defined(_M_ARM) || defined(_M_ARM64) || defined(_M_X64) || defined(_M_IA64)
defined(_M_X64) || defined(_M_IA64)
return *(const __unaligned uint64_t *)ptr; return *(const __unaligned uint64_t *)ptr;
#else #else
uint64_t v; uint64_t v;
@ -158,40 +135,32 @@ unaligned_peek_u64(const size_t expected_alignment,
} }
} }
static inline uint64_t static inline uint64_t unaligned_peek_u64_volatile(const size_t expected_alignment,
unaligned_peek_u64_volatile(const size_t expected_alignment, const volatile void *const __restrict ptr) {
const volatile void *const __restrict ptr) {
assert((uintptr_t)ptr % expected_alignment == 0); assert((uintptr_t)ptr % expected_alignment == 0);
assert(expected_alignment % sizeof(uint32_t) == 0); assert(expected_alignment % sizeof(uint32_t) == 0);
if (MDBX_UNALIGNED_OK >= 8 || (expected_alignment % sizeof(uint64_t)) == 0) if (MDBX_UNALIGNED_OK >= 8 || (expected_alignment % sizeof(uint64_t)) == 0)
return *(const volatile uint64_t *)ptr; return *(const volatile uint64_t *)ptr;
else { else {
#if defined(__unaligned) || defined(_M_ARM) || defined(_M_ARM64) || \ #if defined(__unaligned) || defined(_M_ARM) || defined(_M_ARM64) || defined(_M_X64) || defined(_M_IA64)
defined(_M_X64) || defined(_M_IA64)
return *(const volatile __unaligned uint64_t *)ptr; return *(const volatile __unaligned uint64_t *)ptr;
#else #else
-const uint32_t lo = ((const volatile uint32_t *)
-                         ptr)[__BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__];
-const uint32_t hi = ((const volatile uint32_t *)
-                         ptr)[__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__];
+const uint32_t lo = ((const volatile uint32_t *)ptr)[__BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__];
+const uint32_t hi = ((const volatile uint32_t *)ptr)[__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__];
return lo | (uint64_t)hi << 32; return lo | (uint64_t)hi << 32;
#endif /* _MSC_VER || __unaligned */ #endif /* _MSC_VER || __unaligned */
} }
} }
static inline void unaligned_poke_u64(const size_t expected_alignment, static inline void unaligned_poke_u64(const size_t expected_alignment, void *const __restrict ptr, const uint64_t v) {
void *const __restrict ptr,
const uint64_t v) {
assert((uintptr_t)ptr % expected_alignment == 0); assert((uintptr_t)ptr % expected_alignment == 0);
if (MDBX_UNALIGNED_OK >= 8 || (expected_alignment % sizeof(v)) == 0) if (MDBX_UNALIGNED_OK >= 8 || (expected_alignment % sizeof(v)) == 0)
*(uint64_t *)ptr = v; *(uint64_t *)ptr = v;
else if ((expected_alignment % sizeof(uint32_t)) == 0) { else if ((expected_alignment % sizeof(uint32_t)) == 0) {
((uint32_t *)ptr)[__BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__] = (uint32_t)v; ((uint32_t *)ptr)[__BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__] = (uint32_t)v;
((uint32_t *)ptr)[__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__] = ((uint32_t *)ptr)[__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__] = (uint32_t)(v >> 32);
(uint32_t)(v >> 32);
} else { } else {
#if defined(__unaligned) || defined(_M_ARM) || defined(_M_ARM64) || \ #if defined(__unaligned) || defined(_M_ARM) || defined(_M_ARM64) || defined(_M_X64) || defined(_M_IA64)
defined(_M_X64) || defined(_M_IA64)
*((uint64_t __unaligned *)ptr) = v; *((uint64_t __unaligned *)ptr) = v;
#else #else
bcopy_8((uint8_t *)ptr, (const uint8_t *)&v); bcopy_8((uint8_t *)ptr, (const uint8_t *)&v);
@ -199,28 +168,22 @@ static inline void unaligned_poke_u64(const size_t expected_alignment,
} }
} }
-#define UNALIGNED_PEEK_8(ptr, struct, field) \
-  peek_u8(ptr_disp(ptr, offsetof(struct, field)))
-#define UNALIGNED_POKE_8(ptr, struct, field, value) \
-  poke_u8(ptr_disp(ptr, offsetof(struct, field)), value)
-#define UNALIGNED_PEEK_16(ptr, struct, field) \
-  unaligned_peek_u16(1, ptr_disp(ptr, offsetof(struct, field)))
-#define UNALIGNED_POKE_16(ptr, struct, field, value) \
-  unaligned_poke_u16(1, ptr_disp(ptr, offsetof(struct, field)), value)
-#define UNALIGNED_PEEK_32(ptr, struct, field) \
-  unaligned_peek_u32(1, ptr_disp(ptr, offsetof(struct, field)))
-#define UNALIGNED_POKE_32(ptr, struct, field, value) \
-  unaligned_poke_u32(1, ptr_disp(ptr, offsetof(struct, field)), value)
-#define UNALIGNED_PEEK_64(ptr, struct, field) \
-  unaligned_peek_u64(1, ptr_disp(ptr, offsetof(struct, field)))
-#define UNALIGNED_POKE_64(ptr, struct, field, value) \
-  unaligned_poke_u64(1, ptr_disp(ptr, offsetof(struct, field)), value)
+#define UNALIGNED_PEEK_8(ptr, struct, field) peek_u8(ptr_disp(ptr, offsetof(struct, field)))
+#define UNALIGNED_POKE_8(ptr, struct, field, value) poke_u8(ptr_disp(ptr, offsetof(struct, field)), value)
+#define UNALIGNED_PEEK_16(ptr, struct, field) unaligned_peek_u16(1, ptr_disp(ptr, offsetof(struct, field)))
+#define UNALIGNED_POKE_16(ptr, struct, field, value) \
+  unaligned_poke_u16(1, ptr_disp(ptr, offsetof(struct, field)), value)
+#define UNALIGNED_PEEK_32(ptr, struct, field) unaligned_peek_u32(1, ptr_disp(ptr, offsetof(struct, field)))
+#define UNALIGNED_POKE_32(ptr, struct, field, value) \
+  unaligned_poke_u32(1, ptr_disp(ptr, offsetof(struct, field)), value)
+#define UNALIGNED_PEEK_64(ptr, struct, field) unaligned_peek_u64(1, ptr_disp(ptr, offsetof(struct, field)))
+#define UNALIGNED_POKE_64(ptr, struct, field, value) \
+  unaligned_poke_u64(1, ptr_disp(ptr, offsetof(struct, field)), value)
MDBX_NOTHROW_PURE_FUNCTION static inline pgno_t MDBX_NOTHROW_PURE_FUNCTION static inline pgno_t peek_pgno(const void *const __restrict ptr) {
peek_pgno(const void *const __restrict ptr) {
if (sizeof(pgno_t) == sizeof(uint32_t)) if (sizeof(pgno_t) == sizeof(uint32_t))
return (pgno_t)unaligned_peek_u32(1, ptr); return (pgno_t)unaligned_peek_u32(1, ptr);
else if (sizeof(pgno_t) == sizeof(uint64_t)) else if (sizeof(pgno_t) == sizeof(uint64_t))
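The peek/poke thunks above route every potentially unaligned access through a byte copy or a half-word composition, so UB-sanitizer stays quiet and strict-alignment targets keep working. A self-contained sketch of those two fallbacks follows; it relies on the GCC/Clang __BYTE_ORDER__ macros, just as the code above does, and is not the library implementation itself.

/* Standalone sketch: read a 32-bit value in native byte order from a pointer that is
 * only 2-byte aligned, either via memcpy or by composing two 16-bit halves. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t peek_u32_memcpy(const void *ptr) {
  uint32_t v;
  memcpy(&v, ptr, sizeof(v)); /* compilers lower this to a plain load where legal */
  return v;
}

static uint32_t peek_u32_halves(const void *ptr) {
  /* assumes at least 2-byte alignment, as the expected_alignment parameter guarantees above */
  const uint16_t *p16 = (const uint16_t *)ptr;
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  return (uint32_t)p16[0] | ((uint32_t)p16[1] << 16);
#else
  return (uint32_t)p16[1] | ((uint32_t)p16[0] << 16);
#endif
}

int main(void) {
  _Alignas(4) uint8_t buf[8] = {0, 0, 0x78, 0x56, 0x34, 0x12, 0, 0};
  /* offset 2 is 2-byte aligned but deliberately misaligned for a uint32_t */
  assert(peek_u32_memcpy(buf + 2) == peek_u32_halves(buf + 2));
  printf("0x%08x\n", (unsigned)peek_u32_memcpy(buf + 2));
  return 0;
}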

View File

@ -3,10 +3,8 @@
#include "internals.h" #include "internals.h"
MDBX_MAYBE_UNUSED MDBX_NOTHROW_CONST_FUNCTION MDBX_INTERNAL unsigned MDBX_MAYBE_UNUSED MDBX_NOTHROW_CONST_FUNCTION MDBX_INTERNAL unsigned log2n_powerof2(size_t value_uintptr) {
log2n_powerof2(size_t value_uintptr) { assert(value_uintptr > 0 && value_uintptr < INT32_MAX && is_powerof2(value_uintptr));
assert(value_uintptr > 0 && value_uintptr < INT32_MAX &&
is_powerof2(value_uintptr));
assert((value_uintptr & -(intptr_t)value_uintptr) == value_uintptr); assert((value_uintptr & -(intptr_t)value_uintptr) == value_uintptr);
const uint32_t value_uint32 = (uint32_t)value_uintptr; const uint32_t value_uint32 = (uint32_t)value_uintptr;
#if __GNUC_PREREQ(4, 1) || __has_builtin(__builtin_ctz) #if __GNUC_PREREQ(4, 1) || __has_builtin(__builtin_ctz)
@ -18,9 +16,8 @@ log2n_powerof2(size_t value_uintptr) {
_BitScanForward(&index, value_uint32); _BitScanForward(&index, value_uint32);
return index; return index;
#else #else
-static const uint8_t debruijn_ctz32[32] = {
-    0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
-    31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9};
+static const uint8_t debruijn_ctz32[32] = {0,  1,  28, 2,  29, 14, 24, 3,  30, 22, 20, 15, 25, 17, 4,  8,
+                                           31, 27, 13, 23, 21, 19, 16, 7,  26, 12, 18, 6,  11, 5,  10, 9};
return debruijn_ctz32[(uint32_t)(value_uint32 * 0x077CB531ul) >> 27]; return debruijn_ctz32[(uint32_t)(value_uint32 * 0x077CB531ul) >> 27];
#endif #endif
} }
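The fallback branch above recovers log2 of a power of two with a de Bruijn multiply. The standalone cross-check below reuses the multiplier and table shown in the diff and compares the result against __builtin_ctz where that builtin is available.

/* Standalone sketch: verify the de Bruijn ctz fallback for every 32-bit power of two. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static unsigned log2n_powerof2_debruijn(uint32_t value) {
  static const uint8_t debruijn_ctz32[32] = {0,  1,  28, 2,  29, 14, 24, 3,  30, 22, 20, 15, 25, 17, 4,  8,
                                             31, 27, 13, 23, 21, 19, 16, 7,  26, 12, 18, 6,  11, 5,  10, 9};
  return debruijn_ctz32[(uint32_t)(value * 0x077CB531ul) >> 27];
}

int main(void) {
  for (unsigned shift = 0; shift < 32; ++shift) {
    const uint32_t value = UINT32_C(1) << shift;
    assert(log2n_powerof2_debruijn(value) == shift);
#if defined(__GNUC__) || defined(__clang__)
    assert(log2n_powerof2_debruijn(value) == (unsigned)__builtin_ctz(value));
#endif
  }
  printf("de Bruijn ctz table verified for all 32 powers of two\n");
  return 0;
}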

View File

@ -27,46 +27,36 @@
/* Pointer distance as signed number of bytes */ /* Pointer distance as signed number of bytes */
#define ptr_dist(more, less) (((intptr_t)(more)) - ((intptr_t)(less))) #define ptr_dist(more, less) (((intptr_t)(more)) - ((intptr_t)(less)))
#define MDBX_ASAN_POISON_MEMORY_REGION(addr, size) \ #define MDBX_ASAN_POISON_MEMORY_REGION(addr, size) \
do { \ do { \
TRACE("POISON_MEMORY_REGION(%p, %zu) at %u", (void *)(addr), \ TRACE("POISON_MEMORY_REGION(%p, %zu) at %u", (void *)(addr), (size_t)(size), __LINE__); \
(size_t)(size), __LINE__); \ ASAN_POISON_MEMORY_REGION(addr, size); \
ASAN_POISON_MEMORY_REGION(addr, size); \
} while (0) } while (0)
#define MDBX_ASAN_UNPOISON_MEMORY_REGION(addr, size) \ #define MDBX_ASAN_UNPOISON_MEMORY_REGION(addr, size) \
do { \ do { \
TRACE("UNPOISON_MEMORY_REGION(%p, %zu) at %u", (void *)(addr), \ TRACE("UNPOISON_MEMORY_REGION(%p, %zu) at %u", (void *)(addr), (size_t)(size), __LINE__); \
(size_t)(size), __LINE__); \ ASAN_UNPOISON_MEMORY_REGION(addr, size); \
ASAN_UNPOISON_MEMORY_REGION(addr, size); \
} while (0) } while (0)
-MDBX_NOTHROW_CONST_FUNCTION MDBX_MAYBE_UNUSED static inline size_t
-branchless_abs(intptr_t value) {
+MDBX_NOTHROW_CONST_FUNCTION MDBX_MAYBE_UNUSED static inline size_t branchless_abs(intptr_t value) {
  assert(value > INT_MIN);
-  const size_t expanded_sign =
-      (size_t)(value >> (sizeof(value) * CHAR_BIT - 1));
+  const size_t expanded_sign = (size_t)(value >> (sizeof(value) * CHAR_BIT - 1));
  return ((size_t)value + expanded_sign) ^ expanded_sign;
}
-MDBX_NOTHROW_CONST_FUNCTION MDBX_MAYBE_UNUSED static inline bool
-is_powerof2(size_t x) {
-  return (x & (x - 1)) == 0;
-}
+MDBX_NOTHROW_CONST_FUNCTION MDBX_MAYBE_UNUSED static inline bool is_powerof2(size_t x) { return (x & (x - 1)) == 0; }
-MDBX_NOTHROW_CONST_FUNCTION MDBX_MAYBE_UNUSED static inline size_t
-floor_powerof2(size_t value, size_t granularity) {
+MDBX_NOTHROW_CONST_FUNCTION MDBX_MAYBE_UNUSED static inline size_t floor_powerof2(size_t value, size_t granularity) {
  assert(is_powerof2(granularity));
  return value & ~(granularity - 1);
}
-MDBX_NOTHROW_CONST_FUNCTION MDBX_MAYBE_UNUSED static inline size_t
-ceil_powerof2(size_t value, size_t granularity) {
+MDBX_NOTHROW_CONST_FUNCTION MDBX_MAYBE_UNUSED static inline size_t ceil_powerof2(size_t value, size_t granularity) {
  return floor_powerof2(value + granularity - 1, granularity);
}
MDBX_NOTHROW_CONST_FUNCTION MDBX_MAYBE_UNUSED MDBX_INTERNAL unsigned MDBX_NOTHROW_CONST_FUNCTION MDBX_MAYBE_UNUSED MDBX_INTERNAL unsigned log2n_powerof2(size_t value_uintptr);
log2n_powerof2(size_t value_uintptr);
MDBX_NOTHROW_CONST_FUNCTION MDBX_INTERNAL uint64_t rrxmrrxmsx_0(uint64_t v); MDBX_NOTHROW_CONST_FUNCTION MDBX_INTERNAL uint64_t rrxmrrxmsx_0(uint64_t v);
@ -75,8 +65,7 @@ struct monotime_cache {
int expire_countdown; int expire_countdown;
}; };
MDBX_MAYBE_UNUSED static inline uint64_t MDBX_MAYBE_UNUSED static inline uint64_t monotime_since_cached(uint64_t begin_timestamp, struct monotime_cache *cache) {
monotime_since_cached(uint64_t begin_timestamp, struct monotime_cache *cache) {
if (cache->expire_countdown) if (cache->expire_countdown)
cache->expire_countdown -= 1; cache->expire_countdown -= 1;
else { else {
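The branchless_abs, is_powerof2 and floor/ceil_powerof2 helpers shown in this header are small bit tricks. The standalone sketch below reproduces them and exercises them with concrete values; it is an illustration only, not the library header.

/* Standalone sketch exercising the rounding and branchless-abs bit tricks above. */
#include <assert.h>
#include <limits.h>
#include <stddef.h>
#include <stdint.h>

static int is_powerof2(size_t x) { return (x & (x - 1)) == 0; }

static size_t floor_powerof2(size_t value, size_t granularity) {
  assert(is_powerof2(granularity));
  return value & ~(granularity - 1);
}

static size_t ceil_powerof2(size_t value, size_t granularity) {
  return floor_powerof2(value + granularity - 1, granularity);
}

static size_t branchless_abs(intptr_t value) {
  assert(value > INTPTR_MIN);
  /* expanded_sign is 0 for non-negative values and all-ones for negative ones */
  const size_t expanded_sign = (size_t)(value >> (sizeof(value) * CHAR_BIT - 1));
  return ((size_t)value + expanded_sign) ^ expanded_sign;
}

int main(void) {
  assert(floor_powerof2(4097, 4096) == 4096);
  assert(ceil_powerof2(4097, 4096) == 8192);
  assert(ceil_powerof2(4096, 4096) == 4096);
  assert(branchless_abs(-42) == 42 && branchless_abs(42) == 42);
  return 0;
}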

View File

@ -3,8 +3,7 @@
#include "internals.h" #include "internals.h"
#if MDBX_VERSION_MAJOR != ${MDBX_VERSION_MAJOR} || \ #if MDBX_VERSION_MAJOR != ${MDBX_VERSION_MAJOR} || MDBX_VERSION_MINOR != ${MDBX_VERSION_MINOR}
MDBX_VERSION_MINOR != ${MDBX_VERSION_MINOR}
#error "API version mismatch! Had `git fetch --tags` done?" #error "API version mismatch! Had `git fetch --tags` done?"
#endif #endif
@ -18,8 +17,7 @@ __dll_export
#endif #endif
#ifdef __attribute_externally_visible__ #ifdef __attribute_externally_visible__
__attribute_externally_visible__ __attribute_externally_visible__
#elif (defined(__GNUC__) && !defined(__clang__)) || \ #elif (defined(__GNUC__) && !defined(__clang__)) || __has_attribute(__externally_visible__)
__has_attribute(__externally_visible__)
__attribute__((__externally_visible__)) __attribute__((__externally_visible__))
#endif #endif
const struct MDBX_version_info mdbx_version = { const struct MDBX_version_info mdbx_version = {
@ -29,8 +27,7 @@ __dll_export
${MDBX_VERSION_TWEAK}, ${MDBX_VERSION_TWEAK},
"@MDBX_VERSION_PRERELEASE@", /* pre-release suffix of SemVer "@MDBX_VERSION_PRERELEASE@", /* pre-release suffix of SemVer
@MDBX_VERSION_PURE@ */ @MDBX_VERSION_PURE@ */
{"@MDBX_GIT_TIMESTAMP@", "@MDBX_GIT_TREE@", "@MDBX_GIT_COMMIT@", {"@MDBX_GIT_TIMESTAMP@", "@MDBX_GIT_TREE@", "@MDBX_GIT_COMMIT@", "@MDBX_GIT_DESCRIBE@"},
"@MDBX_GIT_DESCRIBE@"},
sourcery}; sourcery};
__dll_export __dll_export
@ -41,8 +38,7 @@ __dll_export
#endif #endif
#ifdef __attribute_externally_visible__ #ifdef __attribute_externally_visible__
__attribute_externally_visible__ __attribute_externally_visible__
#elif (defined(__GNUC__) && !defined(__clang__)) || \ #elif (defined(__GNUC__) && !defined(__clang__)) || __has_attribute(__externally_visible__)
__has_attribute(__externally_visible__)
__attribute__((__externally_visible__)) __attribute__((__externally_visible__))
#endif #endif
const char *const mdbx_sourcery_anchor = sourcery; const char *const mdbx_sourcery_anchor = sourcery;

View File

@ -41,19 +41,16 @@ static page_type_t walk_subpage_type(const page_t *sp) {
} }
/* Depth-first tree traversal. */ /* Depth-first tree traversal. */
__cold static int walk_pgno(walk_ctx_t *ctx, walk_tbl_t *tbl, const pgno_t pgno, __cold static int walk_pgno(walk_ctx_t *ctx, walk_tbl_t *tbl, const pgno_t pgno, txnid_t parent_txnid) {
txnid_t parent_txnid) {
assert(pgno != P_INVALID); assert(pgno != P_INVALID);
page_t *mp = nullptr; page_t *mp = nullptr;
int err = page_get(ctx->cursor, pgno, &mp, parent_txnid); int err = page_get(ctx->cursor, pgno, &mp, parent_txnid);
const page_type_t type = walk_page_type(mp); const page_type_t type = walk_page_type(mp);
const size_t nentries = mp ? page_numkeys(mp) : 0; const size_t nentries = mp ? page_numkeys(mp) : 0;
size_t header_size = size_t header_size = (mp && !is_dupfix_leaf(mp)) ? PAGEHDRSZ + mp->lower : PAGEHDRSZ;
(mp && !is_dupfix_leaf(mp)) ? PAGEHDRSZ + mp->lower : PAGEHDRSZ;
size_t payload_size = 0; size_t payload_size = 0;
size_t unused_size = size_t unused_size = (mp ? page_room(mp) : ctx->txn->env->ps - header_size) - payload_size;
(mp ? page_room(mp) : ctx->txn->env->ps - header_size) - payload_size;
size_t align_bytes = 0; size_t align_bytes = 0;
for (size_t i = 0; err == MDBX_SUCCESS && i < nentries; ++i) { for (size_t i = 0; err == MDBX_SUCCESS && i < nentries; ++i) {
@ -89,12 +86,10 @@ __cold static int walk_pgno(walk_ctx_t *ctx, walk_tbl_t *tbl, const pgno_t pgno,
assert(err == MDBX_SUCCESS); assert(err == MDBX_SUCCESS);
pgr_t lp = page_get_large(ctx->cursor, large_pgno, mp->txnid); pgr_t lp = page_get_large(ctx->cursor, large_pgno, mp->txnid);
const size_t npages = const size_t npages = ((err = lp.err) == MDBX_SUCCESS) ? lp.page->pages : 1;
((err = lp.err) == MDBX_SUCCESS) ? lp.page->pages : 1;
const size_t pagesize = pgno2bytes(ctx->txn->env, npages); const size_t pagesize = pgno2bytes(ctx->txn->env, npages);
const size_t over_unused = pagesize - over_payload - over_header; const size_t over_unused = pagesize - over_payload - over_header;
const int rc = ctx->visitor(large_pgno, npages, ctx->userctx, ctx->deep, const int rc = ctx->visitor(large_pgno, npages, ctx->userctx, ctx->deep, tbl, pagesize, page_large, err, 1,
tbl, pagesize, page_large, err, 1,
over_payload, over_header, over_unused); over_payload, over_header, over_unused);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
return (rc == MDBX_RESULT_TRUE) ? MDBX_SUCCESS : rc; return (rc == MDBX_RESULT_TRUE) ? MDBX_SUCCESS : rc;
@ -104,8 +99,7 @@ __cold static int walk_pgno(walk_ctx_t *ctx, walk_tbl_t *tbl, const pgno_t pgno,
case N_TREE /* sub-db */: { case N_TREE /* sub-db */: {
if (unlikely(node_data_size != sizeof(tree_t))) { if (unlikely(node_data_size != sizeof(tree_t))) {
ERROR("%s/%d: %s %u", "MDBX_CORRUPTED", MDBX_CORRUPTED, ERROR("%s/%d: %s %u", "MDBX_CORRUPTED", MDBX_CORRUPTED, "invalid table node size", (unsigned)node_data_size);
"invalid table node size", (unsigned)node_data_size);
assert(err == MDBX_CORRUPTED); assert(err == MDBX_CORRUPTED);
err = MDBX_CORRUPTED; err = MDBX_CORRUPTED;
} }
@ -115,8 +109,7 @@ __cold static int walk_pgno(walk_ctx_t *ctx, walk_tbl_t *tbl, const pgno_t pgno,
case N_TREE | N_DUP /* dupsorted sub-tree */: case N_TREE | N_DUP /* dupsorted sub-tree */:
if (unlikely(node_data_size != sizeof(tree_t))) { if (unlikely(node_data_size != sizeof(tree_t))) {
ERROR("%s/%d: %s %u", "MDBX_CORRUPTED", MDBX_CORRUPTED, ERROR("%s/%d: %s %u", "MDBX_CORRUPTED", MDBX_CORRUPTED, "invalid sub-tree node size", (unsigned)node_data_size);
"invalid sub-tree node size", (unsigned)node_data_size);
assert(err == MDBX_CORRUPTED); assert(err == MDBX_CORRUPTED);
err = MDBX_CORRUPTED; err = MDBX_CORRUPTED;
} }
@ -126,8 +119,7 @@ __cold static int walk_pgno(walk_ctx_t *ctx, walk_tbl_t *tbl, const pgno_t pgno,
case N_DUP /* short sub-page */: { case N_DUP /* short sub-page */: {
if (unlikely(node_data_size <= PAGEHDRSZ || (node_data_size & 1))) { if (unlikely(node_data_size <= PAGEHDRSZ || (node_data_size & 1))) {
ERROR("%s/%d: %s %u", "MDBX_CORRUPTED", MDBX_CORRUPTED, ERROR("%s/%d: %s %u", "MDBX_CORRUPTED", MDBX_CORRUPTED, "invalid sub-page node size", (unsigned)node_data_size);
"invalid sub-page node size", (unsigned)node_data_size);
assert(err == MDBX_CORRUPTED); assert(err == MDBX_CORRUPTED);
err = MDBX_CORRUPTED; err = MDBX_CORRUPTED;
break; break;
@ -137,14 +129,12 @@ __cold static int walk_pgno(walk_ctx_t *ctx, walk_tbl_t *tbl, const pgno_t pgno,
const page_type_t subtype = walk_subpage_type(sp); const page_type_t subtype = walk_subpage_type(sp);
const size_t nsubkeys = page_numkeys(sp); const size_t nsubkeys = page_numkeys(sp);
if (unlikely(subtype == page_sub_broken)) { if (unlikely(subtype == page_sub_broken)) {
ERROR("%s/%d: %s 0x%x", "MDBX_CORRUPTED", MDBX_CORRUPTED, ERROR("%s/%d: %s 0x%x", "MDBX_CORRUPTED", MDBX_CORRUPTED, "invalid sub-page flags", sp->flags);
"invalid sub-page flags", sp->flags);
assert(err == MDBX_CORRUPTED); assert(err == MDBX_CORRUPTED);
err = MDBX_CORRUPTED; err = MDBX_CORRUPTED;
} }
size_t subheader_size = size_t subheader_size = is_dupfix_leaf(sp) ? PAGEHDRSZ : PAGEHDRSZ + sp->lower;
is_dupfix_leaf(sp) ? PAGEHDRSZ : PAGEHDRSZ + sp->lower;
size_t subunused_size = page_room(sp); size_t subunused_size = page_room(sp);
size_t subpayload_size = 0; size_t subpayload_size = 0;
size_t subalign_bytes = 0; size_t subalign_bytes = 0;
@ -161,18 +151,15 @@ __cold static int walk_pgno(walk_ctx_t *ctx, walk_tbl_t *tbl, const pgno_t pgno,
subpayload_size += subnode_size; subpayload_size += subnode_size;
subalign_bytes += subnode_size & 1; subalign_bytes += subnode_size & 1;
if (unlikely(node_flags(subnode) != 0)) { if (unlikely(node_flags(subnode) != 0)) {
ERROR("%s/%d: %s 0x%x", "MDBX_CORRUPTED", MDBX_CORRUPTED, ERROR("%s/%d: %s 0x%x", "MDBX_CORRUPTED", MDBX_CORRUPTED, "unexpected sub-node flags", node_flags(subnode));
"unexpected sub-node flags", node_flags(subnode));
assert(err == MDBX_CORRUPTED); assert(err == MDBX_CORRUPTED);
err = MDBX_CORRUPTED; err = MDBX_CORRUPTED;
} }
} }
} }
const int rc = const int rc = ctx->visitor(pgno, 0, ctx->userctx, ctx->deep + 1, tbl, node_data_size, subtype, err, nsubkeys,
ctx->visitor(pgno, 0, ctx->userctx, ctx->deep + 1, tbl, subpayload_size, subheader_size, subunused_size + subalign_bytes);
node_data_size, subtype, err, nsubkeys, subpayload_size,
subheader_size, subunused_size + subalign_bytes);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
return (rc == MDBX_RESULT_TRUE) ? MDBX_SUCCESS : rc; return (rc == MDBX_RESULT_TRUE) ? MDBX_SUCCESS : rc;
header_size += subheader_size; header_size += subheader_size;
@ -182,16 +169,14 @@ __cold static int walk_pgno(walk_ctx_t *ctx, walk_tbl_t *tbl, const pgno_t pgno,
} break; } break;
default: default:
ERROR("%s/%d: %s 0x%x", "MDBX_CORRUPTED", MDBX_CORRUPTED, ERROR("%s/%d: %s 0x%x", "MDBX_CORRUPTED", MDBX_CORRUPTED, "invalid node flags", node_flags(node));
"invalid node flags", node_flags(node));
assert(err == MDBX_CORRUPTED); assert(err == MDBX_CORRUPTED);
err = MDBX_CORRUPTED; err = MDBX_CORRUPTED;
} }
} }
-const int rc = ctx->visitor(
-    pgno, 1, ctx->userctx, ctx->deep, tbl, ctx->txn->env->ps, type, err,
-    nentries, payload_size, header_size, unused_size + align_bytes);
+const int rc = ctx->visitor(pgno, 1, ctx->userctx, ctx->deep, tbl, ctx->txn->env->ps, type, err, nentries,
+                            payload_size, header_size, unused_size + align_bytes);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
return (rc == MDBX_RESULT_TRUE) ? MDBX_SUCCESS : rc; return (rc == MDBX_RESULT_TRUE) ? MDBX_SUCCESS : rc;
@ -220,8 +205,7 @@ __cold static int walk_pgno(walk_ctx_t *ctx, walk_tbl_t *tbl, const pgno_t pgno,
case N_TREE /* sub-db */: case N_TREE /* sub-db */:
if (unlikely(node_ds(node) != sizeof(tree_t))) { if (unlikely(node_ds(node) != sizeof(tree_t))) {
ERROR("%s/%d: %s %u", "MDBX_CORRUPTED", MDBX_CORRUPTED, ERROR("%s/%d: %s %u", "MDBX_CORRUPTED", MDBX_CORRUPTED, "invalid sub-tree node size", (unsigned)node_ds(node));
"invalid sub-tree node size", (unsigned)node_ds(node));
assert(err == MDBX_CORRUPTED); assert(err == MDBX_CORRUPTED);
err = MDBX_CORRUPTED; err = MDBX_CORRUPTED;
} else { } else {
@ -238,8 +222,8 @@ __cold static int walk_pgno(walk_ctx_t *ctx, walk_tbl_t *tbl, const pgno_t pgno,
case N_TREE | N_DUP /* dupsorted sub-tree */: case N_TREE | N_DUP /* dupsorted sub-tree */:
if (unlikely(node_ds(node) != sizeof(tree_t))) { if (unlikely(node_ds(node) != sizeof(tree_t))) {
ERROR("%s/%d: %s %u", "MDBX_CORRUPTED", MDBX_CORRUPTED, ERROR("%s/%d: %s %u", "MDBX_CORRUPTED", MDBX_CORRUPTED, "invalid dupsort sub-tree node size",
"invalid dupsort sub-tree node size", (unsigned)node_ds(node)); (unsigned)node_ds(node));
assert(err == MDBX_CORRUPTED); assert(err == MDBX_CORRUPTED);
err = MDBX_CORRUPTED; err = MDBX_CORRUPTED;
} else { } else {
@ -248,8 +232,7 @@ __cold static int walk_pgno(walk_ctx_t *ctx, walk_tbl_t *tbl, const pgno_t pgno,
assert(err == MDBX_SUCCESS); assert(err == MDBX_SUCCESS);
err = cursor_dupsort_setup(ctx->cursor, node, mp); err = cursor_dupsort_setup(ctx->cursor, node, mp);
if (likely(err == MDBX_SUCCESS)) { if (likely(err == MDBX_SUCCESS)) {
assert(ctx->cursor->subcur == assert(ctx->cursor->subcur == &container_of(ctx->cursor, cursor_couple_t, outer)->inner);
&container_of(ctx->cursor, cursor_couple_t, outer)->inner);
ctx->cursor = &ctx->cursor->subcur->cursor; ctx->cursor = &ctx->cursor->subcur->cursor;
ctx->deep += 1; ctx->deep += 1;
tbl->nested = &aligned_db; tbl->nested = &aligned_db;
@ -257,8 +240,7 @@ __cold static int walk_pgno(walk_ctx_t *ctx, walk_tbl_t *tbl, const pgno_t pgno,
tbl->nested = nullptr; tbl->nested = nullptr;
ctx->deep -= 1; ctx->deep -= 1;
subcur_t *inner_xcursor = container_of(ctx->cursor, subcur_t, cursor); subcur_t *inner_xcursor = container_of(ctx->cursor, subcur_t, cursor);
cursor_couple_t *couple = cursor_couple_t *couple = container_of(inner_xcursor, cursor_couple_t, inner);
container_of(inner_xcursor, cursor_couple_t, inner);
ctx->cursor = &couple->outer; ctx->cursor = &couple->outer;
} }
} }
@ -280,30 +262,24 @@ __cold static int walk_tbl(walk_ctx_t *ctx, walk_tbl_t *tbl) {
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
return rc; return rc;
const uint8_t cursor_checking = (ctx->options & dont_check_keys_ordering) const uint8_t cursor_checking = (ctx->options & dont_check_keys_ordering) ? z_pagecheck | z_ignord : z_pagecheck;
? z_pagecheck | z_ignord
: z_pagecheck;
couple.outer.checking |= cursor_checking; couple.outer.checking |= cursor_checking;
couple.inner.cursor.checking |= cursor_checking; couple.inner.cursor.checking |= cursor_checking;
couple.outer.next = ctx->cursor; couple.outer.next = ctx->cursor;
couple.outer.top_and_flags = z_disable_tree_search_fastpath; couple.outer.top_and_flags = z_disable_tree_search_fastpath;
ctx->cursor = &couple.outer; ctx->cursor = &couple.outer;
rc = walk_pgno(ctx, tbl, db->root, rc = walk_pgno(ctx, tbl, db->root, db->mod_txnid ? db->mod_txnid : ctx->txn->txnid);
db->mod_txnid ? db->mod_txnid : ctx->txn->txnid);
ctx->cursor = couple.outer.next; ctx->cursor = couple.outer.next;
return rc; return rc;
} }
__cold int walk_pages(MDBX_txn *txn, walk_func *visitor, void *user, __cold int walk_pages(MDBX_txn *txn, walk_func *visitor, void *user, walk_options_t options) {
walk_options_t options) {
int rc = check_txn(txn, MDBX_TXN_BLOCKED); int rc = check_txn(txn, MDBX_TXN_BLOCKED);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
return rc; return rc;
walk_ctx_t ctx = { walk_ctx_t ctx = {.txn = txn, .userctx = user, .visitor = visitor, .options = options};
.txn = txn, .userctx = user, .visitor = visitor, .options = options}; walk_tbl_t tbl = {.name = {.iov_base = MDBX_CHK_GC}, .internal = &txn->dbs[FREE_DBI]};
walk_tbl_t tbl = {.name = {.iov_base = MDBX_CHK_GC},
.internal = &txn->dbs[FREE_DBI]};
rc = walk_tbl(&ctx, &tbl); rc = walk_tbl(&ctx, &tbl);
if (!MDBX_IS_ERROR(rc)) { if (!MDBX_IS_ERROR(rc)) {
tbl.name.iov_base = MDBX_CHK_MAIN; tbl.name.iov_base = MDBX_CHK_MAIN;

View File

@ -10,14 +10,11 @@ typedef struct walk_tbl {
tree_t *internal, *nested; tree_t *internal, *nested;
} walk_tbl_t; } walk_tbl_t;
-typedef int walk_func(const size_t pgno, const unsigned number, void *const ctx,
-                      const int deep, const walk_tbl_t *table,
-                      const size_t page_size, const page_type_t page_type,
-                      const MDBX_error_t err, const size_t nentries,
-                      const size_t payload_bytes, const size_t header_bytes,
-                      const size_t unused_bytes);
+typedef int walk_func(const size_t pgno, const unsigned number, void *const ctx, const int deep,
+                      const walk_tbl_t *table, const size_t page_size, const page_type_t page_type,
+                      const MDBX_error_t err, const size_t nentries, const size_t payload_bytes,
+                      const size_t header_bytes, const size_t unused_bytes);
typedef enum walk_options { dont_check_keys_ordering = 1 } walk_options_t;
-MDBX_INTERNAL int walk_pages(MDBX_txn *txn, walk_func *visitor, void *user,
-                             walk_options_t options);
+MDBX_INTERNAL int walk_pages(MDBX_txn *txn, walk_func *visitor, void *user, walk_options_t options);
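walk_pages() drives the walk_func visitor above over every page of the tree. Below is a hypothetical visitor that merely tallies per-page totals; it targets the internal API exactly as declared above, assumes the libmdbx internal headers ("internals.h") are on the include path, and is only a sketch, not code from the library.

/* Hypothetical visitor matching the walk_func typedef above (internal API). */
typedef struct walk_totals {
  size_t pages, entries, payload, header, unused;
} walk_totals_t;

static int tally_visitor(const size_t pgno, const unsigned number, void *const ctx, const int deep,
                         const walk_tbl_t *table, const size_t page_size, const page_type_t page_type,
                         const MDBX_error_t err, const size_t nentries, const size_t payload_bytes,
                         const size_t header_bytes, const size_t unused_bytes) {
  (void)pgno;
  (void)deep;
  (void)table;
  (void)page_size;
  (void)page_type;
  if (err != MDBX_SUCCESS)
    return err; /* abort the traversal on the first damaged page */
  walk_totals_t *totals = (walk_totals_t *)ctx;
  totals->pages += number;
  totals->entries += nentries;
  totals->payload += payload_bytes;
  totals->header += header_bytes;
  totals->unused += unused_bytes;
  return MDBX_SUCCESS;
}

/* usage sketch, given a started transaction `txn`:
 *   walk_totals_t totals = {0};
 *   int rc = walk_pages(txn, tally_visitor, &totals, dont_check_keys_ordering);
 */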

View File

@ -9,9 +9,7 @@
// Stub for slim read-write lock // Stub for slim read-write lock
// Portion Copyright (C) 1995-2002 Brad Wilson // Portion Copyright (C) 1995-2002 Brad Wilson
static void WINAPI stub_srwlock_Init(osal_srwlock_t *srwl) { static void WINAPI stub_srwlock_Init(osal_srwlock_t *srwl) { srwl->readerCount = srwl->writerCount = 0; }
srwl->readerCount = srwl->writerCount = 0;
}
static void WINAPI stub_srwlock_AcquireShared(osal_srwlock_t *srwl) { static void WINAPI stub_srwlock_AcquireShared(osal_srwlock_t *srwl) {
while (true) { while (true) {
@ -76,8 +74,7 @@ static void WINAPI stub_srwlock_ReleaseExclusive(osal_srwlock_t *srwl) {
static uint64_t WINAPI stub_GetTickCount64(void) { static uint64_t WINAPI stub_GetTickCount64(void) {
LARGE_INTEGER Counter, Frequency; LARGE_INTEGER Counter, Frequency;
return (QueryPerformanceFrequency(&Frequency) && return (QueryPerformanceFrequency(&Frequency) && QueryPerformanceCounter(&Counter))
QueryPerformanceCounter(&Counter))
? Counter.QuadPart * 1000ul / Frequency.QuadPart ? Counter.QuadPart * 1000ul / Frequency.QuadPart
: 0; : 0;
} }
@ -91,8 +88,7 @@ struct libmdbx_imports imports;
#pragma GCC diagnostic ignored "-Wcast-function-type" #pragma GCC diagnostic ignored "-Wcast-function-type"
#endif /* GCC/MINGW */ #endif /* GCC/MINGW */
#define MDBX_IMPORT(HANDLE, ENTRY) \ #define MDBX_IMPORT(HANDLE, ENTRY) imports.ENTRY = (MDBX_##ENTRY)GetProcAddress(HANDLE, #ENTRY)
imports.ENTRY = (MDBX_##ENTRY)GetProcAddress(HANDLE, #ENTRY)
void windows_import(void) { void windows_import(void) {
const HINSTANCE hNtdll = GetModuleHandleA("ntdll.dll"); const HINSTANCE hNtdll = GetModuleHandleA("ntdll.dll");
@ -121,20 +117,13 @@ void windows_import(void) {
} }
const osal_srwlock_t_function srwlock_init =
-    (osal_srwlock_t_function)(hKernel32dll
-                                  ? GetProcAddress(hKernel32dll,
-                                                   "InitializeSRWLock")
-                                  : nullptr);
+    (osal_srwlock_t_function)(hKernel32dll ? GetProcAddress(hKernel32dll, "InitializeSRWLock") : nullptr);
if (srwlock_init) {
  imports.srwl_Init = srwlock_init;
-  imports.srwl_AcquireShared = (osal_srwlock_t_function)GetProcAddress(
-      hKernel32dll, "AcquireSRWLockShared");
-  imports.srwl_ReleaseShared = (osal_srwlock_t_function)GetProcAddress(
-      hKernel32dll, "ReleaseSRWLockShared");
-  imports.srwl_AcquireExclusive = (osal_srwlock_t_function)GetProcAddress(
-      hKernel32dll, "AcquireSRWLockExclusive");
-  imports.srwl_ReleaseExclusive = (osal_srwlock_t_function)GetProcAddress(
-      hKernel32dll, "ReleaseSRWLockExclusive");
+  imports.srwl_AcquireShared = (osal_srwlock_t_function)GetProcAddress(hKernel32dll, "AcquireSRWLockShared");
+  imports.srwl_ReleaseShared = (osal_srwlock_t_function)GetProcAddress(hKernel32dll, "ReleaseSRWLockShared");
+  imports.srwl_AcquireExclusive = (osal_srwlock_t_function)GetProcAddress(hKernel32dll, "AcquireSRWLockExclusive");
+  imports.srwl_ReleaseExclusive = (osal_srwlock_t_function)GetProcAddress(hKernel32dll, "ReleaseSRWLockExclusive");
} else { } else {
imports.srwl_Init = stub_srwlock_Init; imports.srwl_Init = stub_srwlock_Init;
imports.srwl_AcquireShared = stub_srwlock_AcquireShared; imports.srwl_AcquireShared = stub_srwlock_AcquireShared;
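The import table above resolves optional Win32/NT entry points at run time and falls back to local stubs on older systems. A self-contained sketch of the same resolve-or-stub pattern for a single documented kernel32 entry point follows; it is an illustration, not the library's import machinery.

/* Standalone Win32 sketch: resolve GetTickCount64 from kernel32.dll and fall back to a
 * local stub when the entry point is missing (as on pre-Vista systems). */
#include <windows.h>
#include <stdio.h>

typedef ULONGLONG(WINAPI *GetTickCount64_t)(void);

static ULONGLONG WINAPI stub_GetTickCount64(void) {
  return (ULONGLONG)GetTickCount(); /* 32-bit counter, wraps every ~49.7 days */
}

int main(void) {
  const HMODULE hKernel32 = GetModuleHandleA("kernel32.dll");
  GetTickCount64_t pGetTickCount64 =
      hKernel32 ? (GetTickCount64_t)GetProcAddress(hKernel32, "GetTickCount64") : NULL;
  if (!pGetTickCount64)
    pGetTickCount64 = stub_GetTickCount64; /* graceful degradation instead of a hard failure */
  printf("uptime: %llu ms (%s)\n", (unsigned long long)pGetTickCount64(),
         pGetTickCount64 == stub_GetTickCount64 ? "stub" : "kernel32");
  return 0;
}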

View File

@ -59,32 +59,27 @@ typedef struct _FILE_REMOTE_PROTOCOL_INFO {
#endif /* _WIN32_WINNT < 0x0600 (prior to Windows Vista) */ #endif /* _WIN32_WINNT < 0x0600 (prior to Windows Vista) */
typedef BOOL(WINAPI *MDBX_GetFileInformationByHandleEx)( typedef BOOL(WINAPI *MDBX_GetFileInformationByHandleEx)(_In_ HANDLE hFile,
_In_ HANDLE hFile, _In_ FILE_INFO_BY_HANDLE_CLASS FileInformationClass, _In_ FILE_INFO_BY_HANDLE_CLASS FileInformationClass,
_Out_ LPVOID lpFileInformation, _In_ DWORD dwBufferSize); _Out_ LPVOID lpFileInformation, _In_ DWORD dwBufferSize);
typedef BOOL(WINAPI *MDBX_GetVolumeInformationByHandleW)( typedef BOOL(WINAPI *MDBX_GetVolumeInformationByHandleW)(
_In_ HANDLE hFile, _Out_opt_ LPWSTR lpVolumeNameBuffer, _In_ HANDLE hFile, _Out_opt_ LPWSTR lpVolumeNameBuffer, _In_ DWORD nVolumeNameSize,
_In_ DWORD nVolumeNameSize, _Out_opt_ LPDWORD lpVolumeSerialNumber, _Out_opt_ LPDWORD lpVolumeSerialNumber, _Out_opt_ LPDWORD lpMaximumComponentLength,
_Out_opt_ LPDWORD lpMaximumComponentLength, _Out_opt_ LPDWORD lpFileSystemFlags, _Out_opt_ LPWSTR lpFileSystemNameBuffer, _In_ DWORD nFileSystemNameSize);
_Out_opt_ LPDWORD lpFileSystemFlags,
_Out_opt_ LPWSTR lpFileSystemNameBuffer, _In_ DWORD nFileSystemNameSize);
typedef DWORD(WINAPI *MDBX_GetFinalPathNameByHandleW)(_In_ HANDLE hFile, typedef DWORD(WINAPI *MDBX_GetFinalPathNameByHandleW)(_In_ HANDLE hFile, _Out_ LPWSTR lpszFilePath,
_Out_ LPWSTR lpszFilePath, _In_ DWORD cchFilePath, _In_ DWORD dwFlags);
_In_ DWORD cchFilePath,
_In_ DWORD dwFlags);
typedef BOOL(WINAPI *MDBX_SetFileInformationByHandle)( typedef BOOL(WINAPI *MDBX_SetFileInformationByHandle)(_In_ HANDLE hFile,
_In_ HANDLE hFile, _In_ FILE_INFO_BY_HANDLE_CLASS FileInformationClass, _In_ FILE_INFO_BY_HANDLE_CLASS FileInformationClass,
_Out_ LPVOID lpFileInformation, _In_ DWORD dwBufferSize); _Out_ LPVOID lpFileInformation, _In_ DWORD dwBufferSize);
typedef NTSTATUS(NTAPI *MDBX_NtFsControlFile)( typedef NTSTATUS(NTAPI *MDBX_NtFsControlFile)(IN HANDLE FileHandle, IN OUT HANDLE Event,
IN HANDLE FileHandle, IN OUT HANDLE Event, IN OUT PVOID /* PIO_APC_ROUTINE */ ApcRoutine, IN OUT PVOID ApcContext,
IN OUT PVOID /* PIO_APC_ROUTINE */ ApcRoutine, IN OUT PVOID ApcContext, OUT PIO_STATUS_BLOCK IoStatusBlock, IN ULONG FsControlCode,
OUT PIO_STATUS_BLOCK IoStatusBlock, IN ULONG FsControlCode, IN OUT PVOID InputBuffer, IN ULONG InputBufferLength,
IN OUT PVOID InputBuffer, IN ULONG InputBufferLength, OUT OPTIONAL PVOID OutputBuffer, IN ULONG OutputBufferLength);
OUT OPTIONAL PVOID OutputBuffer, IN ULONG OutputBufferLength);
typedef uint64_t(WINAPI *MDBX_GetTickCount64)(void); typedef uint64_t(WINAPI *MDBX_GetTickCount64)(void);
@ -95,27 +90,21 @@ typedef struct _WIN32_MEMORY_RANGE_ENTRY {
} WIN32_MEMORY_RANGE_ENTRY, *PWIN32_MEMORY_RANGE_ENTRY; } WIN32_MEMORY_RANGE_ENTRY, *PWIN32_MEMORY_RANGE_ENTRY;
#endif /* Windows 8.x */ #endif /* Windows 8.x */
typedef BOOL(WINAPI *MDBX_PrefetchVirtualMemory)( typedef BOOL(WINAPI *MDBX_PrefetchVirtualMemory)(HANDLE hProcess, ULONG_PTR NumberOfEntries,
HANDLE hProcess, ULONG_PTR NumberOfEntries, PWIN32_MEMORY_RANGE_ENTRY VirtualAddresses, ULONG Flags);
PWIN32_MEMORY_RANGE_ENTRY VirtualAddresses, ULONG Flags);
typedef enum _SECTION_INHERIT { ViewShare = 1, ViewUnmap = 2 } SECTION_INHERIT; typedef enum _SECTION_INHERIT { ViewShare = 1, ViewUnmap = 2 } SECTION_INHERIT;
typedef NTSTATUS(NTAPI *MDBX_NtExtendSection)(IN HANDLE SectionHandle, typedef NTSTATUS(NTAPI *MDBX_NtExtendSection)(IN HANDLE SectionHandle, IN PLARGE_INTEGER NewSectionSize);
IN PLARGE_INTEGER NewSectionSize);
typedef LSTATUS(WINAPI *MDBX_RegGetValueA)(HKEY hkey, LPCSTR lpSubKey, typedef LSTATUS(WINAPI *MDBX_RegGetValueA)(HKEY hkey, LPCSTR lpSubKey, LPCSTR lpValue, DWORD dwFlags, LPDWORD pdwType,
LPCSTR lpValue, DWORD dwFlags, PVOID pvData, LPDWORD pcbData);
LPDWORD pdwType, PVOID pvData,
LPDWORD pcbData);
typedef long(WINAPI *MDBX_CoCreateGuid)(bin128_t *guid); typedef long(WINAPI *MDBX_CoCreateGuid)(bin128_t *guid);
NTSYSAPI ULONG RtlRandomEx(PULONG Seed); NTSYSAPI ULONG RtlRandomEx(PULONG Seed);
typedef BOOL(WINAPI *MDBX_SetFileIoOverlappedRange)(HANDLE FileHandle, typedef BOOL(WINAPI *MDBX_SetFileIoOverlappedRange)(HANDLE FileHandle, PUCHAR OverlappedRangeStart, ULONG Length);
PUCHAR OverlappedRangeStart,
ULONG Length);
struct libmdbx_imports { struct libmdbx_imports {
osal_srwlock_t_function srwl_Init; osal_srwlock_t_function srwl_Init;

View File

@ -5,16 +5,14 @@
class testcase_append : public testcase { class testcase_append : public testcase {
public: public:
testcase_append(const actor_config &config, const mdbx_pid_t pid) testcase_append(const actor_config &config, const mdbx_pid_t pid) : testcase(config, pid) {}
: testcase(config, pid) {}
bool run() override; bool run() override;
static bool review_params(actor_params &params, unsigned space_id) { static bool review_params(actor_params &params, unsigned space_id) {
if (!testcase::review_params(params, space_id)) if (!testcase::review_params(params, space_id))
return false; return false;
const bool ordered = !flipcoin_x3(); const bool ordered = !flipcoin_x3();
log_notice("the '%s' key-generation mode is selected", log_notice("the '%s' key-generation mode is selected", ordered ? "ordered/linear" : "unordered/non-linear");
ordered ? "ordered/linear" : "unordered/non-linear");
if (ordered && !params.make_keygen_linear()) if (ordered && !params.make_keygen_linear())
return false; return false;
return true; return true;
@ -37,13 +35,10 @@ bool testcase_append::run() {
keyvalue_maker.setup(config.params, 0 /* thread_number */); keyvalue_maker.setup(config.params, 0 /* thread_number */);
/* LY: тест наполнения таблиц в append-режиме, /* LY: тест наполнения таблиц в append-режиме,
* при котором записи добавляются строго в конец (в порядке сортировки) */ * при котором записи добавляются строго в конец (в порядке сортировки) */
-const MDBX_put_flags_t flags =
-    reverse
-        ? ((config.params.table_flags & MDBX_DUPSORT) ? MDBX_UPSERT
-                                                      : MDBX_NOOVERWRITE)
-        : ((config.params.table_flags & MDBX_DUPSORT)
-               ? (flipcoin() ? MDBX_APPEND | MDBX_APPENDDUP : MDBX_APPENDDUP)
-               : MDBX_APPEND);
+const MDBX_put_flags_t flags = reverse ? ((config.params.table_flags & MDBX_DUPSORT) ? MDBX_UPSERT : MDBX_NOOVERWRITE)
+                                       : ((config.params.table_flags & MDBX_DUPSORT)
+                                              ? (flipcoin() ? MDBX_APPEND | MDBX_APPENDDUP : MDBX_APPENDDUP)
+                                              : MDBX_APPEND);
key = keygen::alloc(config.params.keylen_max); key = keygen::alloc(config.params.keylen_max);
data = keygen::alloc(config.params.datalen_max); data = keygen::alloc(config.params.datalen_max);
@ -59,11 +54,9 @@ bool testcase_append::run() {
simple_checksum committed_inserted_checksum = inserted_checksum; simple_checksum committed_inserted_checksum = inserted_checksum;
while (should_continue()) { while (should_continue()) {
const keygen::serial_t serial = serial_count; const keygen::serial_t serial = serial_count;
const bool turn_key = (config.params.table_flags & MDBX_DUPSORT) == 0 || const bool turn_key = (config.params.table_flags & MDBX_DUPSORT) == 0 || flipcoin_n(config.params.keygen.split);
flipcoin_n(config.params.keygen.split); if (turn_key ? !keyvalue_maker.increment_key_part(serial_count, reverse ? -1 : 1)
if (turn_key : !keyvalue_maker.increment(serial_count, reverse ? -1 : 1)) {
? !keyvalue_maker.increment_key_part(serial_count, reverse ? -1 : 1)
: !keyvalue_maker.increment(serial_count, reverse ? -1 : 1)) {
// дошли до границы пространства ключей // дошли до границы пространства ключей
break; break;
} }
@ -106,8 +99,7 @@ bool testcase_append::run() {
break; break;
case MDBX_APPENDDUP: case MDBX_APPENDDUP:
assert((config.params.table_flags & MDBX_DUPSORT) != 0); assert((config.params.table_flags & MDBX_DUPSORT) != 0);
expect_key_mismatch = expect_key_mismatch = mdbx_cmp(txn_guard.get(), dbi, &key->value, &ge_key) == 0;
mdbx_cmp(txn_guard.get(), dbi, &key->value, &ge_key) == 0;
break; break;
} }
} else if (err == MDBX_NOTFOUND /* all pair are less than */) { } else if (err == MDBX_NOTFOUND /* all pair are less than */) {
@ -152,10 +144,9 @@ bool testcase_append::run() {
const auto insertion_result = speculum.insert(item); const auto insertion_result = speculum.insert(item);
if (!insertion_result.second) { if (!insertion_result.second) {
char dump_key[32], dump_value[32]; char dump_key[32], dump_value[32];
log_error( log_error("speculum.append: unexpected %s {%s, %s}", "MDBX_SUCCESS",
"speculum.append: unexpected %s {%s, %s}", "MDBX_SUCCESS", mdbx_dump_val(&key->value, dump_key, sizeof(dump_key)),
mdbx_dump_val(&key->value, dump_key, sizeof(dump_key)), mdbx_dump_val(&data->value, dump_value, sizeof(dump_value)));
mdbx_dump_val(&data->value, dump_value, sizeof(dump_value)));
return false; return false;
} }
} }
@ -199,8 +190,7 @@ bool testcase_append::run() {
cursor_renew(); cursor_renew();
MDBX_val check_key, check_data; MDBX_val check_key, check_data;
err = mdbx_cursor_get(cursor_guard.get(), &check_key, &check_data, err = mdbx_cursor_get(cursor_guard.get(), &check_key, &check_data, reverse ? MDBX_LAST : MDBX_FIRST);
reverse ? MDBX_LAST : MDBX_FIRST);
if (likely(inserted_number)) { if (likely(inserted_number)) {
if (unlikely(err != MDBX_SUCCESS)) if (unlikely(err != MDBX_SUCCESS))
failure_perror("mdbx_cursor_get(MDBX_FIRST)", err); failure_perror("mdbx_cursor_get(MDBX_FIRST)", err);
@ -213,19 +203,16 @@ bool testcase_append::run() {
read_checksum.push((uint32_t)read_count, check_key); read_checksum.push((uint32_t)read_count, check_key);
read_checksum.push(10639, check_data); read_checksum.push(10639, check_data);
err = mdbx_cursor_get(cursor_guard.get(), &check_key, &check_data, err = mdbx_cursor_get(cursor_guard.get(), &check_key, &check_data, reverse ? MDBX_PREV : MDBX_NEXT);
reverse ? MDBX_PREV : MDBX_NEXT);
} }
if (unlikely(err != MDBX_NOTFOUND)) if (unlikely(err != MDBX_NOTFOUND))
failure_perror("mdbx_cursor_get(MDBX_NEXT) != EOF", err); failure_perror("mdbx_cursor_get(MDBX_NEXT) != EOF", err);
if (unlikely(read_count != inserted_number)) if (unlikely(read_count != inserted_number))
failure("read_count(%" PRIu64 ") != inserted_number(%" PRIu64 ")", failure("read_count(%" PRIu64 ") != inserted_number(%" PRIu64 ")", read_count, inserted_number);
read_count, inserted_number);
if (unlikely(read_checksum.value != inserted_checksum.value) && if (unlikely(read_checksum.value != inserted_checksum.value) && !keyvalue_maker.is_unordered())
!keyvalue_maker.is_unordered())
failure("read_checksum(0x%016" PRIu64 ") " failure("read_checksum(0x%016" PRIu64 ") "
"!= inserted_checksum(0x%016" PRIu64 ")", "!= inserted_checksum(0x%016" PRIu64 ")",
read_checksum.value, inserted_checksum.value); read_checksum.value, inserted_checksum.value);
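The test above exercises append-mode insertion, where every new key must sort after the existing ones, otherwise mdbx_put() fails with MDBX_EKEYMISMATCH. A minimal sketch with the public C API follows; error handling is trimmed and the database path and key format are invented for the example.

/* Minimal append-mode sketch using the public libmdbx API. */
#include "mdbx.h"
#include <stdio.h>

int main(void) {
  MDBX_env *env = NULL;
  mdbx_env_create(&env);
  mdbx_env_open(env, "./append-demo.db", MDBX_NOSUBDIR, 0644);

  MDBX_txn *txn = NULL;
  MDBX_dbi dbi;
  mdbx_txn_begin(env, NULL, MDBX_TXN_READWRITE, &txn);
  mdbx_dbi_open(txn, NULL, MDBX_DB_DEFAULTS, &dbi);

  for (unsigned i = 0; i < 1000; ++i) {
    char buf[16];
    /* zero-padded keys keep lexicographic order equal to numeric order */
    const int len = snprintf(buf, sizeof(buf), "%08u", i);
    MDBX_val key = {.iov_base = buf, .iov_len = (size_t)len};
    MDBX_val val = {.iov_base = buf, .iov_len = (size_t)len};
    int rc = mdbx_put(txn, dbi, &key, &val, MDBX_APPEND);
    if (rc != MDBX_SUCCESS) {
      fprintf(stderr, "mdbx_put: %s\n", mdbx_strerror(rc));
      break;
    }
  }
  mdbx_txn_commit(txn);
  mdbx_env_close(env);
  return 0;
}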

View File

@ -7,12 +7,12 @@
#ifdef _MSC_VER #ifdef _MSC_VER
#pragma warning(push, 1) #pragma warning(push, 1)
#pragma warning(disable : 4548) /* expression before comma has no effect; \ #pragma warning(disable : 4548) /* expression before comma has no effect; \
expected expression with side - effect */ expected expression with side - effect */
#pragma warning(disable : 4530) /* C++ exception handler used, but unwind \ #pragma warning(disable : 4530) /* C++ exception handler used, but unwind \
semantics are not enabled. Specify /EHsc */ semantics are not enabled. Specify /EHsc */
#pragma warning(disable : 4577) /* 'noexcept' used with no exception handling \ #pragma warning(disable : 4577) /* 'noexcept' used with no exception handling \
mode specified; termination on exception \ mode specified; termination on exception \
is not guaranteed. Specify /EHsc */ is not guaranteed. Specify /EHsc */
#endif /* _MSC_VER (warnings) */ #endif /* _MSC_VER (warnings) */
@ -71,24 +71,22 @@
#ifdef _MSC_VER #ifdef _MSC_VER
#pragma warning(pop) #pragma warning(pop)
#pragma warning(disable : 4201) /* nonstandard extension used: nameless \ #pragma warning(disable : 4201) /* nonstandard extension used: nameless \
struct/union */ struct/union */
#pragma warning(disable : 4127) /* conditional expression is constant */ #pragma warning(disable : 4127) /* conditional expression is constant */
#if _MSC_VER < 1900 #if _MSC_VER < 1900
#pragma warning(disable : 4510) /* default constructor could \ #pragma warning(disable : 4510) /* default constructor could \
not be generated */ not be generated */
#pragma warning(disable : 4512) /* assignment operator could \ #pragma warning(disable : 4512) /* assignment operator could \
not be generated */ not be generated */
#pragma warning(disable : 4610) /* user-defined constructor required */ #pragma warning(disable : 4610) /* user-defined constructor required */
#ifndef snprintf #ifndef snprintf
#define snprintf(buffer, buffer_size, format, ...) \ #define snprintf(buffer, buffer_size, format, ...) _snprintf_s(buffer, buffer_size, _TRUNCATE, format, __VA_ARGS__)
_snprintf_s(buffer, buffer_size, _TRUNCATE, format, __VA_ARGS__)
#endif #endif
#ifndef vsnprintf #ifndef vsnprintf
#define vsnprintf(buffer, buffer_size, format, args) \ #define vsnprintf(buffer, buffer_size, format, args) _vsnprintf_s(buffer, buffer_size, _TRUNCATE, format, args)
_vsnprintf_s(buffer, buffer_size, _TRUNCATE, format, args)
#endif #endif
#pragma warning(disable : 4996) /* 'vsnprintf': This function or variable \ #pragma warning(disable : 4996) /* 'vsnprintf': This function or variable \
may be unsafe */ may be unsafe */
#endif #endif
#endif /* _MSC_VER */ #endif /* _MSC_VER */
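
The block above maps C99 snprintf/vsnprintf onto _snprintf_s/_vsnprintf_s with _TRUNCATE for pre-VS2015 MSVC, so formatting into a fixed buffer truncates and NUL-terminates instead of overflowing. A tiny standalone illustration of the behaviour the shim is meant to provide (not part of the header above):

#include <cstdio>
#include <cstring>

int main() {
  char buf[8];
  // With C99 snprintf the return value is the length that would have been
  // written (9 here); with the _snprintf_s/_TRUNCATE shim it is negative on
  // truncation. Either way buf ends up holding "1234567" plus the NUL.
  const int rc = std::snprintf(buf, sizeof(buf), "%s", "123456789");
  std::printf("rc=%d buf='%s' len=%zu\n", rc, buf, std::strlen(buf));
  return 0;
}
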

View File

@ -14,9 +14,7 @@ bool registry::add(const record *item) {
auto const singleton = instance(); auto const singleton = instance();
assert(singleton->name2id.count(std::string(item->name)) == 0); assert(singleton->name2id.count(std::string(item->name)) == 0);
assert(singleton->id2record.count(item->id) == 0); assert(singleton->id2record.count(item->id) == 0);
if (singleton->name2id.count(std::string(item->name)) + if (singleton->name2id.count(std::string(item->name)) + singleton->id2record.count(item->id) == 0) {
singleton->id2record.count(item->id) ==
0) {
singleton->name2id[std::string(item->name)] = item; singleton->name2id[std::string(item->name)] = item;
singleton->id2record[item->id] = item; singleton->id2record[item->id] = item;
return true; return true;
@ -24,28 +22,24 @@ bool registry::add(const record *item) {
return false; return false;
} }
testcase *registry::create_actor(const actor_config &config, testcase *registry::create_actor(const actor_config &config, const mdbx_pid_t pid) {
const mdbx_pid_t pid) {
return instance()->id2record.at(config.testcase)->constructor(config, pid); return instance()->id2record.at(config.testcase)->constructor(config, pid);
} }
bool registry::review_actor_params(const actor_testcase id, bool registry::review_actor_params(const actor_testcase id, actor_params &params, const unsigned space_id) {
actor_params &params,
const unsigned space_id) {
return instance()->id2record.at(id)->review_params(params, space_id); return instance()->id2record.at(id)->review_params(params, space_id);
} }
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
void configure_actor(unsigned &last_space_id, const actor_testcase testcase, void configure_actor(unsigned &last_space_id, const actor_testcase testcase, const char *space_id_cstr,
const char *space_id_cstr, actor_params params) { actor_params params) {
unsigned wait4id = 0; unsigned wait4id = 0;
if (params.waitfor_nops) { if (params.waitfor_nops) {
for (auto i = global::actors.rbegin(); i != global::actors.rend(); ++i) { for (auto i = global::actors.rbegin(); i != global::actors.rend(); ++i) {
if (i->is_waitable(params.waitfor_nops)) { if (i->is_waitable(params.waitfor_nops)) {
if (i->signal_nops && i->signal_nops != params.waitfor_nops) if (i->signal_nops && i->signal_nops != params.waitfor_nops)
failure("Previous waitable actor (id=%u) already linked on %u-ops\n", failure("Previous waitable actor (id=%u) already linked on %u-ops\n", i->actor_id, i->signal_nops);
i->actor_id, i->signal_nops);
wait4id = i->actor_id; wait4id = i->actor_id;
i->signal_nops = params.waitfor_nops; i->signal_nops = params.waitfor_nops;
break; break;
@ -75,15 +69,12 @@ void configure_actor(unsigned &last_space_id, const actor_testcase testcase,
failure("Actor config-review failed for space-id %lu\n", space_id); failure("Actor config-review failed for space-id %lu\n", space_id);
last_space_id = unsigned(space_id); last_space_id = unsigned(space_id);
log_trace("configure_actor: space %lu for %s", space_id, log_trace("configure_actor: space %lu for %s", space_id, testcase2str(testcase));
testcase2str(testcase)); global::actors.emplace_back(actor_config(testcase, params, unsigned(space_id), wait4id));
global::actors.emplace_back(
actor_config(testcase, params, unsigned(space_id), wait4id));
global::databases.insert(params.pathname_db); global::databases.insert(params.pathname_db);
} }
void testcase_setup(const char *casename, const actor_params &params, void testcase_setup(const char *casename, const actor_params &params, unsigned &last_space_id) {
unsigned &last_space_id) {
if (strcmp(casename, "basic") == 0) { if (strcmp(casename, "basic") == 0) {
log_notice(">>> testcase_setup(%s)", casename); log_notice(">>> testcase_setup(%s)", casename);
configure_actor(last_space_id, ac_nested, nullptr, params); configure_actor(last_space_id, ac_nested, nullptr, params);
@ -111,8 +102,7 @@ void keycase_setup(const char *casename, actor_params &params) {
params.keygen.keycase = kc_random; params.keygen.keycase = kc_random;
// TODO // TODO
log_notice("<<< keycase_setup(%s): done", casename); log_notice("<<< keycase_setup(%s): done", casename);
} else if (strcmp(casename, "dashes") == 0 || } else if (strcmp(casename, "dashes") == 0 || strcmp(casename, "aside") == 0) {
strcmp(casename, "aside") == 0) {
log_notice(">>> keycase_setup(%s)", casename); log_notice(">>> keycase_setup(%s)", casename);
params.keygen.keycase = kc_dashes; params.keygen.keycase = kc_dashes;
// TODO // TODO
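
The registry above keeps two maps, name-to-record and id-to-record; records added via REGISTER_TESTCASE presumably insert themselves during static initialization, and create_actor() then resolves a testcase id to its constructor. A simplified, self-contained sketch of that self-registration pattern (the names registry_t/record_t and the demo record are illustrative, not the suite's):

#include <map>
#include <string>

struct record_t { std::string name; int id; };

struct registry_t {
  static registry_t *instance() { static registry_t singleton; return &singleton; }
  std::map<std::string, const record_t *> name2id;
  std::map<int, const record_t *> id2record;
  bool add(const record_t *item) {
    // reject duplicates by either key, mirroring the double count() check above
    if (name2id.count(item->name) + id2record.count(item->id) != 0)
      return false;
    name2id[item->name] = item;
    id2record[item->id] = item;
    return true;
  }
};

static const record_t demo{"demo", 42};
static const bool demo_registered = registry_t::instance()->add(&demo);

int main() { return demo_registered ? 0 : 1; }
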

View File

@ -18,9 +18,7 @@ uint32_t ns2fractional(uint32_t ns) {
return uint32_t((uint64_t(ns) << 32) / NSEC_PER_SEC); return uint32_t((uint64_t(ns) << 32) / NSEC_PER_SEC);
} }
uint32_t fractional2ns(uint32_t fractional) { uint32_t fractional2ns(uint32_t fractional) { return uint32_t((fractional * uint64_t(NSEC_PER_SEC)) >> 32); }
return uint32_t((fractional * uint64_t(NSEC_PER_SEC)) >> 32);
}
#ifndef USEC_PER_SEC #ifndef USEC_PER_SEC
#define USEC_PER_SEC 1000000u #define USEC_PER_SEC 1000000u
@ -51,33 +49,27 @@ uint32_t ms2fractional(uint32_t ms) {
return uint32_t((uint64_t(ms) << 32) / MSEC_PER_SEC); return uint32_t((uint64_t(ms) << 32) / MSEC_PER_SEC);
} }
uint32_t fractional2ms(uint32_t fractional) { uint32_t fractional2ms(uint32_t fractional) { return uint32_t((fractional * uint64_t(MSEC_PER_SEC)) >> 32); }
return uint32_t((fractional * uint64_t(MSEC_PER_SEC)) >> 32);
}
time from_ns(uint64_t ns) { time from_ns(uint64_t ns) {
time result; time result;
result.fixedpoint = result.fixedpoint = ((ns / NSEC_PER_SEC) << 32) | ns2fractional(uint32_t(ns % NSEC_PER_SEC));
((ns / NSEC_PER_SEC) << 32) | ns2fractional(uint32_t(ns % NSEC_PER_SEC));
return result; return result;
} }
time from_us(uint64_t us) { time from_us(uint64_t us) {
time result; time result;
result.fixedpoint = result.fixedpoint = ((us / USEC_PER_SEC) << 32) | us2fractional(uint32_t(us % USEC_PER_SEC));
((us / USEC_PER_SEC) << 32) | us2fractional(uint32_t(us % USEC_PER_SEC));
return result; return result;
} }
time from_ms(uint64_t ms) { time from_ms(uint64_t ms) {
time result; time result;
result.fixedpoint = result.fixedpoint = ((ms / MSEC_PER_SEC) << 32) | ms2fractional(uint32_t(ms % MSEC_PER_SEC));
((ms / MSEC_PER_SEC) << 32) | ms2fractional(uint32_t(ms % MSEC_PER_SEC));
return result; return result;
} }
#if __GNUC_PREREQ(8, 0) && \ #if __GNUC_PREREQ(8, 0) && (defined(__MINGW__) || defined(__MINGW32__) || defined(__MINGW64__))
(defined(__MINGW__) || defined(__MINGW32__) || defined(__MINGW64__))
#pragma GCC diagnostic push #pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-function-type" #pragma GCC diagnostic ignored "-Wcast-function-type"
#endif /* GCC/MINGW */ #endif /* GCC/MINGW */
@ -88,16 +80,14 @@ time now_realtime() {
if (unlikely(!query_time)) { if (unlikely(!query_time)) {
HMODULE hModule = GetModuleHandle(TEXT("kernel32.dll")); HMODULE hModule = GetModuleHandle(TEXT("kernel32.dll"));
if (hModule) if (hModule)
query_time = (void(WINAPI *)(LPFILETIME))GetProcAddress( query_time = (void(WINAPI *)(LPFILETIME))GetProcAddress(hModule, "GetSystemTimePreciseAsFileTime");
hModule, "GetSystemTimePreciseAsFileTime");
if (!query_time) if (!query_time)
query_time = GetSystemTimeAsFileTime; query_time = GetSystemTimeAsFileTime;
} }
FILETIME filetime; FILETIME filetime;
query_time(&filetime); query_time(&filetime);
uint64_t ns100 = uint64_t ns100 = (uint64_t)filetime.dwHighDateTime << 32 | filetime.dwLowDateTime;
(uint64_t)filetime.dwHighDateTime << 32 | filetime.dwLowDateTime;
return from_ns((ns100 - UINT64_C(116444736000000000)) * 100u); return from_ns((ns100 - UINT64_C(116444736000000000)) * 100u);
#else #else
struct timespec ts; struct timespec ts;
@ -115,8 +105,7 @@ time now_monotonic() {
if (reciprocal == 0) { if (reciprocal == 0) {
if (!QueryPerformanceFrequency(&Frequency)) if (!QueryPerformanceFrequency(&Frequency))
failure_perror("QueryPerformanceFrequency()", GetLastError()); failure_perror("QueryPerformanceFrequency()", GetLastError());
reciprocal = (((UINT64_C(1) << 48) + Frequency.QuadPart / 2 + 1) / reciprocal = (((UINT64_C(1) << 48) + Frequency.QuadPart / 2 + 1) / Frequency.QuadPart);
Frequency.QuadPart);
assert(reciprocal); assert(reciprocal);
} }
@ -138,8 +127,7 @@ time now_monotonic() {
#endif #endif
} }
#if __GNUC_PREREQ(8, 0) && \ #if __GNUC_PREREQ(8, 0) && (defined(__MINGW__) || defined(__MINGW32__) || defined(__MINGW64__))
(defined(__MINGW__) || defined(__MINGW32__) || defined(__MINGW64__))
#pragma GCC diagnostic pop #pragma GCC diagnostic pop
#endif /* GCC/MINGW */ #endif /* GCC/MINGW */
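
The conversions above store time as 32.32 fixed point: whole seconds in the upper 32 bits and a binary fraction of a second in the lower 32, produced by ns2fractional()/us2fractional()/ms2fractional(). A standalone sketch of the nanosecond case, with the constant defined locally for the example:

#include <cstdint>
#include <cstdio>

static const uint64_t NSEC_PER_SEC = 1000000000u;

static uint32_t ns2fractional(uint32_t ns) {
  // scale [0, 1e9) nanoseconds onto the full 32-bit binary-fraction range
  return uint32_t((uint64_t(ns) << 32) / NSEC_PER_SEC);
}

static uint32_t fractional2ns(uint32_t fractional) {
  return uint32_t((fractional * uint64_t(NSEC_PER_SEC)) >> 32);
}

int main() {
  const uint64_t ns = 1500000000u; // 1.5 seconds
  const uint64_t fixedpoint =
      ((ns / NSEC_PER_SEC) << 32) | ns2fractional(uint32_t(ns % NSEC_PER_SEC));
  // prints: seconds=1 fraction=0x80000000 (~500000000 ns back)
  std::printf("seconds=%u fraction=0x%08x (~%u ns back)\n", unsigned(fixedpoint >> 32),
              unsigned(uint32_t(fixedpoint)), unsigned(fractional2ns(uint32_t(fixedpoint))));
  return 0;
}
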

View File

@ -63,12 +63,10 @@ inline time infinite() {
return result; return result;
} }
#if defined(HAVE_TIMESPEC_TV_NSEC) || defined(__timespec_defined) || \ #if defined(HAVE_TIMESPEC_TV_NSEC) || defined(__timespec_defined) || defined(CLOCK_REALTIME)
defined(CLOCK_REALTIME)
inline time from_timespec(const struct timespec &ts) { inline time from_timespec(const struct timespec &ts) {
time result; time result;
result.fixedpoint = result.fixedpoint = ((uint64_t)ts.tv_sec << 32) | ns2fractional((uint32_t)ts.tv_nsec);
((uint64_t)ts.tv_sec << 32) | ns2fractional((uint32_t)ts.tv_nsec);
return result; return result;
} }
#endif /* HAVE_TIMESPEC_TV_NSEC */ #endif /* HAVE_TIMESPEC_TV_NSEC */
@ -76,8 +74,7 @@ inline time from_timespec(const struct timespec &ts) {
#if defined(HAVE_TIMEVAL_TV_USEC) || defined(_STRUCT_TIMEVAL) #if defined(HAVE_TIMEVAL_TV_USEC) || defined(_STRUCT_TIMEVAL)
inline time from_timeval(const struct timeval &tv) { inline time from_timeval(const struct timeval &tv) {
time result; time result;
result.fixedpoint = result.fixedpoint = ((uint64_t)tv.tv_sec << 32) | us2fractional((uint32_t)tv.tv_usec);
((uint64_t)tv.tv_sec << 32) | us2fractional((uint32_t)tv.tv_usec);
return result; return result;
} }
#endif /* HAVE_TIMEVAL_TV_USEC */ #endif /* HAVE_TIMEVAL_TV_USEC */

View File

@ -9,8 +9,8 @@
namespace config { namespace config {
bool parse_option(int argc, char *const argv[], int &narg, const char *option, bool parse_option(int argc, char *const argv[], int &narg, const char *option, const char **value,
const char **value, const char *default_value) { const char *default_value) {
assert(narg < argc); assert(narg < argc);
const char *current = argv[narg]; const char *current = argv[narg];
const size_t optlen = strlen(option); const size_t optlen = strlen(option);
@ -49,14 +49,11 @@ bool parse_option(int argc, char *const argv[], int &narg, const char *option,
failure("No value given for '--%s' option\n", option); failure("No value given for '--%s' option\n", option);
} }
bool parse_option(int argc, char *const argv[], int &narg, const char *option, bool parse_option(int argc, char *const argv[], int &narg, const char *option, std::string &value, bool allow_empty) {
std::string &value, bool allow_empty) { return parse_option(argc, argv, narg, option, value, allow_empty, allow_empty ? "" : nullptr);
return parse_option(argc, argv, narg, option, value, allow_empty,
allow_empty ? "" : nullptr);
} }
bool parse_option(int argc, char *const argv[], int &narg, const char *option, bool parse_option(int argc, char *const argv[], int &narg, const char *option, std::string &value, bool allow_empty,
std::string &value, bool allow_empty,
const char *default_value) { const char *default_value) {
const char *value_cstr; const char *value_cstr;
if (!parse_option(argc, argv, narg, option, &value_cstr, default_value)) if (!parse_option(argc, argv, narg, option, &value_cstr, default_value))
@ -70,8 +67,7 @@ bool parse_option(int argc, char *const argv[], int &narg, const char *option,
} }
template <> template <>
bool parse_option<unsigned>(int argc, char *const argv[], int &narg, bool parse_option<unsigned>(int argc, char *const argv[], int &narg, const char *option, unsigned &mask,
const char *option, unsigned &mask,
const option_verb *verbs) { const option_verb *verbs) {
const char *list; const char *list;
if (!parse_option(argc, argv, narg, option, &list)) if (!parse_option(argc, argv, narg, option, &list))
@ -95,8 +91,7 @@ bool parse_option<unsigned>(int argc, char *const argv[], int &narg,
while (true) { while (true) {
if (!scan->verb) if (!scan->verb)
failure("Unknown verb '%.*s', for option '--%s'\n", (int)len, list, failure("Unknown verb '%.*s', for option '--%s'\n", (int)len, list, option);
option);
if (strlen(scan->verb) == len && strncmp(list, scan->verb, len) == 0) { if (strlen(scan->verb) == len && strncmp(list, scan->verb, len) == 0) {
mask = strikethrough ? mask & ~scan->mask : mask | scan->mask; mask = strikethrough ? mask & ~scan->mask : mask | scan->mask;
clear = strikethrough ? clear & ~scan->mask : clear | scan->mask; clear = strikethrough ? clear & ~scan->mask : clear | scan->mask;
@ -110,10 +105,8 @@ bool parse_option<unsigned>(int argc, char *const argv[], int &narg,
return true; return true;
} }
bool parse_option(int argc, char *const argv[], int &narg, const char *option, bool parse_option(int argc, char *const argv[], int &narg, const char *option, uint64_t &value, const scale_mode scale,
uint64_t &value, const scale_mode scale, const uint64_t minval, const uint64_t maxval, const uint64_t default_value) {
const uint64_t minval, const uint64_t maxval,
const uint64_t default_value) {
const char *value_cstr; const char *value_cstr;
if (!parse_option(argc, argv, narg, option, &value_cstr)) if (!parse_option(argc, argv, narg, option, &value_cstr))
@ -134,17 +127,13 @@ bool parse_option(int argc, char *const argv[], int &narg, const char *option,
return true; return true;
} }
if (strcmp(value_cstr, "rnd") == 0 || strcmp(value_cstr, "rand") == 0 || if (strcmp(value_cstr, "rnd") == 0 || strcmp(value_cstr, "rand") == 0 || strcmp(value_cstr, "random") == 0) {
strcmp(value_cstr, "random") == 0) {
value = minval; value = minval;
if (maxval > minval) { if (maxval > minval) {
uint64_t salt = (scale != entropy) uint64_t salt = (scale != entropy) ? prng64() ^ UINT64_C(44263400549519813)
? prng64() ^ UINT64_C(44263400549519813) : (chrono::now_monotonic().fixedpoint ^ UINT64_C(0xD85794512ED321FD)) *
: (chrono::now_monotonic().fixedpoint ^ UINT64_C(0x9120038359EAF3) ^
UINT64_C(0xD85794512ED321FD)) * chrono::now_realtime().fixedpoint * UINT64_C(0x2FE5232BDC8E5F);
UINT64_C(0x9120038359EAF3) ^
chrono::now_realtime().fixedpoint *
UINT64_C(0x2FE5232BDC8E5F);
value += salt % (maxval - minval); value += salt % (maxval - minval);
} }
if (scale == intkey) if (scale == intkey)
@ -161,43 +150,32 @@ bool parse_option(int argc, char *const argv[], int &narg, const char *option,
raw = strtoull(value_cstr, &suffix, 10); raw = strtoull(value_cstr, &suffix, 10);
} }
if (errno) if (errno)
failure("Option '--%s' expects a numeric value (%s)\n", option, failure("Option '--%s' expects a numeric value (%s)\n", option, test_strerror(errno));
test_strerror(errno));
uint64_t multiplier = 1; uint64_t multiplier = 1;
if (suffix && *suffix) { if (suffix && *suffix) {
if (scale == no_scale || scale == intkey) if (scale == no_scale || scale == intkey)
failure("Option '--%s' doesn't accepts suffixes, so '%s' is unexpected\n", failure("Option '--%s' doesn't accepts suffixes, so '%s' is unexpected\n", option, suffix);
option, suffix);
if (strcmp(suffix, "K") == 0 || strcasecmp(suffix, "Kilo") == 0) if (strcmp(suffix, "K") == 0 || strcasecmp(suffix, "Kilo") == 0)
multiplier = (scale == decimal) ? UINT64_C(1000) : UINT64_C(1024); multiplier = (scale == decimal) ? UINT64_C(1000) : UINT64_C(1024);
else if (strcmp(suffix, "M") == 0 || strcasecmp(suffix, "Mega") == 0) else if (strcmp(suffix, "M") == 0 || strcasecmp(suffix, "Mega") == 0)
multiplier = multiplier = (scale == decimal) ? UINT64_C(1000) * 1000 : UINT64_C(1024) * 1024;
(scale == decimal) ? UINT64_C(1000) * 1000 : UINT64_C(1024) * 1024;
else if (strcmp(suffix, "G") == 0 || strcasecmp(suffix, "Giga") == 0) else if (strcmp(suffix, "G") == 0 || strcasecmp(suffix, "Giga") == 0)
multiplier = (scale == decimal) ? UINT64_C(1000) * 1000 * 1000 multiplier = (scale == decimal) ? UINT64_C(1000) * 1000 * 1000 : UINT64_C(1024) * 1024 * 1024;
: UINT64_C(1024) * 1024 * 1024;
else if (strcmp(suffix, "T") == 0 || strcasecmp(suffix, "Tera") == 0) else if (strcmp(suffix, "T") == 0 || strcasecmp(suffix, "Tera") == 0)
multiplier = (scale == decimal) ? UINT64_C(1000) * 1000 * 1000 * 1000 multiplier = (scale == decimal) ? UINT64_C(1000) * 1000 * 1000 * 1000 : UINT64_C(1024) * 1024 * 1024 * 1024;
: UINT64_C(1024) * 1024 * 1024 * 1024; else if (scale == duration && (strcmp(suffix, "s") == 0 || strcasecmp(suffix, "Seconds") == 0))
else if (scale == duration &&
(strcmp(suffix, "s") == 0 || strcasecmp(suffix, "Seconds") == 0))
multiplier = 1; multiplier = 1;
else if (scale == duration && else if (scale == duration && (strcmp(suffix, "m") == 0 || strcasecmp(suffix, "Minutes") == 0))
(strcmp(suffix, "m") == 0 || strcasecmp(suffix, "Minutes") == 0))
multiplier = 60; multiplier = 60;
else if (scale == duration && else if (scale == duration && (strcmp(suffix, "h") == 0 || strcasecmp(suffix, "Hours") == 0))
(strcmp(suffix, "h") == 0 || strcasecmp(suffix, "Hours") == 0))
multiplier = 3600; multiplier = 3600;
else if (scale == duration && else if (scale == duration && (strcmp(suffix, "d") == 0 || strcasecmp(suffix, "Days") == 0))
(strcmp(suffix, "d") == 0 || strcasecmp(suffix, "Days") == 0))
multiplier = 3600 * 24; multiplier = 3600 * 24;
else else
failure( failure("Option '--%s' expects a numeric value with Kilo/Mega/Giga/Tera %s"
"Option '--%s' expects a numeric value with Kilo/Mega/Giga/Tera %s" "suffixes, but '%s' is unexpected\n",
"suffixes, but '%s' is unexpected\n", option, (scale == duration) ? "or Seconds/Minutes/Hours/Days " : "", suffix);
option, (scale == duration) ? "or Seconds/Minutes/Hours/Days " : "",
suffix);
} }
if (raw >= UINT64_MAX / multiplier) if (raw >= UINT64_MAX / multiplier)
@ -205,47 +183,38 @@ bool parse_option(int argc, char *const argv[], int &narg, const char *option,
value = raw * multiplier; value = raw * multiplier;
if (maxval && value > maxval) if (maxval && value > maxval)
failure("The maximal value for option '--%s' is %" PRIu64 "\n", option, failure("The maximal value for option '--%s' is %" PRIu64 "\n", option, maxval);
maxval);
if (value < minval) if (value < minval)
failure("The minimal value for option '--%s' is %" PRIu64 "\n", option, failure("The minimal value for option '--%s' is %" PRIu64 "\n", option, minval);
minval);
if (scale == intkey) if (scale == intkey)
value &= ~3u; value &= ~3u;
return true; return true;
} }
bool parse_option(int argc, char *const argv[], int &narg, const char *option, bool parse_option(int argc, char *const argv[], int &narg, const char *option, unsigned &value, const scale_mode scale,
unsigned &value, const scale_mode scale, const unsigned minval, const unsigned maxval, const unsigned default_value) {
const unsigned minval, const unsigned maxval,
const unsigned default_value) {
uint64_t huge; uint64_t huge;
if (!parse_option(argc, argv, narg, option, huge, scale, minval, maxval, if (!parse_option(argc, argv, narg, option, huge, scale, minval, maxval, default_value))
default_value))
return false; return false;
value = unsigned(huge); value = unsigned(huge);
return true; return true;
} }
bool parse_option(int argc, char *const argv[], int &narg, const char *option, bool parse_option(int argc, char *const argv[], int &narg, const char *option, uint8_t &value, const uint8_t minval,
uint8_t &value, const uint8_t minval, const uint8_t maxval, const uint8_t maxval, const uint8_t default_value) {
const uint8_t default_value) {
uint64_t huge; uint64_t huge;
if (!parse_option(argc, argv, narg, option, huge, no_scale, minval, maxval, if (!parse_option(argc, argv, narg, option, huge, no_scale, minval, maxval, default_value))
default_value))
return false; return false;
value = uint8_t(huge); value = uint8_t(huge);
return true; return true;
} }
bool parse_option(int argc, char *const argv[], int &narg, const char *option, bool parse_option(int argc, char *const argv[], int &narg, const char *option, int64_t &value, const int64_t minval,
int64_t &value, const int64_t minval, const int64_t maxval, const int64_t maxval, const int64_t default_value) {
const int64_t default_value) {
uint64_t proxy = uint64_t(value); uint64_t proxy = uint64_t(value);
if (parse_option(argc, argv, narg, option, proxy, config::binary, if (parse_option(argc, argv, narg, option, proxy, config::binary, uint64_t(minval), uint64_t(maxval),
uint64_t(minval), uint64_t(maxval),
uint64_t(default_value))) { uint64_t(default_value))) {
value = int64_t(proxy); value = int64_t(proxy);
return true; return true;
@ -253,12 +222,10 @@ bool parse_option(int argc, char *const argv[], int &narg, const char *option,
return false; return false;
} }
bool parse_option(int argc, char *const argv[], int &narg, const char *option, bool parse_option(int argc, char *const argv[], int &narg, const char *option, int32_t &value, const int32_t minval,
int32_t &value, const int32_t minval, const int32_t maxval, const int32_t maxval, const int32_t default_value) {
const int32_t default_value) {
uint64_t proxy = uint64_t(value); uint64_t proxy = uint64_t(value);
if (parse_option(argc, argv, narg, option, proxy, config::binary, if (parse_option(argc, argv, narg, option, proxy, config::binary, uint64_t(minval), uint64_t(maxval),
uint64_t(minval), uint64_t(maxval),
uint64_t(default_value))) { uint64_t(default_value))) {
value = int32_t(proxy); value = int32_t(proxy);
return true; return true;
@ -266,14 +233,12 @@ bool parse_option(int argc, char *const argv[], int &narg, const char *option,
return false; return false;
} }
bool parse_option(int argc, char *const argv[], int &narg, const char *option, bool parse_option(int argc, char *const argv[], int &narg, const char *option, logging::loglevel &loglevel) {
logging::loglevel &loglevel) {
const char *value_cstr; const char *value_cstr;
if (!parse_option(argc, argv, narg, option, &value_cstr)) if (!parse_option(argc, argv, narg, option, &value_cstr))
return false; return false;
if (strcmp(value_cstr, "min") == 0 || strcmp(value_cstr, "minimal") == 0 || if (strcmp(value_cstr, "min") == 0 || strcmp(value_cstr, "minimal") == 0 || strcmp(value_cstr, "fatal") == 0) {
strcmp(value_cstr, "fatal") == 0) {
loglevel = logging::failure; loglevel = logging::failure;
return true; return true;
} }
@ -308,8 +273,7 @@ bool parse_option(int argc, char *const argv[], int &narg, const char *option,
return true; return true;
} }
if (strcmp(value_cstr, "max") == 0 || strcmp(value_cstr, "maximal") == 0 || if (strcmp(value_cstr, "max") == 0 || strcmp(value_cstr, "maximal") == 0 || strcmp(value_cstr, "extra") == 0) {
strcmp(value_cstr, "extra") == 0) {
loglevel = logging::extra; loglevel = logging::extra;
return true; return true;
} }
@ -329,8 +293,7 @@ bool parse_option(int argc, char *const argv[], int &narg, const char *option,
failure("Unknown log-level '%s', for option '--%s'\n", value_cstr, option); failure("Unknown log-level '%s', for option '--%s'\n", value_cstr, option);
} }
bool parse_option(int argc, char *const argv[], int &narg, const char *option, bool parse_option(int argc, char *const argv[], int &narg, const char *option, bool &value) {
bool &value) {
const char *value_cstr = nullptr; const char *value_cstr = nullptr;
if (!parse_option(argc, argv, narg, option, &value_cstr, "yes")) { if (!parse_option(argc, argv, narg, option, &value_cstr, "yes")) {
const char *current = argv[narg]; const char *current = argv[narg];
@ -338,8 +301,7 @@ bool parse_option(int argc, char *const argv[], int &narg, const char *option,
value = false; value = false;
return true; return true;
} }
if (strncmp(current, "--dont-", 7) == 0 && if (strncmp(current, "--dont-", 7) == 0 && strcmp(current + 7, option) == 0) {
strcmp(current + 7, option) == 0) {
value = false; value = false;
return true; return true;
} }
@ -361,41 +323,36 @@ bool parse_option(int argc, char *const argv[], int &narg, const char *option,
return true; return true;
} }
failure( failure("Option '--%s' expects a 'boolean' value Yes/No, so '%s' is unexpected\n", option, value_cstr);
"Option '--%s' expects a 'boolean' value Yes/No, so '%s' is unexpected\n",
option, value_cstr);
} }
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
const struct option_verb mode_bits[] = { const struct option_verb mode_bits[] = {{"rdonly", unsigned(MDBX_RDONLY)},
{"rdonly", unsigned(MDBX_RDONLY)}, {"nosync-utterly", unsigned(MDBX_UTTERLY_NOSYNC)},
{"nosync-utterly", unsigned(MDBX_UTTERLY_NOSYNC)}, {"nosubdir", unsigned(MDBX_NOSUBDIR)},
{"nosubdir", unsigned(MDBX_NOSUBDIR)}, {"nosync-safe", unsigned(MDBX_SAFE_NOSYNC)},
{"nosync-safe", unsigned(MDBX_SAFE_NOSYNC)}, {"nometasync", unsigned(MDBX_NOMETASYNC)},
{"nometasync", unsigned(MDBX_NOMETASYNC)}, {"writemap", unsigned(MDBX_WRITEMAP)},
{"writemap", unsigned(MDBX_WRITEMAP)}, {"nostickythreads", unsigned(MDBX_NOSTICKYTHREADS)},
{"nostickythreads", unsigned(MDBX_NOSTICKYTHREADS)}, {"no-sticky-threads", unsigned(MDBX_NOSTICKYTHREADS)},
{"no-sticky-threads", unsigned(MDBX_NOSTICKYTHREADS)}, {"nordahead", unsigned(MDBX_NORDAHEAD)},
{"nordahead", unsigned(MDBX_NORDAHEAD)}, {"nomeminit", unsigned(MDBX_NOMEMINIT)},
{"nomeminit", unsigned(MDBX_NOMEMINIT)}, {"lifo", unsigned(MDBX_LIFORECLAIM)},
{"lifo", unsigned(MDBX_LIFORECLAIM)}, {"perturb", unsigned(MDBX_PAGEPERTURB)},
{"perturb", unsigned(MDBX_PAGEPERTURB)}, {"accede", unsigned(MDBX_ACCEDE)},
{"accede", unsigned(MDBX_ACCEDE)}, {"exclusive", unsigned(MDBX_EXCLUSIVE)},
{"exclusive", unsigned(MDBX_EXCLUSIVE)}, {nullptr, 0}};
{nullptr, 0}};
const struct option_verb table_bits[] = { const struct option_verb table_bits[] = {{"key.reverse", unsigned(MDBX_REVERSEKEY)},
{"key.reverse", unsigned(MDBX_REVERSEKEY)}, {"key.integer", unsigned(MDBX_INTEGERKEY)},
{"key.integer", unsigned(MDBX_INTEGERKEY)}, {"data.integer", unsigned(MDBX_INTEGERDUP | MDBX_DUPFIXED | MDBX_DUPSORT)},
{"data.integer", unsigned(MDBX_INTEGERDUP | MDBX_DUPFIXED | MDBX_DUPSORT)}, {"data.fixed", unsigned(MDBX_DUPFIXED | MDBX_DUPSORT)},
{"data.fixed", unsigned(MDBX_DUPFIXED | MDBX_DUPSORT)}, {"data.reverse", unsigned(MDBX_REVERSEDUP | MDBX_DUPSORT)},
{"data.reverse", unsigned(MDBX_REVERSEDUP | MDBX_DUPSORT)}, {"data.dups", unsigned(MDBX_DUPSORT)},
{"data.dups", unsigned(MDBX_DUPSORT)}, {nullptr, 0}};
{nullptr, 0}};
static void dump_verbs(const char *caption, size_t bits, static void dump_verbs(const char *caption, size_t bits, const struct option_verb *verbs) {
const struct option_verb *verbs) {
log_verbose("%s: 0x%" PRIx64 " = ", caption, (uint64_t)bits); log_verbose("%s: 0x%" PRIx64 " = ", caption, (uint64_t)bits);
const char *comma = ""; const char *comma = "";
@ -429,28 +386,21 @@ void dump(const char *title) {
logging::local_suffix indent(title); logging::local_suffix indent(title);
for (auto i = global::actors.begin(); i != global::actors.end(); ++i) { for (auto i = global::actors.begin(); i != global::actors.end(); ++i) {
log_verbose("#%u, testcase %s, space_id/table %u\n", i->actor_id, log_verbose("#%u, testcase %s, space_id/table %u\n", i->actor_id, testcase2str(i->testcase), i->space_id);
testcase2str(i->testcase), i->space_id);
indent.push(); indent.push();
log_verbose("prng-seed: %u\n", i->params.prng_seed); log_verbose("prng-seed: %u\n", i->params.prng_seed);
if (i->params.loglevel) { if (i->params.loglevel) {
log_verbose("log: level %u, %s\n", i->params.loglevel, log_verbose("log: level %u, %s\n", i->params.loglevel,
i->params.pathname_log.empty() i->params.pathname_log.empty() ? "console" : i->params.pathname_log.c_str());
? "console"
: i->params.pathname_log.c_str());
} }
log_verbose("database: %s, size %" PRIuPTR "[%" PRIiPTR "..%" PRIiPTR log_verbose("database: %s, size %" PRIuPTR "[%" PRIiPTR "..%" PRIiPTR ", %i %i, %i]\n",
", %i %i, %i]\n", i->params.pathname_db.c_str(), i->params.size_now, i->params.size_lower, i->params.size_upper,
i->params.pathname_db.c_str(), i->params.size_now, i->params.shrink_threshold, i->params.growth_step, i->params.pagesize);
i->params.size_lower, i->params.size_upper,
i->params.shrink_threshold, i->params.growth_step,
i->params.pagesize);
dump_verbs("mode", i->params.mode_flags, mode_bits); dump_verbs("mode", i->params.mode_flags, mode_bits);
log_verbose("random-writemap: %s\n", log_verbose("random-writemap: %s\n", i->params.random_writemap ? "Yes" : "No");
i->params.random_writemap ? "Yes" : "No");
dump_verbs("table", i->params.table_flags, table_bits); dump_verbs("table", i->params.table_flags, table_bits);
if (i->params.test_nops) if (i->params.test_nops)
@ -465,62 +415,46 @@ void dump(const char *title) {
log_verbose("threads %u\n", i->params.nthreads); log_verbose("threads %u\n", i->params.nthreads);
log_verbose( log_verbose("keygen.params: case %s, width %u, mesh %u, rotate %u, offset %" PRIu64 ", split %u/%u\n",
"keygen.params: case %s, width %u, mesh %u, rotate %u, offset %" PRIu64 keygencase2str(i->params.keygen.keycase), i->params.keygen.width, i->params.keygen.mesh,
", split %u/%u\n", i->params.keygen.rotate, i->params.keygen.offset, i->params.keygen.split,
keygencase2str(i->params.keygen.keycase), i->params.keygen.width, i->params.keygen.width - i->params.keygen.split);
i->params.keygen.mesh, i->params.keygen.rotate, i->params.keygen.offset, log_verbose("keygen.zerofill: %s\n", i->params.keygen.zero_fill ? "Yes" : "No");
i->params.keygen.split, log_verbose("key: minlen %u, maxlen %u\n", i->params.keylen_min, i->params.keylen_max);
i->params.keygen.width - i->params.keygen.split); log_verbose("data: minlen %u, maxlen %u\n", i->params.datalen_min, i->params.datalen_max);
log_verbose("keygen.zerofill: %s\n",
i->params.keygen.zero_fill ? "Yes" : "No");
log_verbose("key: minlen %u, maxlen %u\n", i->params.keylen_min,
i->params.keylen_max);
log_verbose("data: minlen %u, maxlen %u\n", i->params.datalen_min,
i->params.datalen_max);
log_verbose("batch: read %u, write %u\n", i->params.batch_read, log_verbose("batch: read %u, write %u\n", i->params.batch_read, i->params.batch_write);
i->params.batch_write);
if (i->params.waitfor_nops) if (i->params.waitfor_nops)
log_verbose("wait: actor %u for %u ops\n", i->wait4id, log_verbose("wait: actor %u for %u ops\n", i->wait4id, i->params.waitfor_nops);
i->params.waitfor_nops);
else if (i->params.delaystart) else if (i->params.delaystart)
dump_duration("delay", i->params.delaystart); dump_duration("delay", i->params.delaystart);
else else
log_verbose("no-delay\n"); log_verbose("no-delay\n");
if (i->params.inject_writefaultn) if (i->params.inject_writefaultn)
log_verbose("inject-writefault on %u ops\n", log_verbose("inject-writefault on %u ops\n", i->params.inject_writefaultn);
i->params.inject_writefaultn);
else else
log_verbose("no-inject-writefault\n"); log_verbose("no-inject-writefault\n");
log_verbose("limits: readers %u, tables %u, txn-bytes %zu\n", log_verbose("limits: readers %u, tables %u, txn-bytes %zu\n", i->params.max_readers, i->params.max_tables,
i->params.max_readers, i->params.max_tables,
mdbx_limits_txnsize_max(i->params.pagesize)); mdbx_limits_txnsize_max(i->params.pagesize));
log_verbose("drop table: %s\n", i->params.drop_table ? "Yes" : "No"); log_verbose("drop table: %s\n", i->params.drop_table ? "Yes" : "No");
log_verbose("ignore MDBX_MAP_FULL error: %s\n", log_verbose("ignore MDBX_MAP_FULL error: %s\n", i->params.ignore_dbfull ? "Yes" : "No");
i->params.ignore_dbfull ? "Yes" : "No"); log_verbose("verifying by speculum: %s\n", i->params.speculum ? "Yes" : "No");
log_verbose("verifying by speculum: %s\n",
i->params.speculum ? "Yes" : "No");
indent.pop(); indent.pop();
} }
dump_duration("timeout", global::config::timeout_duration_seconds); dump_duration("timeout", global::config::timeout_duration_seconds);
log_verbose("cleanup: before %s, after %s\n", log_verbose("cleanup: before %s, after %s\n", global::config::cleanup_before ? "Yes" : "No",
global::config::cleanup_before ? "Yes" : "No",
global::config::cleanup_after ? "Yes" : "No"); global::config::cleanup_after ? "Yes" : "No");
log_verbose("failfast: %s\n", global::config::failfast ? "Yes" : "No"); log_verbose("failfast: %s\n", global::config::failfast ? "Yes" : "No");
log_verbose("progress indicator: %s\n", log_verbose("progress indicator: %s\n", global::config::progress_indicator ? "Yes" : "No");
global::config::progress_indicator ? "Yes" : "No"); log_verbose("console mode: %s\n", global::config::console_mode ? "Yes" : "No");
log_verbose("console mode: %s\n", log_verbose("geometry jitter: %s\n", global::config::geometry_jitter ? "Yes" : "No");
global::config::console_mode ? "Yes" : "No");
log_verbose("geometry jitter: %s\n",
global::config::geometry_jitter ? "Yes" : "No");
} }
} /* namespace config */ } /* namespace config */
@ -529,11 +463,8 @@ void dump(const char *title) {
using namespace config; using namespace config;
actor_config::actor_config(actor_testcase testcase, const actor_params &params, actor_config::actor_config(actor_testcase testcase, const actor_params &params, unsigned space_id, unsigned wait4id)
unsigned space_id, unsigned wait4id) : actor_config_pod(1 + unsigned(global::actors.size()), testcase, space_id, wait4id), params(params) {}
: actor_config_pod(1 + unsigned(global::actors.size()), testcase, space_id,
wait4id),
params(params) {}
const std::string actor_config::serialize(const char *prefix) const { const std::string actor_config::serialize(const char *prefix) const {
simple_checksum checksum; simple_checksum checksum;
@ -551,25 +482,19 @@ const std::string actor_config::serialize(const char *prefix) const {
result.push_back('|'); result.push_back('|');
#if __cplusplus > 201400 #if __cplusplus > 201400
static_assert(std::is_trivially_copyable<actor_params_pod>::value, static_assert(std::is_trivially_copyable<actor_params_pod>::value, "actor_params_pod should by POD");
"actor_params_pod should by POD");
#else #else
static_assert(std::is_standard_layout<actor_params_pod>::value, static_assert(std::is_standard_layout<actor_params_pod>::value, "actor_params_pod should by POD");
"actor_params_pod should by POD");
#endif #endif
result.append(data2hex(static_cast<const actor_params_pod *>(&params), result.append(data2hex(static_cast<const actor_params_pod *>(&params), sizeof(actor_params_pod), checksum));
sizeof(actor_params_pod), checksum));
result.push_back('|'); result.push_back('|');
#if __cplusplus > 201400 #if __cplusplus > 201400
static_assert(std::is_trivially_copyable<actor_config_pod>::value, static_assert(std::is_trivially_copyable<actor_config_pod>::value, "actor_config_pod should by POD");
"actor_config_pod should by POD");
#else #else
static_assert(std::is_standard_layout<actor_config_pod>::value, static_assert(std::is_standard_layout<actor_config_pod>::value, "actor_config_pod should by POD");
"actor_config_pod should by POD");
#endif #endif
result.append(data2hex(static_cast<const actor_config_pod *>(this), result.append(data2hex(static_cast<const actor_config_pod *>(this), sizeof(actor_config_pod), checksum));
sizeof(actor_config_pod), checksum));
result.push_back('|'); result.push_back('|');
result.push_back(global::config::progress_indicator ? 'Y' : 'N'); result.push_back(global::config::progress_indicator ? 'Y' : 'N');
checksum.push(global::config::progress_indicator); checksum.push(global::config::progress_indicator);
@ -615,16 +540,12 @@ bool actor_config::deserialize(const char *str, actor_config &config) {
return false; return false;
} }
#if __cplusplus > 201400 #if __cplusplus > 201400
static_assert(std::is_trivially_copyable<actor_params_pod>::value, static_assert(std::is_trivially_copyable<actor_params_pod>::value, "actor_params_pod should by POD");
"actor_params_pod should by POD");
#else #else
static_assert(std::is_standard_layout<actor_params_pod>::value, static_assert(std::is_standard_layout<actor_params_pod>::value, "actor_params_pod should by POD");
"actor_params_pod should by POD");
#endif #endif
if (!hex2data(str, slash, static_cast<actor_params_pod *>(&config.params), if (!hex2data(str, slash, static_cast<actor_params_pod *>(&config.params), sizeof(actor_params_pod), checksum)) {
sizeof(actor_params_pod), checksum)) { TRACE("<< actor_config::deserialize: actor_params_pod(%.*s)\n", (int)(slash - str), str);
TRACE("<< actor_config::deserialize: actor_params_pod(%.*s)\n",
(int)(slash - str), str);
return false; return false;
} }
str = slash + 1; str = slash + 1;
@ -635,16 +556,12 @@ bool actor_config::deserialize(const char *str, actor_config &config) {
return false; return false;
} }
#if __cplusplus > 201400 #if __cplusplus > 201400
static_assert(std::is_trivially_copyable<actor_config_pod>::value, static_assert(std::is_trivially_copyable<actor_config_pod>::value, "actor_config_pod should by POD");
"actor_config_pod should by POD");
#else #else
static_assert(std::is_standard_layout<actor_config_pod>::value, static_assert(std::is_standard_layout<actor_config_pod>::value, "actor_config_pod should by POD");
"actor_config_pod should by POD");
#endif #endif
if (!hex2data(str, slash, static_cast<actor_config_pod *>(&config), if (!hex2data(str, slash, static_cast<actor_config_pod *>(&config), sizeof(actor_config_pod), checksum)) {
sizeof(actor_config_pod), checksum)) { TRACE("<< actor_config::deserialize: actor_config_pod(%.*s)\n", (int)(slash - str), str);
TRACE("<< actor_config::deserialize: actor_config_pod(%.*s)\n",
(int)(slash - str), str);
return false; return false;
} }
str = slash + 1; str = slash + 1;
@ -654,8 +571,7 @@ bool actor_config::deserialize(const char *str, actor_config &config) {
TRACE("<< actor_config::deserialize: slash-5\n"); TRACE("<< actor_config::deserialize: slash-5\n");
return false; return false;
} }
if ((str[0] == 'Y' || str[0] == 'N') && (str[1] == 'Y' || str[1] == 'N') && if ((str[0] == 'Y' || str[0] == 'N') && (str[1] == 'Y' || str[1] == 'N') && (str[2] == 'Y' || str[2] == 'N')) {
(str[2] == 'Y' || str[2] == 'N')) {
global::config::progress_indicator = str[0] == 'Y'; global::config::progress_indicator = str[0] == 'Y';
checksum.push(global::config::progress_indicator); checksum.push(global::config::progress_indicator);
global::config::console_mode = str[1] == 'Y'; global::config::console_mode = str[1] == 'Y';
@ -690,21 +606,14 @@ bool actor_config::deserialize(const char *str, actor_config &config) {
return true; return true;
} }
unsigned actor_params::mdbx_keylen_min() const { unsigned actor_params::mdbx_keylen_min() const { return unsigned(mdbx_limits_keysize_min(table_flags)); }
return unsigned(mdbx_limits_keysize_min(table_flags));
}
unsigned actor_params::mdbx_keylen_max() const { unsigned actor_params::mdbx_keylen_max() const { return unsigned(mdbx_limits_keysize_max(pagesize, table_flags)); }
return unsigned(mdbx_limits_keysize_max(pagesize, table_flags));
}
unsigned actor_params::mdbx_datalen_min() const { unsigned actor_params::mdbx_datalen_min() const { return unsigned(mdbx_limits_valsize_min(table_flags)); }
return unsigned(mdbx_limits_valsize_min(table_flags));
}
unsigned actor_params::mdbx_datalen_max() const { unsigned actor_params::mdbx_datalen_max() const {
return std::min(unsigned(UINT16_MAX), return std::min(unsigned(UINT16_MAX), unsigned(mdbx_limits_valsize_max(pagesize, table_flags)));
unsigned(mdbx_limits_valsize_max(pagesize, table_flags)));
} }
bool actor_params::make_keygen_linear() { bool actor_params::make_keygen_linear() {
@ -713,26 +622,18 @@ bool actor_params::make_keygen_linear() {
keygen.rotate = 0; keygen.rotate = 0;
keygen.offset = 0; keygen.offset = 0;
const auto max_serial = serial_mask(keygen.width) + base; const auto max_serial = serial_mask(keygen.width) + base;
const auto max_key_serial = (keygen.split && (table_flags & MDBX_DUPSORT)) const auto max_key_serial = (keygen.split && (table_flags & MDBX_DUPSORT)) ? max_serial >> keygen.split : max_serial;
? max_serial >> keygen.split const auto max_value_serial = (keygen.split && (table_flags & MDBX_DUPSORT)) ? serial_mask(keygen.split) : 0;
: max_serial;
const auto max_value_serial = (keygen.split && (table_flags & MDBX_DUPSORT))
? serial_mask(keygen.split)
: 0;
while (keylen_min < 8 && while (keylen_min < 8 && (keylen_min == 0 || serial_mask(keylen_min * 8) < max_key_serial)) {
(keylen_min == 0 || serial_mask(keylen_min * 8) < max_key_serial)) {
keylen_min += (table_flags & (MDBX_INTEGERKEY | MDBX_INTEGERDUP)) ? 4 : 1; keylen_min += (table_flags & (MDBX_INTEGERKEY | MDBX_INTEGERDUP)) ? 4 : 1;
if (keylen_max < keylen_min) if (keylen_max < keylen_min)
keylen_max = keylen_min; keylen_max = keylen_min;
} }
if (table_flags & MDBX_DUPSORT) if (table_flags & MDBX_DUPSORT)
while ( while (datalen_min < 8 && (datalen_min == 0 || serial_mask(datalen_min * 8) < max_value_serial)) {
datalen_min < 8 && datalen_min += (table_flags & (MDBX_INTEGERKEY | MDBX_INTEGERDUP)) ? 4 : 1;
(datalen_min == 0 || serial_mask(datalen_min * 8) < max_value_serial)) {
datalen_min +=
(table_flags & (MDBX_INTEGERKEY | MDBX_INTEGERDUP)) ? 4 : 1;
if (datalen_max < datalen_min) if (datalen_max < datalen_min)
datalen_max = datalen_min; datalen_max = datalen_min;
} }
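
mode_bits[] and table_bits[] above map textual verbs onto MDBX flag bits, and the verb-mask parse_option() ORs together the masks of a comma-separated list (it also supports a "strikethrough" mode that clears bits instead, which is omitted here). A simplified sketch of just the list-to-mask step, using placeholder bit values rather than the real MDBX_* constants:

#include <cstdio>
#include <cstring>

struct option_verb { const char *verb; unsigned mask; };

// placeholder bit values; the real tables use MDBX_* enum constants
static const option_verb demo_bits[] = {
    {"writemap", 1u << 0}, {"lifo", 1u << 1}, {"nometasync", 1u << 2}, {nullptr, 0}};

static unsigned parse_verbs(const char *list, const option_verb *verbs) {
  unsigned mask = 0;
  while (*list) {
    const char *comma = std::strchr(list, ',');
    const size_t len = comma ? size_t(comma - list) : std::strlen(list);
    for (const option_verb *scan = verbs;; ++scan) {
      if (!scan->verb) {
        std::printf("unknown verb '%.*s'\n", int(len), list);
        break;
      }
      if (std::strlen(scan->verb) == len && std::strncmp(list, scan->verb, len) == 0) {
        mask |= scan->mask;
        break;
      }
    }
    list += len + (comma ? 1 : 0);
  }
  return mask;
}

int main() {
  std::printf("mask=0x%x\n", parse_verbs("writemap,lifo", demo_bits)); // prints mask=0x3
  return 0;
}
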

View File

@ -54,18 +54,16 @@ namespace config {
enum scale_mode { no_scale, decimal, binary, duration, intkey, entropy }; enum scale_mode { no_scale, decimal, binary, duration, intkey, entropy };
bool parse_option(int argc, char *const argv[], int &narg, const char *option, bool parse_option(int argc, char *const argv[], int &narg, const char *option, const char **value,
const char **value, const char *default_value = nullptr); const char *default_value = nullptr);
bool parse_option(int argc, char *const argv[], int &narg, const char *option, bool parse_option(int argc, char *const argv[], int &narg, const char *option, std::string &value,
std::string &value, bool allow_empty = false); bool allow_empty = false);
bool parse_option(int argc, char *const argv[], int &narg, const char *option, bool parse_option(int argc, char *const argv[], int &narg, const char *option, std::string &value, bool allow_empty,
std::string &value, bool allow_empty,
const char *default_value); const char *default_value);
bool parse_option(int argc, char *const argv[], int &narg, const char *option, bool parse_option(int argc, char *const argv[], int &narg, const char *option, bool &value);
bool &value);
struct option_verb { struct option_verb {
const char *const verb; const char *const verb;
@ -73,8 +71,7 @@ struct option_verb {
}; };
template <typename MASK> template <typename MASK>
bool parse_option(int argc, char *const argv[], int &narg, const char *option, bool parse_option(int argc, char *const argv[], int &narg, const char *option, MASK &mask, const option_verb *verbs) {
MASK &mask, const option_verb *verbs) {
static_assert(sizeof(MASK) <= sizeof(unsigned), "WTF?"); static_assert(sizeof(MASK) <= sizeof(unsigned), "WTF?");
unsigned u = unsigned(mask); unsigned u = unsigned(mask);
if (parse_option<unsigned>(argc, argv, narg, option, u, verbs)) { if (parse_option<unsigned>(argc, argv, narg, option, u, verbs)) {
@ -85,49 +82,36 @@ bool parse_option(int argc, char *const argv[], int &narg, const char *option,
} }
template <> template <>
bool parse_option<unsigned>(int argc, char *const argv[], int &narg, bool parse_option<unsigned>(int argc, char *const argv[], int &narg, const char *option, unsigned &mask,
const char *option, unsigned &mask,
const option_verb *verbs); const option_verb *verbs);
bool parse_option(int argc, char *const argv[], int &narg, const char *option, bool parse_option(int argc, char *const argv[], int &narg, const char *option, uint64_t &value, const scale_mode scale,
uint64_t &value, const scale_mode scale, const uint64_t minval = 0, const uint64_t maxval = INT64_MAX, const uint64_t default_value = 0);
const uint64_t minval = 0, const uint64_t maxval = INT64_MAX,
const uint64_t default_value = 0);
bool parse_option(int argc, char *const argv[], int &narg, const char *option, bool parse_option(int argc, char *const argv[], int &narg, const char *option, unsigned &value, const scale_mode scale,
unsigned &value, const scale_mode scale, const unsigned minval = 0, const unsigned maxval = INT32_MAX, const unsigned default_value = 0);
const unsigned minval = 0, const unsigned maxval = INT32_MAX,
const unsigned default_value = 0);
bool parse_option(int argc, char *const argv[], int &narg, const char *option, bool parse_option(int argc, char *const argv[], int &narg, const char *option, uint8_t &value, const uint8_t minval = 0,
uint8_t &value, const uint8_t minval = 0,
const uint8_t maxval = 255, const uint8_t default_value = 0); const uint8_t maxval = 255, const uint8_t default_value = 0);
bool parse_option(int argc, char *const argv[], int &narg, const char *option, bool parse_option(int argc, char *const argv[], int &narg, const char *option, int64_t &value, const int64_t minval,
int64_t &value, const int64_t minval, const int64_t maxval, const int64_t maxval, const int64_t default_value = -1);
const int64_t default_value = -1);
bool parse_option(int argc, char *const argv[], int &narg, const char *option, bool parse_option(int argc, char *const argv[], int &narg, const char *option, int32_t &value, const int32_t minval,
int32_t &value, const int32_t minval, const int32_t maxval, const int32_t maxval, const int32_t default_value = -1);
const int32_t default_value = -1);
inline bool parse_option_intptr(int argc, char *const argv[], int &narg, inline bool parse_option_intptr(int argc, char *const argv[], int &narg, const char *option, intptr_t &value,
const char *option, intptr_t &value, const intptr_t minval, const intptr_t maxval, const intptr_t default_value = -1) {
const intptr_t minval, const intptr_t maxval,
const intptr_t default_value = -1) {
static_assert(sizeof(intptr_t) == 4 || sizeof(intptr_t) == 8, "WTF?"); static_assert(sizeof(intptr_t) == 4 || sizeof(intptr_t) == 8, "WTF?");
if (sizeof(intptr_t) == 8) if (sizeof(intptr_t) == 8)
return parse_option(argc, argv, narg, option, return parse_option(argc, argv, narg, option, *reinterpret_cast<int64_t *>(&value), int64_t(minval),
*reinterpret_cast<int64_t *>(&value), int64_t(minval),
int64_t(maxval), int64_t(default_value)); int64_t(maxval), int64_t(default_value));
else else
return parse_option(argc, argv, narg, option, return parse_option(argc, argv, narg, option, *reinterpret_cast<int32_t *>(&value), int32_t(minval),
*reinterpret_cast<int32_t *>(&value), int32_t(minval),
int32_t(maxval), int32_t(default_value)); int32_t(maxval), int32_t(default_value));
} }
bool parse_option(int argc, char *const argv[], int &narg, const char *option, bool parse_option(int argc, char *const argv[], int &narg, const char *option, logging::loglevel &);
logging::loglevel &);
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
struct keygen_params_pod { struct keygen_params_pod {
@ -295,10 +279,8 @@ struct actor_config_pod {
unsigned signal_nops{0}; unsigned signal_nops{0};
actor_config_pod() = default; actor_config_pod() = default;
actor_config_pod(unsigned actor_id, actor_testcase testcase, actor_config_pod(unsigned actor_id, actor_testcase testcase, unsigned space_id, unsigned wait4id)
unsigned space_id, unsigned wait4id) : actor_id(actor_id), space_id(space_id), testcase(testcase), wait4id(wait4id) {}
: actor_id(actor_id), space_id(space_id), testcase(testcase),
wait4id(wait4id) {}
}; };
extern const struct option_verb mode_bits[]; extern const struct option_verb mode_bits[];
@ -326,8 +308,7 @@ struct actor_config : public config::actor_config_pod {
bool wanna_event4signalling() const { return true /* TODO ? */; } bool wanna_event4signalling() const { return true /* TODO ? */; }
actor_config() = default; actor_config() = default;
actor_config(actor_testcase testcase, const actor_params &params, actor_config(actor_testcase testcase, const actor_params &params, unsigned space_id, unsigned wait4id);
unsigned space_id, unsigned wait4id);
actor_config(const char *str) : actor_config() { actor_config(const char *str) : actor_config() {
if (!deserialize(str, *this)) if (!deserialize(str, *this))

View File

@ -9,8 +9,7 @@ class testcase_copy : public testcase {
public: public:
testcase_copy(const actor_config &config, const mdbx_pid_t pid) testcase_copy(const actor_config &config, const mdbx_pid_t pid)
: testcase(config, pid), : testcase(config, pid), copy_pathname(config.params.pathname_db + "-copy") {}
copy_pathname(config.params.pathname_db + "-copy") {}
bool run() override; bool run() override;
}; };
REGISTER_TESTCASE(copy); REGISTER_TESTCASE(copy);
@ -21,14 +20,10 @@ void testcase_copy::copy_db(const bool with_compaction) {
failure_perror("osal_removefile()", err); failure_perror("osal_removefile()", err);
if (flipcoin()) { if (flipcoin()) {
err = mdbx_env_copy(db_guard.get(), copy_pathname.c_str(), err = mdbx_env_copy(db_guard.get(), copy_pathname.c_str(), with_compaction ? MDBX_CP_COMPACT : MDBX_CP_DEFAULTS);
with_compaction ? MDBX_CP_COMPACT : MDBX_CP_DEFAULTS); log_verbose("mdbx_env_copy(%s), err %d", with_compaction ? "true" : "false", err);
log_verbose("mdbx_env_copy(%s), err %d", with_compaction ? "true" : "false",
err);
if (unlikely(err != MDBX_SUCCESS)) if (unlikely(err != MDBX_SUCCESS))
failure_perror(with_compaction ? "mdbx_env_copy(MDBX_CP_COMPACT)" failure_perror(with_compaction ? "mdbx_env_copy(MDBX_CP_COMPACT)" : "mdbx_env_copy(MDBX_CP_ASIS)", err);
: "mdbx_env_copy(MDBX_CP_ASIS)",
err);
} else { } else {
do { do {
const bool ro = mode_readonly() || flipcoin(); const bool ro = mode_readonly() || flipcoin();
@ -36,26 +31,20 @@ void testcase_copy::copy_db(const bool with_compaction) {
const bool dynsize = flipcoin(); const bool dynsize = flipcoin();
const bool flush = flipcoin(); const bool flush = flipcoin();
const bool enable_renew = flipcoin(); const bool enable_renew = flipcoin();
const MDBX_copy_flags_t flags = const MDBX_copy_flags_t flags = (with_compaction ? MDBX_CP_COMPACT : MDBX_CP_DEFAULTS) |
(with_compaction ? MDBX_CP_COMPACT : MDBX_CP_DEFAULTS) | (dynsize ? MDBX_CP_FORCE_DYNAMIC_SIZE : MDBX_CP_DEFAULTS) |
(dynsize ? MDBX_CP_FORCE_DYNAMIC_SIZE : MDBX_CP_DEFAULTS) | (throttle ? MDBX_CP_THROTTLE_MVCC : MDBX_CP_DEFAULTS) |
(throttle ? MDBX_CP_THROTTLE_MVCC : MDBX_CP_DEFAULTS) | (flush ? MDBX_CP_DEFAULTS : MDBX_CP_DONT_FLUSH) |
(flush ? MDBX_CP_DEFAULTS : MDBX_CP_DONT_FLUSH) | (enable_renew ? MDBX_CP_RENEW_TXN : MDBX_CP_DEFAULTS);
(enable_renew ? MDBX_CP_RENEW_TXN : MDBX_CP_DEFAULTS);
txn_begin(ro); txn_begin(ro);
err = err = mdbx_txn_copy2pathname(txn_guard.get(), copy_pathname.c_str(), flags);
mdbx_txn_copy2pathname(txn_guard.get(), copy_pathname.c_str(), flags);
log_verbose("mdbx_txn_copy2pathname(flags=0x%X), err %d", flags, err); log_verbose("mdbx_txn_copy2pathname(flags=0x%X), err %d", flags, err);
txn_end(err != MDBX_SUCCESS || flipcoin()); txn_end(err != MDBX_SUCCESS || flipcoin());
if (unlikely( if (unlikely(err != MDBX_SUCCESS && !(throttle && err == MDBX_OUSTED) &&
err != MDBX_SUCCESS && !(throttle && err == MDBX_OUSTED) && !(!enable_renew && err == MDBX_MVCC_RETARDED) &&
!(!enable_renew && err == MDBX_MVCC_RETARDED) && !(err == MDBX_EINVAL && !ro && (flags & (MDBX_CP_THROTTLE_MVCC | MDBX_CP_RENEW_TXN)) != 0)))
!(err == MDBX_EINVAL && !ro && failure_perror(
(flags & (MDBX_CP_THROTTLE_MVCC | MDBX_CP_RENEW_TXN)) != 0))) with_compaction ? "mdbx_txn_copy2pathname(MDBX_CP_COMPACT)" : "mdbx_txn_copy2pathname(MDBX_CP_ASIS)", err);
failure_perror(with_compaction
? "mdbx_txn_copy2pathname(MDBX_CP_COMPACT)"
: "mdbx_txn_copy2pathname(MDBX_CP_ASIS)",
err);
} while (err != MDBX_SUCCESS); } while (err != MDBX_SUCCESS);
} }
} }
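
copy_db() above exercises both mdbx_env_copy() and mdbx_txn_copy2pathname() with randomly chosen MDBX_copy_flags_t combinations. Outside the test harness, a minimal compacting backup through the same mdbx_env_copy() entry point might look like the sketch below; the database paths are hypothetical and error handling is reduced to messages:

#include "mdbx.h"
#include <cstdio>

int main() {
  MDBX_env *env = nullptr;
  int err = mdbx_env_create(&env);
  if (err != MDBX_SUCCESS) {
    std::printf("mdbx_env_create: %s\n", mdbx_strerror(err));
    return 1;
  }
  // "./source-db" and "./source-db-copy" are made-up paths for this sketch
  err = mdbx_env_open(env, "./source-db", MDBX_RDONLY, 0);
  if (err == MDBX_SUCCESS)
    // MDBX_CP_COMPACT produces a compacted copy, as in copy_db(true) above
    err = mdbx_env_copy(env, "./source-db-copy", MDBX_CP_COMPACT);
  if (err != MDBX_SUCCESS)
    std::printf("copy failed: %s\n", mdbx_strerror(err));
  mdbx_env_close(env);
  return err == MDBX_SUCCESS ? 0 : 1;
}
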

View File

@ -5,8 +5,7 @@
class testcase_deadread : public testcase { class testcase_deadread : public testcase {
public: public:
testcase_deadread(const actor_config &config, const mdbx_pid_t pid) testcase_deadread(const actor_config &config, const mdbx_pid_t pid) : testcase(config, pid) {}
: testcase(config, pid) {}
bool run() override; bool run() override;
}; };
REGISTER_TESTCASE(deadread); REGISTER_TESTCASE(deadread);
@ -24,8 +23,7 @@ bool testcase_deadread::run() {
class testcase_deadwrite : public testcase { class testcase_deadwrite : public testcase {
public: public:
testcase_deadwrite(const actor_config &config, const mdbx_pid_t pid) testcase_deadwrite(const actor_config &config, const mdbx_pid_t pid) : testcase(config, pid) {}
: testcase(config, pid) {}
bool run() override; bool run() override;
}; };

View File

@ -26,8 +26,7 @@ std::string format_va(const char *fmt, va_list ap) {
result.reserve(size_t(needed + 1)); result.reserve(size_t(needed + 1));
result.resize(size_t(needed), '\0'); result.resize(size_t(needed), '\0');
assert(int(result.capacity()) > needed); assert(int(result.capacity()) > needed);
int actual = vsnprintf(const_cast<char *>(result.data()), result.capacity(), int actual = vsnprintf(const_cast<char *>(result.data()), result.capacity(), fmt, ones);
fmt, ones);
assert(actual == needed); assert(actual == needed);
(void)actual; (void)actual;
va_end(ones); va_end(ones);
@ -47,10 +46,8 @@ struct acase {
unsigned vlen_min, vlen_max; unsigned vlen_min, vlen_max;
unsigned dupmax_log2; unsigned dupmax_log2;
acase(unsigned klen_min, unsigned klen_max, unsigned vlen_min, acase(unsigned klen_min, unsigned klen_max, unsigned vlen_min, unsigned vlen_max, unsigned dupmax_log2)
unsigned vlen_max, unsigned dupmax_log2) : klen_min(klen_min), klen_max(klen_max), vlen_min(vlen_min), vlen_max(vlen_max), dupmax_log2(dupmax_log2) {}
: klen_min(klen_min), klen_max(klen_max), vlen_min(vlen_min),
vlen_max(vlen_max), dupmax_log2(dupmax_log2) {}
}; };
// std::random_device rd; // std::random_device rd;
@ -80,33 +77,26 @@ static mdbx::slice mk_val(mdbx::default_buffer &buf, const acase &thecase) {
static std::string name(unsigned n) { return format("Commitment_%05u", n); } static std::string name(unsigned n) { return format("Commitment_%05u", n); }
static mdbx::map_handle create_and_fill(mdbx::txn txn, const acase &thecase, static mdbx::map_handle create_and_fill(mdbx::txn txn, const acase &thecase, const unsigned n) {
const unsigned n) {
auto map = txn.create_map(name(n), auto map = txn.create_map(name(n),
(thecase.klen_min == thecase.klen_max && (thecase.klen_min == thecase.klen_max && (thecase.klen_min == 4 || thecase.klen_max == 8))
(thecase.klen_min == 4 || thecase.klen_max == 8))
? mdbx::key_mode::ordinal ? mdbx::key_mode::ordinal
: mdbx::key_mode::usual, : mdbx::key_mode::usual,
(thecase.vlen_min == thecase.vlen_max) (thecase.vlen_min == thecase.vlen_max) ? mdbx::value_mode::multi_samelength
? mdbx::value_mode::multi_samelength : mdbx::value_mode::multi);
: mdbx::value_mode::multi);
if (txn.get_map_stat(map).ms_entries < NN) { if (txn.get_map_stat(map).ms_entries < NN) {
mdbx::default_buffer k, v; mdbx::default_buffer k, v;
for (auto i = 0u; i < NN; i++) { for (auto i = 0u; i < NN; i++) {
mk_key(k, thecase); mk_key(k, thecase);
for (auto ii = thecase.dupmax_log2 for (auto ii = thecase.dupmax_log2 ? 1u + (rnd() & ((2u << thecase.dupmax_log2) - 1u)) : 1u; ii > 0; --ii)
? 1u + (rnd() & ((2u << thecase.dupmax_log2) - 1u))
: 1u;
ii > 0; --ii)
txn.upsert(map, k, mk_val(v, thecase)); txn.upsert(map, k, mk_val(v, thecase));
} }
} }
return map; return map;
} }
static void chunched_delete(mdbx::txn txn, const acase &thecase, static void chunched_delete(mdbx::txn txn, const acase &thecase, const unsigned n) {
const unsigned n) {
// printf(">> %s, case #%i\n", __FUNCTION__, n); // printf(">> %s, case #%i\n", __FUNCTION__, n);
mdbx::default_buffer k, v; mdbx::default_buffer k, v;
auto map = txn.open_map_accede(name(n)); auto map = txn.open_map_accede(name(n));
@ -121,27 +111,20 @@ static void chunched_delete(mdbx::txn txn, const acase &thecase,
    bool last_r;
    if (true == ((last_op = "MDBX_GET_BOTH"),
                 (last_r = cursor.find_multivalue(mk_key(k, thecase), mk_val(v, thecase), false))) ||
        rnd() % 3 == 0 ||
        true == ((last_op = "MDBX_SET_RANGE"), (last_r = cursor.lower_bound(mk_key(k, thecase), false)))) {
      int i = int(rnd() % 7) - 3;
      // if (i)
      //   printf(" %s -> %s\n", last_op, last_r ? "true" : "false");
      // printf("== shift multi %i\n", i);
      try {
        while (i < 0 && true == ((last_op = "MDBX_PREV_DUP"), (last_r = cursor.to_current_prev_multi(false))))
          ++i;
        while (i > 0 && true == ((last_op = "MDBX_NEXT_DUP"), (last_r = cursor.to_current_next_multi(false))))
          --i;
      } catch (const mdbx::no_data &) {
        printf("cursor_del() -> exception, last %s %s\n", last_op, last_r ? "true" : "false");
        continue;
      }
    }
@ -159,8 +142,7 @@ static void chunched_delete(mdbx::txn txn, const acase &thecase,
        // printf("   cursor_del() -> %s\n", last_r ? "true" : "false");
      } while (cursor.to_next(false) && --i > 0);
    } catch (const mdbx::no_data &) {
      printf("cursor_del() -> exception, last %s %s\n", last_op, last_r ? "true" : "false");
    }
    // (void) last_op;
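The hunks above position a cursor either exactly on a key+value pair (MDBX_GET_BOTH via find_multivalue) or at the first pair at-or-after a key (MDBX_SET_RANGE via lower_bound), then walk among the duplicates with to_current_prev_multi/to_current_next_multi, treating mdbx::no_data as running off the end. The following is a minimal sketch of that positioning-and-walking pattern in isolation; the map handle and the key/value slices are placeholders, everything else mirrors calls already shown in this diff.

    #include "mdbx.h++"
    #include <cstdio>

    // Position on (key, value) if present, otherwise at the lower bound of key,
    // then count how many duplicates follow from the current position.
    static void walk_duplicates(mdbx::txn txn, mdbx::map_handle map, mdbx::slice key, mdbx::slice value) {
      auto cursor = txn.open_cursor(map);
      if (cursor.find_multivalue(key, value, false) || cursor.lower_bound(key, false)) {
        size_t dups = 1;
        try {
          while (cursor.to_current_next_multi(false))
            ++dups;
        } catch (const mdbx::no_data &) {
          // no more duplicates for the current key
        }
        std::printf("%zu duplicate(s) from the cursor position onwards\n", dups);
      }
    }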
@ -178,8 +160,8 @@ static void chunched_delete(mdbx::txn txn, const acase &thecase,
static char log_buffer[1024];

static void logger_nofmt(MDBX_log_level_t loglevel, const char *function, int line, const char *msg,
                         unsigned length) noexcept {
  (void)length;
  (void)loglevel;
  fprintf(stdout, "%s:%u %s", function, line, msg);
@ -187,12 +169,10 @@ static void logger_nofmt(MDBX_log_level_t loglevel, const char *function,
bool outofrange_prev(mdbx::env env) {
  mdbx::cursor_managed cursor;
  const std::array<mdbx::pair, 4> items = {{{"k1", "v1"}, {"k1", "v2"}, {"k2", "v1"}, {"k2", "v2"}}};
  auto txn = env.start_write();
  auto multi = txn.create_map("multi", mdbx::key_mode::usual, mdbx::value_mode::multi);
  auto simple = txn.create_map("simple");
  txn.clear_map(multi);
  txn.clear_map(simple);
@ -242,12 +222,10 @@ bool outofrange_prev(mdbx::env env) {
}

bool next_prev_current(mdbx::env env) {
  const std::array<mdbx::pair, 4> items = {{{"k1", "v1"}, {"k1", "v2"}, {"k2", "v1"}, {"k2", "v2"}}};
  auto txn = env.start_write();
  auto map = txn.create_map("multi", mdbx::key_mode::usual, mdbx::value_mode::multi);
  txn.clear_map(map);
  for (const auto &i : items)
    txn.upsert(map, i);
@ -309,8 +287,7 @@ bool next_prev_current(mdbx::env env) {
}

bool simple(mdbx::env env) {
  const std::array<mdbx::pair, 3> items = {{{"k0", "v0"}, {"k1", "v1"}, {"k2", "v2"}}};
  auto txn = env.start_write();
  auto map = txn.create_map("simple");
@ -376,14 +353,12 @@ int main(int argc, const char *argv[]) {
  (void)argc;
  (void)argv;
  mdbx_setup_debug_nofmt(MDBX_LOG_NOTICE, MDBX_DBG_ASSERT, logger_nofmt, log_buffer, sizeof(log_buffer));
  mdbx::path db_filename = "test-crunched-del";
  mdbx::env::remove(db_filename);
  mdbx::env_managed env(db_filename, mdbx::env_managed::create_parameters(), mdbx::env::operate_parameters(42));
  if (!simple(env) || !next_prev_current(env) || !outofrange_prev(env))
    return EXIT_FAILURE;
@ -392,8 +367,7 @@ int main(int argc, const char *argv[]) {
  // Values of varying length, from 100 to 1000 bytes.
  testset.emplace_back(/* keylen_min */ 1, /* keylen_max */ 64,
                       /* datalen_min */ 100, /* datalen_max */
                       mdbx_env_get_valsize4page_max(env, MDBX_db_flags_t(mdbx::value_mode::multi)),
                       /* dups_log2 */ 6);
  // In a single DupSort table: path -> version_u64+data
  // path is a prefix in the tree. The most common lengths are 1-5 bytes and 32-36 bytes.
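The comment above describes the next test case only in prose; its actual parameters lie outside the shown hunk. Purely as an illustration of how such a case could be registered via the acase constructor shown at the top of this file (keylen_min, keylen_max, datalen_min, datalen_max, dups_log2), a hypothetical entry might look like this, with made-up numbers standing in for the real ones:

      // Hypothetical example only: "path" keys between 1 and 36 bytes,
      // values of version_u64 (8 bytes) plus a payload, up to 2^4 duplicates per key.
      testset.emplace_back(/* keylen_min */ 1, /* keylen_max */ 36,
                           /* datalen_min */ 8, /* datalen_max */ 8 + 100,
                           /* dups_log2 */ 4);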

View File

@ -2,8 +2,8 @@
#include <iostream>

static void logger_nofmt(MDBX_log_level_t loglevel, const char *function, int line, const char *msg,
                         unsigned length) noexcept {
  (void)length;
  (void)loglevel;
  std::cout << function << ":" << line << " " << msg;
@ -15,20 +15,17 @@ int main(int argc, const char *argv[]) {
  (void)argc;
  (void)argv;
  mdbx_setup_debug_nofmt(MDBX_LOG_NOTICE, MDBX_DBG_ASSERT, logger_nofmt, log_buffer, sizeof(log_buffer));
  mdbx::path db_filename = "test-cursor-closing";
  mdbx::env::remove(db_filename);
  mdbx::env_managed env(db_filename, mdbx::env_managed::create_parameters(),
                        mdbx::env::operate_parameters(42, 0, mdbx::env::nested_transactions));
  {
    auto txn = env.start_write();
    auto table = txn.create_map("dummy", mdbx::key_mode::usual, mdbx::value_mode::single);
    auto cursor_1 = txn.open_cursor(table);
    auto cursor_2 = cursor_1.clone();
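This hunk opens a cursor on a table and clones it, which is the pattern the test exercises. Below is a minimal self-contained sketch of the same pattern; the database path "example-db" and the main() scaffolding are illustrative, the API calls mirror the hunk above, and the automatic release of managed cursors on scope exit is what this cursor-closing test appears to rely on.

    #include "mdbx.h++"

    int main() {
      mdbx::path db_filename = "example-db";
      mdbx::env_managed env(db_filename, mdbx::env_managed::create_parameters(), mdbx::env::operate_parameters(42));
      auto txn = env.start_write();
      auto table = txn.create_map("dummy", mdbx::key_mode::usual, mdbx::value_mode::single);
      {
        auto cursor_1 = txn.open_cursor(table);
        auto cursor_2 = cursor_1.clone(); // an independent cursor bound to the same transaction and table
        // ... both cursors are usable here ...
      } // managed cursors release their handles when they go out of scope
      txn.commit();
      return 0;
    }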

View File

@ -4,8 +4,8 @@
static char log_buffer[1024];

static void logger_nofmt(MDBX_log_level_t loglevel, const char *function, int line, const char *msg,
                         unsigned length) noexcept {
  (void)length;
  (void)loglevel;
  fprintf(stdout, "%s:%u %s", function, line, msg);
@ -15,8 +15,7 @@ int main(int argc, const char *argv[]) {
  (void)argc;
  (void)argv;
  mdbx_setup_debug_nofmt(MDBX_LOG_NOTICE, MDBX_DBG_ASSERT, logger_nofmt, log_buffer, sizeof(log_buffer));
  mdbx::path db_filename = "test-dbi";
  mdbx::env::remove(db_filename);
@ -26,14 +25,12 @@ int main(int argc, const char *argv[]) {
  {
    mdbx::env_managed env2(db_filename, createParameters, operateParameters);
    mdbx::txn_managed txn2 = env2.start_write(false);
    /* mdbx::map_handle testHandle2 = */ txn2.create_map("fap1", mdbx::key_mode::reverse, mdbx::value_mode::single);
    txn2.commit();
  }
  mdbx::env_managed env(db_filename, createParameters, operateParameters);
  mdbx::txn_managed txn = env.start_write(false);
  /* mdbx::map_handle testHandle = */ txn.create_map("fap1", mdbx::key_mode::usual, mdbx::value_mode::single);
  txn.commit();
  std::cout << "OK\n";

Some files were not shown because too many files have changed in this diff.