mirror of https://github.com/isar/libmdbx.git, synced 2025-08-25 09:44:27 +08:00

mdbx: merge branch `master` into `devel`.
@@ -571,11 +571,17 @@ retry_snap_meta:
   uint8_t *const data_buffer = buffer + ceil_powerof2(meta_bytes, globals.sys_pagesize);
 #if MDBX_USE_COPYFILERANGE
   static bool copyfilerange_unavailable;
+#if (defined(__linux__) || defined(__gnu_linux__))
+  if (globals.linux_kernel_version >= 0x05030000 && globals.linux_kernel_version < 0x05130000)
+    copyfilerange_unavailable = true;
+#endif /* linux */
   bool not_the_same_filesystem = false;
-  struct statfs statfs_info;
-  if (fstatfs(fd, &statfs_info) || statfs_info.f_type == /* ECRYPTFS_SUPER_MAGIC */ 0xf15f)
-    /* avoid use copyfilerange_unavailable() to ecryptfs due bugs */
-    not_the_same_filesystem = true;
+  if (!copyfilerange_unavailable) {
+    struct statfs statfs_info;
+    if (fstatfs(fd, &statfs_info) || statfs_info.f_type == /* ECRYPTFS_SUPER_MAGIC */ 0xf15f)
+      /* avoid use copyfilerange_unavailable() to ecryptfs due bugs */
+      not_the_same_filesystem = true;
+  }
 #endif /* MDBX_USE_COPYFILERANGE */
 
   for (size_t offset = meta_bytes; rc == MDBX_SUCCESS && offset < used_size;) {
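Note on the kernel-version guard added above: globals.linux_kernel_version is compared against packed constants, so 0x05030000 and 0x05130000 correspond to Linux 5.3.0 and 5.19.0, i.e. copy_file_range() is treated as unavailable on the kernels in between. A minimal sketch of the packing this comparison assumes; the helper name is hypothetical and not part of the commit:

#include <stdint.h>

/* Pack major/minor/release into the byte layout the comparison above assumes,
 * e.g. linux_kernel_version_pack(5, 3, 0) == 0x05030000. */
static inline uint32_t linux_kernel_version_pack(unsigned major, unsigned minor, unsigned release) {
  return ((uint32_t)major << 24) | ((uint32_t)minor << 16) | ((uint32_t)release << 8);
}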
@@ -760,14 +766,24 @@ __cold static int copy2pathname(MDBX_txn *txn, const pathchar_t *dest_path, MDBX
     lock_op.l_whence = SEEK_SET;
     lock_op.l_start = 0;
     lock_op.l_len = OFF_T_MAX;
-    if (MDBX_FCNTL(newfd, MDBX_F_SETLK, &lock_op)
-#if (defined(__linux__) || defined(__gnu_linux__)) && defined(LOCK_EX) && \
-    (!defined(__ANDROID_API__) || __ANDROID_API__ >= 24)
-        || flock(newfd, LOCK_EX | LOCK_NB)
-#endif /* Linux */
-    )
+    if (MDBX_FCNTL(newfd, MDBX_F_SETLK, &lock_op))
       rc = errno;
   }
+
+#if defined(LOCK_EX) && (!defined(__ANDROID_API__) || __ANDROID_API__ >= 24)
+  if (rc == MDBX_SUCCESS && flock(newfd, LOCK_EX | LOCK_NB)) {
+    const int err_flock = errno, err_fs = osal_check_fs_local(newfd, 0);
+    if (err_flock != EAGAIN || err_fs != MDBX_EREMOTE) {
+      ERROR("%s flock(%" MDBX_PRIsPATH ") error %d, remote-fs check status %d", "unexpected", dest_path, err_flock,
+            err_fs);
+      rc = err_flock;
+    } else {
+      WARNING("%s flock(%" MDBX_PRIsPATH ") error %d, remote-fs check status %d", "ignore", dest_path, err_flock,
+              err_fs);
+    }
+  }
+#endif /* LOCK_EX && ANDROID_API >= 24 */
 
 #endif /* Windows / POSIX */
 
   if (rc == MDBX_SUCCESS)
@@ -63,7 +63,7 @@ int mdbx_cursor_bind(MDBX_txn *txn, MDBX_cursor *mc, MDBX_dbi dbi) {
       return MDBX_SUCCESS;
     rc = mdbx_cursor_unbind(mc);
     if (unlikely(rc != MDBX_SUCCESS))
-      return rc;
+      return (rc == MDBX_BAD_TXN) ? MDBX_EINVAL : rc;
   }
 
   cASSERT(mc, mc->next == mc);
@@ -89,8 +89,16 @@ int mdbx_cursor_unbind(MDBX_cursor *mc) {
     return LOG_IFERR(MDBX_EINVAL);
 
   int rc = check_txn(mc->txn, MDBX_TXN_FINISHED | MDBX_TXN_HAS_CHILD);
-  if (unlikely(rc != MDBX_SUCCESS))
+  if (unlikely(rc != MDBX_SUCCESS)) {
+    for (const MDBX_txn *txn = mc->txn; rc == MDBX_BAD_TXN && check_txn(txn, MDBX_TXN_FINISHED) == MDBX_SUCCESS;
+         txn = txn->nested)
+      if (dbi_state(txn, cursor_dbi(mc)) == 0)
+        /* special case: the cursor is attached to the parent transaction, but the corresponding dbi handle has not
+         * yet been used in the nested transaction, i.e. the cursor has not yet been imported into the child
+         * transaction and has no associated saved state (hence mc->backup is nullptr). */
+        rc = MDBX_EINVAL;
     return LOG_IFERR(rc);
+  }
 
   if (unlikely(!mc->txn || mc->txn->signature != txn_signature)) {
     ERROR("Wrong cursor's transaction %p 0x%x", __Wpedantic_format_voidptr(mc->txn), mc->txn ? mc->txn->signature : 0);
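The mdbx_cursor_unbind() change above affects cursors that are detached from a finished or nested transaction for later reuse. A minimal usage sketch with the public API, not part of the commit (env and dbi are assumed to be an open environment and database handle):

MDBX_cursor *cursor = mdbx_cursor_create(NULL);
MDBX_txn *txn = NULL;
int rc = mdbx_txn_begin(env, NULL, MDBX_TXN_RDONLY, &txn);
if (rc == MDBX_SUCCESS) {
  rc = mdbx_cursor_bind(txn, cursor, dbi); /* attach the preallocated cursor to this txn/dbi */
  /* ... iterate with mdbx_cursor_get() ... */
  mdbx_cursor_unbind(cursor);              /* detach; the cursor stays allocated for reuse */
  mdbx_txn_abort(txn);
}
mdbx_cursor_close(cursor);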
@@ -245,9 +253,8 @@ int mdbx_txn_release_all_cursors_ex(const MDBX_txn *txn, bool unbind, size_t *co
           MDBX_cursor *bk = mc->backup;
           mc->next = bk->next;
-          mc->backup = bk->backup;
+          mc->backup = nullptr;
           bk->backup = nullptr;
           bk->signature = 0;
-          bk = bk->next;
           osal_free(bk);
         } else {
           mc->signature = cur_signature_ready4dispose;
@@ -140,7 +140,7 @@ int mdbx_txn_lock(MDBX_env *env, bool dont_wait) {
 
   if (unlikely(env->flags & MDBX_RDONLY))
     return LOG_IFERR(MDBX_EACCESS);
-  if (unlikely(env->basal_txn->owner || (env->basal_txn->flags & MDBX_TXN_FINISHED) == 0))
+  if (dont_wait && unlikely(env->basal_txn->owner || (env->basal_txn->flags & MDBX_TXN_FINISHED) == 0))
     return LOG_IFERR(MDBX_BUSY);
 
   return LOG_IFERR(lck_txn_lock(env, dont_wait));
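For context, mdbx_txn_lock()/mdbx_txn_unlock() are the public wrappers around the writer lock taken here; with the change above the busy pre-check only applies to the non-blocking path. A minimal usage sketch, not part of the commit (env is assumed):

int rc = mdbx_txn_lock(env, /* dont_wait */ true);
if (rc == MDBX_BUSY)
  rc = mdbx_txn_lock(env, /* dont_wait */ false); /* fall back to a blocking acquisition */
if (rc == MDBX_SUCCESS) {
  /* ... exclusive writer section: no write transaction can start meanwhile ... */
  mdbx_txn_unlock(env);
}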
@@ -235,9 +235,11 @@ int mdbx_txn_begin_ex(MDBX_env *env, MDBX_txn *parent, MDBX_txn_flags_t flags, M
     flags |= parent->flags & (txn_rw_begin_flags | MDBX_TXN_SPILLS | MDBX_NOSTICKYTHREADS | MDBX_WRITEMAP);
     rc = txn_nested_create(parent, flags);
     txn = parent->nested;
-    if (unlikely(rc != MDBX_SUCCESS))
-      txn_end(txn, TXN_END_FAIL_BEGIN_NESTED);
-    else if (AUDIT_ENABLED() && ASSERT_ENABLED()) {
+    if (unlikely(rc != MDBX_SUCCESS)) {
+      int err = txn_end(txn, TXN_END_FAIL_BEGIN_NESTED);
+      return err ? err : rc;
+    }
+    if (AUDIT_ENABLED() && ASSERT_ENABLED()) {
       txn->signature = txn_signature;
       tASSERT(txn, audit_ex(txn, 0, false) == 0);
     }
@@ -249,31 +251,30 @@ int mdbx_txn_begin_ex(MDBX_env *env, MDBX_txn *parent, MDBX_txn_flags_t flags, M
       return LOG_IFERR(MDBX_ENOMEM);
     }
     rc = txn_renew(txn, flags);
   }
 
-  if (unlikely(rc != MDBX_SUCCESS)) {
-    if (txn != env->basal_txn)
-      osal_free(txn);
-  } else {
-    if (flags & (MDBX_TXN_RDONLY_PREPARE - MDBX_TXN_RDONLY))
-      eASSERT(env, txn->flags == (MDBX_TXN_RDONLY | MDBX_TXN_FINISHED));
-    else if (flags & MDBX_TXN_RDONLY)
-      eASSERT(env, (txn->flags & ~(MDBX_NOSTICKYTHREADS | MDBX_TXN_RDONLY | MDBX_WRITEMAP |
-                                   /* Win32: SRWL flag */ txn_shrink_allowed)) == 0);
-    else {
-      eASSERT(env, (txn->flags & ~(MDBX_NOSTICKYTHREADS | MDBX_WRITEMAP | txn_shrink_allowed | txn_may_have_cursors |
-                                   MDBX_NOMETASYNC | MDBX_SAFE_NOSYNC | MDBX_TXN_SPILLS)) == 0);
-      assert(!txn->wr.spilled.list && !txn->wr.spilled.least_removed);
-    }
-    txn->signature = txn_signature;
-    txn->userctx = context;
-    *ret = txn;
-    DEBUG("begin txn %" PRIaTXN "%c %p on env %p, root page %" PRIaPGNO "/%" PRIaPGNO, txn->txnid,
-          (flags & MDBX_TXN_RDONLY) ? 'r' : 'w', (void *)txn, (void *)env, txn->dbs[MAIN_DBI].root,
-          txn->dbs[FREE_DBI].root);
-  }
-
-  return LOG_IFERR(rc);
+  if (unlikely(rc != MDBX_SUCCESS)) {
+    if (txn != env->basal_txn)
+      osal_free(txn);
+    return LOG_IFERR(rc);
+  }
+
+  if (flags & (MDBX_TXN_RDONLY_PREPARE - MDBX_TXN_RDONLY))
+    eASSERT(env, txn->flags == (MDBX_TXN_RDONLY | MDBX_TXN_FINISHED));
+  else if (flags & MDBX_TXN_RDONLY)
+    eASSERT(env, (txn->flags & ~(MDBX_NOSTICKYTHREADS | MDBX_TXN_RDONLY | MDBX_WRITEMAP |
+                                 /* Win32: SRWL flag */ txn_shrink_allowed)) == 0);
+  else {
+    eASSERT(env, (txn->flags & ~(MDBX_NOSTICKYTHREADS | MDBX_WRITEMAP | txn_shrink_allowed | txn_may_have_cursors |
+                                 MDBX_NOMETASYNC | MDBX_SAFE_NOSYNC | MDBX_TXN_SPILLS)) == 0);
+    assert(!txn->wr.spilled.list && !txn->wr.spilled.least_removed);
+  }
+  txn->signature = txn_signature;
+  txn->userctx = context;
+  *ret = txn;
+  DEBUG("begin txn %" PRIaTXN "%c %p on env %p, root page %" PRIaPGNO "/%" PRIaPGNO, txn->txnid,
+        (flags & MDBX_TXN_RDONLY) ? 'r' : 'w', (void *)txn, (void *)env, txn->dbs[MAIN_DBI].root,
+        txn->dbs[FREE_DBI].root);
+  return MDBX_SUCCESS;
 }
 
 static void latency_gcprof(MDBX_commit_latency *latency, const MDBX_txn *txn) {
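The reworked control flow above returns immediately after a failed txn_renew()/txn_nested_create() instead of falling through a combined success/failure branch. A minimal sketch of the nested-transaction usage this code serves, not part of the commit (env is assumed to be an open environment):

MDBX_txn *parent = NULL, *nested = NULL;
int rc = mdbx_txn_begin(env, NULL, MDBX_TXN_READWRITE, &parent);
if (rc == MDBX_SUCCESS) {
  rc = mdbx_txn_begin(env, parent, MDBX_TXN_READWRITE, &nested);
  if (rc == MDBX_SUCCESS) {
    /* ... changes visible only inside the nested transaction ... */
    mdbx_txn_abort(nested); /* discard; the parent remains usable and unchanged */
  }
  rc = mdbx_txn_commit(parent);
}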
@@ -159,12 +159,12 @@ __cold static MDBX_chk_line_t *MDBX_PRINTF_ARGS(2, 3) chk_print(MDBX_chk_line_t
   return line;
 }
 
-__cold MDBX_MAYBE_UNUSED static void chk_println_va(MDBX_chk_scope_t *const scope, enum MDBX_chk_severity severity,
+MDBX_MAYBE_UNUSED __cold static void chk_println_va(MDBX_chk_scope_t *const scope, enum MDBX_chk_severity severity,
                                                     const char *fmt, va_list args) {
   chk_line_end(chk_print_va(chk_line_begin(scope, severity), fmt, args));
 }
 
-__cold MDBX_MAYBE_UNUSED static void chk_println(MDBX_chk_scope_t *const scope, enum MDBX_chk_severity severity,
+MDBX_MAYBE_UNUSED __cold static void chk_println(MDBX_chk_scope_t *const scope, enum MDBX_chk_severity severity,
                                                  const char *fmt, ...) {
   va_list args;
   va_start(args, fmt);
@@ -1726,6 +1726,7 @@ __hot csr_t cursor_seek(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data, MDBX_cur
 
   csr_t ret;
   ret.exact = false;
+  /* coverity[logical_vs_bitwise] */
   if (unlikely(key->iov_len < mc->clc->k.lmin ||
                (key->iov_len > mc->clc->k.lmax &&
                 (mc->clc->k.lmin == mc->clc->k.lmax || MDBX_DEBUG || MDBX_FORCE_ASSERTIONS)))) {
src/dbi.h (40 changed lines)
@@ -43,29 +43,35 @@ static inline size_t dbi_bitmap_ctz(const MDBX_txn *txn, intptr_t bmi) {
   return dbi_bitmap_ctz_fallback(txn, bmi);
 }
 
+static inline bool dbi_foreach_step(const MDBX_txn *const txn, size_t *bitmap_item, size_t *dbi) {
+  const size_t bitmap_chunk = CHAR_BIT * sizeof(txn->dbi_sparse[0]);
+  if (*bitmap_item & 1) {
+    *bitmap_item >>= 1;
+    return txn->dbi_state[*dbi] != 0;
+  }
+  if (*bitmap_item) {
+    size_t bitmap_skip = dbi_bitmap_ctz(txn, *bitmap_item);
+    *bitmap_item >>= bitmap_skip;
+    *dbi += bitmap_skip - 1;
+  } else {
+    *dbi = (*dbi - 1) | (bitmap_chunk - 1);
+    *bitmap_item = txn->dbi_sparse[(1 + *dbi) / bitmap_chunk];
+    if (*bitmap_item == 0)
+      *dbi += bitmap_chunk;
+  }
+  return false;
+}
+
 /* LY: The macro is deliberately built around a single loop,
  * so that the break statement remains usable inside it. */
 #define TXN_FOREACH_DBI_FROM(TXN, I, FROM) \
-  for (size_t bitmap_chunk = CHAR_BIT * sizeof(TXN->dbi_sparse[0]), bitmap_item = TXN->dbi_sparse[0] >> FROM, \
-              I = FROM; \
-       I < TXN->n_dbi; ++I) \
-    if (bitmap_item == 0) { \
-      I = (I - 1) | (bitmap_chunk - 1); \
-      bitmap_item = TXN->dbi_sparse[(1 + I) / bitmap_chunk]; \
-      if (!bitmap_item) \
-        I += bitmap_chunk; \
-      continue; \
-    } else if ((bitmap_item & 1) == 0) { \
-      size_t bitmap_skip = dbi_bitmap_ctz(txn, bitmap_item); \
-      bitmap_item >>= bitmap_skip; \
-      I += bitmap_skip - 1; \
-      continue; \
-    } else if (bitmap_item >>= 1, TXN->dbi_state[I])
+  for (size_t bitmap_item = TXN->dbi_sparse[0] >> FROM, I = FROM; I < TXN->n_dbi; ++I) \
+    if (dbi_foreach_step(TXN, &bitmap_item, &I))
 
 #else
 
-#define TXN_FOREACH_DBI_FROM(TXN, I, SKIP) \
-  for (size_t I = SKIP; I < TXN->n_dbi; ++I) \
+#define TXN_FOREACH_DBI_FROM(TXN, I, FROM) \
+  for (size_t I = FROM; I < TXN->n_dbi; ++I) \
     if (TXN->dbi_state[I])
 
 #endif /* MDBX_ENABLE_DBI_SPARSE */
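As the comment above notes, TXN_FOREACH_DBI_FROM deliberately expands to a single for-loop (its step logic now factored into dbi_foreach_step()), so a break inside the body terminates the whole iteration. A hypothetical usage sketch, not part of the commit (DBI_DIRTY and CORE_DBS are internal identifiers assumed from the surrounding sources):

/* Count DBI handles with pending changes, optionally stopping at the first hit. */
static size_t count_dirty_dbi(const MDBX_txn *txn, bool first_only) {
  size_t n = 0;
  TXN_FOREACH_DBI_FROM(txn, dbi, CORE_DBS) {
    if (txn->dbi_state[dbi] & DBI_DIRTY) {
      ++n;
      if (first_only)
        break; /* leaves the single underlying for-loop */
    }
  }
  return n;
}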
@@ -65,6 +65,8 @@ int dpl_alloc(MDBX_txn *txn) {
       unlikely(!dpl_reserve(txn, wanna)))
     return MDBX_ENOMEM;
 
+  /* LY: wr.dirtylist cannot be nullptr here, since it either is already allocated or will be allocated by dpl_reserve(). */
+  /* coverity[var_deref_model] */
   dpl_clear(txn->wr.dirtylist);
   return MDBX_SUCCESS;
 }
@@ -395,7 +397,7 @@ __cold bool dpl_check(MDBX_txn *txn) {
 /*----------------------------------------------------------------------------*/
 
 __noinline void dpl_lru_reduce(MDBX_txn *txn) {
-  NOTICE("lru-reduce %u -> %u", txn->wr.dirtylru, txn->wr.dirtylru >> 1);
+  VERBOSE("lru-reduce %u -> %u", txn->wr.dirtylru, txn->wr.dirtylru >> 1);
   tASSERT(txn, (txn->flags & (MDBX_TXN_RDONLY | MDBX_WRITEMAP)) == 0);
   do {
     txn->wr.dirtylru >>= 1;
@@ -1158,7 +1158,7 @@ int dxb_sync_locked(MDBX_env *env, unsigned flags, meta_t *const pending, troika
     if (!head.is_steady && meta_is_steady(pending))
       target = (meta_t *)head.ptr_c;
     else {
-      NOTICE("skip update meta%" PRIaPGNO " for txn#%" PRIaTXN "since it is already steady",
+      NOTICE("skip update meta%" PRIaPGNO " for txn#%" PRIaTXN ", since it is already steady",
              data_page(head.ptr_c)->pgno, head.txnid);
       return MDBX_SUCCESS;
     }
@@ -1291,6 +1291,7 @@ int dxb_sync_locked(MDBX_env *env, unsigned flags, meta_t *const pending, troika
   }
 
   uint64_t timestamp = 0;
+  /* coverity[array_null] */
   while ("workaround for https://libmdbx.dqdkfa.ru/dead-github/issues/269") {
     rc = coherency_check_written(env, pending->unsafe_txnid, target,
                                  bytes2pgno(env, ptr_dist(target, env->dxb_mmap.base)), &timestamp);
@@ -186,7 +186,7 @@ typedef struct reader_slot {
 /* The header for the reader table (a memory-mapped lock file). */
 typedef struct shared_lck {
   /* Stamp identifying this as an MDBX file.
-   * It must be set to MDBX_MAGIC with with MDBX_LOCK_VERSION. */
+   * It must be set to MDBX_MAGIC with MDBX_LOCK_VERSION. */
   uint64_t magic_and_version;
 
   /* Format of this lock file. Must be set to MDBX_LOCK_FORMAT. */
@@ -1745,7 +1745,7 @@ MDBX_INTERNAL int osal_check_fs_incore(mdbx_filehandle_t handle) {
   return MDBX_RESULT_FALSE;
 }
 
-static int osal_check_fs_local(mdbx_filehandle_t handle, int flags) {
+MDBX_INTERNAL int osal_check_fs_local(mdbx_filehandle_t handle, int flags) {
 #if defined(_WIN32) || defined(_WIN64)
   if (globals.running_under_Wine && !(flags & MDBX_EXCLUSIVE))
     return ERROR_NOT_CAPABLE /* workaround for Wine */;
@@ -2856,7 +2856,7 @@ __cold static LSTATUS mdbx_RegGetValue(HKEY hKey, LPCSTR lpSubKey, LPCSTR lpValu
 }
 #endif
 
-__cold MDBX_MAYBE_UNUSED static bool bootid_parse_uuid(bin128_t *s, const void *p, const size_t n) {
+MDBX_MAYBE_UNUSED __cold static bool bootid_parse_uuid(bin128_t *s, const void *p, const size_t n) {
   if (n > 31) {
     unsigned bits = 0;
     for (unsigned i = 0; i < n; ++i) /* try parse an UUID in text form */ {
@@ -481,6 +481,7 @@ MDBX_INTERNAL int osal_resume_threads_after_remap(mdbx_handle_array_t *array);
 MDBX_INTERNAL int osal_msync(const osal_mmap_t *map, size_t offset, size_t length, enum osal_syncmode_bits mode_bits);
 MDBX_INTERNAL int osal_check_fs_rdonly(mdbx_filehandle_t handle, const pathchar_t *pathname, int err);
 MDBX_INTERNAL int osal_check_fs_incore(mdbx_filehandle_t handle);
+MDBX_INTERNAL int osal_check_fs_local(mdbx_filehandle_t handle, int flags);
 
 MDBX_MAYBE_UNUSED static inline uint32_t osal_getpid(void) {
   STATIC_ASSERT(sizeof(mdbx_pid_t) <= sizeof(uint32_t));
@@ -98,6 +98,7 @@ int txn_basal_start(MDBX_txn *txn, unsigned flags) {
   txn->wr.troika = meta_tap(env);
   const meta_ptr_t head = meta_recent(env, &txn->wr.troika);
   uint64_t timestamp = 0;
+  /* coverity[array_null] */
   while ("workaround for https://libmdbx.dqdkfa.ru/dead-github/issues/269") {
     int err = coherency_fetch_head(txn, head, &timestamp);
     if (likely(err == MDBX_SUCCESS))
@@ -326,7 +326,6 @@ static void txn_merge(MDBX_txn *const parent, MDBX_txn *const txn, const size_t
     tASSERT(parent, dpl_check(parent));
   }
 
-  parent->flags &= ~MDBX_TXN_HAS_CHILD;
   if (parent->wr.spilled.list) {
     assert(pnl_check_allocated(parent->wr.spilled.list, (size_t)parent->geo.first_unallocated << 1));
     if (MDBX_PNL_GETSIZE(parent->wr.spilled.list))
@@ -349,26 +348,31 @@ int txn_nested_create(MDBX_txn *parent, const MDBX_txn_flags_t flags) {
     return LOG_IFERR(MDBX_ENOMEM);
 
   tASSERT(parent, dpl_check(parent));
+  txn->txnid = parent->txnid;
+  txn->front_txnid = parent->front_txnid + 1;
+  txn->canary = parent->canary;
+  parent->flags |= MDBX_TXN_HAS_CHILD;
+  parent->nested = txn;
+  txn->parent = parent;
+  txn->env->txn = txn;
+  txn->owner = parent->owner;
+  txn->wr.troika = parent->wr.troika;
+  rkl_init(&txn->wr.gc.reclaimed);
+
 #if MDBX_ENABLE_DBI_SPARSE
   txn->dbi_sparse = parent->dbi_sparse;
 #endif /* MDBX_ENABLE_DBI_SPARSE */
   txn->dbi_seqs = parent->dbi_seqs;
   txn->geo = parent->geo;
 
   int err = dpl_alloc(txn);
-  if (likely(err == MDBX_SUCCESS)) {
-    const size_t len = MDBX_PNL_GETSIZE(parent->wr.repnl) + parent->wr.loose_count;
-    txn->wr.repnl = pnl_alloc((len > MDBX_PNL_INITIAL) ? len : MDBX_PNL_INITIAL);
-    if (unlikely(!txn->wr.repnl))
-      err = MDBX_ENOMEM;
-  }
-  if (unlikely(err != MDBX_SUCCESS)) {
-  failed:
-    pnl_free(txn->wr.repnl);
-    dpl_free(txn);
-    osal_free(txn);
+  if (unlikely(err != MDBX_SUCCESS))
     return LOG_IFERR(err);
-  }
+
+  const size_t len = MDBX_PNL_GETSIZE(parent->wr.repnl) + parent->wr.loose_count;
+  txn->wr.repnl = pnl_alloc((len > MDBX_PNL_INITIAL) ? len : MDBX_PNL_INITIAL);
+  if (unlikely(!txn->wr.repnl))
+    return LOG_IFERR(MDBX_ENOMEM);
 
   /* Move loose pages to reclaimed list */
   if (parent->wr.loose_count) {
@@ -377,7 +381,7 @@ int txn_nested_create(MDBX_txn *parent, const MDBX_txn_flags_t flags) {
       tASSERT(parent, lp->flags == P_LOOSE);
       err = pnl_insert_span(&parent->wr.repnl, lp->pgno, 1);
       if (unlikely(err != MDBX_SUCCESS))
-        goto failed;
+        return LOG_IFERR(err);
       MDBX_ASAN_UNPOISON_MEMORY_REGION(&page_next(lp), sizeof(page_t *));
       VALGRIND_MAKE_MEM_DEFINED(&page_next(lp), sizeof(page_t *));
       parent->wr.loose_pages = page_next(lp);
@@ -390,6 +394,9 @@ int txn_nested_create(MDBX_txn *parent, const MDBX_txn_flags_t flags) {
 #endif /* MDBX_ENABLE_REFUND */
     tASSERT(parent, dpl_check(parent));
   }
+#if MDBX_ENABLE_REFUND
+  txn->wr.loose_refund_wl = 0;
+#endif /* MDBX_ENABLE_REFUND */
   txn->wr.dirtyroom = parent->wr.dirtyroom;
   txn->wr.dirtylru = parent->wr.dirtylru;
 
@@ -399,6 +406,7 @@ int txn_nested_create(MDBX_txn *parent, const MDBX_txn_flags_t flags) {
 
   tASSERT(txn, MDBX_PNL_ALLOCLEN(txn->wr.repnl) >= MDBX_PNL_GETSIZE(parent->wr.repnl));
   memcpy(txn->wr.repnl, parent->wr.repnl, MDBX_PNL_SIZEOF(parent->wr.repnl));
+  /* coverity[assignment_where_comparison_intended] */
   tASSERT(txn, pnl_check_allocated(txn->wr.repnl, (txn->geo.first_unallocated /* LY: intentional assignment
                                                                                  here, only for assertion */
                                                    = parent->geo.first_unallocated) -
@@ -413,18 +421,6 @@ int txn_nested_create(MDBX_txn *parent, const MDBX_txn_flags_t flags) {
   txn->wr.retired_pages = parent->wr.retired_pages;
   parent->wr.retired_pages = (void *)(intptr_t)MDBX_PNL_GETSIZE(parent->wr.retired_pages);
 
-  txn->txnid = parent->txnid;
-  txn->front_txnid = parent->front_txnid + 1;
-#if MDBX_ENABLE_REFUND
-  txn->wr.loose_refund_wl = 0;
-#endif /* MDBX_ENABLE_REFUND */
-  txn->canary = parent->canary;
-  parent->flags |= MDBX_TXN_HAS_CHILD;
-  parent->nested = txn;
-  txn->parent = parent;
-  txn->owner = parent->owner;
-  txn->wr.troika = parent->wr.troika;
-
   txn->cursors[FREE_DBI] = nullptr;
   txn->cursors[MAIN_DBI] = nullptr;
   txn->dbi_state[FREE_DBI] = parent->dbi_state[FREE_DBI] & ~(DBI_FRESH | DBI_CREAT | DBI_DIRTY);
@@ -436,7 +432,6 @@ int txn_nested_create(MDBX_txn *parent, const MDBX_txn_flags_t flags) {
                    (parent->parent ? parent->parent->wr.dirtyroom : parent->env->options.dp_limit));
   tASSERT(txn, txn->wr.dirtyroom + txn->wr.dirtylist->length ==
                    (txn->parent ? txn->parent->wr.dirtyroom : txn->env->options.dp_limit));
-  parent->env->txn = txn;
   tASSERT(parent, parent->cursors[FREE_DBI] == nullptr);
   // TODO: shadow GC' cursor
   return txn_shadow_cursors(parent, MAIN_DBI);
@@ -456,9 +451,6 @@ void txn_nested_abort(MDBX_txn *nested) {
     parent->wr.retired_pages = nested->wr.retired_pages;
   }
 
-  parent->wr.dirtylru = nested->wr.dirtylru;
-  parent->nested = nullptr;
-  parent->flags &= ~MDBX_TXN_HAS_CHILD;
   tASSERT(parent, dpl_check(parent));
   tASSERT(parent, audit_ex(parent, 0, false) == 0);
   dpl_release_shadows(nested);
@@ -554,15 +546,16 @@ int txn_nested_join(MDBX_txn *txn, struct commit_timestamp *ts) {
   /* Update parent's DBs array */
   eASSERT(env, parent->n_dbi == txn->n_dbi);
   TXN_FOREACH_DBI_ALL(txn, dbi) {
-    if (txn->dbi_state[dbi] & (DBI_CREAT | DBI_FRESH | DBI_DIRTY)) {
+    if (txn->dbi_state[dbi] != (parent->dbi_state[dbi] & ~(DBI_FRESH | DBI_CREAT | DBI_DIRTY))) {
+      eASSERT(env,
+              (txn->dbi_state[dbi] & (DBI_CREAT | DBI_FRESH | DBI_DIRTY)) != 0 ||
+              (txn->dbi_state[dbi] | DBI_STALE) == (parent->dbi_state[dbi] & ~(DBI_FRESH | DBI_CREAT | DBI_DIRTY)));
       parent->dbs[dbi] = txn->dbs[dbi];
       /* preserve parent's status */
       const uint8_t state = txn->dbi_state[dbi] | (parent->dbi_state[dbi] & (DBI_CREAT | DBI_FRESH | DBI_DIRTY));
       DEBUG("dbi %zu dbi-state %s 0x%02x -> 0x%02x", dbi, (parent->dbi_state[dbi] != state) ? "update" : "still",
             parent->dbi_state[dbi], state);
       parent->dbi_state[dbi] = state;
-    } else {
-      eASSERT(env, txn->dbi_state[dbi] == (parent->dbi_state[dbi] & ~(DBI_FRESH | DBI_CREAT | DBI_DIRTY)));
     }
   }
 
@@ -574,9 +567,10 @@ int txn_nested_join(MDBX_txn *txn, struct commit_timestamp *ts) {
     ts->sync = /* no sync */ ts->write;
   }
   txn_merge(parent, txn, parent_retired_len);
+  tASSERT(parent, parent->flags & MDBX_TXN_HAS_CHILD);
+  parent->flags -= MDBX_TXN_HAS_CHILD;
   env->txn = parent;
   parent->nested = nullptr;
-  parent->flags &= ~MDBX_TXN_HAS_CHILD;
   tASSERT(parent, dpl_check(parent));
 
 #if MDBX_ENABLE_REFUND
@@ -362,7 +362,10 @@ int txn_end(MDBX_txn *txn, unsigned mode) {
     tASSERT(txn, pnl_check_allocated(txn->wr.repnl, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
     tASSERT(txn, memcmp(&txn->wr.troika, &parent->wr.troika, sizeof(troika_t)) == 0);
     tASSERT(txn, mode & TXN_END_FREE);
+    tASSERT(parent, parent->flags & MDBX_TXN_HAS_CHILD);
     env->txn = parent;
+    parent->nested = nullptr;
+    parent->flags -= MDBX_TXN_HAS_CHILD;
     const pgno_t nested_now = txn->geo.now, nested_upper = txn->geo.upper;
     txn_nested_abort(txn);
 