mdbx: rename repnl/retxl.

Леонид Юрьев (Leonid Yuriev) 2024-12-19 22:03:03 +03:00
parent ffb7918525
commit 225fb79eb2
12 changed files with 210 additions and 211 deletions
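
In short, the commit mechanically renames two fields of the write-transaction state and the helpers built on them: tw.relist becomes tw.repnl (the page-number list refilled from GC records), tw.gc.reclaimed becomes tw.gc.retxl (the list of GC txn-ids already consumed), and relist_get_single/relist_get_sequence become repnl_get_single/repnl_get_sequence. Below is a minimal sketch of the renamed fields as the hunks use them; the typedef widths and the struct shape are assumptions for illustration, not the actual libmdbx definitions.

#include <stdint.h>

typedef uint32_t pgno_t;  /* assumed width, for illustration only */
typedef uint64_t txnid_t; /* assumed width, for illustration only */
typedef pgno_t *pnl_t;    /* sorted page-number list, managed by pnl_alloc()/pnl_free() */
typedef txnid_t *txl_t;   /* txn-id list, managed by txl_alloc()/txl_free() */

/* Hypothetical excerpt of the write-txn ("tw") state, limited to the renamed fields. */
struct tw_sketch {
  pnl_t repnl;              /* was tw.relist: pages reclaimed from GC, ready for reuse */
  struct {
    txnid_t last_reclaimed; /* unchanged by this commit */
    txl_t retxl;            /* was tw.gc.reclaimed: ids of GC records taken in LIFO mode */
  } gc;
  pnl_t retired_pages;      /* unchanged by this commit */
};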

View File

@@ -510,8 +510,8 @@ __cold int mdbx_env_openW(MDBX_env *env, const wchar_t *pathname, MDBX_env_flags
   txn->flags = MDBX_TXN_FINISHED;
   env->basal_txn = txn;
   txn->tw.retired_pages = pnl_alloc(MDBX_PNL_INITIAL);
-  txn->tw.relist = pnl_alloc(MDBX_PNL_INITIAL);
-  if (unlikely(!txn->tw.retired_pages || !txn->tw.relist)) {
+  txn->tw.repnl = pnl_alloc(MDBX_PNL_INITIAL);
+  if (unlikely(!txn->tw.retired_pages || !txn->tw.repnl)) {
     rc = MDBX_ENOMEM;
     goto bailout;
   }

View File

@@ -257,14 +257,14 @@ int mdbx_txn_begin_ex(MDBX_env *env, MDBX_txn *parent, MDBX_txn_flags_t flags, M
     txn->geo = parent->geo;
     rc = dpl_alloc(txn);
     if (likely(rc == MDBX_SUCCESS)) {
-      const size_t len = MDBX_PNL_GETSIZE(parent->tw.relist) + parent->tw.loose_count;
-      txn->tw.relist = pnl_alloc((len > MDBX_PNL_INITIAL) ? len : MDBX_PNL_INITIAL);
-      if (unlikely(!txn->tw.relist))
+      const size_t len = MDBX_PNL_GETSIZE(parent->tw.repnl) + parent->tw.loose_count;
+      txn->tw.repnl = pnl_alloc((len > MDBX_PNL_INITIAL) ? len : MDBX_PNL_INITIAL);
+      if (unlikely(!txn->tw.repnl))
        rc = MDBX_ENOMEM;
    }
    if (unlikely(rc != MDBX_SUCCESS)) {
    nested_failed:
-      pnl_free(txn->tw.relist);
+      pnl_free(txn->tw.repnl);
      dpl_free(txn);
      osal_free(txn);
      return LOG_IFERR(rc);
@@ -275,7 +275,7 @@ int mdbx_txn_begin_ex(MDBX_env *env, MDBX_txn *parent, MDBX_txn_flags_t flags, M
    do {
      page_t *lp = parent->tw.loose_pages;
      tASSERT(parent, lp->flags == P_LOOSE);
-      rc = pnl_insert_span(&parent->tw.relist, lp->pgno, 1);
+      rc = pnl_insert_span(&parent->tw.repnl, lp->pgno, 1);
      if (unlikely(rc != MDBX_SUCCESS))
        goto nested_failed;
      MDBX_ASAN_UNPOISON_MEMORY_REGION(&page_next(lp), sizeof(page_t *));
@@ -297,18 +297,18 @@ int mdbx_txn_begin_ex(MDBX_env *env, MDBX_txn *parent, MDBX_txn_flags_t flags, M
    if (parent->tw.spilled.list)
      spill_purge(parent);
-    tASSERT(txn, MDBX_PNL_ALLOCLEN(txn->tw.relist) >= MDBX_PNL_GETSIZE(parent->tw.relist));
-    memcpy(txn->tw.relist, parent->tw.relist, MDBX_PNL_SIZEOF(parent->tw.relist));
-    eASSERT(env, pnl_check_allocated(txn->tw.relist, (txn->geo.first_unallocated /* LY: intentional assignment
+    tASSERT(txn, MDBX_PNL_ALLOCLEN(txn->tw.repnl) >= MDBX_PNL_GETSIZE(parent->tw.repnl));
+    memcpy(txn->tw.repnl, parent->tw.repnl, MDBX_PNL_SIZEOF(parent->tw.repnl));
+    eASSERT(env, pnl_check_allocated(txn->tw.repnl, (txn->geo.first_unallocated /* LY: intentional assignment
                                                        here, only for assertion */
                                                        = parent->geo.first_unallocated) -
                                                      MDBX_ENABLE_REFUND));
    txn->tw.gc.time_acc = parent->tw.gc.time_acc;
    txn->tw.gc.last_reclaimed = parent->tw.gc.last_reclaimed;
-    if (parent->tw.gc.reclaimed) {
-      txn->tw.gc.reclaimed = parent->tw.gc.reclaimed;
-      parent->tw.gc.reclaimed = (void *)(intptr_t)MDBX_PNL_GETSIZE(parent->tw.gc.reclaimed);
+    if (parent->tw.gc.retxl) {
+      txn->tw.gc.retxl = parent->tw.gc.retxl;
+      parent->tw.gc.retxl = (void *)(intptr_t)MDBX_PNL_GETSIZE(parent->tw.gc.retxl);
    }
    txn->tw.retired_pages = parent->tw.retired_pages;
@@ -464,7 +464,7 @@ int mdbx_txn_commit_ex(MDBX_txn *txn, MDBX_commit_latency *latency) {
    tASSERT(txn, parent_retired_len <= MDBX_PNL_GETSIZE(txn->tw.retired_pages));
    const size_t retired_delta = MDBX_PNL_GETSIZE(txn->tw.retired_pages) - parent_retired_len;
    if (retired_delta) {
-      rc = pnl_need(&txn->tw.relist, retired_delta);
+      rc = pnl_need(&txn->tw.repnl, retired_delta);
      if (unlikely(rc != MDBX_SUCCESS))
        goto fail;
    }
@@ -486,15 +486,15 @@ int mdbx_txn_commit_ex(MDBX_txn *txn, MDBX_commit_latency *latency) {
    //-------------------------------------------------------------------------
-    parent->tw.gc.reclaimed = txn->tw.gc.reclaimed;
-    txn->tw.gc.reclaimed = nullptr;
+    parent->tw.gc.retxl = txn->tw.gc.retxl;
+    txn->tw.gc.retxl = nullptr;
    parent->tw.retired_pages = txn->tw.retired_pages;
    txn->tw.retired_pages = nullptr;
-    pnl_free(parent->tw.relist);
-    parent->tw.relist = txn->tw.relist;
-    txn->tw.relist = nullptr;
+    pnl_free(parent->tw.repnl);
+    parent->tw.repnl = txn->tw.repnl;
+    txn->tw.repnl = nullptr;
    parent->tw.gc.time_acc = txn->tw.gc.time_acc;
    parent->tw.gc.last_reclaimed = txn->tw.gc.last_reclaimed;
@@ -550,8 +550,8 @@ int mdbx_txn_commit_ex(MDBX_txn *txn, MDBX_commit_latency *latency) {
      VALGRIND_MAKE_MEM_DEFINED(&page_next(lp), sizeof(page_t *));
    }
    /* Check parent's reclaimed pages not suitable for refund */
-    if (MDBX_PNL_GETSIZE(parent->tw.relist))
-      tASSERT(parent, MDBX_PNL_MOST(parent->tw.relist) + 1 < parent->geo.first_unallocated);
+    if (MDBX_PNL_GETSIZE(parent->tw.repnl))
+      tASSERT(parent, MDBX_PNL_MOST(parent->tw.repnl) + 1 < parent->geo.first_unallocated);
  }
#endif /* MDBX_ENABLE_REFUND */

View File

@@ -28,7 +28,7 @@ __cold static int audit_ex_locked(MDBX_txn *txn, size_t retired_stored, bool don
  const MDBX_env *const env = txn->env;
  size_t pending = 0;
  if ((txn->flags & MDBX_TXN_RDONLY) == 0)
-    pending = txn->tw.loose_count + MDBX_PNL_GETSIZE(txn->tw.relist) +
+    pending = txn->tw.loose_count + MDBX_PNL_GETSIZE(txn->tw.repnl) +
              (MDBX_PNL_GETSIZE(txn->tw.retired_pages) - retired_stored);
  cursor_couple_t cx;
@@ -46,9 +46,9 @@ __cold static int audit_ex_locked(MDBX_txn *txn, size_t retired_stored, bool don
      return MDBX_CORRUPTED;
    }
    txnid_t id = unaligned_peek_u64(4, key.iov_base);
-    if (txn->tw.gc.reclaimed) {
-      for (size_t i = 1; i <= MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed); ++i)
-        if (id == txn->tw.gc.reclaimed[i])
+    if (txn->tw.gc.retxl) {
+      for (size_t i = 1; i <= MDBX_PNL_GETSIZE(txn->tw.gc.retxl); ++i)
+        if (id == txn->tw.gc.retxl[i])
          goto skip;
    } else if (id <= txn->tw.gc.last_reclaimed)
      goto skip;
@@ -93,7 +93,7 @@ __cold static int audit_ex_locked(MDBX_txn *txn, size_t retired_stored, bool don
  if ((txn->flags & MDBX_TXN_RDONLY) == 0)
    ERROR("audit @%" PRIaTXN ": %zu(pending) = %zu(loose) + "
          "%zu(reclaimed) + %zu(retired-pending) - %zu(retired-stored)",
-          txn->txnid, pending, txn->tw.loose_count, MDBX_PNL_GETSIZE(txn->tw.relist),
+          txn->txnid, pending, txn->tw.loose_count, MDBX_PNL_GETSIZE(txn->tw.repnl),
          txn->tw.retired_pages ? MDBX_PNL_GETSIZE(txn->tw.retired_pages) : 0, retired_stored);
  ERROR("audit @%" PRIaTXN ": %zu(pending) + %zu"
        "(gc) + %zu(count) = %zu(total) <> %zu"

View File

@@ -362,12 +362,12 @@ __cold bool dpl_check(MDBX_txn *txn) {
      return false;
    }
-    const size_t rpa = pnl_search(txn->tw.relist, dp->pgno, txn->geo.first_unallocated);
-    tASSERT(txn, rpa > MDBX_PNL_GETSIZE(txn->tw.relist) || txn->tw.relist[rpa] != dp->pgno);
-    if (rpa <= MDBX_PNL_GETSIZE(txn->tw.relist) && unlikely(txn->tw.relist[rpa] == dp->pgno))
+    const size_t rpa = pnl_search(txn->tw.repnl, dp->pgno, txn->geo.first_unallocated);
+    tASSERT(txn, rpa > MDBX_PNL_GETSIZE(txn->tw.repnl) || txn->tw.repnl[rpa] != dp->pgno);
+    if (rpa <= MDBX_PNL_GETSIZE(txn->tw.repnl) && unlikely(txn->tw.repnl[rpa] == dp->pgno))
      return false;
    if (num > 1) {
-      const size_t rpb = pnl_search(txn->tw.relist, dp->pgno + num - 1, txn->geo.first_unallocated);
+      const size_t rpb = pnl_search(txn->tw.repnl, dp->pgno + num - 1, txn->geo.first_unallocated);
      tASSERT(txn, rpa == rpb);
      if (unlikely(rpa != rpb))
        return false;

View File

@@ -612,10 +612,10 @@ __cold int env_close(MDBX_env *env, bool resurrect_after_fork) {
  }
  if (env->basal_txn) {
    dpl_free(env->basal_txn);
-    txl_free(env->basal_txn->tw.gc.reclaimed);
+    txl_free(env->basal_txn->tw.gc.retxl);
    pnl_free(env->basal_txn->tw.retired_pages);
    pnl_free(env->basal_txn->tw.spilled.list);
-    pnl_free(env->basal_txn->tw.relist);
+    pnl_free(env->basal_txn->tw.repnl);
    osal_free(env->basal_txn);
    env->basal_txn = nullptr;
  }

View File

@@ -591,17 +591,17 @@ static inline bool is_gc_usable(MDBX_txn *txn, const MDBX_cursor *mc, const uint
}

__hot static bool is_already_reclaimed(const MDBX_txn *txn, txnid_t id) {
-  const size_t len = MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed);
+  const size_t len = MDBX_PNL_GETSIZE(txn->tw.gc.retxl);
  for (size_t i = 1; i <= len; ++i)
-    if (txn->tw.gc.reclaimed[i] == id)
+    if (txn->tw.gc.retxl[i] == id)
      return true;
  return false;
}

-__hot static pgno_t relist_get_single(MDBX_txn *txn) {
-  const size_t len = MDBX_PNL_GETSIZE(txn->tw.relist);
+__hot static pgno_t repnl_get_single(MDBX_txn *txn) {
+  const size_t len = MDBX_PNL_GETSIZE(txn->tw.repnl);
  assert(len > 0);
-  pgno_t *target = MDBX_PNL_EDGE(txn->tw.relist);
+  pgno_t *target = MDBX_PNL_EDGE(txn->tw.repnl);
  const ptrdiff_t dir = MDBX_PNL_ASCENDING ? 1 : -1;

  /* There are THREE potentially winning, but mutually opposed tactics:
@@ -610,7 +610,7 @@ __hot static pgno_t relist_get_single(MDBX_txn *txn) {
   * with the disk will be more clustered, and pages closer to the end of the DB have a better chance
   * of falling under auto-compactification. This tactic is already partially implemented, but
   * for it to be effective, page allocation must be explicitly prioritized:
-   *  - maintain two relist, for near and for far pages;
+   *  - maintain two repnl, for near and for far pages;
   *  - use pages from the far list if the first one is empty,
   *    and the second one is too large, or when the GC is empty.
   *
@@ -634,7 +634,7 @@ __hot static pgno_t relist_get_single(MDBX_txn *txn) {
   * on its own, but even works to the detriment (it adds chaos).
   *
   * Therefore:
-   *  - splitting relist into "near" and "far" pages is added to the TODO,
+   *  - splitting repnl into "near" and "far" pages is added to the TODO,
   *    with subsequent implementation of the first tactic;
   *  - preferential use of sequences is handed off
   *    to MithrilDB as a part of the "HDD friendly" feature;
@@ -669,7 +669,7 @@ __hot static pgno_t relist_get_single(MDBX_txn *txn) {
#else
  /* cut the element out, shifting the tail */
  const pgno_t pgno = *scan;
-  MDBX_PNL_SETSIZE(txn->tw.relist, len - 1);
+  MDBX_PNL_SETSIZE(txn->tw.repnl, len - 1);
  while (++scan <= target)
    scan[-1] = *scan;
  return pgno;
@@ -682,44 +682,44 @@ __hot static pgno_t relist_get_single(MDBX_txn *txn) {
  const pgno_t pgno = *target;
#if MDBX_PNL_ASCENDING
  /* cut the element out, shifting the tail */
-  MDBX_PNL_SETSIZE(txn->tw.relist, len - 1);
-  for (const pgno_t *const end = txn->tw.relist + len - 1; target <= end; ++target)
+  MDBX_PNL_SETSIZE(txn->tw.repnl, len - 1);
+  for (const pgno_t *const end = txn->tw.repnl + len - 1; target <= end; ++target)
    *target = target[1];
#else
  /* no need to move the tail, just truncate the list */
-  MDBX_PNL_SETSIZE(txn->tw.relist, len - 1);
+  MDBX_PNL_SETSIZE(txn->tw.repnl, len - 1);
#endif
  return pgno;
}

-__hot static pgno_t relist_get_sequence(MDBX_txn *txn, const size_t num, uint8_t flags) {
-  const size_t len = MDBX_PNL_GETSIZE(txn->tw.relist);
-  pgno_t *edge = MDBX_PNL_EDGE(txn->tw.relist);
+__hot static pgno_t repnl_get_sequence(MDBX_txn *txn, const size_t num, uint8_t flags) {
+  const size_t len = MDBX_PNL_GETSIZE(txn->tw.repnl);
+  pgno_t *edge = MDBX_PNL_EDGE(txn->tw.repnl);
  assert(len >= num && num > 1);
  const size_t seq = num - 1;
#if !MDBX_PNL_ASCENDING
  if (edge[-(ptrdiff_t)seq] - *edge == seq) {
    if (unlikely(flags & ALLOC_RESERVE))
      return P_INVALID;
-    assert(edge == scan4range_checker(txn->tw.relist, seq));
+    assert(edge == scan4range_checker(txn->tw.repnl, seq));
    /* no need to move the tail, just truncate the list */
-    MDBX_PNL_SETSIZE(txn->tw.relist, len - num);
+    MDBX_PNL_SETSIZE(txn->tw.repnl, len - num);
    return *edge;
  }
#endif
  pgno_t *target = scan4seq_impl(edge, len, seq);
-  assert(target == scan4range_checker(txn->tw.relist, seq));
+  assert(target == scan4range_checker(txn->tw.repnl, seq));
  if (target) {
    if (unlikely(flags & ALLOC_RESERVE))
      return P_INVALID;
    const pgno_t pgno = *target;
    /* cut the found sequence out, shifting the tail */
-    MDBX_PNL_SETSIZE(txn->tw.relist, len - num);
+    MDBX_PNL_SETSIZE(txn->tw.repnl, len - num);
#if MDBX_PNL_ASCENDING
-    for (const pgno_t *const end = txn->tw.relist + len - num; target <= end; ++target)
+    for (const pgno_t *const end = txn->tw.repnl + len - num; target <= end; ++target)
      *target = target[num];
#else
-    for (const pgno_t *const end = txn->tw.relist + len; ++target <= end;)
+    for (const pgno_t *const end = txn->tw.repnl + len; ++target <= end;)
      target[-(ptrdiff_t)num] = *target;
#endif
    return pgno;
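
For orientation, here is a standalone sketch of the list surgery that repnl_get_single/repnl_get_sequence perform in the hunks above: find an entry, or a run of num consecutive page numbers, in a sorted list and close the gap by shifting the tail. This is a simplified illustration over a plain ascending array without duplicates; the helper names are hypothetical and are not libmdbx's PNL macros.

#include <stddef.h>
#include <stdint.h>

typedef uint32_t pgno_t; /* assumed width, for illustration only */

/* Return the index of the first run of 'num' consecutive page numbers,
 * or 'len' if no such run exists (cf. scan4seq_impl in the hunk above). */
static size_t find_run(const pgno_t *list, size_t len, size_t num) {
  for (size_t i = 0; i + num <= len; ++i)
    if (list[i + num - 1] - list[i] == (pgno_t)(num - 1))
      return i;
  return len;
}

/* Remove list[pos .. pos+num) by shifting the tail left; returns the new length
 * (cf. the MDBX_PNL_SETSIZE() plus shift-loop pattern above). */
static size_t cut_run(pgno_t *list, size_t len, size_t pos, size_t num) {
  for (size_t i = pos; i + num < len; ++i)
    list[i] = list[i + num];
  return len - num;
}

For example, with {3, 4, 5, 9, 10} and num = 2, find_run() returns 0 and cut_run() leaves {5, 9, 10}. When the target sits at the very edge of the list, the real code above skips the shift and only truncates the size, which is what the MDBX_PNL_ASCENDING/descending branches distinguish.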
@@ -829,7 +829,7 @@ static inline pgr_t page_alloc_finalize(MDBX_env *const env, MDBX_txn *const txn
  ret.err = page_dirty(txn, ret.page, (pgno_t)num);
bailout:
-  tASSERT(txn, pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
+  tASSERT(txn, pnl_check_allocated(txn->tw.repnl, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
#if MDBX_ENABLE_PROFGC
  size_t majflt_after;
  prof->xtime_cpu += osal_cputime(&majflt_after) - cputime_before;
@@ -849,7 +849,7 @@ pgr_t gc_alloc_ex(const MDBX_cursor *const mc, const size_t num, uint8_t flags)
#endif /* MDBX_ENABLE_PROFGC */
  eASSERT(env, num > 0 || (flags & ALLOC_RESERVE));
-  eASSERT(env, pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
+  eASSERT(env, pnl_check_allocated(txn->tw.repnl, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
  size_t newnext;
  const uint64_t monotime_begin = (MDBX_ENABLE_PROFGC || (num > 1 && env->options.gc_time_limit)) ? osal_monotime() : 0;
@@ -864,15 +864,15 @@ pgr_t gc_alloc_ex(const MDBX_cursor *const mc, const size_t num, uint8_t flags)
#if MDBX_ENABLE_PROFGC
      prof->xpages += 1;
#endif /* MDBX_ENABLE_PROFGC */
-      if (MDBX_PNL_GETSIZE(txn->tw.relist) >= num) {
-        eASSERT(env, MDBX_PNL_LAST(txn->tw.relist) < txn->geo.first_unallocated &&
-                         MDBX_PNL_FIRST(txn->tw.relist) < txn->geo.first_unallocated);
-        pgno = relist_get_sequence(txn, num, flags);
+      if (MDBX_PNL_GETSIZE(txn->tw.repnl) >= num) {
+        eASSERT(env, MDBX_PNL_LAST(txn->tw.repnl) < txn->geo.first_unallocated &&
+                         MDBX_PNL_FIRST(txn->tw.repnl) < txn->geo.first_unallocated);
+        pgno = repnl_get_sequence(txn, num, flags);
        if (likely(pgno))
          goto done;
      }
    } else {
-      eASSERT(env, num == 0 || MDBX_PNL_GETSIZE(txn->tw.relist) == 0);
+      eASSERT(env, num == 0 || MDBX_PNL_GETSIZE(txn->tw.repnl) == 0);
      eASSERT(env, !(flags & ALLOC_RESERVE) || num == 0);
    }
@@ -890,7 +890,7 @@ pgr_t gc_alloc_ex(const MDBX_cursor *const mc, const size_t num, uint8_t flags)
       * Otherwise an attempt to enlarge the reserve may lead to the need for an even
       * larger reserve because the list of recycled pages keeps growing. */
      (flags & ALLOC_RESERVE) == 0) {
-    if (txn->dbs[FREE_DBI].branch_pages && MDBX_PNL_GETSIZE(txn->tw.relist) < env->maxgc_large1page / 2)
+    if (txn->dbs[FREE_DBI].branch_pages && MDBX_PNL_GETSIZE(txn->tw.repnl) < env->maxgc_large1page / 2)
      flags += ALLOC_COALESCE;
  }
@@ -930,9 +930,9 @@ retry_gc_have_oldest:
  txnid_t id = 0;
  MDBX_cursor_op op = MDBX_FIRST;
  if (flags & ALLOC_LIFO) {
-    if (!txn->tw.gc.reclaimed) {
-      txn->tw.gc.reclaimed = txl_alloc();
-      if (unlikely(!txn->tw.gc.reclaimed)) {
+    if (!txn->tw.gc.retxl) {
+      txn->tw.gc.retxl = txl_alloc();
+      if (unlikely(!txn->tw.gc.retxl)) {
        ret.err = MDBX_ENOMEM;
        goto fail;
      }
@@ -1000,9 +1000,9 @@ next_gc:;
  }
  const size_t gc_len = MDBX_PNL_GETSIZE(gc_pnl);
-  TRACE("gc-read: id #%" PRIaTXN " len %zu, re-list will %zu ", id, gc_len, gc_len + MDBX_PNL_GETSIZE(txn->tw.relist));
-  if (unlikely(gc_len + MDBX_PNL_GETSIZE(txn->tw.relist) >= env->maxgc_large1page)) {
+  TRACE("gc-read: id #%" PRIaTXN " len %zu, re-list will %zu ", id, gc_len, gc_len + MDBX_PNL_GETSIZE(txn->tw.repnl));
+  if (unlikely(gc_len + MDBX_PNL_GETSIZE(txn->tw.repnl) >= env->maxgc_large1page)) {
    /* Don't try to coalesce too much. */
    if (flags & ALLOC_SHOULD_SCAN) {
      eASSERT(env, flags & ALLOC_COALESCE);
@@ -1012,32 +1012,32 @@ next_gc:;
      env->lck->pgops.gc_prof.coalescences += 1;
#endif /* MDBX_ENABLE_PROFGC */
      TRACE("clear %s %s", "ALLOC_COALESCE", "since got threshold");
-      if (MDBX_PNL_GETSIZE(txn->tw.relist) >= num) {
-        eASSERT(env, MDBX_PNL_LAST(txn->tw.relist) < txn->geo.first_unallocated &&
-                         MDBX_PNL_FIRST(txn->tw.relist) < txn->geo.first_unallocated);
+      if (MDBX_PNL_GETSIZE(txn->tw.repnl) >= num) {
+        eASSERT(env, MDBX_PNL_LAST(txn->tw.repnl) < txn->geo.first_unallocated &&
+                         MDBX_PNL_FIRST(txn->tw.repnl) < txn->geo.first_unallocated);
        if (likely(num == 1)) {
-          pgno = relist_get_single(txn);
+          pgno = repnl_get_single(txn);
          goto done;
        }
-        pgno = relist_get_sequence(txn, num, flags);
+        pgno = repnl_get_sequence(txn, num, flags);
        if (likely(pgno))
          goto done;
      }
      flags -= ALLOC_COALESCE | ALLOC_SHOULD_SCAN;
    }
-    if (unlikely(/* list is too long already */ MDBX_PNL_GETSIZE(txn->tw.relist) >= env->options.rp_augment_limit) &&
+    if (unlikely(/* list is too long already */ MDBX_PNL_GETSIZE(txn->tw.repnl) >= env->options.rp_augment_limit) &&
        ((/* not a slot-request from gc-update */ num &&
          /* have enough unallocated space */ txn->geo.upper >= txn->geo.first_unallocated + num &&
          monotime_since_cached(monotime_begin, &now_cache) + txn->tw.gc.time_acc >= env->options.gc_time_limit) ||
-         gc_len + MDBX_PNL_GETSIZE(txn->tw.relist) >= PAGELIST_LIMIT)) {
+         gc_len + MDBX_PNL_GETSIZE(txn->tw.repnl) >= PAGELIST_LIMIT)) {
      /* Stop reclaiming to avoid large/overflow the page list. This is a rare
       * case while search for a continuously multi-page region in a
       * large database, see https://libmdbx.dqdkfa.ru/dead-github/issues/123 */
      NOTICE("stop reclaiming %s: %zu (current) + %zu "
             "(chunk) -> %zu, rp_augment_limit %u",
-            likely(gc_len + MDBX_PNL_GETSIZE(txn->tw.relist) < PAGELIST_LIMIT) ? "since rp_augment_limit was reached"
+            likely(gc_len + MDBX_PNL_GETSIZE(txn->tw.repnl) < PAGELIST_LIMIT) ? "since rp_augment_limit was reached"
                                                                               : "to avoid PNL overflow",
-            MDBX_PNL_GETSIZE(txn->tw.relist), gc_len, gc_len + MDBX_PNL_GETSIZE(txn->tw.relist),
+            MDBX_PNL_GETSIZE(txn->tw.repnl), gc_len, gc_len + MDBX_PNL_GETSIZE(txn->tw.repnl),
             env->options.rp_augment_limit);
      goto depleted_gc;
    }
@@ -1046,13 +1046,13 @@ next_gc:;
  /* Remember ID of readed GC record */
  txn->tw.gc.last_reclaimed = id;
  if (flags & ALLOC_LIFO) {
-    ret.err = txl_append(&txn->tw.gc.reclaimed, id);
+    ret.err = txl_append(&txn->tw.gc.retxl, id);
    if (unlikely(ret.err != MDBX_SUCCESS))
      goto fail;
  }

-  /* Append PNL from GC record to tw.relist */
-  ret.err = pnl_need(&txn->tw.relist, gc_len);
+  /* Append PNL from GC record to tw.repnl */
+  ret.err = pnl_need(&txn->tw.repnl, gc_len);
  if (unlikely(ret.err != MDBX_SUCCESS))
    goto fail;
@@ -1067,36 +1067,36 @@ next_gc:;
#if MDBX_ENABLE_PROFGC
  const uint64_t merge_begin = osal_monotime();
#endif /* MDBX_ENABLE_PROFGC */
-  pnl_merge(txn->tw.relist, gc_pnl);
+  pnl_merge(txn->tw.repnl, gc_pnl);
#if MDBX_ENABLE_PROFGC
  prof->pnl_merge.calls += 1;
-  prof->pnl_merge.volume += MDBX_PNL_GETSIZE(txn->tw.relist);
+  prof->pnl_merge.volume += MDBX_PNL_GETSIZE(txn->tw.repnl);
  prof->pnl_merge.time += osal_monotime() - merge_begin;
#endif /* MDBX_ENABLE_PROFGC */
  flags |= ALLOC_SHOULD_SCAN;
  if (AUDIT_ENABLED()) {
-    if (unlikely(!pnl_check(txn->tw.relist, txn->geo.first_unallocated))) {
+    if (unlikely(!pnl_check(txn->tw.repnl, txn->geo.first_unallocated))) {
      ERROR("%s/%d: %s", "MDBX_CORRUPTED", MDBX_CORRUPTED, "invalid txn retired-list");
      ret.err = MDBX_CORRUPTED;
      goto fail;
    }
  } else {
-    eASSERT(env, pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated));
+    eASSERT(env, pnl_check_allocated(txn->tw.repnl, txn->geo.first_unallocated));
  }
  eASSERT(env, dpl_check(txn));

-  eASSERT(env, MDBX_PNL_GETSIZE(txn->tw.relist) == 0 || MDBX_PNL_MOST(txn->tw.relist) < txn->geo.first_unallocated);
-  if (MDBX_ENABLE_REFUND && MDBX_PNL_GETSIZE(txn->tw.relist) &&
-      unlikely(MDBX_PNL_MOST(txn->tw.relist) == txn->geo.first_unallocated - 1)) {
+  eASSERT(env, MDBX_PNL_GETSIZE(txn->tw.repnl) == 0 || MDBX_PNL_MOST(txn->tw.repnl) < txn->geo.first_unallocated);
+  if (MDBX_ENABLE_REFUND && MDBX_PNL_GETSIZE(txn->tw.repnl) &&
+      unlikely(MDBX_PNL_MOST(txn->tw.repnl) == txn->geo.first_unallocated - 1)) {
    /* Refund suitable pages into "unallocated" space */
    txn_refund(txn);
  }
-  eASSERT(env, pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
+  eASSERT(env, pnl_check_allocated(txn->tw.repnl, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));

  /* Done for a kick-reclaim mode, actually no page needed */
  if (unlikely(num == 0)) {
    eASSERT(env, ret.err == MDBX_SUCCESS);
-    TRACE("%s: last id #%" PRIaTXN ", re-len %zu", "early-exit for slot", id, MDBX_PNL_GETSIZE(txn->tw.relist));
+    TRACE("%s: last id #%" PRIaTXN ", re-len %zu", "early-exit for slot", id, MDBX_PNL_GETSIZE(txn->tw.repnl));
    goto early_exit;
  }
@@ -1104,33 +1104,33 @@ next_gc:;
  eASSERT(env, op == MDBX_PREV || op == MDBX_NEXT);
  if (flags & ALLOC_COALESCE) {
-    TRACE("%s: last id #%" PRIaTXN ", re-len %zu", "coalesce-continue", id, MDBX_PNL_GETSIZE(txn->tw.relist));
+    TRACE("%s: last id #%" PRIaTXN ", re-len %zu", "coalesce-continue", id, MDBX_PNL_GETSIZE(txn->tw.repnl));
    goto next_gc;
  }

scan:
  eASSERT(env, flags & ALLOC_SHOULD_SCAN);
  eASSERT(env, num > 0);
-  if (MDBX_PNL_GETSIZE(txn->tw.relist) >= num) {
-    eASSERT(env, MDBX_PNL_LAST(txn->tw.relist) < txn->geo.first_unallocated &&
-                     MDBX_PNL_FIRST(txn->tw.relist) < txn->geo.first_unallocated);
+  if (MDBX_PNL_GETSIZE(txn->tw.repnl) >= num) {
+    eASSERT(env, MDBX_PNL_LAST(txn->tw.repnl) < txn->geo.first_unallocated &&
+                     MDBX_PNL_FIRST(txn->tw.repnl) < txn->geo.first_unallocated);
    if (likely(num == 1)) {
      eASSERT(env, !(flags & ALLOC_RESERVE));
-      pgno = relist_get_single(txn);
+      pgno = repnl_get_single(txn);
      goto done;
    }
-    pgno = relist_get_sequence(txn, num, flags);
+    pgno = repnl_get_sequence(txn, num, flags);
    if (likely(pgno))
      goto done;
  }
  flags -= ALLOC_SHOULD_SCAN;
  if (ret.err == MDBX_SUCCESS) {
-    TRACE("%s: last id #%" PRIaTXN ", re-len %zu", "continue-search", id, MDBX_PNL_GETSIZE(txn->tw.relist));
+    TRACE("%s: last id #%" PRIaTXN ", re-len %zu", "continue-search", id, MDBX_PNL_GETSIZE(txn->tw.repnl));
    goto next_gc;
  }

depleted_gc:
-  TRACE("%s: last id #%" PRIaTXN ", re-len %zu", "gc-depleted", id, MDBX_PNL_GETSIZE(txn->tw.relist));
+  TRACE("%s: last id #%" PRIaTXN ", re-len %zu", "gc-depleted", id, MDBX_PNL_GETSIZE(txn->tw.repnl));
  ret.err = MDBX_NOTFOUND;
  if (flags & ALLOC_SHOULD_SCAN)
    goto scan;
@@ -1269,7 +1269,7 @@ done:
  if (likely((flags & ALLOC_RESERVE) == 0)) {
    if (pgno) {
      eASSERT(env, pgno + num <= txn->geo.first_unallocated && pgno >= NUM_METAS);
-      eASSERT(env, pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
+      eASSERT(env, pnl_check_allocated(txn->tw.repnl, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
    } else {
      pgno = txn->geo.first_unallocated;
      txn->geo.first_unallocated += (pgno_t)num;
@@ -1281,7 +1281,7 @@ done:
  if (unlikely(ret.err != MDBX_SUCCESS)) {
  fail:
    eASSERT(env, ret.err != MDBX_SUCCESS);
-    eASSERT(env, pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
+    eASSERT(env, pnl_check_allocated(txn->tw.repnl, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
    int level;
    const char *what;
    if (flags & ALLOC_RESERVE) {
@@ -1297,7 +1297,7 @@ done:
          "unable alloc %zu %s, alloc-flags 0x%x, err %d, txn-flags "
          "0x%x, re-list-len %zu, loose-count %zu, gc: height %u, "
          "branch %zu, leaf %zu, large %zu, entries %zu\n",
-          num, what, flags, ret.err, txn->flags, MDBX_PNL_GETSIZE(txn->tw.relist), txn->tw.loose_count,
+          num, what, flags, ret.err, txn->flags, MDBX_PNL_GETSIZE(txn->tw.repnl), txn->tw.loose_count,
          txn->dbs[FREE_DBI].height, (size_t)txn->dbs[FREE_DBI].branch_pages,
          (size_t)txn->dbs[FREE_DBI].leaf_pages, (size_t)txn->dbs[FREE_DBI].large_pages,
          (size_t)txn->dbs[FREE_DBI].items);
@@ -1346,8 +1346,8 @@ __hot pgr_t gc_alloc_single(const MDBX_cursor *const mc) {
    return ret;
  }

-  if (likely(MDBX_PNL_GETSIZE(txn->tw.relist) > 0))
-    return page_alloc_finalize(txn->env, txn, mc, relist_get_single(txn), 1);
+  if (likely(MDBX_PNL_GETSIZE(txn->tw.repnl) > 0))
+    return page_alloc_finalize(txn->env, txn, mc, repnl_get_single(txn), 1);

  return gc_alloc_ex(mc, 1, ALLOC_DEFAULT);
}

View File

@@ -11,7 +11,7 @@ MDBX_MAYBE_UNUSED static inline const char *dbg_prefix(const gcu_t *ctx) {
  return is_lifo(ctx->cursor.txn) ? " lifo" : " fifo";
}

-static inline size_t backlog_size(MDBX_txn *txn) { return MDBX_PNL_GETSIZE(txn->tw.relist) + txn->tw.loose_count; }
+static inline size_t backlog_size(MDBX_txn *txn) { return MDBX_PNL_GETSIZE(txn->tw.repnl) + txn->tw.loose_count; }

static int clean_stored_retired(MDBX_txn *txn, gcu_t *ctx) {
  int err = MDBX_SUCCESS;
@@ -72,35 +72,35 @@ static int prepare_backlog(MDBX_txn *txn, gcu_t *ctx) {
  tASSERT(txn, is_pointed(&ctx->cursor) || txn->dbs[FREE_DBI].leaf_pages == 0);
  const intptr_t retired_left = MDBX_PNL_SIZEOF(txn->tw.retired_pages) - ctx->retired_stored;
-  size_t for_relist = 0;
+  size_t for_repnl = 0;
  if (MDBX_ENABLE_BIGFOOT && retired_left > 0) {
-    for_relist = (retired_left + txn->env->maxgc_large1page - 1) / txn->env->maxgc_large1page;
+    for_repnl = (retired_left + txn->env->maxgc_large1page - 1) / txn->env->maxgc_large1page;
    const size_t per_branch_page = txn->env->maxgc_per_branch;
-    for (size_t entries = for_relist; entries > 1; for_split += entries)
+    for (size_t entries = for_repnl; entries > 1; for_split += entries)
      entries = (entries + per_branch_page - 1) / per_branch_page;
  } else if (!MDBX_ENABLE_BIGFOOT && retired_left != 0) {
-    for_relist = largechunk_npages(txn->env, MDBX_PNL_SIZEOF(txn->tw.retired_pages));
+    for_repnl = largechunk_npages(txn->env, MDBX_PNL_SIZEOF(txn->tw.retired_pages));
  }
  const size_t for_tree_before_touch = for_cow + for_rebalance + for_split;
  const size_t for_tree_after_touch = for_rebalance + for_split;
-  const size_t for_all_before_touch = for_relist + for_tree_before_touch;
-  const size_t for_all_after_touch = for_relist + for_tree_after_touch;
+  const size_t for_all_before_touch = for_repnl + for_tree_before_touch;
+  const size_t for_all_after_touch = for_repnl + for_tree_after_touch;

-  if (likely(for_relist < 2 && backlog_size(txn) > for_all_before_touch) &&
+  if (likely(for_repnl < 2 && backlog_size(txn) > for_all_before_touch) &&
      (ctx->cursor.top < 0 || is_modifable(txn, ctx->cursor.pg[ctx->cursor.top])))
    return MDBX_SUCCESS;

  TRACE(">> retired-stored %zu, left %zi, backlog %zu, need %zu (4list %zu, "
        "4split %zu, "
        "4cow %zu, 4tree %zu)",
-        ctx->retired_stored, retired_left, backlog_size(txn), for_all_before_touch, for_relist, for_split, for_cow,
+        ctx->retired_stored, retired_left, backlog_size(txn), for_all_before_touch, for_repnl, for_split, for_cow,
        for_tree_before_touch);

  int err = touch_gc(ctx);
  TRACE("== after-touch, backlog %zu, err %d", backlog_size(txn), err);

-  if (!MDBX_ENABLE_BIGFOOT && unlikely(for_relist > 1) &&
+  if (!MDBX_ENABLE_BIGFOOT && unlikely(for_repnl > 1) &&
      MDBX_PNL_GETSIZE(txn->tw.retired_pages) != ctx->retired_stored && err == MDBX_SUCCESS) {
    if (unlikely(ctx->retired_stored)) {
      err = clean_stored_retired(txn, ctx);
@@ -109,9 +109,9 @@ static int prepare_backlog(MDBX_txn *txn, gcu_t *ctx) {
      if (!ctx->retired_stored)
        return /* restart by tail-recursion */ prepare_backlog(txn, ctx);
    }
-    err = gc_alloc_ex(&ctx->cursor, for_relist, ALLOC_RESERVE).err;
+    err = gc_alloc_ex(&ctx->cursor, for_repnl, ALLOC_RESERVE).err;
    TRACE("== after-4linear, backlog %zu, err %d", backlog_size(txn), err);
-    cASSERT(&ctx->cursor, backlog_size(txn) >= for_relist || err != MDBX_SUCCESS);
+    cASSERT(&ctx->cursor, backlog_size(txn) >= for_repnl || err != MDBX_SUCCESS);
  }

  while (backlog_size(txn) < for_all_after_touch && err == MDBX_SUCCESS)
@@ -146,10 +146,10 @@ static inline void zeroize_reserved(const MDBX_env *env, MDBX_val pnl) {
static int gcu_loose(MDBX_txn *txn, gcu_t *ctx) {
  tASSERT(txn, txn->tw.loose_count > 0);
-  /* Return loose page numbers to tw.relist,
+  /* Return loose page numbers to tw.repnl,
   * though usually none are left at this point.
   * The pages themselves remain in dirtylist. */
-  if (unlikely(!txn->tw.gc.reclaimed && txn->tw.gc.last_reclaimed < 1)) {
+  if (unlikely(!txn->tw.gc.retxl && txn->tw.gc.last_reclaimed < 1)) {
    TRACE("%s: try allocate gc-slot for %zu loose-pages", dbg_prefix(ctx), txn->tw.loose_count);
    int err = gc_alloc_ex(&ctx->cursor, 0, ALLOC_RESERVE).err;
    if (err == MDBX_SUCCESS) {
@@ -158,7 +158,7 @@ static int gcu_loose(MDBX_txn *txn, gcu_t *ctx) {
    }

    /* Put loose page numbers in tw.retired_pages,
-     * since unable to return ones to tw.relist. */
+     * since unable to return ones to tw.repnl. */
    err = pnl_need(&txn->tw.retired_pages, txn->tw.loose_count);
    if (unlikely(err != MDBX_SUCCESS))
      return err;
@@ -170,10 +170,10 @@ static int gcu_loose(MDBX_txn *txn, gcu_t *ctx) {
    TRACE("%s: append %zu loose-pages to retired-pages", dbg_prefix(ctx), txn->tw.loose_count);
  } else {
    /* Room for loose pages + temp PNL with same */
-    int err = pnl_need(&txn->tw.relist, 2 * txn->tw.loose_count + 2);
+    int err = pnl_need(&txn->tw.repnl, 2 * txn->tw.loose_count + 2);
    if (unlikely(err != MDBX_SUCCESS))
      return err;
-    pnl_t loose = txn->tw.relist + MDBX_PNL_ALLOCLEN(txn->tw.relist) - txn->tw.loose_count - 1;
+    pnl_t loose = txn->tw.repnl + MDBX_PNL_ALLOCLEN(txn->tw.repnl) - txn->tw.loose_count - 1;
    size_t count = 0;
    for (page_t *lp = txn->tw.loose_pages; lp; lp = page_next(lp)) {
      tASSERT(txn, lp->flags == P_LOOSE);
@@ -184,7 +184,7 @@ static int gcu_loose(MDBX_txn *txn, gcu_t *ctx) {
    tASSERT(txn, count == txn->tw.loose_count);
    MDBX_PNL_SETSIZE(loose, count);
    pnl_sort(loose, txn->geo.first_unallocated);
-    pnl_merge(txn->tw.relist, loose);
+    pnl_merge(txn->tw.repnl, loose);
    TRACE("%s: append %zu loose-pages to reclaimed-pages", dbg_prefix(ctx), txn->tw.loose_count);
  }
@@ -359,16 +359,15 @@ typedef struct gcu_rid_result {
static rid_t get_rid_for_reclaimed(MDBX_txn *txn, gcu_t *ctx, const size_t left) {
  rid_t r;
  if (is_lifo(txn)) {
-    if (txn->tw.gc.reclaimed == nullptr) {
-      txn->tw.gc.reclaimed = txl_alloc();
-      if (unlikely(!txn->tw.gc.reclaimed)) {
+    if (txn->tw.gc.retxl == nullptr) {
+      txn->tw.gc.retxl = txl_alloc();
+      if (unlikely(!txn->tw.gc.retxl)) {
        r.err = MDBX_ENOMEM;
        goto return_error;
      }
    }
-    if (MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) < txl_max &&
-        left > (MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) - ctx->reused_slot) * txn->env->maxgc_large1page &&
-        !ctx->dense) {
+    if (MDBX_PNL_GETSIZE(txn->tw.gc.retxl) < txl_max &&
+        left > (MDBX_PNL_GETSIZE(txn->tw.gc.retxl) - ctx->reused_slot) * txn->env->maxgc_large1page && !ctx->dense) {
      /* A free slot is needed to store the page list. */
      bool need_cleanup = false;
      txnid_t snap_oldest = 0;
@@ -377,11 +376,11 @@ static rid_t get_rid_for_reclaimed(MDBX_txn *txn, gcu_t *ctx, const size_t left)
        r.err = gc_alloc_ex(&ctx->cursor, 0, ALLOC_RESERVE).err;
        snap_oldest = txn->env->lck->cached_oldest.weak;
        if (likely(r.err == MDBX_SUCCESS)) {
-          TRACE("%s: took @%" PRIaTXN " from GC", dbg_prefix(ctx), MDBX_PNL_LAST(txn->tw.gc.reclaimed));
+          TRACE("%s: took @%" PRIaTXN " from GC", dbg_prefix(ctx), MDBX_PNL_LAST(txn->tw.gc.retxl));
          need_cleanup = true;
        }
-      } while (r.err == MDBX_SUCCESS && MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) < txl_max &&
-               left > (MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) - ctx->reused_slot) * txn->env->maxgc_large1page);
+      } while (r.err == MDBX_SUCCESS && MDBX_PNL_GETSIZE(txn->tw.gc.retxl) < txl_max &&
+               left > (MDBX_PNL_GETSIZE(txn->tw.gc.retxl) - ctx->reused_slot) * txn->env->maxgc_large1page);

      if (likely(r.err == MDBX_SUCCESS)) {
        TRACE("%s: got enough from GC.", dbg_prefix(ctx));
@@ -390,12 +389,12 @@ static rid_t get_rid_for_reclaimed(MDBX_txn *txn, gcu_t *ctx, const size_t left)
        /* LY: some troubles... */
        goto return_error;

-      if (MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed)) {
+      if (MDBX_PNL_GETSIZE(txn->tw.gc.retxl)) {
        if (need_cleanup) {
-          txl_sort(txn->tw.gc.reclaimed);
+          txl_sort(txn->tw.gc.retxl);
          ctx->cleaned_slot = 0;
        }
-        ctx->rid = MDBX_PNL_LAST(txn->tw.gc.reclaimed);
+        ctx->rid = MDBX_PNL_LAST(txn->tw.gc.retxl);
      } else {
        tASSERT(txn, txn->tw.gc.last_reclaimed == 0);
        if (unlikely(txn_snapshot_oldest(txn) != snap_oldest))
@@ -410,14 +409,14 @@ static rid_t get_rid_for_reclaimed(MDBX_txn *txn, gcu_t *ctx, const size_t left)
      /* There are no records in GC suitable for recycling,
       * so free ids will be used in reverse order. */
-      while (MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) < txl_max &&
-             left > (MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) - ctx->reused_slot) * txn->env->maxgc_large1page) {
+      while (MDBX_PNL_GETSIZE(txn->tw.gc.retxl) < txl_max &&
+             left > (MDBX_PNL_GETSIZE(txn->tw.gc.retxl) - ctx->reused_slot) * txn->env->maxgc_large1page) {
        if (unlikely(ctx->rid <= MIN_TXNID)) {
          ctx->dense = true;
-          if (unlikely(MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) <= ctx->reused_slot)) {
+          if (unlikely(MDBX_PNL_GETSIZE(txn->tw.gc.retxl) <= ctx->reused_slot)) {
            VERBOSE("** restart: reserve depleted (reused_gc_slot %zu >= "
                    "gc.reclaimed %zu)",
-                    ctx->reused_slot, MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed));
+                    ctx->reused_slot, MDBX_PNL_GETSIZE(txn->tw.gc.retxl));
            goto return_restart;
          }
          break;
@@ -445,7 +444,7 @@ static rid_t get_rid_for_reclaimed(MDBX_txn *txn, gcu_t *ctx, const size_t left)
      }

      tASSERT(txn, !ctx->dense);
-      r.err = txl_append(&txn->tw.gc.reclaimed, ctx->rid);
+      r.err = txl_append(&txn->tw.gc.retxl, ctx->rid);
      if (unlikely(r.err != MDBX_SUCCESS))
        goto return_error;
@@ -469,12 +468,12 @@ static rid_t get_rid_for_reclaimed(MDBX_txn *txn, gcu_t *ctx, const size_t left)
      }
    }

-    const size_t i = MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) - ctx->reused_slot;
-    tASSERT(txn, i > 0 && i <= MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed));
-    r.rid = txn->tw.gc.reclaimed[i];
+    const size_t i = MDBX_PNL_GETSIZE(txn->tw.gc.retxl) - ctx->reused_slot;
+    tASSERT(txn, i > 0 && i <= MDBX_PNL_GETSIZE(txn->tw.gc.retxl));
+    r.rid = txn->tw.gc.retxl[i];
    TRACE("%s: take @%" PRIaTXN " from lifo-reclaimed[%zu]", dbg_prefix(ctx), r.rid, i);
  } else {
-    tASSERT(txn, txn->tw.gc.reclaimed == nullptr);
+    tASSERT(txn, txn->tw.gc.retxl == nullptr);
    if (unlikely(ctx->rid == 0)) {
      ctx->rid = txn_snapshot_oldest(txn);
      MDBX_val key;
@@ -523,9 +522,9 @@ return_error:
  return r;
}

-/* Cleanups reclaimed GC (aka freeDB) records, saves the retired-list (aka
+/* Cleanups retxl GC (aka freeDB) records, saves the retired-list (aka
 * freelist) of current transaction to GC, puts back into GC leftover of the
- * reclaimed pages with chunking. This recursive changes the reclaimed-list,
+ * retxl pages with chunking. This recursive changes the retxl-list,
 * loose-list and retired-list. Keep trying until it stabilizes.
 *
 * NOTE: This code is a consequence of many iterations of adding crutches (aka
@@ -539,7 +538,7 @@ int gc_update(MDBX_txn *txn, gcu_t *ctx) {
  txn->cursors[FREE_DBI] = &ctx->cursor;
  int rc;

-  /* txn->tw.relist[] can grow and shrink during this call.
+  /* txn->tw.repnl[] can grow and shrink during this call.
   * txn->tw.gc.last_reclaimed and txn->tw.retired_pages[] can only grow.
   * But page numbers cannot disappear from txn->tw.retired_pages[]. */
retry_clean_adj:
@@ -548,7 +547,7 @@ retry:
  ctx->loop += !(ctx->prev_first_unallocated > txn->geo.first_unallocated);
  TRACE(">> restart, loop %u", ctx->loop);

-  tASSERT(txn, pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
+  tASSERT(txn, pnl_check_allocated(txn->tw.repnl, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
  tASSERT(txn, dpl_check(txn));
  if (unlikely(/* paranoia */ ctx->loop > ((MDBX_DEBUG > 0) ? 12 : 42))) {
    ERROR("txn #%" PRIaTXN " too more loops %u, bailout", txn->txnid, ctx->loop);
@@ -575,17 +574,17 @@ retry:
  /* Come back here after each Put() in case retired-list changed */
  TRACE("%s", " >> continue");

-  tASSERT(txn, pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
+  tASSERT(txn, pnl_check_allocated(txn->tw.repnl, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
  MDBX_val key, data;
  if (is_lifo(txn)) {
-    if (ctx->cleaned_slot < (txn->tw.gc.reclaimed ? MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) : 0)) {
+    if (ctx->cleaned_slot < (txn->tw.gc.retxl ? MDBX_PNL_GETSIZE(txn->tw.gc.retxl) : 0)) {
      ctx->reserved = 0;
      ctx->cleaned_slot = 0;
      ctx->reused_slot = 0;
      ctx->fill_idx = ~0u;
      /* LY: cleanup reclaimed records. */
      do {
-        ctx->cleaned_id = txn->tw.gc.reclaimed[++ctx->cleaned_slot];
+        ctx->cleaned_id = txn->tw.gc.retxl[++ctx->cleaned_slot];
        tASSERT(txn, ctx->cleaned_slot > 0 && ctx->cleaned_id <= env->lck->cached_oldest.weak);
        key.iov_base = &ctx->cleaned_id;
        key.iov_len = sizeof(ctx->cleaned_id);
@@ -603,8 +602,8 @@ retry:
        rc = cursor_del(&ctx->cursor, 0);
        if (unlikely(rc != MDBX_SUCCESS))
          goto bailout;
-      } while (ctx->cleaned_slot < MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed));
-      txl_sort(txn->tw.gc.reclaimed);
+      } while (ctx->cleaned_slot < MDBX_PNL_GETSIZE(txn->tw.gc.retxl));
+      txl_sort(txn->tw.gc.retxl);
    }
  } else {
    /* Delete the remaining records taken out of GC. */
@@ -645,7 +644,7 @@ retry:
      }
    }

-    tASSERT(txn, pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
+    tASSERT(txn, pnl_check_allocated(txn->tw.repnl, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
    tASSERT(txn, dpl_check(txn));
    if (AUDIT_ENABLED()) {
      rc = audit_ex(txn, ctx->retired_stored, false);
@@ -655,7 +654,7 @@ retry:
    /* return suitable into unallocated space */
    if (txn_refund(txn)) {
-      tASSERT(txn, pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
+      tASSERT(txn, pnl_check_allocated(txn->tw.repnl, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
      if (AUDIT_ENABLED()) {
        rc = audit_ex(txn, ctx->retired_stored, false);
        if (unlikely(rc != MDBX_SUCCESS))
@@ -674,14 +673,14 @@ retry:
      tASSERT(txn, txn->tw.loose_pages == 0);
    }

-    if (unlikely(ctx->reserved > MDBX_PNL_GETSIZE(txn->tw.relist)) &&
-        (ctx->loop < 5 || ctx->reserved - MDBX_PNL_GETSIZE(txn->tw.relist) > env->maxgc_large1page / 2)) {
+    if (unlikely(ctx->reserved > MDBX_PNL_GETSIZE(txn->tw.repnl)) &&
+        (ctx->loop < 5 || ctx->reserved - MDBX_PNL_GETSIZE(txn->tw.repnl) > env->maxgc_large1page / 2)) {
      TRACE("%s: reclaimed-list changed %zu -> %zu, retry", dbg_prefix(ctx), ctx->amount,
-            MDBX_PNL_GETSIZE(txn->tw.relist));
-      ctx->reserve_adj += ctx->reserved - MDBX_PNL_GETSIZE(txn->tw.relist);
+            MDBX_PNL_GETSIZE(txn->tw.repnl));
+      ctx->reserve_adj += ctx->reserved - MDBX_PNL_GETSIZE(txn->tw.repnl);
      goto retry;
    }
-    ctx->amount = MDBX_PNL_GETSIZE(txn->tw.relist);
+    ctx->amount = MDBX_PNL_GETSIZE(txn->tw.repnl);

    if (ctx->retired_stored < MDBX_PNL_GETSIZE(txn->tw.retired_pages)) {
      /* store retired-list into GC */
@@ -691,7 +690,7 @@ retry:
      continue;
    }

-    tASSERT(txn, pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
+    tASSERT(txn, pnl_check_allocated(txn->tw.repnl, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
    tASSERT(txn, txn->tw.loose_count == 0);

    TRACE("%s", " >> reserving");
@@ -705,7 +704,7 @@ retry:
          "lifo-reclaimed-slots %zu, "
          "reused-gc-slots %zu",
          dbg_prefix(ctx), ctx->amount, ctx->reserved, ctx->reserve_adj, left,
-          txn->tw.gc.reclaimed ? MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) : 0, ctx->reused_slot);
+          txn->tw.gc.retxl ? MDBX_PNL_GETSIZE(txn->tw.gc.retxl) : 0, ctx->reused_slot);
    if (0 >= (intptr_t)left)
      break;
@@ -723,14 +722,14 @@ retry:
    size_t chunk = left;
    if (unlikely(left > env->maxgc_large1page)) {
-      const size_t avail_gc_slots = txn->tw.gc.reclaimed ? MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) - ctx->reused_slot + 1
+      const size_t avail_gc_slots = txn->tw.gc.retxl ? MDBX_PNL_GETSIZE(txn->tw.gc.retxl) - ctx->reused_slot + 1
                                    : (ctx->rid < INT16_MAX) ? (size_t)ctx->rid
                                                             : INT16_MAX;
      if (likely(avail_gc_slots > 1)) {
#if MDBX_ENABLE_BIGFOOT
        chunk = env->maxgc_large1page;
        if (avail_gc_slots < INT16_MAX && unlikely(left > env->maxgc_large1page * avail_gc_slots))
-          /* TODO: We could look at what lengths of sequences are present in relist
+          /* TODO: We could look at what lengths of sequences are present in repnl
           * and try to cut chunks of the corresponding size.
           * The point is not to split page sequences,
           * but to use them whole. */
@@ -750,8 +749,8 @@ retry:
          size_t avail = ((pgno2bytes(env, span) - PAGEHDRSZ) / sizeof(pgno_t)) /* - 1 + span */;
          if (tail > avail) {
            for (size_t i = ctx->amount - span; i > 0; --i) {
-              if (MDBX_PNL_ASCENDING ? (txn->tw.relist[i] + span)
-                                     : (txn->tw.relist[i] - span) == txn->tw.relist[i + span]) {
+              if (MDBX_PNL_ASCENDING ? (txn->tw.repnl[i] + span)
+                                     : (txn->tw.repnl[i] - span) == txn->tw.repnl[i + span]) {
                span += 1;
                avail = ((pgno2bytes(env, span) - PAGEHDRSZ) / sizeof(pgno_t)) - 1 + span;
                if (avail >= tail)
@@ -792,7 +791,7 @@ retry:
            ctx->reserved + chunk + 1, reservation_gc_id);
      prepare_backlog(txn, ctx);
      rc = cursor_put(&ctx->cursor, &key, &data, MDBX_RESERVE | MDBX_NOOVERWRITE);
-      tASSERT(txn, pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
+      tASSERT(txn, pnl_check_allocated(txn->tw.repnl, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
      if (unlikely(rc != MDBX_SUCCESS))
        goto bailout;
@@ -803,14 +802,14 @@ retry:
      continue;
    }

-    tASSERT(txn, ctx->cleaned_slot == (txn->tw.gc.reclaimed ? MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) : 0));
+    tASSERT(txn, ctx->cleaned_slot == (txn->tw.gc.retxl ? MDBX_PNL_GETSIZE(txn->tw.gc.retxl) : 0));

    TRACE("%s", " >> filling");
    /* Fill in the reserved records */
    size_t excess_slots = 0;
-    ctx->fill_idx = txn->tw.gc.reclaimed ? MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) - ctx->reused_slot : ctx->reused_slot;
+    ctx->fill_idx = txn->tw.gc.retxl ? MDBX_PNL_GETSIZE(txn->tw.gc.retxl) - ctx->reused_slot : ctx->reused_slot;
    rc = MDBX_SUCCESS;
-    tASSERT(txn, pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
+    tASSERT(txn, pnl_check_allocated(txn->tw.repnl, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
    tASSERT(txn, dpl_check(txn));
    if (ctx->amount) {
      MDBX_val key, data;
@@ -818,7 +817,7 @@ retry:
      key.iov_base = data.iov_base = nullptr;
      size_t left = ctx->amount, excess = 0;
-      if (txn->tw.gc.reclaimed == nullptr) {
+      if (txn->tw.gc.retxl == nullptr) {
        tASSERT(txn, is_lifo(txn) == 0);
        rc = outer_first(&ctx->cursor, &key, &data);
        if (unlikely(rc != MDBX_SUCCESS)) {
@@ -831,33 +830,33 @@ retry:
      while (true) {
        txnid_t fill_gc_id;
-        TRACE("%s: left %zu of %zu", dbg_prefix(ctx), left, MDBX_PNL_GETSIZE(txn->tw.relist));
-        if (txn->tw.gc.reclaimed == nullptr) {
+        TRACE("%s: left %zu of %zu", dbg_prefix(ctx), left, MDBX_PNL_GETSIZE(txn->tw.repnl));
+        if (txn->tw.gc.retxl == nullptr) {
          tASSERT(txn, is_lifo(txn) == 0);
          fill_gc_id = key.iov_base ? unaligned_peek_u64(4, key.iov_base) : MIN_TXNID;
          if (ctx->fill_idx == 0 || fill_gc_id > txn->tw.gc.last_reclaimed) {
            if (!left)
              break;
            VERBOSE("** restart: reserve depleted (fill_idx %zu, fill_id %" PRIaTXN " > last_reclaimed %" PRIaTXN
                    ", left %zu",
                    ctx->fill_idx, fill_gc_id, txn->tw.gc.last_reclaimed, left);
            ctx->reserve_adj = (ctx->reserve_adj > left) ? ctx->reserve_adj - left : 0;
            goto retry;
          }
          ctx->fill_idx -= 1;
        } else {
          tASSERT(txn, is_lifo(txn) != 0);
-          if (ctx->fill_idx >= MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed)) {
+          if (ctx->fill_idx >= MDBX_PNL_GETSIZE(txn->tw.gc.retxl)) {
            if (!left)
              break;
            VERBOSE("** restart: reserve depleted (fill_idx %zu >= "
                    "gc.reclaimed %zu, left %zu",
-                    ctx->fill_idx, MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed), left);
+                    ctx->fill_idx, MDBX_PNL_GETSIZE(txn->tw.gc.retxl), left);
            ctx->reserve_adj = (ctx->reserve_adj > left) ? ctx->reserve_adj - left : 0;
            goto retry;
          }
          ctx->fill_idx += 1;
-          fill_gc_id = txn->tw.gc.reclaimed[ctx->fill_idx];
+          fill_gc_id = txn->tw.gc.retxl[ctx->fill_idx];
          TRACE("%s: seek-reservation @%" PRIaTXN " at gc.reclaimed[%zu]", dbg_prefix(ctx), fill_gc_id, ctx->fill_idx);
          key.iov_base = &fill_gc_id;
          key.iov_len = sizeof(fill_gc_id);
@@ -865,7 +864,7 @@ retry:
          if (unlikely(rc != MDBX_SUCCESS))
            goto bailout;
        }
-        tASSERT(txn, ctx->cleaned_slot == (txn->tw.gc.reclaimed ? MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) : 0));
+        tASSERT(txn, ctx->cleaned_slot == (txn->tw.gc.retxl ? MDBX_PNL_GETSIZE(txn->tw.gc.retxl) : 0));
        tASSERT(txn, fill_gc_id > 0 && fill_gc_id <= env->lck->cached_oldest.weak);
        key.iov_base = &fill_gc_id;
        key.iov_len = sizeof(fill_gc_id);
@@ -889,16 +888,16 @@ retry:
          goto bailout;
        zeroize_reserved(env, data);

-        if (unlikely(txn->tw.loose_count || ctx->amount != MDBX_PNL_GETSIZE(txn->tw.relist))) {
+        if (unlikely(txn->tw.loose_count || ctx->amount != MDBX_PNL_GETSIZE(txn->tw.repnl))) {
          NOTICE("** restart: reclaimed-list changed (%zu -> %zu, loose +%zu)", ctx->amount,
-                 MDBX_PNL_GETSIZE(txn->tw.relist), txn->tw.loose_count);
+                 MDBX_PNL_GETSIZE(txn->tw.repnl), txn->tw.loose_count);
          if (ctx->loop < 5 || (ctx->loop > 10 && (ctx->loop & 1)))
            goto retry_clean_adj;
          goto retry;
        }

-        if (unlikely(txn->tw.gc.reclaimed ? ctx->cleaned_slot < MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed)
+        if (unlikely(txn->tw.gc.retxl ? ctx->cleaned_slot < MDBX_PNL_GETSIZE(txn->tw.gc.retxl)
                                          : ctx->cleaned_id < txn->tw.gc.last_reclaimed)) {
NOTICE("%s", "** restart: reclaimed-slots changed"); NOTICE("%s", "** restart: reclaimed-slots changed");
goto retry; goto retry;
} }
@ -911,11 +910,11 @@ retry:
pgno_t *dst = data.iov_base; pgno_t *dst = data.iov_base;
*dst++ = (pgno_t)chunk; *dst++ = (pgno_t)chunk;
pgno_t *src = MDBX_PNL_BEGIN(txn->tw.relist) + left - chunk; pgno_t *src = MDBX_PNL_BEGIN(txn->tw.repnl) + left - chunk;
memcpy(dst, src, chunk * sizeof(pgno_t)); memcpy(dst, src, chunk * sizeof(pgno_t));
pgno_t *from = src, *to = src + chunk; pgno_t *from = src, *to = src + chunk;
TRACE("%s: fill %zu [ %zu:%" PRIaPGNO "...%zu:%" PRIaPGNO "] @%" PRIaTXN, dbg_prefix(ctx), chunk, TRACE("%s: fill %zu [ %zu:%" PRIaPGNO "...%zu:%" PRIaPGNO "] @%" PRIaTXN, dbg_prefix(ctx), chunk,
from - txn->tw.relist, from[0], to - txn->tw.relist, to[-1], fill_gc_id); from - txn->tw.repnl, from[0], to - txn->tw.repnl, to[-1], fill_gc_id);
left -= chunk; left -= chunk;
if (AUDIT_ENABLED()) { if (AUDIT_ENABLED()) {
@ -926,7 +925,7 @@ retry:
next: next:
if (txn->tw.gc.reclaimed == nullptr) { if (txn->tw.gc.retxl == nullptr) {
tASSERT(txn, is_lifo(txn) == 0); tASSERT(txn, is_lifo(txn) == 0);
rc = outer_next(&ctx->cursor, &key, &data, MDBX_NEXT); rc = outer_next(&ctx->cursor, &key, &data, MDBX_NEXT);
if (unlikely(rc != MDBX_SUCCESS)) { if (unlikely(rc != MDBX_SUCCESS)) {
@ -951,9 +950,9 @@ retry:
} }
tASSERT(txn, rc == MDBX_SUCCESS); tASSERT(txn, rc == MDBX_SUCCESS);
if (unlikely(txn->tw.loose_count != 0 || ctx->amount != MDBX_PNL_GETSIZE(txn->tw.relist))) { if (unlikely(txn->tw.loose_count != 0 || ctx->amount != MDBX_PNL_GETSIZE(txn->tw.repnl))) {
NOTICE("** restart: got %zu loose pages (reclaimed-list %zu -> %zu)", txn->tw.loose_count, ctx->amount, NOTICE("** restart: got %zu loose pages (reclaimed-list %zu -> %zu)", txn->tw.loose_count, ctx->amount,
MDBX_PNL_GETSIZE(txn->tw.relist)); MDBX_PNL_GETSIZE(txn->tw.repnl));
goto retry; goto retry;
} }
@ -966,12 +965,12 @@ retry:
goto retry; goto retry;
} }
tASSERT(txn, txn->tw.gc.reclaimed == nullptr || ctx->cleaned_slot == MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed)); tASSERT(txn, txn->tw.gc.retxl == nullptr || ctx->cleaned_slot == MDBX_PNL_GETSIZE(txn->tw.gc.retxl));
bailout: bailout:
txn->cursors[FREE_DBI] = ctx->cursor.next; txn->cursors[FREE_DBI] = ctx->cursor.next;
MDBX_PNL_SETSIZE(txn->tw.relist, 0); MDBX_PNL_SETSIZE(txn->tw.repnl, 0);
#if MDBX_ENABLE_PROFGC #if MDBX_ENABLE_PROFGC
env->lck->pgops.gc_prof.wloops += (uint32_t)ctx->loop; env->lck->pgops.gc_prof.wloops += (uint32_t)ctx->loop;
#endif /* MDBX_ENABLE_PROFGC */ #endif /* MDBX_ENABLE_PROFGC */

View File

@ -211,14 +211,14 @@ struct MDBX_txn {
struct { struct {
troika_t troika; troika_t troika;
/* In write txns, array of cursors for each DB */ /* In write txns, array of cursors for each DB */
pnl_t __restrict relist; /* Reclaimed GC pages */ pnl_t __restrict repnl; /* Reclaimed GC pages */
bool prefault_write_activated;
struct { struct {
/* The list of reclaimed txns from GC */ /* The list of reclaimed txn-ids from GC */
txl_t __restrict reclaimed; txl_t __restrict retxl;
txnid_t last_reclaimed; /* ID of last used record */ txnid_t last_reclaimed; /* ID of last used record */
uint64_t time_acc; uint64_t time_acc;
} gc; } gc;
bool prefault_write_activated;
#if MDBX_ENABLE_REFUND #if MDBX_ENABLE_REFUND
pgno_t loose_refund_wl /* FIXME: describe */; pgno_t loose_refund_wl /* FIXME: describe */;
#endif /* MDBX_ENABLE_REFUND */ #endif /* MDBX_ENABLE_REFUND */

View File

@ -608,8 +608,8 @@ status_done:
reclaim: reclaim:
DEBUG("reclaim %zu %s page %" PRIaPGNO, npages, "dirty", pgno); DEBUG("reclaim %zu %s page %" PRIaPGNO, npages, "dirty", pgno);
rc = pnl_insert_span(&txn->tw.relist, pgno, npages); rc = pnl_insert_span(&txn->tw.repnl, pgno, npages);
tASSERT(txn, pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated - MDBX_ENABLE_REFUND)); tASSERT(txn, pnl_check_allocated(txn->tw.repnl, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
tASSERT(txn, dpl_check(txn)); tASSERT(txn, dpl_check(txn));
return rc; return rc;
} }
@ -679,7 +679,7 @@ __hot int __must_check_result page_dirty(MDBX_txn *txn, page_t *mp, size_t npage
if (txn->tw.loose_count) { if (txn->tw.loose_count) {
page_t *lp = txn->tw.loose_pages; page_t *lp = txn->tw.loose_pages;
DEBUG("purge-and-reclaim loose page %" PRIaPGNO, lp->pgno); DEBUG("purge-and-reclaim loose page %" PRIaPGNO, lp->pgno);
rc = pnl_insert_span(&txn->tw.relist, lp->pgno, 1); rc = pnl_insert_span(&txn->tw.repnl, lp->pgno, 1);
if (unlikely(rc != MDBX_SUCCESS)) if (unlikely(rc != MDBX_SUCCESS))
goto bailout; goto bailout;
size_t di = dpl_search(txn, lp->pgno); size_t di = dpl_search(txn, lp->pgno);
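Both hunks above return freed pages to txn->tw.repnl via pnl_insert_span(). A hedged, standalone sketch of what such an insert amounts to, assuming the ascending, length-prefixed layout (the real helper also grows the allocation and supports the descending layout selected by MDBX_PNL_ASCENDING):

#include <stdint.h>
#include <string.h>

typedef uint32_t pgno_t;

/* Hypothetical sketch: insert `npages` consecutive page numbers starting at
 * `pgno` into a sorted list whose element [0] is the count.  Assumes enough
 * room is already allocated and no duplicates. */
static void sketch_insert_span(pgno_t *pnl, pgno_t pgno, size_t npages) {
  size_t len = pnl[0], i = len;
  while (i > 0 && pnl[i] > pgno) /* find the insertion point from the tail */
    --i;
  memmove(&pnl[i + 1 + npages], &pnl[i + 1], (len - i) * sizeof(pgno_t));
  for (size_t k = 0; k < npages; ++k)
    pnl[i + 1 + k] = pgno + (pgno_t)k;
  pnl[0] = (pgno_t)(len + npages);
}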

View File

@ -7,7 +7,7 @@
static void refund_reclaimed(MDBX_txn *txn) { static void refund_reclaimed(MDBX_txn *txn) {
/* Scanning in descend order */ /* Scanning in descend order */
pgno_t first_unallocated = txn->geo.first_unallocated; pgno_t first_unallocated = txn->geo.first_unallocated;
const pnl_t pnl = txn->tw.relist; const pnl_t pnl = txn->tw.repnl;
tASSERT(txn, MDBX_PNL_GETSIZE(pnl) && MDBX_PNL_MOST(pnl) == first_unallocated - 1); tASSERT(txn, MDBX_PNL_GETSIZE(pnl) && MDBX_PNL_MOST(pnl) == first_unallocated - 1);
#if MDBX_PNL_ASCENDING #if MDBX_PNL_ASCENDING
size_t i = MDBX_PNL_GETSIZE(pnl); size_t i = MDBX_PNL_GETSIZE(pnl);
@ -28,7 +28,7 @@ static void refund_reclaimed(MDBX_txn *txn) {
VERBOSE("refunded %" PRIaPGNO " pages: %" PRIaPGNO " -> %" PRIaPGNO, txn->geo.first_unallocated - first_unallocated, VERBOSE("refunded %" PRIaPGNO " pages: %" PRIaPGNO " -> %" PRIaPGNO, txn->geo.first_unallocated - first_unallocated,
txn->geo.first_unallocated, first_unallocated); txn->geo.first_unallocated, first_unallocated);
txn->geo.first_unallocated = first_unallocated; txn->geo.first_unallocated = first_unallocated;
tASSERT(txn, pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated - 1)); tASSERT(txn, pnl_check_allocated(txn->tw.repnl, txn->geo.first_unallocated - 1));
} }
static void refund_loose(MDBX_txn *txn) { static void refund_loose(MDBX_txn *txn) {
@ -178,7 +178,7 @@ bool txn_refund(MDBX_txn *txn) {
refund_loose(txn); refund_loose(txn);
while (true) { while (true) {
if (MDBX_PNL_GETSIZE(txn->tw.relist) == 0 || MDBX_PNL_MOST(txn->tw.relist) != txn->geo.first_unallocated - 1) if (MDBX_PNL_GETSIZE(txn->tw.repnl) == 0 || MDBX_PNL_MOST(txn->tw.repnl) != txn->geo.first_unallocated - 1)
break; break;
refund_reclaimed(txn); refund_reclaimed(txn);
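The refund hunks are why repnl has to stay sorted: when its largest entries are exactly the last pages below geo.first_unallocated, they can be handed back to the unallocated tail instead of being persisted into GC. A sketch of the ascending case, with the names and length-prefix convention again assumed rather than taken from the headers:

#include <stdint.h>

typedef uint32_t pgno_t;

/* Hypothetical sketch: trim the tail of the reclaimed list while its highest
 * page number is adjacent to the unallocated boundary, and return the new
 * boundary.  The real refund_reclaimed() also handles the descending layout
 * and logs the refunded range. */
static pgno_t sketch_refund(pgno_t *repnl, pgno_t first_unallocated) {
  pgno_t len = repnl[0];
  while (len > 0 && repnl[len] == first_unallocated - 1) {
    --first_unallocated; /* the page rejoins the unallocated tail */
    --len;
  }
  repnl[0] = len;
  return first_unallocated;
}

As the txn_refund() hunk shows, this is interleaved with refund_loose() and repeated while the topmost entry of repnl keeps touching first_unallocated.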

View File

@ -889,7 +889,7 @@ retry:
goto retry; goto retry;
} }
if (likely(!involve) && if (likely(!involve) &&
(likely(mc->tree != &mc->txn->dbs[FREE_DBI]) || mc->txn->tw.loose_pages || MDBX_PNL_GETSIZE(mc->txn->tw.relist) || (likely(mc->tree != &mc->txn->dbs[FREE_DBI]) || mc->txn->tw.loose_pages || MDBX_PNL_GETSIZE(mc->txn->tw.repnl) ||
(mc->flags & z_gcu_preparation) || (mc->txn->flags & txn_gc_drained) || room_threshold)) { (mc->flags & z_gcu_preparation) || (mc->txn->flags & txn_gc_drained) || room_threshold)) {
involve = true; involve = true;
goto retry; goto retry;

View File

@ -85,7 +85,7 @@ void txn_merge(MDBX_txn *const parent, MDBX_txn *const txn, const size_t parent_
} }
/* Remove reclaimed pages from parent's dirty list */ /* Remove reclaimed pages from parent's dirty list */
const pnl_t reclaimed_list = parent->tw.relist; const pnl_t reclaimed_list = parent->tw.repnl;
dpl_sift(parent, reclaimed_list, false); dpl_sift(parent, reclaimed_list, false);
/* Move retired pages from parent's dirty & spilled list to reclaimed */ /* Move retired pages from parent's dirty & spilled list to reclaimed */
@ -139,7 +139,7 @@ void txn_merge(MDBX_txn *const parent, MDBX_txn *const txn, const size_t parent_
} }
DEBUG("reclaim retired parent's %u -> %zu %s page %" PRIaPGNO, npages, l, kind, pgno); DEBUG("reclaim retired parent's %u -> %zu %s page %" PRIaPGNO, npages, l, kind, pgno);
int err = pnl_insert_span(&parent->tw.relist, pgno, l); int err = pnl_insert_span(&parent->tw.repnl, pgno, l);
ENSURE(txn->env, err == MDBX_SUCCESS); ENSURE(txn->env, err == MDBX_SUCCESS);
} }
MDBX_PNL_SETSIZE(parent->tw.retired_pages, w); MDBX_PNL_SETSIZE(parent->tw.retired_pages, w);
@ -651,8 +651,8 @@ int txn_renew(MDBX_txn *txn, unsigned flags) {
txn->tw.spilled.least_removed = 0; txn->tw.spilled.least_removed = 0;
txn->tw.gc.time_acc = 0; txn->tw.gc.time_acc = 0;
txn->tw.gc.last_reclaimed = 0; txn->tw.gc.last_reclaimed = 0;
if (txn->tw.gc.reclaimed) if (txn->tw.gc.retxl)
MDBX_PNL_SETSIZE(txn->tw.gc.reclaimed, 0); MDBX_PNL_SETSIZE(txn->tw.gc.retxl, 0);
env->txn = txn; env->txn = txn;
if ((txn->flags & MDBX_WRITEMAP) == 0 || MDBX_AVOID_MSYNC) { if ((txn->flags & MDBX_WRITEMAP) == 0 || MDBX_AVOID_MSYNC) {
@ -916,7 +916,7 @@ int txn_end(MDBX_txn *txn, unsigned mode) {
/* Export or close DBI handles created in this txn */ /* Export or close DBI handles created in this txn */
rc = dbi_update(txn, mode & TXN_END_UPDATE); rc = dbi_update(txn, mode & TXN_END_UPDATE);
pnl_shrink(&txn->tw.retired_pages); pnl_shrink(&txn->tw.retired_pages);
pnl_shrink(&txn->tw.relist); pnl_shrink(&txn->tw.repnl);
if (!(env->flags & MDBX_WRITEMAP)) if (!(env->flags & MDBX_WRITEMAP))
dpl_release_shadows(txn); dpl_release_shadows(txn);
/* The writer mutex was locked in mdbx_txn_begin. */ /* The writer mutex was locked in mdbx_txn_begin. */
@ -926,14 +926,14 @@ int txn_end(MDBX_txn *txn, unsigned mode) {
MDBX_txn *const parent = txn->parent; MDBX_txn *const parent = txn->parent;
eASSERT(env, parent->signature == txn_signature); eASSERT(env, parent->signature == txn_signature);
eASSERT(env, parent->nested == txn && (parent->flags & MDBX_TXN_HAS_CHILD) != 0); eASSERT(env, parent->nested == txn && (parent->flags & MDBX_TXN_HAS_CHILD) != 0);
eASSERT(env, pnl_check_allocated(txn->tw.relist, txn->geo.first_unallocated - MDBX_ENABLE_REFUND)); eASSERT(env, pnl_check_allocated(txn->tw.repnl, txn->geo.first_unallocated - MDBX_ENABLE_REFUND));
eASSERT(env, memcmp(&txn->tw.troika, &parent->tw.troika, sizeof(troika_t)) == 0); eASSERT(env, memcmp(&txn->tw.troika, &parent->tw.troika, sizeof(troika_t)) == 0);
txn->owner = 0; txn->owner = 0;
if (txn->tw.gc.reclaimed) { if (txn->tw.gc.retxl) {
eASSERT(env, MDBX_PNL_GETSIZE(txn->tw.gc.reclaimed) >= (uintptr_t)parent->tw.gc.reclaimed); eASSERT(env, MDBX_PNL_GETSIZE(txn->tw.gc.retxl) >= (uintptr_t)parent->tw.gc.retxl);
MDBX_PNL_SETSIZE(txn->tw.gc.reclaimed, (uintptr_t)parent->tw.gc.reclaimed); MDBX_PNL_SETSIZE(txn->tw.gc.retxl, (uintptr_t)parent->tw.gc.retxl);
parent->tw.gc.reclaimed = txn->tw.gc.reclaimed; parent->tw.gc.retxl = txn->tw.gc.retxl;
} }
if (txn->tw.retired_pages) { if (txn->tw.retired_pages) {
@ -949,7 +949,7 @@ int txn_end(MDBX_txn *txn, unsigned mode) {
tASSERT(parent, audit_ex(parent, 0, false) == 0); tASSERT(parent, audit_ex(parent, 0, false) == 0);
dpl_release_shadows(txn); dpl_release_shadows(txn);
dpl_free(txn); dpl_free(txn);
pnl_free(txn->tw.relist); pnl_free(txn->tw.repnl);
if (parent->geo.upper != txn->geo.upper || parent->geo.now != txn->geo.now) { if (parent->geo.upper != txn->geo.upper || parent->geo.now != txn->geo.now) {
/* undo resize performed by child txn */ /* undo resize performed by child txn */