mdbx: replace some of the PNL macros with functions.

Леонид Юрьев (Leonid Yuriev) 2025-06-19 23:18:14 +03:00
parent fa0cd4d8b3
commit 966849646d
14 changed files with 180 additions and 186 deletions


@ -394,7 +394,7 @@ __cold static int copy_with_compacting(MDBX_env *env, MDBX_txn *txn, mdbx_fileha
ERROR("%s/%d: %s", "MDBX_CORRUPTED", MDBX_CORRUPTED, "invalid GC-record content");
return MDBX_CORRUPTED;
}
gc_npages += MDBX_PNL_GETSIZE(pnl);
gc_npages += pnl_size(pnl);
rc = outer_next(&couple.outer, &key, &data, MDBX_NEXT);
}
if (unlikely(rc != MDBX_NOTFOUND))


@ -506,7 +506,7 @@ int mdbx_txn_info(const MDBX_txn *txn, MDBX_txn_info *info, bool scan_rlt) {
info->txn_space_limit_soft = pgno2bytes(env, txn->geo.now);
info->txn_space_limit_hard = pgno2bytes(env, txn->geo.upper);
info->txn_space_retired =
pgno2bytes(env, txn->nested ? (size_t)txn->wr.retired_pages : MDBX_PNL_GETSIZE(txn->wr.retired_pages));
pgno2bytes(env, txn->nested ? (size_t)txn->wr.retired_pages : pnl_size(txn->wr.retired_pages));
info->txn_space_leftover = pgno2bytes(env, txn->wr.dirtyroom);
info->txn_space_dirty =
pgno2bytes(env, txn->wr.dirtylist ? txn->wr.dirtylist->pages_including_loose


@ -27,8 +27,8 @@ static size_t audit_db_used(const tree_t *db) {
__cold static int audit_ex_locked(MDBX_txn *txn, const size_t retired_stored, const bool dont_filter_gc) {
const MDBX_env *const env = txn->env;
tASSERT(txn, (txn->flags & MDBX_TXN_RDONLY) == 0);
const size_t pending = txn->wr.loose_count + MDBX_PNL_GETSIZE(txn->wr.repnl) +
(MDBX_PNL_GETSIZE(txn->wr.retired_pages) - retired_stored);
const size_t pending =
txn->wr.loose_count + pnl_size(txn->wr.repnl) + (pnl_size(txn->wr.retired_pages) - retired_stored);
cursor_couple_t cx;
int rc = cursor_init(&cx.outer, txn, FREE_DBI);
@ -87,8 +87,8 @@ __cold static int audit_ex_locked(MDBX_txn *txn, const size_t retired_stored, co
if ((txn->flags & MDBX_TXN_RDONLY) == 0)
ERROR("audit @%" PRIaTXN ": %zu(pending) = %zu(loose) + "
"%zu(reclaimed) + %zu(retired-pending) - %zu(retired-stored)",
txn->txnid, pending, txn->wr.loose_count, MDBX_PNL_GETSIZE(txn->wr.repnl),
txn->wr.retired_pages ? MDBX_PNL_GETSIZE(txn->wr.retired_pages) : 0, retired_stored);
txn->txnid, pending, txn->wr.loose_count, pnl_size(txn->wr.repnl),
txn->wr.retired_pages ? pnl_size(txn->wr.retired_pages) : 0, retired_stored);
ERROR("audit @%" PRIaTXN ": %zu(pending) + %zu"
"(gc) + %zu(count) = %zu(total) <> %zu"
"(allocated)",


@ -365,8 +365,8 @@ __cold bool dpl_check(MDBX_txn *txn) {
}
const size_t rpa = pnl_search(txn->wr.repnl, dp->pgno, txn->geo.first_unallocated);
tASSERT(txn, rpa > MDBX_PNL_GETSIZE(txn->wr.repnl) || txn->wr.repnl[rpa] != dp->pgno);
if (rpa <= MDBX_PNL_GETSIZE(txn->wr.repnl) && unlikely(txn->wr.repnl[rpa] == dp->pgno))
tASSERT(txn, rpa > pnl_size(txn->wr.repnl) || txn->wr.repnl[rpa] != dp->pgno);
if (rpa <= pnl_size(txn->wr.repnl) && unlikely(txn->wr.repnl[rpa] == dp->pgno))
return false;
if (num > 1) {
const size_t rpb = pnl_search(txn->wr.repnl, dp->pgno + num - 1, txn->geo.first_unallocated);
@ -384,7 +384,7 @@ __cold bool dpl_check(MDBX_txn *txn) {
if (unlikely(pages != dl->pages_including_loose))
return false;
for (size_t i = 1; i <= MDBX_PNL_GETSIZE(txn->wr.retired_pages); ++i) {
for (size_t i = 1; i <= pnl_size(txn->wr.retired_pages); ++i) {
const page_t *const dp = debug_dpl_find(txn, txn->wr.retired_pages[i]);
tASSERT(txn, !dp);
if (unlikely(dp))
@ -413,14 +413,14 @@ __noinline void dpl_lru_reduce(MDBX_txn *txn) {
void dpl_sift(MDBX_txn *const txn, pnl_t pl, const bool spilled) {
tASSERT(txn, (txn->flags & MDBX_TXN_RDONLY) == 0);
tASSERT(txn, (txn->flags & MDBX_WRITEMAP) == 0 || MDBX_AVOID_MSYNC);
if (MDBX_PNL_GETSIZE(pl) && txn->wr.dirtylist->length) {
if (pnl_size(pl) && txn->wr.dirtylist->length) {
tASSERT(txn, pnl_check_allocated(pl, (size_t)txn->geo.first_unallocated << spilled));
dpl_t *dl = dpl_sort(txn);
/* Scanning in ascending order */
const intptr_t step = MDBX_PNL_ASCENDING ? 1 : -1;
const intptr_t begin = MDBX_PNL_ASCENDING ? 1 : MDBX_PNL_GETSIZE(pl);
const intptr_t end = MDBX_PNL_ASCENDING ? MDBX_PNL_GETSIZE(pl) + 1 : 0;
const intptr_t begin = MDBX_PNL_ASCENDING ? 1 : pnl_size(pl);
const intptr_t end = MDBX_PNL_ASCENDING ? pnl_size(pl) + 1 : 0;
tASSERT(txn, pl[begin] <= pl[end - step]);
size_t w, r = dpl_search(txn, pl[begin] >> spilled);


@ -162,9 +162,9 @@ MDBX_MAYBE_UNUSED __hot static pgno_t *scan4seq_fallback(pgno_t *range, const si
}
MDBX_MAYBE_UNUSED static const pgno_t *scan4range_checker(const pnl_t pnl, const size_t seq) {
size_t begin = MDBX_PNL_ASCENDING ? 1 : MDBX_PNL_GETSIZE(pnl);
size_t begin = MDBX_PNL_ASCENDING ? 1 : pnl_size(pnl);
#if MDBX_PNL_ASCENDING
while (seq <= MDBX_PNL_GETSIZE(pnl) - begin) {
while (seq <= pnl_size(pnl) - begin) {
if (pnl[begin + seq] - pnl[begin] == seq)
return pnl + begin;
++begin;
@ -588,7 +588,7 @@ static inline bool is_reclaimable(MDBX_txn *txn, const MDBX_cursor *mc, const ui
}
__hot static pgno_t repnl_get_single(MDBX_txn *txn) {
const size_t len = MDBX_PNL_GETSIZE(txn->wr.repnl);
const size_t len = pnl_size(txn->wr.repnl);
assert(len > 0);
pgno_t *target = MDBX_PNL_EDGE(txn->wr.repnl);
const ptrdiff_t dir = MDBX_PNL_ASCENDING ? 1 : -1;
@ -658,7 +658,7 @@ __hot static pgno_t repnl_get_single(MDBX_txn *txn) {
#else
/* cut out the element, moving the tail */
const pgno_t pgno = *scan;
MDBX_PNL_SETSIZE(txn->wr.repnl, len - 1);
pnl_setsize(txn->wr.repnl, len - 1);
while (++scan <= target)
scan[-1] = *scan;
return pgno;
@ -671,18 +671,18 @@ __hot static pgno_t repnl_get_single(MDBX_txn *txn) {
const pgno_t pgno = *target;
#if MDBX_PNL_ASCENDING
/* cut out the element, moving the tail */
MDBX_PNL_SETSIZE(txn->wr.repnl, len - 1);
pnl_setsize(txn->wr.repnl, len - 1);
for (const pgno_t *const end = txn->wr.repnl + len - 1; target <= end; ++target)
*target = target[1];
#else
/* no need to move the tail, just truncate the list */
MDBX_PNL_SETSIZE(txn->wr.repnl, len - 1);
pnl_setsize(txn->wr.repnl, len - 1);
#endif
return pgno;
}
__hot static pgno_t repnl_get_sequence(MDBX_txn *txn, const size_t num, uint8_t flags) {
const size_t len = MDBX_PNL_GETSIZE(txn->wr.repnl);
const size_t len = pnl_size(txn->wr.repnl);
pgno_t *edge = MDBX_PNL_EDGE(txn->wr.repnl);
assert(len >= num && num > 1);
const size_t seq = num - 1;
@ -692,7 +692,7 @@ __hot static pgno_t repnl_get_sequence(MDBX_txn *txn, const size_t num, uint8_t
return P_INVALID;
assert(edge == scan4range_checker(txn->wr.repnl, seq));
/* no need to move the tail, just truncate the list */
MDBX_PNL_SETSIZE(txn->wr.repnl, len - num);
pnl_setsize(txn->wr.repnl, len - num);
return *edge;
}
#endif
@ -703,7 +703,7 @@ __hot static pgno_t repnl_get_sequence(MDBX_txn *txn, const size_t num, uint8_t
return P_INVALID;
const pgno_t pgno = *target;
/* cut out the found sequence, moving the tail */
MDBX_PNL_SETSIZE(txn->wr.repnl, len - num);
pnl_setsize(txn->wr.repnl, len - num);
#if MDBX_PNL_ASCENDING
for (const pgno_t *const end = txn->wr.repnl + len - num; target <= end; ++target)
*target = target[num];
@ -864,7 +864,7 @@ pgr_t gc_alloc_ex(const MDBX_cursor *const mc, const size_t num, uint8_t flags)
#if MDBX_ENABLE_PROFGC
prof->xpages += 1;
#endif /* MDBX_ENABLE_PROFGC */
if (MDBX_PNL_GETSIZE(txn->wr.repnl) >= num) {
if (pnl_size(txn->wr.repnl) >= num) {
eASSERT(env, MDBX_PNL_LAST(txn->wr.repnl) < txn->geo.first_unallocated &&
MDBX_PNL_FIRST(txn->wr.repnl) < txn->geo.first_unallocated);
pgno = repnl_get_sequence(txn, num, flags);
@ -872,7 +872,7 @@ pgr_t gc_alloc_ex(const MDBX_cursor *const mc, const size_t num, uint8_t flags)
goto done;
}
} else {
eASSERT(env, num == 0 || MDBX_PNL_GETSIZE(txn->wr.repnl) == 0 || (flags & ALLOC_RESERVE));
eASSERT(env, num == 0 || pnl_size(txn->wr.repnl) == 0 || (flags & ALLOC_RESERVE));
}
//---------------------------------------------------------------------------
@ -887,7 +887,7 @@ pgr_t gc_alloc_ex(const MDBX_cursor *const mc, const size_t num, uint8_t flags)
/* Don't coalesce records when requesting a slot for returning pages to the GC. Otherwise an attempt
* to grow the reserve may end up requiring an even larger reserve, because the recycled pages list grows. */
if (num > 0 && txn->dbs[FREE_DBI].branch_pages && MDBX_PNL_GETSIZE(txn->wr.repnl) < env->maxgc_large1page / 2)
if (num > 0 && txn->dbs[FREE_DBI].branch_pages && pnl_size(txn->wr.repnl) < env->maxgc_large1page / 2)
flags += ALLOC_COALESCE;
txn->wr.prefault_write_activated = !env->incore && env->options.prefault_write;
@ -991,8 +991,8 @@ next_gc:
goto fail;
}
const size_t gc_len = MDBX_PNL_GETSIZE(gc_pnl);
TRACE("gc-read: id #%" PRIaTXN " len %zu, re-list will %zu ", id, gc_len, gc_len + MDBX_PNL_GETSIZE(txn->wr.repnl));
const size_t gc_len = pnl_size(gc_pnl);
TRACE("gc-read: id #%" PRIaTXN " len %zu, re-list will %zu ", id, gc_len, gc_len + pnl_size(txn->wr.repnl));
if (unlikely(!num)) {
/* TODO: Check the criteria of item 2 as formulated in gc_provide_slots().
@ -1000,22 +1000,21 @@ next_gc:
* slots and their shortage for returning wr.repl. */
if (gc_len > env->maxgc_large1page / 4 * 3
/* if the record is long enough, then recycling its slot will not add much room for returning wr.repl, etc. */
&& MDBX_PNL_GETSIZE(txn->wr.repnl) + gc_len > env->maxgc_large1page /* не помещается в хвост */) {
&& pnl_size(txn->wr.repnl) + gc_len > env->maxgc_large1page /* does not fit into the tail */) {
DEBUG("avoid reclaiming %" PRIaTXN " slot, since it is too long (%zu)", id, gc_len);
ret.err = MDBX_NOTFOUND;
goto reserve_done;
}
}
if (unlikely(gc_len + MDBX_PNL_GETSIZE(txn->wr.repnl) /* Don't try to coalesce too much. */ >=
env->maxgc_large1page)) {
if (unlikely(gc_len + pnl_size(txn->wr.repnl) /* Don't try to coalesce too much. */ >= env->maxgc_large1page)) {
if (flags & ALLOC_SHOULD_SCAN) {
eASSERT(env, (flags & ALLOC_COALESCE) /* && !(flags & ALLOC_RESERVE) */ && num > 0);
#if MDBX_ENABLE_PROFGC
env->lck->pgops.gc_prof.coalescences += 1;
#endif /* MDBX_ENABLE_PROFGC */
TRACE("clear %s %s", "ALLOC_COALESCE", "since got threshold");
if (MDBX_PNL_GETSIZE(txn->wr.repnl) >= num) {
if (pnl_size(txn->wr.repnl) >= num) {
eASSERT(env, MDBX_PNL_LAST(txn->wr.repnl) < txn->geo.first_unallocated &&
MDBX_PNL_FIRST(txn->wr.repnl) < txn->geo.first_unallocated);
if (likely(num == 1)) {
@ -1028,20 +1027,19 @@ next_gc:
}
}
flags &= ~(ALLOC_COALESCE | ALLOC_SHOULD_SCAN);
if (unlikely(/* list is too long already */ MDBX_PNL_GETSIZE(txn->wr.repnl) >= env->options.rp_augment_limit) &&
if (unlikely(/* list is too long already */ pnl_size(txn->wr.repnl) >= env->options.rp_augment_limit) &&
((/* not a slot-request from gc-update */ num &&
/* have enough unallocated space */ txn->geo.upper >= txn->geo.first_unallocated + num &&
monotime_since_cached(monotime_begin, &now_cache) + txn->wr.gc.spent >= env->options.gc_time_limit) ||
gc_len + MDBX_PNL_GETSIZE(txn->wr.repnl) >= PAGELIST_LIMIT)) {
gc_len + pnl_size(txn->wr.repnl) >= PAGELIST_LIMIT)) {
/* Stop reclaiming to avoid enlarging or overflowing the page list. This is a rare
* case while searching for a contiguous multi-page region in a large database,
* see https://libmdbx.dqdkfa.ru/dead-github/issues/123 */
NOTICE("stop reclaiming %s: %zu (current) + %zu "
"(chunk) >= %zu, rp_augment_limit %u",
likely(gc_len + MDBX_PNL_GETSIZE(txn->wr.repnl) < PAGELIST_LIMIT) ? "since rp_augment_limit was reached"
likely(gc_len + pnl_size(txn->wr.repnl) < PAGELIST_LIMIT) ? "since rp_augment_limit was reached"
: "to avoid PNL overflow",
MDBX_PNL_GETSIZE(txn->wr.repnl), gc_len, gc_len + MDBX_PNL_GETSIZE(txn->wr.repnl),
env->options.rp_augment_limit);
pnl_size(txn->wr.repnl), gc_len, gc_len + pnl_size(txn->wr.repnl), env->options.rp_augment_limit);
goto depleted_gc;
}
}
@ -1065,7 +1063,7 @@ next_gc:
pnl_merge(txn->wr.repnl, gc_pnl);
#if MDBX_ENABLE_PROFGC
prof->pnl_merge.calls += 1;
prof->pnl_merge.volume += MDBX_PNL_GETSIZE(txn->wr.repnl);
prof->pnl_merge.volume += pnl_size(txn->wr.repnl);
prof->pnl_merge.time += osal_monotime() - merge_begin;
#endif /* MDBX_ENABLE_PROFGC */
flags |= ALLOC_SHOULD_SCAN;
@ -1080,8 +1078,8 @@ next_gc:
}
eASSERT(env, dpl_check(txn));
eASSERT(env, MDBX_PNL_GETSIZE(txn->wr.repnl) == 0 || MDBX_PNL_MOST(txn->wr.repnl) < txn->geo.first_unallocated);
if (MDBX_ENABLE_REFUND && MDBX_PNL_GETSIZE(txn->wr.repnl) &&
eASSERT(env, pnl_size(txn->wr.repnl) == 0 || MDBX_PNL_MOST(txn->wr.repnl) < txn->geo.first_unallocated);
if (MDBX_ENABLE_REFUND && pnl_size(txn->wr.repnl) &&
unlikely(MDBX_PNL_MOST(txn->wr.repnl) == txn->geo.first_unallocated - 1)) {
/* Refund suitable pages into "unallocated" space */
txn_refund(txn);
@ -1091,7 +1089,7 @@ next_gc:
rkl_t *rkl = &txn->wr.gc.reclaimed;
const char *rkl_name = "reclaimed";
if (mc->dbi_state != txn->dbi_state &&
(MDBX_DEBUG || MDBX_PNL_GETSIZE(txn->wr.repnl) > (size_t)gc->tree->height + gc->tree->height + 3)) {
(MDBX_DEBUG || pnl_size(txn->wr.repnl) > (size_t)gc->tree->height + gc->tree->height + 3)) {
gc->next = txn->cursors[FREE_DBI];
txn->cursors[FREE_DBI] = gc;
ret.err = cursor_del(gc, 0);
@ -1100,8 +1098,8 @@ next_gc:
rkl = &txn->wr.gc.ready4reuse;
rkl_name = "ready4reuse";
} else {
VERBOSE("gc-early-clean: err %d, repnl %zu, gc-height %u (%u branch, %u leafs)", ret.err,
MDBX_PNL_GETSIZE(txn->wr.repnl), gc->tree->height, gc->tree->branch_pages, gc->tree->leaf_pages);
VERBOSE("gc-early-clean: err %d, repnl %zu, gc-height %u (%u branch, %u leafs)", ret.err, pnl_size(txn->wr.repnl),
gc->tree->height, gc->tree->branch_pages, gc->tree->leaf_pages);
if (unlikely(txn->flags & MDBX_TXN_ERROR))
goto fail;
}
@ -1113,8 +1111,8 @@ next_gc:
eASSERT(env, op == MDBX_PREV || op == MDBX_NEXT);
if (flags & ALLOC_COALESCE) {
if (MDBX_PNL_GETSIZE(txn->wr.repnl) < env->maxgc_large1page / 2) {
TRACE("%s: last id #%" PRIaTXN ", re-len %zu", "coalesce-continue", id, MDBX_PNL_GETSIZE(txn->wr.repnl));
if (pnl_size(txn->wr.repnl) < env->maxgc_large1page / 2) {
TRACE("%s: last id #%" PRIaTXN ", re-len %zu", "coalesce-continue", id, pnl_size(txn->wr.repnl));
goto next_gc;
}
flags -= ALLOC_COALESCE;
@ -1123,14 +1121,14 @@ next_gc:
scan:
if ((flags & ALLOC_RESERVE) && num < 2) {
/* if only a slot/id was needed for gc_reclaim_slot() or gc_reserve4stockpile() */
TRACE("%s: last id #%" PRIaTXN ", re-len %zu", "reserve-done", id, MDBX_PNL_GETSIZE(txn->wr.repnl));
TRACE("%s: last id #%" PRIaTXN ", re-len %zu", "reserve-done", id, pnl_size(txn->wr.repnl));
ret.err = MDBX_SUCCESS;
goto reserve_done;
}
eASSERT(env, flags & ALLOC_SHOULD_SCAN);
eASSERT(env, num > 0);
if (MDBX_PNL_GETSIZE(txn->wr.repnl) >= num) {
if (pnl_size(txn->wr.repnl) >= num) {
eASSERT(env, MDBX_PNL_LAST(txn->wr.repnl) < txn->geo.first_unallocated &&
MDBX_PNL_FIRST(txn->wr.repnl) < txn->geo.first_unallocated);
if (likely(num == 1)) {
@ -1144,12 +1142,12 @@ scan:
}
flags -= ALLOC_SHOULD_SCAN;
if ((txn->flags & txn_gc_drained) == 0) {
TRACE("%s: last id #%" PRIaTXN ", re-len %zu", "continue-search", id, MDBX_PNL_GETSIZE(txn->wr.repnl));
TRACE("%s: last id #%" PRIaTXN ", re-len %zu", "continue-search", id, pnl_size(txn->wr.repnl));
goto next_gc;
}
depleted_gc:
TRACE("%s: last id #%" PRIaTXN ", re-len %zu", "gc-depleted", id, MDBX_PNL_GETSIZE(txn->wr.repnl));
TRACE("%s: last id #%" PRIaTXN ", re-len %zu", "gc-depleted", id, pnl_size(txn->wr.repnl));
txn->flags |= txn_gc_drained;
if (flags & ALLOC_SHOULD_SCAN)
goto scan;
@ -1310,7 +1308,7 @@ done:
"0x%x, re-list-len %zu, loose-count %zu, gc: height %u, "
"branch %zu, leaf %zu, large %zu, entries %zu\n",
(flags & ALLOC_RESERVE) ? "reserve" : "alloc", num, flags, ret.err, txn->flags,
MDBX_PNL_GETSIZE(txn->wr.repnl), txn->wr.loose_count, txn->dbs[FREE_DBI].height,
pnl_size(txn->wr.repnl), txn->wr.loose_count, txn->dbs[FREE_DBI].height,
(size_t)txn->dbs[FREE_DBI].branch_pages, (size_t)txn->dbs[FREE_DBI].leaf_pages,
(size_t)txn->dbs[FREE_DBI].large_pages, (size_t)txn->dbs[FREE_DBI].items);
else
@ -1318,10 +1316,9 @@ done:
"unable fetch-slot, alloc-flags 0x%x, err %d, txn-flags "
"0x%x, re-list-len %zu, loose-count %zu, gc: height %u, "
"branch %zu, leaf %zu, large %zu, entries %zu\n",
flags, ret.err, txn->flags, MDBX_PNL_GETSIZE(txn->wr.repnl), txn->wr.loose_count,
txn->dbs[FREE_DBI].height, (size_t)txn->dbs[FREE_DBI].branch_pages,
(size_t)txn->dbs[FREE_DBI].leaf_pages, (size_t)txn->dbs[FREE_DBI].large_pages,
(size_t)txn->dbs[FREE_DBI].items);
flags, ret.err, txn->flags, pnl_size(txn->wr.repnl), txn->wr.loose_count, txn->dbs[FREE_DBI].height,
(size_t)txn->dbs[FREE_DBI].branch_pages, (size_t)txn->dbs[FREE_DBI].leaf_pages,
(size_t)txn->dbs[FREE_DBI].large_pages, (size_t)txn->dbs[FREE_DBI].items);
}
ret.page = nullptr;
}
@ -1368,7 +1365,7 @@ __hot pgr_t gc_alloc_single(const MDBX_cursor *const mc) {
return ret;
}
if (likely(MDBX_PNL_GETSIZE(txn->wr.repnl) > 0))
if (likely(pnl_size(txn->wr.repnl) > 0))
return page_alloc_finalize(txn->env, txn, mc, repnl_get_single(txn), 1);
return gc_alloc_ex(mc, 1, ALLOC_DEFAULT);


@ -228,14 +228,14 @@ static int gc_prepare_stockpile(MDBX_txn *txn, gcu_t *ctx, const size_t for_reti
TRACE(">> retired-stored %zu, retired-left %zi, stockpile %zu, now-need %zu (4list %zu, "
"4cow %zu, 4tree %zu)",
ctx->retired_stored, MDBX_PNL_GETSIZE(txn->wr.retired_pages) - ctx->retired_stored, gc_stockpile(txn),
ctx->retired_stored, pnl_size(txn->wr.retired_pages) - ctx->retired_stored, gc_stockpile(txn),
for_all_before_touch, for_retired, for_cow, for_tree_before_touch);
int err = gc_touch(ctx);
TRACE("== after-touch, stockpile %zu, err %d", gc_stockpile(txn), err);
if (!MDBX_ENABLE_BIGFOOT && unlikely(for_retired > 1) &&
MDBX_PNL_GETSIZE(txn->wr.retired_pages) != ctx->retired_stored && err == MDBX_SUCCESS) {
if (!MDBX_ENABLE_BIGFOOT && unlikely(for_retired > 1) && pnl_size(txn->wr.retired_pages) != ctx->retired_stored &&
err == MDBX_SUCCESS) {
if (unlikely(ctx->retired_stored)) {
err = gc_clean_stored_retired(txn, ctx);
if (unlikely(err != MDBX_SUCCESS))
@ -263,7 +263,7 @@ static int gc_prepare_stockpile(MDBX_txn *txn, gcu_t *ctx, const size_t for_reti
static int gc_prepare_stockpile4update(MDBX_txn *txn, gcu_t *ctx) { return gc_prepare_stockpile(txn, ctx, 0); }
static int gc_prepare_stockpile4retired(MDBX_txn *txn, gcu_t *ctx) {
const size_t retired_whole = MDBX_PNL_GETSIZE(txn->wr.retired_pages);
const size_t retired_whole = pnl_size(txn->wr.retired_pages);
const intptr_t retired_left = retired_whole - ctx->retired_stored;
size_t for_retired = 0;
if (retired_left > 0) {
@ -307,7 +307,7 @@ static int gc_merge_loose(MDBX_txn *txn, gcu_t *ctx) {
int err = pnl_need(&txn->wr.repnl, 2 * txn->wr.loose_count + 2);
if (unlikely(err != MDBX_SUCCESS))
return err;
pnl_t loose = txn->wr.repnl + MDBX_PNL_ALLOCLEN(txn->wr.repnl) - txn->wr.loose_count - 1;
pnl_t loose = txn->wr.repnl + pnl_alloclen(txn->wr.repnl) - txn->wr.loose_count - 1;
size_t count = 0;
for (page_t *lp = txn->wr.loose_pages; lp; lp = page_next(lp)) {
tASSERT(txn, lp->flags == P_LOOSE);
@ -316,7 +316,7 @@ static int gc_merge_loose(MDBX_txn *txn, gcu_t *ctx) {
VALGRIND_MAKE_MEM_DEFINED(&page_next(lp), sizeof(page_t *));
}
tASSERT(txn, count == txn->wr.loose_count);
MDBX_PNL_SETSIZE(loose, count);
pnl_setsize(loose, count);
pnl_sort(loose, txn->geo.first_unallocated);
pnl_merge(txn->wr.repnl, loose);
}
@ -381,7 +381,7 @@ static int gc_store_retired(MDBX_txn *txn, gcu_t *ctx) {
return err;
pnl_sort(txn->wr.retired_pages, txn->geo.first_unallocated);
retired_before = MDBX_PNL_GETSIZE(txn->wr.retired_pages);
retired_before = pnl_size(txn->wr.retired_pages);
should_retry = false;
ctx->retired_stored = 0;
ctx->bigfoot = txn->txnid;
@ -409,7 +409,7 @@ static int gc_store_retired(MDBX_txn *txn, gcu_t *ctx) {
memset(data.iov_base, 0xBB, data.iov_len);
#endif /* MDBX_DEBUG && (ENABLE_MEMCHECK || __SANITIZE_ADDRESS__) */
const size_t retired_after = MDBX_PNL_GETSIZE(txn->wr.retired_pages);
const size_t retired_after = pnl_size(txn->wr.retired_pages);
const size_t left_after = retired_after - ctx->retired_stored;
const size_t chunk = (left_after < chunk_hi) ? left_after : chunk_hi;
should_retry = retired_before != retired_after && chunk < retired_after;
@ -430,7 +430,7 @@ static int gc_store_retired(MDBX_txn *txn, gcu_t *ctx) {
ctx->bigfoot, (unsigned)(ctx->bigfoot - txn->txnid), chunk, at, at + chunk, retired_before);
}
ctx->retired_stored += chunk;
} while (ctx->retired_stored < MDBX_PNL_GETSIZE(txn->wr.retired_pages) && (++ctx->bigfoot, true));
} while (ctx->retired_stored < pnl_size(txn->wr.retired_pages) && (++ctx->bigfoot, true));
} while (unlikely(should_retry));
#else
/* Write to last page of GC */
@ -454,7 +454,7 @@ static int gc_store_retired(MDBX_txn *txn, gcu_t *ctx) {
/* Retry if wr.retired_pages[] grew during the Put() */
} while (data.iov_len < MDBX_PNL_SIZEOF(txn->wr.retired_pages));
ctx->retired_stored = MDBX_PNL_GETSIZE(txn->wr.retired_pages);
ctx->retired_stored = pnl_size(txn->wr.retired_pages);
pnl_sort(txn->wr.retired_pages, txn->geo.first_unallocated);
tASSERT(txn, data.iov_len == MDBX_PNL_SIZEOF(txn->wr.retired_pages));
memcpy(data.iov_base, txn->wr.retired_pages, data.iov_len);
@ -540,7 +540,7 @@ static int gc_push_sequel(MDBX_txn *txn, gcu_t *ctx, txnid_t id) {
static void gc_dense_hist(MDBX_txn *txn, gcu_t *ctx) {
memset(&ctx->dense_histogram, 0, sizeof(ctx->dense_histogram));
size_t seqlen = 0, seqmax = 1;
for (size_t i = 2; i <= MDBX_PNL_GETSIZE(txn->wr.repnl); ++i) {
for (size_t i = 2; i <= pnl_size(txn->wr.repnl); ++i) {
seqlen += 1;
if (seqlen == ARRAY_LENGTH(ctx->dense_histogram.array) ||
!MDBX_PNL_CONTIGUOUS(txn->wr.repnl[i - 1], txn->wr.repnl[i], 1)) {
@ -702,7 +702,7 @@ static bool solve_recursive(const sr_context_t *const ct, sr_state_t *const st,
static int gc_dense_solve(MDBX_txn *txn, gcu_t *ctx, gc_dense_histogram_t *const solution) {
sr_state_t st = {
.left_slots = rkl_len(&txn->wr.gc.ready4reuse), .left_volume = ctx->return_left, .hist = ctx->dense_histogram};
assert(st.left_slots > 0 && st.left_volume > 0 && MDBX_PNL_GETSIZE(txn->wr.repnl) > 0);
assert(st.left_slots > 0 && st.left_volume > 0 && pnl_size(txn->wr.repnl) > 0);
if (unlikely(!st.left_slots || !st.left_volume)) {
ERROR("%s/%d: %s", "MDBX_PROBLEM", MDBX_PROBLEM, "recursive-solving preconditions violated");
return MDBX_PROBLEM;
@ -750,7 +750,7 @@ static int gc_dense_solve(MDBX_txn *txn, gcu_t *ctx, gc_dense_histogram_t *const
// .left_volume = 8463,
// .hist = {.end = 31, .array = {6493, 705, 120, 14, 2, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0,
// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4}}};
// assert(st.left_slots > 0 && st.left_volume > 0 && MDBX_PNL_GETSIZE(txn->wr.repnl) > 0);
// assert(st.left_slots > 0 && st.left_volume > 0 && pnl_size(txn->wr.repnl) > 0);
// if (unlikely(!st.left_slots || !st.left_volume)) {
// ERROR("%s/%d: %s", "MDBX_PROBLEM", MDBX_PROBLEM, "recursive-solving preconditions violated");
// return MDBX_PROBLEM;
@ -974,8 +974,7 @@ static inline int gc_reserve4return(MDBX_txn *txn, gcu_t *ctx, const size_t chun
ctx->return_reserved_hi += chunk_hi;
if (unlikely(!rkl_empty(&txn->wr.gc.reclaimed))) {
NOTICE("%s: restart since %zu slot(s) reclaimed (reserved %zu...%zu of %zu)", dbg_prefix(ctx),
rkl_len(&txn->wr.gc.reclaimed), ctx->return_reserved_lo, ctx->return_reserved_hi,
MDBX_PNL_GETSIZE(txn->wr.repnl));
rkl_len(&txn->wr.gc.reclaimed), ctx->return_reserved_lo, ctx->return_reserved_hi, pnl_size(txn->wr.repnl));
return MDBX_RESULT_TRUE;
}
@ -1069,12 +1068,12 @@ static int gc_handle_dense(MDBX_txn *txn, gcu_t *ctx, size_t left_min, size_t le
size_t chunk_lo = chunk_hi - txn->env->maxgc_large1page + ctx->goodchunk;
TRACE("%s: dense-chunk (seq-len %zu, %d of %d) %zu...%zu, gc-per-ovpage %u", dbg_prefix(ctx), i, n + 1,
solution.array[i - 1], chunk_lo, chunk_hi, txn->env->maxgc_large1page);
size_t amount = MDBX_PNL_GETSIZE(txn->wr.repnl);
size_t amount = pnl_size(txn->wr.repnl);
err = gc_reserve4return(txn, ctx, chunk_lo, chunk_hi);
if (unlikely(err != MDBX_SUCCESS))
return err;
const size_t now = MDBX_PNL_GETSIZE(txn->wr.repnl);
const size_t now = pnl_size(txn->wr.repnl);
if (span < amount - now - txn->dbs[FREE_DBI].height || span > amount - now + txn->dbs[FREE_DBI].height)
TRACE("dense-%s-reservation: miss %zu (expected) != %zi (got)", "solve", span, amount - now);
amount = now;
@ -1085,8 +1084,7 @@ static int gc_handle_dense(MDBX_txn *txn, gcu_t *ctx, size_t left_min, size_t le
}
} else if (rkl_len(&txn->wr.gc.comeback)) {
NOTICE("%s: restart since %zu slot(s) comemack non-dense (reserved %zu...%zu of %zu)", dbg_prefix(ctx),
rkl_len(&txn->wr.gc.comeback), ctx->return_reserved_lo, ctx->return_reserved_hi,
MDBX_PNL_GETSIZE(txn->wr.repnl));
rkl_len(&txn->wr.gc.comeback), ctx->return_reserved_lo, ctx->return_reserved_hi, pnl_size(txn->wr.repnl));
return /* repeat the loop */ MDBX_RESULT_TRUE;
}
@ -1100,7 +1098,7 @@ static int gc_handle_dense(MDBX_txn *txn, gcu_t *ctx, size_t left_min, size_t le
}
const size_t per_page = txn->env->ps / sizeof(pgno_t);
size_t amount = MDBX_PNL_GETSIZE(txn->wr.repnl);
size_t amount = pnl_size(txn->wr.repnl);
do {
if (rkl_empty(&txn->wr.gc.ready4reuse)) {
NOTICE("%s: restart since no slot(s) available (reserved %zu...%zu of %zu)", dbg_prefix(ctx),
@ -1125,7 +1123,7 @@ static int gc_handle_dense(MDBX_txn *txn, gcu_t *ctx, size_t left_min, size_t le
err = gc_reserve4return(txn, ctx, chunk_lo, chunk_hi);
if (unlikely(err != MDBX_SUCCESS))
return err;
const size_t now = MDBX_PNL_GETSIZE(txn->wr.repnl);
const size_t now = pnl_size(txn->wr.repnl);
if (base - adjusted + txn->dbs[FREE_DBI].height < amount - now ||
base - adjusted > amount - now + txn->dbs[FREE_DBI].height)
TRACE("dense-%s-reservation: miss %zu (expected) != %zi (got)", "unsolve", base - adjusted, amount - now);
@ -1135,7 +1133,7 @@ static int gc_handle_dense(MDBX_txn *txn, gcu_t *ctx, size_t left_min, size_t le
if (unlikely(err != MDBX_SUCCESS))
ERROR("unable provide IDs and/or to fit returned PNL (%zd+%zd pages, %zd+%zd slots), err %d", ctx->retired_stored,
MDBX_PNL_GETSIZE(txn->wr.repnl), rkl_len(&txn->wr.gc.comeback), rkl_len(&txn->wr.gc.ready4reuse), err);
pnl_size(txn->wr.repnl), rkl_len(&txn->wr.gc.comeback), rkl_len(&txn->wr.gc.ready4reuse), err);
return err;
}
@ -1154,7 +1152,7 @@ static int gc_rerere(MDBX_txn *txn, gcu_t *ctx) {
// gc_solve_test(txn, ctx);
tASSERT(txn, rkl_empty(&txn->wr.gc.reclaimed));
const size_t amount = MDBX_PNL_GETSIZE(txn->wr.repnl);
const size_t amount = pnl_size(txn->wr.repnl);
if (ctx->return_reserved_hi >= amount) {
if (unlikely(ctx->dense)) {
ctx->dense = false;
@ -1264,7 +1262,7 @@ static int gc_fill_returned(MDBX_txn *txn, gcu_t *ctx) {
* Assuming the reserve is sufficient and the surplus on hand is acceptable, the fill task reduces
* to distributing the excess reserve across the records according to their size, and then simply writing the data.
* Preferably this should be done without any expensive operations such as division, etc. */
const size_t amount = MDBX_PNL_GETSIZE(txn->wr.repnl);
const size_t amount = pnl_size(txn->wr.repnl);
tASSERT(txn, amount > 0 && amount <= ctx->return_reserved_hi && !rkl_empty(&txn->wr.gc.comeback));
const size_t slots = rkl_len(&txn->wr.gc.comeback);
if (likely(slots == 1)) {
@ -1276,15 +1274,15 @@ static int gc_fill_returned(MDBX_txn *txn, gcu_t *ctx) {
if (likely(err == MDBX_SUCCESS)) {
pgno_t *const from = MDBX_PNL_BEGIN(txn->wr.repnl), *const to = MDBX_PNL_END(txn->wr.repnl);
TRACE("%s: fill %zu [ %zu:%" PRIaPGNO "...%zu:%" PRIaPGNO "] @%" PRIaTXN " (%s)", dbg_prefix(ctx),
MDBX_PNL_GETSIZE(txn->wr.repnl), from - txn->wr.repnl, from[0], to - txn->wr.repnl, to[-1], id, "at-once");
tASSERT(txn, data.iov_len >= gc_chunk_bytes(MDBX_PNL_GETSIZE(txn->wr.repnl)));
if (unlikely(data.iov_len - gc_chunk_bytes(MDBX_PNL_GETSIZE(txn->wr.repnl)) >= txn->env->ps * 2)) {
pnl_size(txn->wr.repnl), from - txn->wr.repnl, from[0], to - txn->wr.repnl, to[-1], id, "at-once");
tASSERT(txn, data.iov_len >= gc_chunk_bytes(pnl_size(txn->wr.repnl)));
if (unlikely(data.iov_len - gc_chunk_bytes(pnl_size(txn->wr.repnl)) >= txn->env->ps * 2)) {
NOTICE("too long %s-comeback-reserve @%" PRIaTXN ", have %zu bytes, need %zu bytes", "single", id, data.iov_len,
gc_chunk_bytes(MDBX_PNL_GETSIZE(txn->wr.repnl)));
gc_chunk_bytes(pnl_size(txn->wr.repnl)));
return MDBX_RESULT_TRUE;
}
/* coverity[var_deref_model] */
memcpy(data.iov_base, txn->wr.repnl, gc_chunk_bytes(MDBX_PNL_GETSIZE(txn->wr.repnl)));
memcpy(data.iov_base, txn->wr.repnl, gc_chunk_bytes(pnl_size(txn->wr.repnl)));
}
return err;
}
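A note on the comment in gc_fill_returned() above: since the reserve is taken to be sufficient, spreading the excess over the comeback slots needs no division at all; a round-robin pass can peel the surplus off one entry at a time. A hypothetical, self-contained sketch of that idea (demo_spread and its inputs are illustrative names, not libmdbx code):

#include <stddef.h>
#include <stdio.h>

/* Spread `surplus` spare entries over `slots` reserved chunks using
 * only increments/decrements, never division. Illustrative only. */
static void demo_spread(size_t *chunk, size_t slots, size_t surplus) {
  while (surplus)
    for (size_t i = 0; i < slots && surplus; ++i, --surplus)
      chunk[i] -= 1; /* take one spare entry away from this chunk */
}

int main(void) {
  size_t chunk[3] = {10, 10, 10}; /* reserved chunk sizes */
  demo_spread(chunk, 3, 4);       /* 4 entries of excess reserve */
  printf("%zu %zu %zu\n", chunk[0], chunk[1], chunk[2]); /* 8 9 9 */
  return 0;
}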
@ -1433,7 +1431,7 @@ retry:
tASSERT(txn, txn->wr.loose_pages == 0);
}
if (ctx->retired_stored < MDBX_PNL_GETSIZE(txn->wr.retired_pages)) {
if (ctx->retired_stored < pnl_size(txn->wr.retired_pages)) {
/* store retired-list into GC */
err = gc_store_retired(txn, ctx);
if (unlikely(err != MDBX_SUCCESS))
@ -1449,17 +1447,17 @@ retry:
goto bailout;
}
if (unlikely(MDBX_PNL_GETSIZE(txn->wr.repnl) + env->maxgc_large1page <= ctx->return_reserved_lo) && !ctx->dense) {
if (unlikely(pnl_size(txn->wr.repnl) + env->maxgc_large1page <= ctx->return_reserved_lo) && !ctx->dense) {
/* too many pages were spent after the reservation, resulting in too much reserve */
TRACE("%s: reclaimed-list %zu < reversed %zu, retry", dbg_prefix(ctx), MDBX_PNL_GETSIZE(txn->wr.repnl),
TRACE("%s: reclaimed-list %zu < reversed %zu, retry", dbg_prefix(ctx), pnl_size(txn->wr.repnl),
ctx->return_reserved_lo);
goto retry;
}
if (ctx->return_reserved_hi < MDBX_PNL_GETSIZE(txn->wr.repnl)) {
if (ctx->return_reserved_hi < pnl_size(txn->wr.repnl)) {
/* the upper bound of the reserve is NOT enough, continue reserving */
TRACE(">> %s, %zu...%zu, %s %zu", "reserving", ctx->return_reserved_lo, ctx->return_reserved_hi, "return-left",
MDBX_PNL_GETSIZE(txn->wr.repnl) - ctx->return_reserved_hi);
pnl_size(txn->wr.repnl) - ctx->return_reserved_hi);
err = gc_rerere(txn, ctx);
if (unlikely(err != MDBX_SUCCESS)) {
if (err == MDBX_RESULT_TRUE)
@ -1469,8 +1467,8 @@ retry:
continue;
}
if (MDBX_PNL_GETSIZE(txn->wr.repnl) > 0) {
TRACE(">> %s, %s %zu -> %zu...%zu", "filling", "return-reserved", MDBX_PNL_GETSIZE(txn->wr.repnl),
if (pnl_size(txn->wr.repnl) > 0) {
TRACE(">> %s, %s %zu -> %zu...%zu", "filling", "return-reserved", pnl_size(txn->wr.repnl),
ctx->return_reserved_lo, ctx->return_reserved_hi);
err = gc_fill_returned(txn, ctx);
if (unlikely(err != MDBX_SUCCESS)) {
@ -1484,7 +1482,7 @@ retry:
tASSERT(txn, err == MDBX_SUCCESS);
if (AUDIT_ENABLED()) {
err = audit_ex(txn, ctx->retired_stored + MDBX_PNL_GETSIZE(txn->wr.repnl), true);
err = audit_ex(txn, ctx->retired_stored + pnl_size(txn->wr.repnl), true);
if (unlikely(err != MDBX_SUCCESS))
goto bailout;
}
@ -1496,7 +1494,7 @@ retry:
bailout:
txn->cursors[FREE_DBI] = ctx->cursor.next;
MDBX_PNL_SETSIZE(txn->wr.repnl, 0);
pnl_setsize(txn->wr.repnl, 0);
#if MDBX_ENABLE_PROFGC
env->lck->pgops.gc_prof.wloops += (uint32_t)ctx->loop;
#endif /* MDBX_ENABLE_PROFGC */


@ -62,7 +62,7 @@ MDBX_INTERNAL pgr_t gc_alloc_single(const MDBX_cursor *const mc);
MDBX_INTERNAL int gc_update(MDBX_txn *txn, gcu_t *ctx);
MDBX_NOTHROW_PURE_FUNCTION static inline size_t gc_stockpile(const MDBX_txn *txn) {
return MDBX_PNL_GETSIZE(txn->wr.repnl) + txn->wr.loose_count;
return pnl_size(txn->wr.repnl) + txn->wr.loose_count;
}
MDBX_NOTHROW_PURE_FUNCTION static inline size_t gc_chunk_bytes(const size_t chunk) {


@ -24,7 +24,7 @@ void pnl_free(pnl_t pnl) {
}
pnl_t pnl_clone(const pnl_t src) {
pnl_t pl = pnl_alloc(MDBX_PNL_ALLOCLEN(src));
pnl_t pl = pnl_alloc(pnl_alloclen(src));
if (likely(pl))
memcpy(pl, src, MDBX_PNL_SIZEOF(src));
return pl;
@ -33,9 +33,9 @@ pnl_t pnl_clone(const pnl_t src) {
void pnl_shrink(pnl_t __restrict *__restrict ppnl) {
assert(pnl_bytes2size(pnl_size2bytes(MDBX_PNL_INITIAL)) >= MDBX_PNL_INITIAL &&
pnl_bytes2size(pnl_size2bytes(MDBX_PNL_INITIAL)) < MDBX_PNL_INITIAL * 3 / 2);
assert(MDBX_PNL_GETSIZE(*ppnl) <= PAGELIST_LIMIT && MDBX_PNL_ALLOCLEN(*ppnl) >= MDBX_PNL_GETSIZE(*ppnl));
MDBX_PNL_SETSIZE(*ppnl, 0);
if (unlikely(MDBX_PNL_ALLOCLEN(*ppnl) >
assert(pnl_size(*ppnl) <= PAGELIST_LIMIT && pnl_alloclen(*ppnl) >= pnl_size(*ppnl));
pnl_setsize(*ppnl, 0);
if (unlikely(pnl_alloclen(*ppnl) >
MDBX_PNL_INITIAL * (MDBX_PNL_PREALLOC_FOR_RADIXSORT ? 8 : 4) - MDBX_CACHELINE_SIZE / sizeof(pgno_t))) {
size_t bytes = pnl_size2bytes(MDBX_PNL_INITIAL * 2);
pnl_t pnl = osal_realloc(*ppnl - 1, bytes);
@ -50,8 +50,8 @@ void pnl_shrink(pnl_t __restrict *__restrict ppnl) {
}
int pnl_reserve(pnl_t __restrict *__restrict ppnl, const size_t wanna) {
const size_t allocated = MDBX_PNL_ALLOCLEN(*ppnl);
assert(MDBX_PNL_GETSIZE(*ppnl) <= PAGELIST_LIMIT && MDBX_PNL_ALLOCLEN(*ppnl) >= MDBX_PNL_GETSIZE(*ppnl));
const size_t allocated = pnl_alloclen(*ppnl);
assert(pnl_size(*ppnl) <= PAGELIST_LIMIT && pnl_alloclen(*ppnl) >= pnl_size(*ppnl));
if (unlikely(allocated >= wanna))
return MDBX_SUCCESS;
@ -89,15 +89,15 @@ static __always_inline int __must_check_result pnl_append_stepped(unsigned step,
}
#if MDBX_PNL_ASCENDING
size_t w = MDBX_PNL_GETSIZE(pnl);
size_t w = pnl_size(pnl);
do {
pnl[++w] = pgno;
pgno += step;
} while (--n);
MDBX_PNL_SETSIZE(pnl, w);
pnl_setsize(pnl, w);
#else
size_t w = MDBX_PNL_GETSIZE(pnl) + n;
MDBX_PNL_SETSIZE(pnl, w);
size_t w = pnl_size(pnl) + n;
pnl_setsize(pnl, w);
do {
pnl[w--] = pgno;
pgno += step;
@ -121,8 +121,8 @@ __hot int __must_check_result pnl_insert_span(__restrict pnl_t *ppnl, pgno_t pgn
return rc;
const pnl_t pnl = *ppnl;
size_t r = MDBX_PNL_GETSIZE(pnl), w = r + n;
MDBX_PNL_SETSIZE(pnl, w);
size_t r = pnl_size(pnl), w = r + n;
pnl_setsize(pnl, w);
while (r && MDBX_PNL_DISORDERED(pnl[r], pgno))
pnl[w--] = pnl[r--];
@ -134,15 +134,15 @@ __hot int __must_check_result pnl_insert_span(__restrict pnl_t *ppnl, pgno_t pgn
__hot __noinline bool pnl_check(const const_pnl_t pnl, const size_t limit) {
assert(limit >= MIN_PAGENO - MDBX_ENABLE_REFUND);
if (likely(MDBX_PNL_GETSIZE(pnl))) {
if (unlikely(MDBX_PNL_GETSIZE(pnl) > PAGELIST_LIMIT))
if (likely(pnl_size(pnl))) {
if (unlikely(pnl_size(pnl) > PAGELIST_LIMIT))
return false;
if (unlikely(MDBX_PNL_LEAST(pnl) < MIN_PAGENO))
return false;
if (unlikely(MDBX_PNL_MOST(pnl) >= limit))
return false;
if ((!MDBX_DISABLE_VALIDATION || AUDIT_ENABLED()) && likely(MDBX_PNL_GETSIZE(pnl) > 1)) {
if ((!MDBX_DISABLE_VALIDATION || AUDIT_ENABLED()) && likely(pnl_size(pnl) > 1)) {
const pgno_t *scan = MDBX_PNL_BEGIN(pnl);
const pgno_t *const end = MDBX_PNL_END(pnl);
pgno_t prev = *scan++;
@ -189,10 +189,10 @@ static __always_inline void pnl_merge_inner(pgno_t *__restrict dst, const pgno_t
__hot size_t pnl_merge(pnl_t dst, const pnl_t src) {
assert(pnl_check_allocated(dst, MAX_PAGENO + 1));
assert(pnl_check(src, MAX_PAGENO + 1));
const size_t src_len = MDBX_PNL_GETSIZE(src);
const size_t dst_len = MDBX_PNL_GETSIZE(dst);
const size_t src_len = pnl_size(src);
const size_t dst_len = pnl_size(dst);
size_t total = dst_len;
assert(MDBX_PNL_ALLOCLEN(dst) >= total);
assert(pnl_alloclen(dst) >= total);
if (likely(src_len > 0)) {
total += src_len;
if (!MDBX_DEBUG && total < (MDBX_HAVE_CMOV ? 21 : 12))
@ -207,7 +207,7 @@ __hot size_t pnl_merge(pnl_t dst, const pnl_t src) {
dst[0] = /* the detent */ (MDBX_PNL_ASCENDING ? 0 : P_INVALID);
pnl_merge_inner(dst + total, dst + dst_len, src + src_len, src);
}
MDBX_PNL_SETSIZE(dst, total);
pnl_setsize(dst, total);
}
assert(pnl_check_allocated(dst, MAX_PAGENO + 1));
return total;
@ -223,8 +223,8 @@ RADIXSORT_IMPL(pgno, pgno_t, MDBX_PNL_EXTRACT_KEY, MDBX_PNL_PREALLOC_FOR_RADIXSO
SORT_IMPL(pgno_sort, false, pgno_t, MDBX_PNL_ORDERED)
__hot __noinline void pnl_sort_nochk(pnl_t pnl) {
if (likely(MDBX_PNL_GETSIZE(pnl) < MDBX_RADIXSORT_THRESHOLD) ||
unlikely(!pgno_radixsort(&MDBX_PNL_FIRST(pnl), MDBX_PNL_GETSIZE(pnl))))
if (likely(pnl_size(pnl) < MDBX_RADIXSORT_THRESHOLD) ||
unlikely(!pgno_radixsort(&MDBX_PNL_FIRST(pnl), pnl_size(pnl))))
pgno_sort(MDBX_PNL_BEGIN(pnl), MDBX_PNL_END(pnl));
}
@ -232,8 +232,8 @@ SEARCH_IMPL(pgno_bsearch, pgno_t, pgno_t, MDBX_PNL_ORDERED)
__hot __noinline size_t pnl_search_nochk(const pnl_t pnl, pgno_t pgno) {
const pgno_t *begin = MDBX_PNL_BEGIN(pnl);
const pgno_t *it = pgno_bsearch(begin, MDBX_PNL_GETSIZE(pnl), pgno);
const pgno_t *end = begin + MDBX_PNL_GETSIZE(pnl);
const pgno_t *it = pgno_bsearch(begin, pnl_size(pnl), pgno);
const pgno_t *end = begin + pnl_size(pnl);
assert(it >= begin && it <= end);
if (it != begin)
assert(MDBX_PNL_ORDERED(it[-1], pgno));
@ -243,7 +243,7 @@ __hot __noinline size_t pnl_search_nochk(const pnl_t pnl, pgno_t pgno) {
}
size_t pnl_maxspan(const pnl_t pnl) {
size_t len = MDBX_PNL_GETSIZE(pnl);
size_t len = pnl_size(pnl);
if (len > 1) {
size_t span = 1, left = len - span;
const pgno_t *scan = MDBX_PNL_BEGIN(pnl);
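For orientation, pnl_maxspan() above reports the length of the longest run of consecutive page numbers in a list. A standalone sketch of the same computation over an ordinary ascending array (plain C, not the real PNL type):

#include <stddef.h>
#include <stdio.h>
typedef unsigned pgno_t;

/* Longest run of consecutive values in a sorted array; mirrors the
 * intent of pnl_maxspan over a PNL payload. */
static size_t demo_maxspan(const pgno_t *a, size_t len) {
  size_t best = len ? 1 : 0, run = 1;
  for (size_t i = 1; i < len; ++i) {
    run = (a[i] == a[i - 1] + 1) ? run + 1 : 1;
    if (run > best)
      best = run;
  }
  return best;
}

int main(void) {
  const pgno_t a[] = {4, 5, 7, 8, 9, 12};
  printf("%zu\n", demo_maxspan(a, 6)); /* prints 3 (the 7,8,9 run) */
  return 0;
}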


@ -28,18 +28,19 @@ typedef const pgno_t *const_pnl_t;
#define MDBX_PNL_GRANULATE (1 << MDBX_PNL_GRANULATE_LOG2)
#define MDBX_PNL_INITIAL (MDBX_PNL_GRANULATE - 2 - MDBX_ASSUME_MALLOC_OVERHEAD / sizeof(pgno_t))
#define MDBX_PNL_ALLOCLEN(pl) ((pl)[-1])
#define MDBX_PNL_GETSIZE(pl) ((size_t)((pl)[0]))
#define MDBX_PNL_SETSIZE(pl, size) \
do { \
const size_t __size = size; \
assert(__size < INT_MAX); \
(pl)[0] = (pgno_t)__size; \
} while (0)
MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline size_t pnl_alloclen(const_pnl_t pnl) { return pnl[-1]; }
MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline size_t pnl_size(const_pnl_t pnl) { return pnl[0]; }
MDBX_MAYBE_UNUSED static inline void pnl_setsize(pnl_t pnl, size_t len) {
assert(len < INT_MAX);
pnl[0] = (pgno_t)len;
}
#define MDBX_PNL_FIRST(pl) ((pl)[1])
#define MDBX_PNL_LAST(pl) ((pl)[MDBX_PNL_GETSIZE(pl)])
#define MDBX_PNL_LAST(pl) ((pl)[pnl_size(pl)])
#define MDBX_PNL_BEGIN(pl) (&(pl)[1])
#define MDBX_PNL_END(pl) (&(pl)[MDBX_PNL_GETSIZE(pl) + 1])
#define MDBX_PNL_END(pl) (&(pl)[pnl_size(pl) + 1])
#if MDBX_PNL_ASCENDING
#define MDBX_PNL_EDGE(pl) ((pl) + 1)
@ -47,14 +48,14 @@ typedef const pgno_t *const_pnl_t;
#define MDBX_PNL_MOST(pl) MDBX_PNL_LAST(pl)
#define MDBX_PNL_CONTIGUOUS(prev, next, span) (((next) - (prev)) == (span))
#else
#define MDBX_PNL_EDGE(pl) ((pl) + MDBX_PNL_GETSIZE(pl))
#define MDBX_PNL_EDGE(pl) ((pl) + pnl_size(pl))
#define MDBX_PNL_LEAST(pl) MDBX_PNL_LAST(pl)
#define MDBX_PNL_MOST(pl) MDBX_PNL_FIRST(pl)
#define MDBX_PNL_CONTIGUOUS(prev, next, span) (((prev) - (next)) == (span))
#endif
#define MDBX_PNL_SIZEOF(pl) ((MDBX_PNL_GETSIZE(pl) + 1) * sizeof(pgno_t))
#define MDBX_PNL_IS_EMPTY(pl) (MDBX_PNL_GETSIZE(pl) == 0)
#define MDBX_PNL_SIZEOF(pl) ((pnl_size(pl) + 1) * sizeof(pgno_t))
#define MDBX_PNL_IS_EMPTY(pl) (pnl_size(pl) == 0)
MDBX_NOTHROW_PURE_FUNCTION MDBX_MAYBE_UNUSED static inline pgno_t pnl_bytes2size(const size_t bytes) {
size_t size = bytes / sizeof(pgno_t);
@ -91,16 +92,16 @@ MDBX_MAYBE_UNUSED MDBX_INTERNAL pnl_t pnl_clone(const pnl_t src);
MDBX_INTERNAL int pnl_reserve(pnl_t __restrict *__restrict ppnl, const size_t wanna);
MDBX_MAYBE_UNUSED static inline int __must_check_result pnl_need(pnl_t __restrict *__restrict ppnl, size_t num) {
assert(MDBX_PNL_GETSIZE(*ppnl) <= PAGELIST_LIMIT && MDBX_PNL_ALLOCLEN(*ppnl) >= MDBX_PNL_GETSIZE(*ppnl));
assert(pnl_size(*ppnl) <= PAGELIST_LIMIT && pnl_alloclen(*ppnl) >= pnl_size(*ppnl));
assert(num <= PAGELIST_LIMIT);
const size_t wanna = MDBX_PNL_GETSIZE(*ppnl) + num;
return likely(MDBX_PNL_ALLOCLEN(*ppnl) >= wanna) ? MDBX_SUCCESS : pnl_reserve(ppnl, wanna);
const size_t wanna = pnl_size(*ppnl) + num;
return likely(pnl_alloclen(*ppnl) >= wanna) ? MDBX_SUCCESS : pnl_reserve(ppnl, wanna);
}
MDBX_MAYBE_UNUSED static inline void pnl_append_prereserved(__restrict pnl_t pnl, pgno_t pgno) {
assert(MDBX_PNL_GETSIZE(pnl) < MDBX_PNL_ALLOCLEN(pnl));
assert(pnl_size(pnl) < pnl_alloclen(pnl));
if (AUDIT_ENABLED()) {
for (size_t i = MDBX_PNL_GETSIZE(pnl); i > 0; --i)
for (size_t i = pnl_size(pnl); i > 0; --i)
assert(pgno != pnl[i]);
}
*pnl += 1;
@ -129,7 +130,7 @@ MDBX_INTERNAL void pnl_sort_nochk(pnl_t pnl);
MDBX_INTERNAL bool pnl_check(const const_pnl_t pnl, const size_t limit);
MDBX_MAYBE_UNUSED static inline bool pnl_check_allocated(const const_pnl_t pnl, const size_t limit) {
return pnl == nullptr || (MDBX_PNL_ALLOCLEN(pnl) >= MDBX_PNL_GETSIZE(pnl) && pnl_check(pnl, limit));
return pnl == nullptr || (pnl_alloclen(pnl) >= pnl_size(pnl) && pnl_check(pnl, limit));
}
MDBX_MAYBE_UNUSED static inline void pnl_sort(pnl_t pnl, size_t limit4check) {
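The new pnl_alloclen()/pnl_size()/pnl_setsize() accessors rely on the PNL layout visible above: the handle points just past a small header, so pnl[-1] holds the allocated capacity, pnl[0] holds the current length, and page numbers occupy pnl[1..len]. A minimal standalone sketch of that layout (demo_pnl_alloc is a hypothetical helper; the real pnl_alloc additionally accounts for malloc overhead and granulation):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint32_t pgno_t;

/* A PNL is a pgno_t vector whose element [-1] is the allocated
 * capacity and element [0] is the current length; the payload
 * starts at index 1. The handle points at the length word, so
 * negative indexing reaches the capacity word. */
static pgno_t *demo_pnl_alloc(size_t capacity) {
  pgno_t *raw = malloc((capacity + 2) * sizeof(pgno_t));
  if (!raw)
    return NULL;
  raw[0] = (pgno_t)capacity; /* becomes pnl[-1] */
  raw[1] = 0;                /* becomes pnl[0], i.e. the length */
  return raw + 1;
}

int main(void) {
  pgno_t *pnl = demo_pnl_alloc(8);
  assert(pnl && pnl[-1] == 8 && pnl[0] == 0);
  pnl[0] += 1;      /* length 0 -> 1, as in pnl_append_prereserved */
  pnl[pnl[0]] = 42; /* append page 42 at index 1 */
  printf("len=%u first=%u\n", (unsigned)pnl[0], (unsigned)pnl[1]);
  free(pnl - 1); /* free from the real allocation start */
  return 0;
}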


@ -8,20 +8,20 @@ static void refund_reclaimed(MDBX_txn *txn) {
/* Scanning in descending order */
pgno_t first_unallocated = txn->geo.first_unallocated;
const pnl_t pnl = txn->wr.repnl;
tASSERT(txn, MDBX_PNL_GETSIZE(pnl) && MDBX_PNL_MOST(pnl) == first_unallocated - 1);
tASSERT(txn, pnl_size(pnl) && MDBX_PNL_MOST(pnl) == first_unallocated - 1);
#if MDBX_PNL_ASCENDING
size_t i = MDBX_PNL_GETSIZE(pnl);
size_t i = pnl_size(pnl);
tASSERT(txn, pnl[i] == first_unallocated - 1);
while (--first_unallocated, --i > 0 && pnl[i] == first_unallocated - 1)
;
MDBX_PNL_SETSIZE(pnl, i);
pnl_setsize(pnl, i);
#else
size_t i = 1;
tASSERT(txn, pnl[i] == first_unallocated - 1);
size_t len = MDBX_PNL_GETSIZE(pnl);
size_t len = pnl_size(pnl);
while (--first_unallocated, ++i <= len && pnl[i] == first_unallocated - 1)
;
MDBX_PNL_SETSIZE(pnl, len -= i - 1);
pnl_setsize(pnl, len -= i - 1);
for (size_t move = 0; move < len; ++move)
pnl[1 + move] = pnl[i + move];
#endif
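To make the refund loop above concrete: it trims the run of page numbers immediately adjacent to first_unallocated and lowers that edge accordingly. A standalone sketch of the ascending branch (demo_refund is a hypothetical helper over a plain array):

#include <stdio.h>
typedef unsigned pgno_t;

/* Drop the trailing run a[len-1] == edge-1, a[len-2] == edge-2, ...
 * and shrink the edge in step, mimicking how refund_reclaimed trims
 * both the repnl and first_unallocated. */
static size_t demo_refund(pgno_t *a, size_t len, pgno_t *edge) {
  while (len && a[len - 1] == *edge - 1) {
    --*edge;
    --len;
  }
  return len;
}

int main(void) {
  pgno_t a[] = {2, 3, 7, 8, 9};
  pgno_t edge = 10; /* first unallocated page */
  size_t len = demo_refund(a, 5, &edge);
  printf("len=%zu edge=%u\n", len, edge); /* len=2 edge=7 */
  return 0;
}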
@ -62,7 +62,7 @@ static void refund_loose(MDBX_txn *txn) {
tASSERT(txn, lp->flags == P_LOOSE);
tASSERT(txn, txn->geo.first_unallocated > lp->pgno);
if (likely(txn->geo.first_unallocated - txn->wr.loose_count <= lp->pgno)) {
tASSERT(txn, w < ((suitable == onstack) ? pnl_bytes2size(sizeof(onstack)) : MDBX_PNL_ALLOCLEN(suitable)));
tASSERT(txn, w < ((suitable == onstack) ? pnl_bytes2size(sizeof(onstack)) : pnl_alloclen(suitable)));
suitable[++w] = lp->pgno;
most = (lp->pgno > most) ? lp->pgno : most;
}
@ -72,13 +72,13 @@ static void refund_loose(MDBX_txn *txn) {
if (most + 1 == txn->geo.first_unallocated) {
/* Sort suitable list and refund pages at the tail. */
MDBX_PNL_SETSIZE(suitable, w);
pnl_setsize(suitable, w);
pnl_sort(suitable, MAX_PAGENO + 1);
/* Scanning in descending order */
const intptr_t step = MDBX_PNL_ASCENDING ? -1 : 1;
const intptr_t begin = MDBX_PNL_ASCENDING ? MDBX_PNL_GETSIZE(suitable) : 1;
const intptr_t end = MDBX_PNL_ASCENDING ? 0 : MDBX_PNL_GETSIZE(suitable) + 1;
const intptr_t begin = MDBX_PNL_ASCENDING ? pnl_size(suitable) : 1;
const intptr_t end = MDBX_PNL_ASCENDING ? 0 : pnl_size(suitable) + 1;
tASSERT(txn, suitable[begin] >= suitable[end - step]);
tASSERT(txn, most == suitable[begin]);
@ -178,7 +178,7 @@ bool txn_refund(MDBX_txn *txn) {
refund_loose(txn);
while (true) {
if (MDBX_PNL_GETSIZE(txn->wr.repnl) == 0 || MDBX_PNL_MOST(txn->wr.repnl) != txn->geo.first_unallocated - 1)
if (pnl_size(txn->wr.repnl) == 0 || MDBX_PNL_MOST(txn->wr.repnl) != txn->geo.first_unallocated - 1)
break;
refund_reclaimed(txn);


@ -4,16 +4,15 @@
#include "internals.h"
void spill_remove(MDBX_txn *txn, size_t idx, size_t npages) {
tASSERT(txn, idx > 0 && idx <= MDBX_PNL_GETSIZE(txn->wr.spilled.list) && txn->wr.spilled.least_removed > 0);
tASSERT(txn, idx > 0 && idx <= pnl_size(txn->wr.spilled.list) && txn->wr.spilled.least_removed > 0);
txn->wr.spilled.least_removed = (idx < txn->wr.spilled.least_removed) ? idx : txn->wr.spilled.least_removed;
txn->wr.spilled.list[idx] |= 1;
MDBX_PNL_SETSIZE(txn->wr.spilled.list,
MDBX_PNL_GETSIZE(txn->wr.spilled.list) - (idx == MDBX_PNL_GETSIZE(txn->wr.spilled.list)));
pnl_setsize(txn->wr.spilled.list, pnl_size(txn->wr.spilled.list) - (idx == pnl_size(txn->wr.spilled.list)));
while (unlikely(npages > 1)) {
const pgno_t pgno = (txn->wr.spilled.list[idx] >> 1) + 1;
if (MDBX_PNL_ASCENDING) {
if (++idx > MDBX_PNL_GETSIZE(txn->wr.spilled.list) || (txn->wr.spilled.list[idx] >> 1) != pgno)
if (++idx > pnl_size(txn->wr.spilled.list) || (txn->wr.spilled.list[idx] >> 1) != pgno)
return;
} else {
if (--idx < 1 || (txn->wr.spilled.list[idx] >> 1) != pgno)
@ -21,8 +20,7 @@ void spill_remove(MDBX_txn *txn, size_t idx, size_t npages) {
txn->wr.spilled.least_removed = (idx < txn->wr.spilled.least_removed) ? idx : txn->wr.spilled.least_removed;
}
txn->wr.spilled.list[idx] |= 1;
MDBX_PNL_SETSIZE(txn->wr.spilled.list,
MDBX_PNL_GETSIZE(txn->wr.spilled.list) - (idx == MDBX_PNL_GETSIZE(txn->wr.spilled.list)));
pnl_setsize(txn->wr.spilled.list, pnl_size(txn->wr.spilled.list) - (idx == pnl_size(txn->wr.spilled.list)));
--npages;
}
}
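The spilled list keeps each page number shifted left by one bit, with the low bit acting as a tombstone: spill_remove() above sets the bit instead of compacting right away, and spill_purge() sweeps the marked entries later. A minimal sketch of just the encoding (hypothetical helper names):

#include <assert.h>
typedef unsigned pgno_t;

static inline pgno_t spill_encode(pgno_t pgno) { return pgno << 1; }
static inline pgno_t spill_pgno(pgno_t entry) { return entry >> 1; }
static inline int spill_is_removed(pgno_t entry) { return entry & 1; }

int main(void) {
  pgno_t entry = spill_encode(42);
  assert(spill_pgno(entry) == 42 && !spill_is_removed(entry));
  entry |= 1; /* tombstone, as in spill_remove() */
  assert(spill_pgno(entry) == 42 && spill_is_removed(entry));
  return 0;
}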
@ -31,17 +29,17 @@ pnl_t spill_purge(MDBX_txn *txn) {
tASSERT(txn, txn->wr.spilled.least_removed > 0);
const pnl_t sl = txn->wr.spilled.list;
if (txn->wr.spilled.least_removed != INT_MAX) {
size_t len = MDBX_PNL_GETSIZE(sl), r, w;
size_t len = pnl_size(sl), r, w;
for (w = r = txn->wr.spilled.least_removed; r <= len; ++r) {
sl[w] = sl[r];
w += 1 - (sl[r] & 1);
}
for (size_t i = 1; i < w; ++i)
tASSERT(txn, (sl[i] & 1) == 0);
MDBX_PNL_SETSIZE(sl, w - 1);
pnl_setsize(sl, w - 1);
txn->wr.spilled.least_removed = INT_MAX;
} else {
for (size_t i = 1; i <= MDBX_PNL_GETSIZE(sl); ++i)
for (size_t i = 1; i <= pnl_size(sl); ++i)
tASSERT(txn, (sl[i] & 1) == 0);
}
return sl;


@ -18,14 +18,14 @@ static inline size_t spill_search(const MDBX_txn *txn, pgno_t pgno) {
return 0;
pgno <<= 1;
size_t n = pnl_search(pnl, pgno, (size_t)MAX_PAGENO + MAX_PAGENO + 1);
return (n <= MDBX_PNL_GETSIZE(pnl) && pnl[n] == pgno) ? n : 0;
return (n <= pnl_size(pnl) && pnl[n] == pgno) ? n : 0;
}
static inline bool spill_intersect(const MDBX_txn *txn, pgno_t pgno, size_t npages) {
const pnl_t pnl = txn->wr.spilled.list;
if (likely(!pnl))
return false;
const size_t len = MDBX_PNL_GETSIZE(pnl);
const size_t len = pnl_size(pnl);
if (LOG_ENABLED(MDBX_LOG_EXTRA)) {
DEBUG_EXTRA("PNL len %zu [", len);
for (size_t i = 1; i <= len; ++i)
@ -36,12 +36,12 @@ static inline bool spill_intersect(const MDBX_txn *txn, pgno_t pgno, size_t npag
const pgno_t spilled_range_last = ((pgno + (pgno_t)npages) << 1) - 1;
#if MDBX_PNL_ASCENDING
const size_t n = pnl_search(pnl, spilled_range_begin, (size_t)(MAX_PAGENO + 1) << 1);
tASSERT(txn, n && (n == MDBX_PNL_GETSIZE(pnl) + 1 || spilled_range_begin <= pnl[n]));
const bool rc = n <= MDBX_PNL_GETSIZE(pnl) && pnl[n] <= spilled_range_last;
tASSERT(txn, n && (n == pnl_size(pnl) + 1 || spilled_range_begin <= pnl[n]));
const bool rc = n <= pnl_size(pnl) && pnl[n] <= spilled_range_last;
#else
const size_t n = pnl_search(pnl, spilled_range_last, (size_t)MAX_PAGENO + MAX_PAGENO + 1);
tASSERT(txn, n && (n == MDBX_PNL_GETSIZE(pnl) + 1 || spilled_range_last >= pnl[n]));
const bool rc = n <= MDBX_PNL_GETSIZE(pnl) && pnl[n] >= spilled_range_begin;
tASSERT(txn, n && (n == pnl_size(pnl) + 1 || spilled_range_last >= pnl[n]));
const bool rc = n <= pnl_size(pnl) && pnl[n] >= spilled_range_begin;
#endif
if (ASSERT_ENABLED()) {
bool check = false;


@ -124,7 +124,7 @@ int txn_basal_start(MDBX_txn *txn, unsigned flags) {
#if MDBX_ENABLE_REFUND
txn->wr.loose_refund_wl = 0;
#endif /* MDBX_ENABLE_REFUND */
MDBX_PNL_SETSIZE(txn->wr.retired_pages, 0);
pnl_setsize(txn->wr.retired_pages, 0);
txn->wr.spilled.list = nullptr;
txn->wr.spilled.least_removed = 0;
txn->wr.gc.spent = 0;
@ -295,7 +295,7 @@ int txn_basal_commit(MDBX_txn *txn, struct commit_timestamp *ts) {
ts->audit = ts->gc;
}
if (AUDIT_ENABLED()) {
rc = audit_ex(txn, MDBX_PNL_GETSIZE(txn->wr.retired_pages), true);
rc = audit_ex(txn, pnl_size(txn->wr.retired_pages), true);
if (ts)
ts->audit = osal_monotime();
if (unlikely(rc != MDBX_SUCCESS))
@ -347,7 +347,7 @@ int txn_basal_commit(MDBX_txn *txn, struct commit_timestamp *ts) {
meta.validator_id = head.ptr_c->validator_id;
meta.extra_pagehdr = head.ptr_c->extra_pagehdr;
unaligned_poke_u64(4, meta.pages_retired,
unaligned_peek_u64(4, head.ptr_c->pages_retired) + MDBX_PNL_GETSIZE(txn->wr.retired_pages));
unaligned_peek_u64(4, head.ptr_c->pages_retired) + pnl_size(txn->wr.retired_pages));
meta.geometry = txn->geo;
meta.trees.gc = txn->dbs[FREE_DBI];
meta.trees.main = txn->dbs[MAIN_DBI];


@ -29,7 +29,7 @@ static void txn_merge(MDBX_txn *const parent, MDBX_txn *const txn, const size_t
/* Move retired pages from parent's dirty & spilled list to reclaimed */
size_t r, w, d, s, l;
for (r = w = parent_retired_len; ++r <= MDBX_PNL_GETSIZE(parent->wr.retired_pages);) {
for (r = w = parent_retired_len; ++r <= pnl_size(parent->wr.retired_pages);) {
const pgno_t pgno = parent->wr.retired_pages[r];
const size_t di = dpl_exist(parent, pgno);
const size_t si = !di ? spill_search(parent, pgno) : 0;
@ -54,7 +54,7 @@ static void txn_merge(MDBX_txn *const parent, MDBX_txn *const txn, const size_t
/* The retired pages list is not sorted, but to speed up its sorting
* it is appended in the order implied by MDBX_PNL_ASCENDING */
#if MDBX_PNL_ASCENDING
const size_t len = MDBX_PNL_GETSIZE(parent->wr.retired_pages);
const size_t len = pnl_size(parent->wr.retired_pages);
while (r < len && parent->wr.retired_pages[r + 1] == pgno + l) {
++r;
if (++l == npages)
@ -81,24 +81,24 @@ static void txn_merge(MDBX_txn *const parent, MDBX_txn *const txn, const size_t
int err = pnl_insert_span(&parent->wr.repnl, pgno, l);
ENSURE(txn->env, err == MDBX_SUCCESS);
}
MDBX_PNL_SETSIZE(parent->wr.retired_pages, w);
pnl_setsize(parent->wr.retired_pages, w);
/* Filter-out parent spill list */
if (parent->wr.spilled.list && MDBX_PNL_GETSIZE(parent->wr.spilled.list) > 0) {
if (parent->wr.spilled.list && pnl_size(parent->wr.spilled.list) > 0) {
const pnl_t sl = spill_purge(parent);
size_t len = MDBX_PNL_GETSIZE(sl);
size_t len = pnl_size(sl);
if (len) {
/* Remove refunded pages from parent's spill list */
if (MDBX_ENABLE_REFUND && MDBX_PNL_MOST(sl) >= (parent->geo.first_unallocated << 1)) {
#if MDBX_PNL_ASCENDING
size_t i = MDBX_PNL_GETSIZE(sl);
size_t i = pnl_size(sl);
assert(MDBX_PNL_MOST(sl) == MDBX_PNL_LAST(sl));
do {
if ((sl[i] & 1) == 0)
DEBUG("refund parent's spilled page %" PRIaPGNO, sl[i] >> 1);
i -= 1;
} while (i && sl[i] >= (parent->geo.first_unallocated << 1));
MDBX_PNL_SETSIZE(sl, i);
pnl_setsize(sl, i);
#else
assert(MDBX_PNL_MOST(sl) == MDBX_PNL_FIRST(sl));
size_t i = 0;
@ -107,14 +107,14 @@ static void txn_merge(MDBX_txn *const parent, MDBX_txn *const txn, const size_t
if ((sl[i] & 1) == 0)
DEBUG("refund parent's spilled page %" PRIaPGNO, sl[i] >> 1);
} while (i < len && sl[i + 1] >= (parent->geo.first_unallocated << 1));
MDBX_PNL_SETSIZE(sl, len -= i);
pnl_setsize(sl, len -= i);
memmove(sl + 1, sl + 1 + i, len * sizeof(sl[0]));
#endif
}
tASSERT(txn, pnl_check_allocated(sl, (size_t)parent->geo.first_unallocated << 1));
/* Remove reclaimed pages from parent's spill list */
s = MDBX_PNL_GETSIZE(sl), r = MDBX_PNL_GETSIZE(reclaimed_list);
s = pnl_size(sl), r = pnl_size(reclaimed_list);
/* Scanning from end to beginning */
while (s && r) {
if (sl[s] & 1) {
@ -138,9 +138,9 @@ static void txn_merge(MDBX_txn *const parent, MDBX_txn *const txn, const size_t
/* Remove anything in our dirty list from parent's spill list */
/* Scanning spill list in descending order */
const intptr_t step = MDBX_PNL_ASCENDING ? -1 : 1;
s = MDBX_PNL_ASCENDING ? MDBX_PNL_GETSIZE(sl) : 1;
s = MDBX_PNL_ASCENDING ? pnl_size(sl) : 1;
d = src->length;
while (d && (MDBX_PNL_ASCENDING ? s > 0 : s <= MDBX_PNL_GETSIZE(sl))) {
while (d && (MDBX_PNL_ASCENDING ? s > 0 : s <= pnl_size(sl))) {
if (sl[s] & 1) {
s += step;
continue;
@ -328,7 +328,7 @@ static void txn_merge(MDBX_txn *const parent, MDBX_txn *const txn, const size_t
if (parent->wr.spilled.list) {
assert(pnl_check_allocated(parent->wr.spilled.list, (size_t)parent->geo.first_unallocated << 1));
if (MDBX_PNL_GETSIZE(parent->wr.spilled.list))
if (pnl_size(parent->wr.spilled.list))
parent->flags |= MDBX_TXN_SPILLS;
}
}
@ -368,7 +368,7 @@ int txn_nested_create(MDBX_txn *parent, const MDBX_txn_flags_t flags) {
if (unlikely(err != MDBX_SUCCESS))
return LOG_IFERR(err);
const size_t len = MDBX_PNL_GETSIZE(parent->wr.repnl) + parent->wr.loose_count;
const size_t len = pnl_size(parent->wr.repnl) + parent->wr.loose_count;
txn->wr.repnl = pnl_alloc((len > MDBX_PNL_INITIAL) ? len : MDBX_PNL_INITIAL);
if (unlikely(!txn->wr.repnl))
return LOG_IFERR(MDBX_ENOMEM);
@ -403,7 +403,7 @@ int txn_nested_create(MDBX_txn *parent, const MDBX_txn_flags_t flags) {
if (parent->wr.spilled.list)
spill_purge(parent);
tASSERT(txn, MDBX_PNL_ALLOCLEN(txn->wr.repnl) >= MDBX_PNL_GETSIZE(parent->wr.repnl));
tASSERT(txn, pnl_alloclen(txn->wr.repnl) >= pnl_size(parent->wr.repnl));
memcpy(txn->wr.repnl, parent->wr.repnl, MDBX_PNL_SIZEOF(parent->wr.repnl));
/* coverity[assignment_where_comparison_intended] */
tASSERT(txn, pnl_check_allocated(txn->wr.repnl, (txn->geo.first_unallocated /* LY: intentional assignment
@ -421,7 +421,7 @@ int txn_nested_create(MDBX_txn *parent, const MDBX_txn_flags_t flags) {
return err;
txn->wr.retired_pages = parent->wr.retired_pages;
parent->wr.retired_pages = (void *)(intptr_t)MDBX_PNL_GETSIZE(parent->wr.retired_pages);
parent->wr.retired_pages = (void *)(intptr_t)pnl_size(parent->wr.retired_pages);
txn->cursors[FREE_DBI] = nullptr;
txn->cursors[MAIN_DBI] = nullptr;
@ -448,8 +448,8 @@ void txn_nested_abort(MDBX_txn *nested) {
rkl_destroy(&nested->wr.gc.ready4reuse);
if (nested->wr.retired_pages) {
tASSERT(parent, MDBX_PNL_GETSIZE(nested->wr.retired_pages) >= (uintptr_t)parent->wr.retired_pages);
MDBX_PNL_SETSIZE(nested->wr.retired_pages, (uintptr_t)parent->wr.retired_pages);
tASSERT(parent, pnl_size(nested->wr.retired_pages) >= (uintptr_t)parent->wr.retired_pages);
pnl_setsize(nested->wr.retired_pages, (uintptr_t)parent->wr.retired_pages);
parent->wr.retired_pages = nested->wr.retired_pages;
}
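Context for the retired_pages juggling above: while a nested transaction is active, the parent's wr.retired_pages field temporarily holds the parent's retired count cast to a pointer, while the child owns the actual list; txn_nested_abort() truncates the list back to that count and hands it back. A hedged sketch of the trick (demo_txn is a hypothetical struct, not the real txn layout):

#include <assert.h>
#include <stdint.h>
typedef unsigned pgno_t;

struct demo_txn {
  pgno_t *retired_pages; /* may hold a count cast to a pointer */
};

int main(void) {
  pgno_t list[4] = {3 /* length word */, 10, 11, 12};
  struct demo_txn parent = {list}, nested;

  /* begin nested: hand the list to the child, keep only the count */
  nested.retired_pages = parent.retired_pages;
  parent.retired_pages = (void *)(uintptr_t)list[0];

  /* the child may append; on abort, truncate to the saved count */
  nested.retired_pages[0] = (pgno_t)(uintptr_t)parent.retired_pages;
  parent.retired_pages = nested.retired_pages;
  assert(parent.retired_pages[0] == 3);
  return 0;
}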
@ -475,7 +475,7 @@ int txn_nested_join(MDBX_txn *txn, struct commit_timestamp *ts) {
tASSERT(txn, memcmp(&parent->geo, &txn->geo, sizeof(parent->geo)) == 0);
tASSERT(txn, memcmp(&parent->canary, &txn->canary, sizeof(parent->canary)) == 0);
tASSERT(txn, !txn->wr.spilled.list || MDBX_PNL_GETSIZE(txn->wr.spilled.list) == 0);
tASSERT(txn, !txn->wr.spilled.list || pnl_size(txn->wr.spilled.list) == 0);
tASSERT(txn, txn->wr.loose_count == 0);
/* Update parent's DBs array */
@ -497,8 +497,8 @@ int txn_nested_join(MDBX_txn *txn, struct commit_timestamp *ts) {
/* Preserve space for spill list to avoid parent's state corruption
* if allocation fails. */
const size_t parent_retired_len = (uintptr_t)parent->wr.retired_pages;
tASSERT(txn, parent_retired_len <= MDBX_PNL_GETSIZE(txn->wr.retired_pages));
const size_t retired_delta = MDBX_PNL_GETSIZE(txn->wr.retired_pages) - parent_retired_len;
tASSERT(txn, parent_retired_len <= pnl_size(txn->wr.retired_pages));
const size_t retired_delta = pnl_size(txn->wr.retired_pages) - parent_retired_len;
if (retired_delta) {
int err = pnl_need(&txn->wr.repnl, retired_delta);
if (unlikely(err != MDBX_SUCCESS))
@ -507,7 +507,7 @@ int txn_nested_join(MDBX_txn *txn, struct commit_timestamp *ts) {
if (txn->wr.spilled.list) {
if (parent->wr.spilled.list) {
int err = pnl_need(&parent->wr.spilled.list, MDBX_PNL_GETSIZE(txn->wr.spilled.list));
int err = pnl_need(&parent->wr.spilled.list, pnl_size(txn->wr.spilled.list));
if (unlikely(err != MDBX_SUCCESS))
return err;
}
@ -587,7 +587,7 @@ int txn_nested_join(MDBX_txn *txn, struct commit_timestamp *ts) {
VALGRIND_MAKE_MEM_DEFINED(&page_next(lp), sizeof(page_t *));
}
/* Check parent's reclaimed pages not suitable for refund */
if (MDBX_PNL_GETSIZE(parent->wr.repnl))
if (pnl_size(parent->wr.repnl))
tASSERT(parent, MDBX_PNL_MOST(parent->wr.repnl) + 1 < parent->geo.first_unallocated);
}
#endif /* MDBX_ENABLE_REFUND */