diff --git a/src/core.c b/src/core.c index 3b8734ac..2dab0edb 100644 --- a/src/core.c +++ b/src/core.c @@ -40,22 +40,20 @@ /*------------------------------------------------------------------------------ * Internal inline functions */ -MDBX_NOTHROW_CONST_FUNCTION static unsigned branchless_abs(int value) { +MDBX_NOTHROW_CONST_FUNCTION static size_t branchless_abs(intptr_t value) { assert(value > INT_MIN); - const unsigned expanded_sign = - (unsigned)(value >> (sizeof(value) * CHAR_BIT - 1)); - return ((unsigned)value + expanded_sign) ^ expanded_sign; + const size_t expanded_sign = + (size_t)(value >> (sizeof(value) * CHAR_BIT - 1)); + return ((size_t)value + expanded_sign) ^ expanded_sign; } /* Pack/Unpack 16-bit values for Grow step & Shrink threshold */ -MDBX_NOTHROW_CONST_FUNCTION static __inline pgno_t me2v(unsigned m, - unsigned e) { +MDBX_NOTHROW_CONST_FUNCTION static __inline pgno_t me2v(size_t m, size_t e) { assert(m < 2048 && e < 8); return (pgno_t)(32768 + ((m + 1) << (e + 8))); } -MDBX_NOTHROW_CONST_FUNCTION static __inline uint16_t v2me(size_t v, - unsigned e) { +MDBX_NOTHROW_CONST_FUNCTION static __inline uint16_t v2me(size_t v, size_t e) { assert(v > (e ? 
me2v(2047, e - 1) : 32768)); assert(v <= me2v(2047, e)); size_t m = (v - 32768 + ((size_t)1 << (e + 8)) - 1) >> (e + 8); @@ -103,9 +101,9 @@ MDBX_NOTHROW_CONST_FUNCTION static uint16_t pages2pv(size_t pages) { /*------------------------------------------------------------------------------ * Unaligned access */ -MDBX_MAYBE_UNUSED MDBX_NOTHROW_CONST_FUNCTION static __always_inline unsigned -field_alignment(unsigned alignment_baseline, size_t field_offset) { - unsigned merge = alignment_baseline | (unsigned)field_offset; +MDBX_MAYBE_UNUSED MDBX_NOTHROW_CONST_FUNCTION static __always_inline size_t +field_alignment(size_t alignment_baseline, size_t field_offset) { + size_t merge = alignment_baseline | (size_t)field_offset; return merge & -(int)merge; } @@ -122,7 +120,7 @@ static __always_inline void poke_u8(uint8_t *const __restrict ptr, } MDBX_NOTHROW_PURE_FUNCTION static __always_inline uint16_t -unaligned_peek_u16(const unsigned expected_alignment, const void *const ptr) { +unaligned_peek_u16(const size_t expected_alignment, const void *const ptr) { assert((uintptr_t)ptr % expected_alignment == 0); if (MDBX_UNALIGNED_OK >= 2 || (expected_alignment % sizeof(uint16_t)) == 0) return *(const uint16_t *)ptr; @@ -138,9 +136,9 @@ unaligned_peek_u16(const unsigned expected_alignment, const void *const ptr) { } } -static __always_inline void -unaligned_poke_u16(const unsigned expected_alignment, - void *const __restrict ptr, const uint16_t v) { +static __always_inline void unaligned_poke_u16(const size_t expected_alignment, + void *const __restrict ptr, + const uint16_t v) { assert((uintptr_t)ptr % expected_alignment == 0); if (MDBX_UNALIGNED_OK >= 2 || (expected_alignment % sizeof(v)) == 0) *(uint16_t *)ptr = v; @@ -155,7 +153,7 @@ unaligned_poke_u16(const unsigned expected_alignment, } MDBX_NOTHROW_PURE_FUNCTION static __always_inline uint32_t unaligned_peek_u32( - const unsigned expected_alignment, const void *const __restrict ptr) { + const size_t expected_alignment, 
const void *const __restrict ptr) { assert((uintptr_t)ptr % expected_alignment == 0); if (MDBX_UNALIGNED_OK >= 4 || (expected_alignment % sizeof(uint32_t)) == 0) return *(const uint32_t *)ptr; @@ -177,9 +175,9 @@ MDBX_NOTHROW_PURE_FUNCTION static __always_inline uint32_t unaligned_peek_u32( } } -static __always_inline void -unaligned_poke_u32(const unsigned expected_alignment, - void *const __restrict ptr, const uint32_t v) { +static __always_inline void unaligned_poke_u32(const size_t expected_alignment, + void *const __restrict ptr, + const uint32_t v) { assert((uintptr_t)ptr % expected_alignment == 0); if (MDBX_UNALIGNED_OK >= 4 || (expected_alignment % sizeof(v)) == 0) *(uint32_t *)ptr = v; @@ -198,7 +196,7 @@ unaligned_poke_u32(const unsigned expected_alignment, } MDBX_NOTHROW_PURE_FUNCTION static __always_inline uint64_t unaligned_peek_u64( - const unsigned expected_alignment, const void *const __restrict ptr) { + const size_t expected_alignment, const void *const __restrict ptr) { assert((uintptr_t)ptr % expected_alignment == 0); if (MDBX_UNALIGNED_OK >= 8 || (expected_alignment % sizeof(uint64_t)) == 0) return *(const uint64_t *)ptr; @@ -221,7 +219,7 @@ MDBX_NOTHROW_PURE_FUNCTION static __always_inline uint64_t unaligned_peek_u64( } static __always_inline uint64_t -unaligned_peek_u64_volatile(const unsigned expected_alignment, +unaligned_peek_u64_volatile(const size_t expected_alignment, const volatile void *const __restrict ptr) { assert((uintptr_t)ptr % expected_alignment == 0); assert(expected_alignment % sizeof(uint32_t) == 0); @@ -241,9 +239,9 @@ unaligned_peek_u64_volatile(const unsigned expected_alignment, } } -static __always_inline void -unaligned_poke_u64(const unsigned expected_alignment, - void *const __restrict ptr, const uint64_t v) { +static __always_inline void unaligned_poke_u64(const size_t expected_alignment, + void *const __restrict ptr, + const uint64_t v) { assert((uintptr_t)ptr % expected_alignment == 0); if (MDBX_UNALIGNED_OK >= 8 || 
(expected_alignment % sizeof(v)) == 0) *(uint64_t *)ptr = v; @@ -439,7 +437,7 @@ node_largedata_pgno(const MDBX_node *const __restrict node) { (EVEN_FLOOR(PAGEROOM(pagesize) / 2) - sizeof(indx_t)) #define MAX_GC1OVPAGE(pagesize) (PAGEROOM(pagesize) / sizeof(pgno_t) - 1) -static __inline unsigned keysize_max(size_t pagesize, MDBX_db_flags_t flags) { +static __inline size_t keysize_max(size_t pagesize, MDBX_db_flags_t flags) { assert(pagesize >= MIN_PAGESIZE && pagesize <= MAX_PAGESIZE && is_powerof2(pagesize)); STATIC_ASSERT(BRANCH_NODE_MAX(MIN_PAGESIZE) - NODESIZE >= 8); @@ -454,11 +452,10 @@ static __inline unsigned keysize_max(size_t pagesize, MDBX_db_flags_t flags) { (MDBX_DUPSORT | MDBX_DUPFIXED | MDBX_REVERSEDUP | MDBX_INTEGERDUP)) { const intptr_t max_dupsort_leaf_key = LEAF_NODE_MAX(pagesize) - NODESIZE - sizeof(MDBX_db); - return (max_branch_key < max_dupsort_leaf_key) - ? (unsigned)max_branch_key - : (unsigned)max_dupsort_leaf_key; + return (max_branch_key < max_dupsort_leaf_key) ? 
max_branch_key + : max_dupsort_leaf_key; } - return (unsigned)max_branch_key; + return max_branch_key; } static __inline size_t valsize_max(size_t pagesize, MDBX_db_flags_t flags) { @@ -601,13 +598,13 @@ flags_db2sub(uint16_t db_flags) { /*----------------------------------------------------------------------------*/ MDBX_NOTHROW_PURE_FUNCTION static __always_inline size_t -pgno2bytes(const MDBX_env *env, pgno_t pgno) { +pgno2bytes(const MDBX_env *env, size_t pgno) { eASSERT(env, (1u << env->me_psize2log) == env->me_psize); return ((size_t)pgno) << env->me_psize2log; } MDBX_NOTHROW_PURE_FUNCTION static __always_inline MDBX_page * -pgno2page(const MDBX_env *env, pgno_t pgno) { +pgno2page(const MDBX_env *env, size_t pgno) { return (MDBX_page *)(env->me_map + pgno2bytes(env, pgno)); } @@ -618,12 +615,12 @@ bytes2pgno(const MDBX_env *env, size_t bytes) { } MDBX_NOTHROW_PURE_FUNCTION static size_t -pgno_align2os_bytes(const MDBX_env *env, pgno_t pgno) { +pgno_align2os_bytes(const MDBX_env *env, size_t pgno) { return ceil_powerof2(pgno2bytes(env, pgno), env->me_os_psize); } MDBX_NOTHROW_PURE_FUNCTION static pgno_t pgno_align2os_pgno(const MDBX_env *env, - pgno_t pgno) { + size_t pgno) { return bytes2pgno(env, pgno_align2os_bytes(env, pgno)); } @@ -649,25 +646,25 @@ page_meta(MDBX_page *mp) { } /* Number of nodes on a page */ -MDBX_NOTHROW_PURE_FUNCTION static __always_inline unsigned +MDBX_NOTHROW_PURE_FUNCTION static __always_inline size_t page_numkeys(const MDBX_page *mp) { return mp->mp_lower >> 1; } /* The amount of space remaining in the page */ -MDBX_NOTHROW_PURE_FUNCTION static __always_inline unsigned +MDBX_NOTHROW_PURE_FUNCTION static __always_inline size_t page_room(const MDBX_page *mp) { return mp->mp_upper - mp->mp_lower; } /* Maximum free space in an empty page */ -MDBX_NOTHROW_PURE_FUNCTION static __always_inline unsigned +MDBX_NOTHROW_PURE_FUNCTION static __always_inline size_t page_space(const MDBX_env *env) { STATIC_ASSERT(PAGEHDRSZ % 2 == 0); return 
env->me_psize - PAGEHDRSZ; } -MDBX_NOTHROW_PURE_FUNCTION static __always_inline unsigned +MDBX_NOTHROW_PURE_FUNCTION static __always_inline size_t page_used(const MDBX_env *env, const MDBX_page *mp) { return page_space(env) - page_room(mp); } @@ -750,9 +747,9 @@ __cold static void MDBX_PRINTF_ARGS(2, 3) /* Address of node i in page p */ MDBX_NOTHROW_PURE_FUNCTION static __always_inline MDBX_node * -page_node(const MDBX_page *mp, unsigned i) { +page_node(const MDBX_page *mp, size_t i) { assert(PAGETYPE_COMPAT(mp) == P_LEAF || PAGETYPE_WHOLE(mp) == P_BRANCH); - assert(page_numkeys(mp) > (unsigned)(i)); + assert(page_numkeys(mp) > i); assert(mp->mp_ptrs[i] % 2 == 0); return (MDBX_node *)((char *)mp + mp->mp_ptrs[i] + PAGEHDRSZ); } @@ -761,7 +758,7 @@ page_node(const MDBX_page *mp, unsigned i) { * LEAF2 pages are used for MDBX_DUPFIXED sorted-duplicate sub-DBs. * There are no node headers, keys are stored contiguously. */ MDBX_NOTHROW_PURE_FUNCTION static __always_inline void * -page_leaf2key(const MDBX_page *mp, unsigned i, size_t keysize) { +page_leaf2key(const MDBX_page *mp, size_t i, size_t keysize) { assert(PAGETYPE_COMPAT(mp) == (P_LEAF | P_LEAF2)); assert(mp->mp_leaf2_ksize == keysize); (void)keysize; @@ -1318,7 +1315,7 @@ __cold void thread_dtor(void *rthc) { osal_thread_self(), rthc); const uint32_t self_pid = osal_getpid(); - for (unsigned i = 0; i < rthc_count; ++i) { + for (size_t i = 0; i < rthc_count; ++i) { const osal_thread_key_t key = rthc_table[i].thr_tls_key; MDBX_reader *const reader = thread_rthc_get(key); if (reader < rthc_table[i].begin || reader >= rthc_table[i].end) @@ -1333,7 +1330,7 @@ __cold void thread_dtor(void *rthc) { #endif TRACE("== thread 0x%" PRIxPTR - ", rthc %p, [%i], %p ... %p (%+i), rtch-pid %i, " + ", rthc %p, [%zi], %p ... 
%p (%+i), rtch-pid %i, " "current-pid %i", osal_thread_self(), __Wpedantic_format_voidptr(reader), i, __Wpedantic_format_voidptr(rthc_table[i].begin), @@ -1444,12 +1441,12 @@ __cold void global_dtor(void) { #endif const uint32_t self_pid = osal_getpid(); - for (unsigned i = 0; i < rthc_count; ++i) { + for (size_t i = 0; i < rthc_count; ++i) { const osal_thread_key_t key = rthc_table[i].thr_tls_key; thread_key_delete(key); for (MDBX_reader *rthc = rthc_table[i].begin; rthc < rthc_table[i].end; ++rthc) { - TRACE("== [%i] = key %" PRIuPTR ", %p ... %p, rthc %p (%+i), " + TRACE("== [%zi] = key %" PRIuPTR ", %p ... %p, rthc %p (%+i), " "rthc-pid %i, current-pid %i", i, (uintptr_t)key, __Wpedantic_format_voidptr(rthc_table[i].begin), __Wpedantic_format_voidptr(rthc_table[i].end), @@ -1531,10 +1528,10 @@ __cold void rthc_remove(const osal_thread_key_t key) { TRACE(">> key %zu, rthc_count %u, rthc_limit %u", (uintptr_t)key, rthc_count, rthc_limit); - for (unsigned i = 0; i < rthc_count; ++i) { + for (size_t i = 0; i < rthc_count; ++i) { if (key == rthc_table[i].thr_tls_key) { const uint32_t self_pid = osal_getpid(); - TRACE("== [%i], %p ...%p, current-pid %d", i, + TRACE("== [%zi], %p ...%p, current-pid %d", i, __Wpedantic_format_voidptr(rthc_table[i].begin), __Wpedantic_format_voidptr(rthc_table[i].end), self_pid); @@ -1982,7 +1979,7 @@ static int lcklist_detach_locked(MDBX_env *env) { \ __hot static void NAME(TYPE *const __restrict begin, \ TYPE *const __restrict end) { \ - NAME##_stack stack[sizeof(unsigned) * CHAR_BIT], *__restrict top = stack; \ + NAME##_stack stack[sizeof(size_t) * CHAR_BIT], *__restrict top = stack; \ \ TYPE *__restrict hi = end - 1; \ TYPE *__restrict lo = begin; \ @@ -2043,8 +2040,7 @@ static int lcklist_detach_locked(MDBX_env *env) { #define RADIXSORT_IMPL(NAME, TYPE, EXTRACT_KEY, BUFFER_PREALLOCATED, END_GAP) \ \ - __hot static bool NAME##_radixsort(TYPE *const begin, \ - const unsigned length) { \ + __hot static bool NAME##_radixsort(TYPE 
*const begin, const size_t length) { \ TYPE *tmp; \ if (BUFFER_PREALLOCATED) { \ tmp = begin + length + END_GAP; \ @@ -2055,37 +2051,37 @@ static int lcklist_detach_locked(MDBX_env *env) { return false; \ } \ \ - unsigned key_shift = 0, key_diff_mask; \ + size_t key_shift = 0, key_diff_mask; \ do { \ struct { \ - unsigned a[256], b[256]; \ + pgno_t a[256], b[256]; \ } counters; \ memset(&counters, 0, sizeof(counters)); \ \ key_diff_mask = 0; \ - unsigned prev_key = EXTRACT_KEY(begin) >> key_shift; \ + size_t prev_key = EXTRACT_KEY(begin) >> key_shift; \ TYPE *r = begin, *end = begin + length; \ do { \ - const unsigned key = EXTRACT_KEY(r) >> key_shift; \ + const size_t key = EXTRACT_KEY(r) >> key_shift; \ counters.a[key & 255]++; \ counters.b[(key >> 8) & 255]++; \ key_diff_mask |= prev_key ^ key; \ prev_key = key; \ } while (++r != end); \ \ - unsigned ta = 0, tb = 0; \ - for (unsigned i = 0; i < 256; ++i) { \ - const unsigned ia = counters.a[i]; \ + pgno_t ta = 0, tb = 0; \ + for (size_t i = 0; i < 256; ++i) { \ + const pgno_t ia = counters.a[i]; \ counters.a[i] = ta; \ ta += ia; \ - const unsigned ib = counters.b[i]; \ + const pgno_t ib = counters.b[i]; \ counters.b[i] = tb; \ tb += ib; \ } \ \ r = begin; \ do { \ - const unsigned key = EXTRACT_KEY(r) >> key_shift; \ + const size_t key = EXTRACT_KEY(r) >> key_shift; \ tmp[counters.a[key & 255]++] = *r; \ } while (++r != end); \ \ @@ -2095,7 +2091,7 @@ static int lcklist_detach_locked(MDBX_env *env) { } \ end = (r = tmp) + length; \ do { \ - const unsigned key = EXTRACT_KEY(r) >> key_shift; \ + const size_t key = EXTRACT_KEY(r) >> key_shift; \ begin[counters.b[(key >> 8) & 255]++] = *r; \ } while (++r != end); \ \ @@ -2135,7 +2131,7 @@ static int lcklist_detach_locked(MDBX_env *env) { /* clang-format off */ #define SEARCH_IMPL(NAME, TYPE_LIST, TYPE_ARG, CMP) \ static __always_inline const TYPE_LIST *NAME( \ - const TYPE_LIST *it, unsigned length, const TYPE_ARG item) { \ + const TYPE_LIST *it, size_t length, 
const TYPE_ARG item) { \ const TYPE_LIST *const begin = it, *const end = begin + length; \ \ if (MDBX_HAVE_CMOV) \ @@ -2237,9 +2233,9 @@ static void pnl_shrink(MDBX_PNL *ppl) { assert(pnl_bytes2size(pnl_size2bytes(MDBX_PNL_INITIAL)) >= MDBX_PNL_INITIAL && pnl_bytes2size(pnl_size2bytes(MDBX_PNL_INITIAL)) < MDBX_PNL_INITIAL * 3 / 2); - assert(MDBX_PNL_SIZE(*ppl) <= MDBX_PGL_LIMIT && - MDBX_PNL_ALLOCLEN(*ppl) >= MDBX_PNL_SIZE(*ppl)); - MDBX_PNL_SIZE(*ppl) = 0; + assert(MDBX_PNL_GETSIZE(*ppl) <= MDBX_PGL_LIMIT && + MDBX_PNL_ALLOCLEN(*ppl) >= MDBX_PNL_GETSIZE(*ppl)); + MDBX_PNL_SETSIZE(*ppl, 0); if (unlikely(MDBX_PNL_ALLOCLEN(*ppl) > MDBX_PNL_INITIAL * 2 - MDBX_CACHELINE_SIZE / sizeof(pgno_t))) { size_t bytes = pnl_size2bytes(MDBX_PNL_INITIAL); @@ -2257,8 +2253,8 @@ static void pnl_shrink(MDBX_PNL *ppl) { /* Grow the PNL to the size growed to at least given size */ static int pnl_reserve(MDBX_PNL *ppl, const size_t wanna) { const size_t allocated = MDBX_PNL_ALLOCLEN(*ppl); - assert(MDBX_PNL_SIZE(*ppl) <= MDBX_PGL_LIMIT && - MDBX_PNL_ALLOCLEN(*ppl) >= MDBX_PNL_SIZE(*ppl)); + assert(MDBX_PNL_GETSIZE(*ppl) <= MDBX_PGL_LIMIT && + MDBX_PNL_ALLOCLEN(*ppl) >= MDBX_PNL_GETSIZE(*ppl)); if (likely(allocated >= wanna)) return MDBX_SUCCESS; @@ -2287,21 +2283,21 @@ static int pnl_reserve(MDBX_PNL *ppl, const size_t wanna) { /* Make room for num additional elements in an PNL */ static __always_inline int __must_check_result pnl_need(MDBX_PNL *ppl, size_t num) { - assert(MDBX_PNL_SIZE(*ppl) <= MDBX_PGL_LIMIT && - MDBX_PNL_ALLOCLEN(*ppl) >= MDBX_PNL_SIZE(*ppl)); + assert(MDBX_PNL_GETSIZE(*ppl) <= MDBX_PGL_LIMIT && + MDBX_PNL_ALLOCLEN(*ppl) >= MDBX_PNL_GETSIZE(*ppl)); assert(num <= MDBX_PGL_LIMIT); - const size_t wanna = MDBX_PNL_SIZE(*ppl) + num; + const size_t wanna = MDBX_PNL_GETSIZE(*ppl) + num; return likely(MDBX_PNL_ALLOCLEN(*ppl) >= wanna) ? 
MDBX_SUCCESS : pnl_reserve(ppl, wanna); } static __always_inline void pnl_xappend(MDBX_PNL pl, pgno_t pgno) { - assert(MDBX_PNL_SIZE(pl) < MDBX_PNL_ALLOCLEN(pl)); + assert(MDBX_PNL_GETSIZE(pl) < MDBX_PNL_ALLOCLEN(pl)); if (AUDIT_ENABLED()) { - for (unsigned i = MDBX_PNL_SIZE(pl); i > 0; --i) + for (size_t i = MDBX_PNL_GETSIZE(pl); i > 0; --i) assert(pgno != pl[i]); } - MDBX_PNL_SIZE(pl) += 1; + *pl += 1; MDBX_PNL_LAST(pl) = pgno; } @@ -2309,7 +2305,7 @@ static __always_inline void pnl_xappend(MDBX_PNL pl, pgno_t pgno) { __always_inline static int __must_check_result pnl_append_range(bool spilled, MDBX_PNL *ppl, pgno_t pgno, - unsigned n) { + size_t n) { assert(n > 0); int rc = pnl_need(ppl, n); if (unlikely(rc != MDBX_SUCCESS)) @@ -2317,15 +2313,15 @@ __always_inline static int __must_check_result pnl_append_range(bool spilled, const MDBX_PNL pnl = *ppl; #if MDBX_PNL_ASCENDING - unsigned w = MDBX_PNL_SIZE(pnl); + size_t w = MDBX_PNL_GETSIZE(pnl); do { pnl[++w] = pgno; pgno += spilled ? 2 : 1; } while (--n); - MDBX_PNL_SIZE(pnl) = w; + MDBX_PNL_SETSIZE(pnl, w); #else - unsigned w = MDBX_PNL_SIZE(pnl) + n; - MDBX_PNL_SIZE(pnl) = w; + size_t w = MDBX_PNL_GETSIZE(pnl) + n; + MDBX_PNL_SETSIZE(pnl, w); do { pnl[w--] = pgno; pgno += spilled ? 
2 : 1; @@ -2337,15 +2333,15 @@ __always_inline static int __must_check_result pnl_append_range(bool spilled, /* Append an pgno range into the sorted PNL */ __hot static int __must_check_result pnl_insert_range(MDBX_PNL *ppl, - pgno_t pgno, unsigned n) { + pgno_t pgno, size_t n) { assert(n > 0); int rc = pnl_need(ppl, n); if (unlikely(rc != MDBX_SUCCESS)) return rc; const MDBX_PNL pnl = *ppl; - unsigned r = MDBX_PNL_SIZE(pnl), w = r + n; - MDBX_PNL_SIZE(pnl) = w; + size_t r = MDBX_PNL_GETSIZE(pnl), w = r + n; + MDBX_PNL_SETSIZE(pnl, w); while (r && MDBX_PNL_DISORDERED(pnl[r], pgno)) pnl[w--] = pnl[r--]; @@ -2357,8 +2353,8 @@ __hot static int __must_check_result pnl_insert_range(MDBX_PNL *ppl, __hot static bool pnl_check(const pgno_t *pl, const size_t limit) { assert(limit >= MIN_PAGENO - MDBX_ENABLE_REFUND); - if (likely(MDBX_PNL_SIZE(pl))) { - if (unlikely(MDBX_PNL_SIZE(pl) > MDBX_PGL_LIMIT)) + if (likely(MDBX_PNL_GETSIZE(pl))) { + if (unlikely(MDBX_PNL_GETSIZE(pl) > MDBX_PGL_LIMIT)) return false; if (unlikely(MDBX_PNL_LEAST(pl) < MIN_PAGENO)) return false; @@ -2366,7 +2362,7 @@ __hot static bool pnl_check(const pgno_t *pl, const size_t limit) { return false; if ((!MDBX_DISABLE_VALIDATION || AUDIT_ENABLED()) && - likely(MDBX_PNL_SIZE(pl) > 1)) { + likely(MDBX_PNL_GETSIZE(pl) > 1)) { const pgno_t *scan = MDBX_PNL_BEGIN(pl); const pgno_t *const end = MDBX_PNL_END(pl); pgno_t prev = *scan++; @@ -2382,8 +2378,8 @@ __hot static bool pnl_check(const pgno_t *pl, const size_t limit) { static __always_inline bool pnl_check_allocated(const pgno_t *pl, const size_t limit) { - return pl == nullptr || - (MDBX_PNL_ALLOCLEN(pl) >= MDBX_PNL_SIZE(pl) && pnl_check(pl, limit)); + return pl == nullptr || (MDBX_PNL_ALLOCLEN(pl) >= MDBX_PNL_GETSIZE(pl) && + pnl_check(pl, limit)); } static __always_inline void @@ -2421,31 +2417,32 @@ pnl_merge_inner(pgno_t *__restrict dst, const pgno_t *__restrict src_a, __hot static void pnl_merge(MDBX_PNL dst, const MDBX_PNL src) { 
assert(pnl_check_allocated(dst, MAX_PAGENO + 1)); assert(pnl_check(src, MAX_PAGENO + 1)); - const pgno_t src_len = MDBX_PNL_SIZE(src); - const pgno_t dst_len = MDBX_PNL_SIZE(dst); + const size_t src_len = MDBX_PNL_GETSIZE(src); + const size_t dst_len = MDBX_PNL_GETSIZE(dst); if (likely(src_len > 0)) { - const pgno_t total = dst_len + src_len; + const size_t total = dst_len + src_len; assert(MDBX_PNL_ALLOCLEN(dst) >= total); dst[0] = /* the detent */ (MDBX_PNL_ASCENDING ? 0 : P_INVALID); pnl_merge_inner(dst + total, dst + dst_len, src + src_len, src); - MDBX_PNL_SIZE(dst) = total; + MDBX_PNL_SETSIZE(dst, total); } assert(pnl_check_allocated(dst, MAX_PAGENO + 1)); } -static void spill_remove(MDBX_txn *txn, unsigned idx, unsigned npages) { - tASSERT(txn, idx > 0 && idx <= MDBX_PNL_SIZE(txn->tw.spill_pages) && +static void spill_remove(MDBX_txn *txn, size_t idx, pgno_t npages) { + tASSERT(txn, idx > 0 && idx <= MDBX_PNL_GETSIZE(txn->tw.spill_pages) && txn->tw.spill_least_removed > 0); txn->tw.spill_least_removed = (idx < txn->tw.spill_least_removed) ? 
idx : txn->tw.spill_least_removed; txn->tw.spill_pages[idx] |= 1; - MDBX_PNL_SIZE(txn->tw.spill_pages) -= - (idx == MDBX_PNL_SIZE(txn->tw.spill_pages)); + MDBX_PNL_SETSIZE(txn->tw.spill_pages, + MDBX_PNL_GETSIZE(txn->tw.spill_pages) - + (idx == MDBX_PNL_GETSIZE(txn->tw.spill_pages))); while (unlikely(npages > 1)) { const pgno_t pgno = (txn->tw.spill_pages[idx] >> 1) + 1; if (MDBX_PNL_ASCENDING) { - if (++idx > MDBX_PNL_SIZE(txn->tw.spill_pages) || + if (++idx > MDBX_PNL_GETSIZE(txn->tw.spill_pages) || (txn->tw.spill_pages[idx] >> 1) != pgno) return; } else { @@ -2456,8 +2453,9 @@ static void spill_remove(MDBX_txn *txn, unsigned idx, unsigned npages) { : txn->tw.spill_least_removed; } txn->tw.spill_pages[idx] |= 1; - MDBX_PNL_SIZE(txn->tw.spill_pages) -= - (idx == MDBX_PNL_SIZE(txn->tw.spill_pages)); + MDBX_PNL_SETSIZE(txn->tw.spill_pages, + MDBX_PNL_GETSIZE(txn->tw.spill_pages) - + (idx == MDBX_PNL_GETSIZE(txn->tw.spill_pages))); --npages; } } @@ -2466,17 +2464,17 @@ static MDBX_PNL spill_purge(MDBX_txn *txn) { tASSERT(txn, txn->tw.spill_least_removed > 0); const MDBX_PNL sl = txn->tw.spill_pages; if (txn->tw.spill_least_removed != INT_MAX) { - unsigned len = MDBX_PNL_SIZE(sl), r, w; + size_t len = MDBX_PNL_GETSIZE(sl), r, w; for (w = r = txn->tw.spill_least_removed; r <= len; ++r) { sl[w] = sl[r]; w += 1 - (sl[r] & 1); } for (size_t i = 1; i < w; ++i) tASSERT(txn, (sl[i] & 1) == 0); - MDBX_PNL_SIZE(sl) = w - 1; + MDBX_PNL_SETSIZE(sl, w - 1); txn->tw.spill_least_removed = INT_MAX; } else { - for (size_t i = 1; i <= MDBX_PNL_SIZE(sl); ++i) + for (size_t i = 1; i <= MDBX_PNL_GETSIZE(sl); ++i) tASSERT(txn, (sl[i] & 1) == 0); } return sl; @@ -2493,8 +2491,8 @@ RADIXSORT_IMPL(pgno, pgno_t, MDBX_PNL_EXTRACT_KEY, SORT_IMPL(pgno_sort, false, pgno_t, MDBX_PNL_ORDERED) __hot __noinline static void pnl_sort_nochk(MDBX_PNL pnl) { - if (likely(MDBX_PNL_SIZE(pnl) < MDBX_RADIXSORT_THRESHOLD) || - unlikely(!pgno_radixsort(&MDBX_PNL_FIRST(pnl), MDBX_PNL_SIZE(pnl)))) + if 
(likely(MDBX_PNL_GETSIZE(pnl) < MDBX_RADIXSORT_THRESHOLD) || + unlikely(!pgno_radixsort(&MDBX_PNL_FIRST(pnl), MDBX_PNL_GETSIZE(pnl)))) pgno_sort(MDBX_PNL_BEGIN(pnl), MDBX_PNL_END(pnl)); } @@ -2508,45 +2506,45 @@ static __inline void pnl_sort(MDBX_PNL pnl, size_t limit4check) { * Returns The index of the first item greater than or equal to pgno. */ SEARCH_IMPL(pgno_bsearch, pgno_t, pgno_t, MDBX_PNL_ORDERED) -__hot __noinline static unsigned pnl_search_nochk(const MDBX_PNL pnl, - pgno_t pgno) { +__hot __noinline static size_t pnl_search_nochk(const MDBX_PNL pnl, + pgno_t pgno) { const pgno_t *begin = MDBX_PNL_BEGIN(pnl); - const pgno_t *it = pgno_bsearch(begin, MDBX_PNL_SIZE(pnl), pgno); - const pgno_t *end = begin + MDBX_PNL_SIZE(pnl); + const pgno_t *it = pgno_bsearch(begin, MDBX_PNL_GETSIZE(pnl), pgno); + const pgno_t *end = begin + MDBX_PNL_GETSIZE(pnl); assert(it >= begin && it <= end); if (it != begin) assert(MDBX_PNL_ORDERED(it[-1], pgno)); if (it != end) assert(!MDBX_PNL_ORDERED(it[0], pgno)); - return (unsigned)(it - begin + 1); + return it - begin + 1; } -static __inline unsigned pnl_search(const MDBX_PNL pnl, pgno_t pgno, - size_t limit) { +static __inline size_t pnl_search(const MDBX_PNL pnl, pgno_t pgno, + size_t limit) { assert(pnl_check_allocated(pnl, limit)); assert(pgno < limit); (void)limit; return pnl_search_nochk(pnl, pgno); } -static __inline unsigned search_spilled(const MDBX_txn *txn, pgno_t pgno) { +static __inline size_t search_spilled(const MDBX_txn *txn, pgno_t pgno) { const MDBX_PNL pnl = txn->tw.spill_pages; if (likely(!pnl)) return 0; pgno <<= 1; - unsigned n = pnl_search(pnl, pgno, (size_t)(MAX_PAGENO + 1) << 1); - return (n <= MDBX_PNL_SIZE(pnl) && pnl[n] == pgno) ? n : 0; + size_t n = pnl_search(pnl, pgno, (size_t)(MAX_PAGENO + 1) << 1); + return (n <= MDBX_PNL_GETSIZE(pnl) && pnl[n] == pgno) ? 
n : 0; } static __inline bool intersect_spilled(const MDBX_txn *txn, pgno_t pgno, - unsigned npages) { + pgno_t npages) { const MDBX_PNL pnl = txn->tw.spill_pages; if (likely(!pnl)) return false; - const unsigned len = MDBX_PNL_SIZE(pnl); + const size_t len = MDBX_PNL_GETSIZE(pnl); if (LOG_ENABLED(MDBX_LOG_EXTRA)) { - DEBUG_EXTRA("PNL len %u [", len); - for (unsigned i = 1; i <= len; ++i) + DEBUG_EXTRA("PNL len %zu [", len); + for (size_t i = 1; i <= len; ++i) DEBUG_EXTRA_PRINT(" %li", (pnl[i] & 1) ? -(long)(pnl[i] >> 1) : (long)(pnl[i] >> 1)); DEBUG_EXTRA_PRINT("%s\n", "]"); @@ -2554,20 +2552,21 @@ static __inline bool intersect_spilled(const MDBX_txn *txn, pgno_t pgno, const pgno_t spilled_range_begin = pgno << 1; const pgno_t spilled_range_last = ((pgno + npages) << 1) - 1; #if MDBX_PNL_ASCENDING - const unsigned n = + const size_t n = pnl_search(pnl, spilled_range_begin, (size_t)(MAX_PAGENO + 1) << 1); - assert(n && (n == MDBX_PNL_SIZE(pnl) + 1 || spilled_range_begin <= pnl[n])); - const bool rc = n <= MDBX_PNL_SIZE(pnl) && pnl[n] <= spilled_range_last; + assert(n && + (n == MDBX_PNL_GETSIZE(pnl) + 1 || spilled_range_begin <= pnl[n])); + const bool rc = n <= MDBX_PNL_GETSIZE(pnl) && pnl[n] <= spilled_range_last; #else - const unsigned n = + const size_t n = pnl_search(pnl, spilled_range_last, (size_t)(MAX_PAGENO + 1) << 1); - assert(n && (n == MDBX_PNL_SIZE(pnl) + 1 || spilled_range_last >= pnl[n])); - const bool rc = n <= MDBX_PNL_SIZE(pnl) && pnl[n] >= spilled_range_begin; + assert(n && (n == MDBX_PNL_GETSIZE(pnl) + 1 || spilled_range_last >= pnl[n])); + const bool rc = n <= MDBX_PNL_GETSIZE(pnl) && pnl[n] >= spilled_range_begin; #endif if (ASSERT_ENABLED()) { bool check = false; - for (unsigned i = 0; i < npages; ++i) - check |= search_spilled(txn, pgno + i) != 0; + for (size_t i = 0; i < npages; ++i) + check |= search_spilled(txn, (pgno_t)(pgno + i)) != 0; assert(check == rc); } return rc; @@ -2612,8 +2611,8 @@ static void txl_free(MDBX_TXL tl) { static int 
txl_reserve(MDBX_TXL *ptl, const size_t wanna) { const size_t allocated = (size_t)MDBX_PNL_ALLOCLEN(*ptl); - assert(MDBX_PNL_SIZE(*ptl) <= MDBX_TXL_MAX && - MDBX_PNL_ALLOCLEN(*ptl) >= MDBX_PNL_SIZE(*ptl)); + assert(MDBX_PNL_GETSIZE(*ptl) <= MDBX_TXL_MAX && + MDBX_PNL_ALLOCLEN(*ptl) >= MDBX_PNL_GETSIZE(*ptl)); if (likely(allocated >= wanna)) return MDBX_SUCCESS; @@ -2641,17 +2640,17 @@ static int txl_reserve(MDBX_TXL *ptl, const size_t wanna) { static __always_inline int __must_check_result txl_need(MDBX_TXL *ptl, size_t num) { - assert(MDBX_PNL_SIZE(*ptl) <= MDBX_TXL_MAX && - MDBX_PNL_ALLOCLEN(*ptl) >= MDBX_PNL_SIZE(*ptl)); + assert(MDBX_PNL_GETSIZE(*ptl) <= MDBX_TXL_MAX && + MDBX_PNL_ALLOCLEN(*ptl) >= MDBX_PNL_GETSIZE(*ptl)); assert(num <= MDBX_PGL_LIMIT); - const size_t wanna = (size_t)MDBX_PNL_SIZE(*ptl) + num; + const size_t wanna = (size_t)MDBX_PNL_GETSIZE(*ptl) + num; return likely(MDBX_PNL_ALLOCLEN(*ptl) >= wanna) ? MDBX_SUCCESS : txl_reserve(ptl, wanna); } static __always_inline void txl_xappend(MDBX_TXL tl, txnid_t id) { - assert(MDBX_PNL_SIZE(tl) < MDBX_PNL_ALLOCLEN(tl)); - MDBX_PNL_SIZE(tl) += 1; + assert(MDBX_PNL_GETSIZE(tl) < MDBX_PNL_ALLOCLEN(tl)); + tl[0] += 1; MDBX_PNL_LAST(tl) = id; } @@ -2662,7 +2661,7 @@ static void txl_sort(MDBX_TXL tl) { } static int __must_check_result txl_append(MDBX_TXL *ptl, txnid_t id) { - if (unlikely(MDBX_PNL_SIZE(*ptl) == MDBX_PNL_ALLOCLEN(*ptl))) { + if (unlikely(MDBX_PNL_GETSIZE(*ptl) == MDBX_PNL_ALLOCLEN(*ptl))) { int rc = txl_need(ptl, MDBX_TXL_GRANULATE); if (unlikely(rc != MDBX_SUCCESS)) return rc; @@ -2698,7 +2697,7 @@ static __always_inline size_t dpl_size2bytes(ptrdiff_t size) { return bytes; } -static __always_inline unsigned dpl_bytes2size(const ptrdiff_t bytes) { +static __always_inline size_t dpl_bytes2size(const ptrdiff_t bytes) { size_t size = (bytes - sizeof(MDBX_dpl)) / sizeof(MDBX_dp); assert(size > CURSOR_STACK + MDBX_DPL_RESERVE_GAP && size <= MDBX_PGL_LIMIT + MDBX_PNL_GRANULATE); @@ -2706,10 
+2705,10 @@ static __always_inline unsigned dpl_bytes2size(const ptrdiff_t bytes) { #if MDBX_DPL_PREALLOC_FOR_RADIXSORT size >>= 1; #endif /* MDBX_DPL_PREALLOC_FOR_RADIXSORT */ - return (unsigned)size; + return size; } -static __always_inline unsigned dpl_setlen(MDBX_dpl *dl, unsigned len) { +static __always_inline size_t dpl_setlen(MDBX_dpl *dl, size_t len) { static const MDBX_page dpl_stub_pageE = { {0}, 0, P_BAD, {0}, /* pgno */ ~(pgno_t)0}; assert(dpl_stub_pageE.mp_flags == P_BAD && @@ -2783,7 +2782,7 @@ SORT_IMPL(dp_sort, false, MDBX_dp, DP_SORT_CMP) __hot __noinline static MDBX_dpl *dpl_sort_slowpath(const MDBX_txn *txn) { MDBX_dpl *dl = txn->tw.dirtylist; assert(dl->items[0].pgno == 0 && dl->items[dl->length + 1].pgno == P_INVALID); - const unsigned unsorted = dl->length - dl->sorted; + const size_t unsorted = dl->length - dl->sorted; if (likely(unsorted < MDBX_RADIXSORT_THRESHOLD) || unlikely(!dpl_radixsort(dl->items + 1, dl->length))) { if (dl->sorted > unsorted / 4 + 4 && @@ -2818,7 +2817,7 @@ __hot __noinline static MDBX_dpl *dpl_sort_slowpath(const MDBX_txn *txn) { assert(dl->items[0].pgno == 0 && dl->items[dl->length + 1].pgno == P_INVALID); if (ASSERT_ENABLED()) - for (unsigned i = 0; i <= dl->length; ++i) + for (size_t i = 0; i <= dl->length; ++i) assert(dl->items[i].pgno < dl->items[i + 1].pgno); } else { dp_sort(dl->items + 1, dl->items + dl->length + 1); @@ -2846,7 +2845,7 @@ static __always_inline MDBX_dpl *dpl_sort(const MDBX_txn *txn) { #define DP_SEARCH_CMP(dp, id) ((dp).pgno < (id)) SEARCH_IMPL(dp_bsearch, MDBX_dp, pgno_t, DP_SEARCH_CMP) -__hot __noinline static unsigned dpl_search(const MDBX_txn *txn, pgno_t pgno) { +__hot __noinline static size_t dpl_search(const MDBX_txn *txn, pgno_t pgno) { MDBX_dpl *dl = txn->tw.dirtylist; assert(dl->items[0].pgno == 0 && dl->items[dl->length + 1].pgno == P_INVALID); if (AUDIT_ENABLED()) { @@ -2884,28 +2883,28 @@ __hot __noinline static unsigned dpl_search(const MDBX_txn *txn, pgno_t pgno) { /* continue 
bsearch on the sorted part */ break; } - return (unsigned)(dp_bsearch(dl->items + 1, dl->sorted, pgno) - dl->items); + return dp_bsearch(dl->items + 1, dl->sorted, pgno) - dl->items; } MDBX_NOTHROW_PURE_FUNCTION static __inline unsigned -dpl_npages(const MDBX_dpl *dl, unsigned i) { - assert(0 <= (int)i && i <= dl->length); +dpl_npages(const MDBX_dpl *dl, size_t i) { + assert(0 <= (intptr_t)i && i <= dl->length); unsigned n = likely(!dl->items[i].multi) ? 1 : dl->items[i].ptr->mp_pages; assert(n == (IS_OVERFLOW(dl->items[i].ptr) ? dl->items[i].ptr->mp_pages : 1)); return n; } -MDBX_NOTHROW_PURE_FUNCTION static __inline unsigned -dpl_endpgno(const MDBX_dpl *dl, unsigned i) { +MDBX_NOTHROW_PURE_FUNCTION static __inline pgno_t +dpl_endpgno(const MDBX_dpl *dl, size_t i) { return dpl_npages(dl, i) + dl->items[i].pgno; } static __inline bool dpl_intersect(const MDBX_txn *txn, pgno_t pgno, - unsigned npages) { + pgno_t npages) { MDBX_dpl *dl = txn->tw.dirtylist; assert(dl->sorted == dl->length); assert(dl->items[0].pgno == 0 && dl->items[dl->length + 1].pgno == P_INVALID); - unsigned const n = dpl_search(txn, pgno); + size_t const n = dpl_search(txn, pgno); assert(n >= 1 && n <= dl->length + 1); assert(pgno <= dl->items[n].pgno); assert(pgno > dl->items[n - 1].pgno); @@ -2914,7 +2913,7 @@ static __inline bool dpl_intersect(const MDBX_txn *txn, pgno_t pgno, /* intersection with prev */ dpl_endpgno(dl, n - 1) > pgno; if (ASSERT_ENABLED()) { bool check = false; - for (unsigned i = 1; i <= dl->length; ++i) { + for (size_t i = 1; i <= dl->length; ++i) { const MDBX_page *const dp = dl->items[i].ptr; if (!(dp->mp_pgno /* begin */ >= /* end */ pgno + npages || dpl_endpgno(dl, i) /* end */ <= /* begin */ pgno)) @@ -2925,9 +2924,9 @@ static __inline bool dpl_intersect(const MDBX_txn *txn, pgno_t pgno, return rc; } -static __always_inline unsigned dpl_exist(MDBX_txn *txn, pgno_t pgno) { +static __always_inline size_t dpl_exist(MDBX_txn *txn, pgno_t pgno) { MDBX_dpl *dl = 
txn->tw.dirtylist; - unsigned i = dpl_search(txn, pgno); + size_t i = dpl_search(txn, pgno); assert((int)i > 0); return (dl->items[i].pgno == pgno) ? i : 0; } @@ -2936,22 +2935,21 @@ MDBX_MAYBE_UNUSED static const MDBX_page *debug_dpl_find(const MDBX_txn *txn, const pgno_t pgno) { const MDBX_dpl *dl = txn->tw.dirtylist; assert(dl->items[0].pgno == 0 && dl->items[dl->length + 1].pgno == P_INVALID); - for (unsigned i = dl->length; i > dl->sorted; --i) + for (size_t i = dl->length; i > dl->sorted; --i) if (dl->items[i].pgno == pgno) return dl->items[i].ptr; if (dl->sorted) { - const unsigned i = - (unsigned)(dp_bsearch(dl->items + 1, dl->sorted, pgno) - dl->items); + const size_t i = dp_bsearch(dl->items + 1, dl->sorted, pgno) - dl->items; if (dl->items[i].pgno == pgno) return dl->items[i].ptr; } return nullptr; } -static void dpl_remove_ex(const MDBX_txn *txn, unsigned i, unsigned npages) { +static void dpl_remove_ex(const MDBX_txn *txn, size_t i, pgno_t npages) { MDBX_dpl *dl = txn->tw.dirtylist; - assert((int)i > 0 && i <= dl->length); + assert((intptr_t)i > 0 && i <= dl->length); assert(dl->items[0].pgno == 0 && dl->items[dl->length + 1].pgno == P_INVALID); dl->pages_including_loose -= npages; dl->sorted -= dl->sorted >= i; @@ -2961,29 +2959,29 @@ static void dpl_remove_ex(const MDBX_txn *txn, unsigned i, unsigned npages) { assert(dl->items[0].pgno == 0 && dl->items[dl->length + 1].pgno == P_INVALID); } -static void dpl_remove(const MDBX_txn *txn, unsigned i) { +static void dpl_remove(const MDBX_txn *txn, size_t i) { dpl_remove_ex(txn, i, dpl_npages(txn->tw.dirtylist, i)); } static __always_inline int __must_check_result dpl_append(MDBX_txn *txn, pgno_t pgno, MDBX_page *page, - unsigned npages) { + pgno_t npages) { MDBX_dpl *dl = txn->tw.dirtylist; assert(dl->length <= MDBX_PGL_LIMIT + MDBX_PNL_GRANULATE); assert(dl->items[0].pgno == 0 && dl->items[dl->length + 1].pgno == P_INVALID); if (AUDIT_ENABLED()) { - for (unsigned i = dl->length; i > 0; --i) { + for 
(size_t i = dl->length; i > 0; --i) { assert(dl->items[i].pgno != pgno); if (unlikely(dl->items[i].pgno == pgno)) { - ERROR("Page %u already exist in the DPL at %u", pgno, i); + ERROR("Page %u already exist in the DPL at %zu", pgno, i); return MDBX_PROBLEM; } } } - const unsigned length = dl->length + 1; - const unsigned sorted = + const size_t length = dl->length + 1; + const size_t sorted = (dl->sorted == dl->length && dl->items[dl->length].pgno < pgno) ? length : dl->sorted; @@ -3016,9 +3014,9 @@ static __always_inline int __must_check_result dpl_append(MDBX_txn *txn, return MDBX_SUCCESS; } -static __inline uint32_t dpl_age(const MDBX_txn *txn, unsigned i) { +static __inline uint32_t dpl_age(const MDBX_txn *txn, size_t i) { const MDBX_dpl *dl = txn->tw.dirtylist; - assert((int)i > 0 && i <= dl->length); + assert((intptr_t)i > 0 && i <= dl->length); /* overflow could be here */ return (txn->tw.dirtylru - dl->items[i].lru) & UINT32_C(0x7fffFFFF); } @@ -3033,7 +3031,7 @@ static __must_check_result __inline int page_retire(MDBX_cursor *mc, MDBX_page *mp); static int __must_check_result page_dirty(MDBX_txn *txn, MDBX_page *mp, - unsigned npages); + pgno_t npages); typedef struct page_result { MDBX_page *page; int err; @@ -3042,7 +3040,7 @@ typedef struct page_result { static txnid_t kick_longlived_readers(MDBX_env *env, const txnid_t laggard); static pgr_t page_new(MDBX_cursor *mc, const unsigned flags); -static pgr_t page_new_large(MDBX_cursor *mc, const unsigned npages); +static pgr_t page_new_large(MDBX_cursor *mc, const pgno_t npages); static int page_touch(MDBX_cursor *mc); static int cursor_touch(MDBX_cursor *mc); static int touch_dbi(MDBX_cursor *mc); @@ -3122,7 +3120,7 @@ static bool coherency_check_meta(const MDBX_env *env, static int __must_check_result validate_meta_copy(MDBX_env *env, const MDBX_meta *meta, MDBX_meta *dest); -static int __must_check_result override_meta(MDBX_env *env, unsigned target, +static int __must_check_result override_meta(MDBX_env 
*env, size_t target, txnid_t txnid, const MDBX_meta *shape); static int __must_check_result read_header(MDBX_env *env, MDBX_meta *meta, @@ -3140,17 +3138,17 @@ struct node_result { static struct node_result node_search(MDBX_cursor *mc, const MDBX_val *key); -static int __must_check_result node_add_branch(MDBX_cursor *mc, unsigned indx, +static int __must_check_result node_add_branch(MDBX_cursor *mc, size_t indx, const MDBX_val *key, pgno_t pgno); -static int __must_check_result node_add_leaf(MDBX_cursor *mc, unsigned indx, +static int __must_check_result node_add_leaf(MDBX_cursor *mc, size_t indx, const MDBX_val *key, MDBX_val *data, unsigned flags); -static int __must_check_result node_add_leaf2(MDBX_cursor *mc, unsigned indx, +static int __must_check_result node_add_leaf2(MDBX_cursor *mc, size_t indx, const MDBX_val *key); static void node_del(MDBX_cursor *mc, size_t ksize); -static void node_shrink(MDBX_page *mp, unsigned indx); +static void node_shrink(MDBX_page *mp, size_t indx); static int __must_check_result node_move(MDBX_cursor *csrc, MDBX_cursor *cdst, bool fromleft); static int __must_check_result node_read(MDBX_cursor *mc, const MDBX_node *leaf, @@ -3161,7 +3159,7 @@ static int __must_check_result update_key(MDBX_cursor *mc, const MDBX_val *key); static void cursor_pop(MDBX_cursor *mc); static int __must_check_result cursor_push(MDBX_cursor *mc, MDBX_page *mp); -static int __must_check_result audit_ex(MDBX_txn *txn, unsigned retired_stored, +static int __must_check_result audit_ex(MDBX_txn *txn, size_t retired_stored, bool dont_filter_gc); static int __must_check_result page_check(MDBX_cursor *const mc, @@ -3192,7 +3190,7 @@ static int __must_check_result cursor_last(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data); static int __must_check_result cursor_init(MDBX_cursor *mc, MDBX_txn *txn, - MDBX_dbi dbi); + size_t dbi); static int __must_check_result cursor_xinit0(MDBX_cursor *mc); static int __must_check_result cursor_xinit1(MDBX_cursor *mc, MDBX_node 
*node, const MDBX_page *mp); @@ -3203,7 +3201,7 @@ static void cursor_copy(const MDBX_cursor *csrc, MDBX_cursor *cdst); static int __must_check_result drop_tree(MDBX_cursor *mc, const bool may_have_subDBs); -static int __must_check_result fetch_sdb(MDBX_txn *txn, MDBX_dbi dbi); +static int __must_check_result fetch_sdb(MDBX_txn *txn, size_t dbi); static int __must_check_result setup_dbx(MDBX_dbx *const dbx, const MDBX_db *const db, const unsigned pagesize); @@ -3423,7 +3421,7 @@ const char *mdbx_dump_val(const MDBX_val *key, char *const buf, bool is_ascii = true; const uint8_t *const data = key->iov_base; - for (unsigned i = 0; i < key->iov_len; i++) + for (size_t i = 0; i < key->iov_len; i++) if (data[i] < ' ' || data[i] > '~') { is_ascii = false; break; @@ -3433,13 +3431,13 @@ const char *mdbx_dump_val(const MDBX_val *key, char *const buf, int len = snprintf(buf, bufsize, "%.*s", (key->iov_len > INT_MAX) ? INT_MAX : (int)key->iov_len, data); - assert(len > 0 && (unsigned)len < bufsize); + assert(len > 0 && (size_t)len < bufsize); (void)len; } else { char *const detent = buf + bufsize - 2; char *ptr = buf; *ptr++ = '<'; - for (unsigned i = 0; i < key->iov_len; i++) { + for (size_t i = 0; i < key->iov_len; i++) { const ptrdiff_t left = detent - ptr; assert(left > 0); int len = snprintf(ptr, left, "%02x", data[i]); @@ -3471,7 +3469,7 @@ MDBX_MAYBE_UNUSED static void page_list(MDBX_page *mp) { pgno_t pgno = mp->mp_pgno; const char *type; MDBX_node *node; - unsigned i, nkeys, nsize, total = 0; + size_t i, nkeys, nsize, total = 0; MDBX_val key; DKBUF; @@ -3504,37 +3502,37 @@ MDBX_MAYBE_UNUSED static void page_list(MDBX_page *mp) { } nkeys = page_numkeys(mp); - VERBOSE("%s %" PRIaPGNO " numkeys %u\n", type, pgno, nkeys); + VERBOSE("%s %" PRIaPGNO " numkeys %zu\n", type, pgno, nkeys); for (i = 0; i < nkeys; i++) { if (IS_LEAF2(mp)) { /* LEAF2 pages have no mp_ptrs[] or node headers */ key.iov_len = nsize = mp->mp_leaf2_ksize; key.iov_base = page_leaf2key(mp, i, nsize); 
total += nsize; - VERBOSE("key %u: nsize %u, %s\n", i, nsize, DKEY(&key)); + VERBOSE("key %zu: nsize %zu, %s\n", i, nsize, DKEY(&key)); continue; } node = page_node(mp, i); key.iov_len = node_ks(node); key.iov_base = node->mn_data; - nsize = (unsigned)(NODESIZE + key.iov_len); + nsize = NODESIZE + key.iov_len; if (IS_BRANCH(mp)) { - VERBOSE("key %u: page %" PRIaPGNO ", %s\n", i, node_pgno(node), + VERBOSE("key %zu: page %" PRIaPGNO ", %s\n", i, node_pgno(node), DKEY(&key)); total += nsize; } else { if (node_flags(node) & F_BIGDATA) nsize += sizeof(pgno_t); else - nsize += (unsigned)node_ds(node); + nsize += node_ds(node); total += nsize; nsize += sizeof(indx_t); - VERBOSE("key %u: nsize %u, %s%s\n", i, nsize, DKEY(&key), + VERBOSE("key %zu: nsize %zu, %s%s\n", i, nsize, DKEY(&key), leafnode_type(node)); } total = EVEN(total); } - VERBOSE("Total: header %u + contents %u + unused %u\n", + VERBOSE("Total: header %zu + contents %zu + unused %zu\n", IS_LEAF2(mp) ? PAGEHDRSZ : PAGEHDRSZ + mp->mp_lower, total, page_room(mp)); } @@ -3601,7 +3599,7 @@ int mdbx_dcmp(const MDBX_txn *txn, MDBX_dbi dbi, const MDBX_val *a, /* Allocate memory for a page. * Re-use old malloc'ed pages first for singletons, otherwise just malloc. * Set MDBX_TXN_ERROR on failure. 
*/ -static MDBX_page *page_malloc(MDBX_txn *txn, unsigned num) { +static MDBX_page *page_malloc(MDBX_txn *txn, size_t num) { MDBX_env *env = txn->mt_env; MDBX_page *np = env->me_dp_reserve; size_t size = env->me_psize; @@ -3636,12 +3634,12 @@ static MDBX_page *page_malloc(MDBX_txn *txn, unsigned num) { #endif VALGRIND_MAKE_MEM_UNDEFINED(np, size); np->mp_flags = 0; - np->mp_pages = num; + np->mp_pages = (pgno_t)num; return np; } /* Free a shadow dirty page */ -static void dpage_free(MDBX_env *env, MDBX_page *dp, unsigned npages) { +static void dpage_free(MDBX_env *env, MDBX_page *dp, pgno_t npages) { VALGRIND_MAKE_MEM_UNDEFINED(dp, pgno2bytes(env, npages)); MDBX_ASAN_UNPOISON_MEMORY_REGION(dp, pgno2bytes(env, npages)); if (unlikely(env->me_flags & MDBX_PAGEPERTURB)) @@ -3666,7 +3664,7 @@ static void dlist_free(MDBX_txn *txn) { MDBX_env *env = txn->mt_env; MDBX_dpl *const dl = txn->tw.dirtylist; - for (unsigned i = 1; i <= dl->length; i++) + for (size_t i = 1; i <= dl->length; i++) dpage_free(env, dl->items[i].ptr, dpl_npages(dl, i)); dpl_clear(dl); @@ -3691,8 +3689,8 @@ MDBX_MAYBE_UNUSED __cold static bool dirtylist_check(MDBX_txn *txn) { if (!AUDIT_ENABLED()) return true; - unsigned loose = 0, pages = 0; - for (unsigned i = dl->length; i > 0; --i) { + size_t loose = 0, pages = 0; + for (size_t i = dl->length; i > 0; --i) { const MDBX_page *const dp = dl->items[i].ptr; if (!dp) continue; @@ -3724,16 +3722,16 @@ MDBX_MAYBE_UNUSED __cold static bool dirtylist_check(MDBX_txn *txn) { return false; } - const unsigned rpa = + const size_t rpa = pnl_search(txn->tw.reclaimed_pglist, dp->mp_pgno, txn->mt_next_pgno); - tASSERT(txn, rpa > MDBX_PNL_SIZE(txn->tw.reclaimed_pglist) || + tASSERT(txn, rpa > MDBX_PNL_GETSIZE(txn->tw.reclaimed_pglist) || txn->tw.reclaimed_pglist[rpa] != dp->mp_pgno); - if (rpa <= MDBX_PNL_SIZE(txn->tw.reclaimed_pglist) && + if (rpa <= MDBX_PNL_GETSIZE(txn->tw.reclaimed_pglist) && unlikely(txn->tw.reclaimed_pglist[rpa] == dp->mp_pgno)) return false; 
if (num > 1) { - const unsigned rpb = pnl_search(txn->tw.reclaimed_pglist, - dp->mp_pgno + num - 1, txn->mt_next_pgno); + const size_t rpb = pnl_search(txn->tw.reclaimed_pglist, + dp->mp_pgno + num - 1, txn->mt_next_pgno); tASSERT(txn, rpa == rpb); if (unlikely(rpa != rpb)) return false; @@ -3748,7 +3746,7 @@ MDBX_MAYBE_UNUSED __cold static bool dirtylist_check(MDBX_txn *txn) { if (unlikely(pages != dl->pages_including_loose)) return false; - for (unsigned i = 1; i <= MDBX_PNL_SIZE(txn->tw.retired_pages); ++i) { + for (size_t i = 1; i <= MDBX_PNL_GETSIZE(txn->tw.retired_pages); ++i) { const MDBX_page *const dp = debug_dpl_find(txn, txn->tw.retired_pages[i]); tASSERT(txn, !dp); if (unlikely(dp)) @@ -3763,21 +3761,21 @@ static void refund_reclaimed(MDBX_txn *txn) { /* Scanning in descend order */ pgno_t next_pgno = txn->mt_next_pgno; const MDBX_PNL pnl = txn->tw.reclaimed_pglist; - tASSERT(txn, MDBX_PNL_SIZE(pnl) && MDBX_PNL_MOST(pnl) == next_pgno - 1); + tASSERT(txn, MDBX_PNL_GETSIZE(pnl) && MDBX_PNL_MOST(pnl) == next_pgno - 1); #if MDBX_PNL_ASCENDING - unsigned i = MDBX_PNL_SIZE(pnl); + size_t i = MDBX_PNL_GETSIZE(pnl); tASSERT(txn, pnl[i] == next_pgno - 1); while (--next_pgno, --i > 0 && pnl[i] == next_pgno - 1) ; - MDBX_PNL_SIZE(pnl) = i; + MDBX_PNL_SETSIZE(pnl, i); #else - unsigned i = 1; + size_t i = 1; tASSERT(txn, pnl[i] == next_pgno - 1); - unsigned len = MDBX_PNL_SIZE(pnl); + size_t len = MDBX_PNL_GETSIZE(pnl); while (--next_pgno, ++i <= len && pnl[i] == next_pgno - 1) ; - MDBX_PNL_SIZE(pnl) = len -= i - 1; - for (unsigned move = 0; move < len; ++move) + MDBX_PNL_SETSIZE(pnl, len -= i - 1); + for (size_t move = 0; move < len; ++move) pnl[1 + move] = pnl[i + move]; #endif VERBOSE("refunded %" PRIaPGNO " pages: %" PRIaPGNO " -> %" PRIaPGNO, @@ -3808,7 +3806,7 @@ static void refund_loose(MDBX_txn *txn) { /* Collect loose-pages which may be refunded. 
*/ tASSERT(txn, txn->mt_next_pgno >= MIN_PAGENO + txn->tw.loose_count); pgno_t most = MIN_PAGENO; - unsigned w = 0; + size_t w = 0; for (const MDBX_page *lp = txn->tw.loose_pages; lp; lp = lp->mp_next) { tASSERT(txn, lp->mp_flags == P_LOOSE); tASSERT(txn, txn->mt_next_pgno > lp->mp_pgno); @@ -3823,23 +3821,25 @@ static void refund_loose(MDBX_txn *txn) { if (most + 1 == txn->mt_next_pgno) { /* Sort suitable list and refund pages at the tail. */ - MDBX_PNL_SIZE(suitable) = w; + MDBX_PNL_SETSIZE(suitable, w); pnl_sort(suitable, MAX_PAGENO + 1); /* Scanning in descend order */ - const int step = MDBX_PNL_ASCENDING ? -1 : 1; - const int begin = MDBX_PNL_ASCENDING ? MDBX_PNL_SIZE(suitable) : 1; - const int end = MDBX_PNL_ASCENDING ? 0 : MDBX_PNL_SIZE(suitable) + 1; + const intptr_t step = MDBX_PNL_ASCENDING ? -1 : 1; + const intptr_t begin = + MDBX_PNL_ASCENDING ? MDBX_PNL_GETSIZE(suitable) : 1; + const intptr_t end = + MDBX_PNL_ASCENDING ? 0 : MDBX_PNL_GETSIZE(suitable) + 1; tASSERT(txn, suitable[begin] >= suitable[end - step]); tASSERT(txn, most == suitable[begin]); - for (int i = begin + step; i != end; i += step) { + for (intptr_t i = begin + step; i != end; i += step) { if (suitable[i] != most - 1) break; most -= 1; } - const unsigned refunded = txn->mt_next_pgno - most; - DEBUG("refund-suitable %u pages %" PRIaPGNO " -> %" PRIaPGNO, refunded, + const size_t refunded = txn->mt_next_pgno - most; + DEBUG("refund-suitable %zu pages %" PRIaPGNO " -> %" PRIaPGNO, refunded, most, txn->mt_next_pgno); txn->tw.loose_count -= refunded; txn->tw.dirtyroom += refunded; @@ -3848,7 +3848,7 @@ static void refund_loose(MDBX_txn *txn) { txn->mt_next_pgno = most; /* Filter-out dirty list */ - unsigned r = 0; + size_t r = 0; w = 0; if (dl->sorted) { do { @@ -3880,7 +3880,7 @@ static void refund_loose(MDBX_txn *txn) { tASSERT(txn, dl->sorted == dl->length); /* Scan dirtylist tail-forward and cutoff suitable pages. 
*/ - unsigned n; + size_t n; for (n = dl->length; dl->items[n].pgno == txn->mt_next_pgno - 1 && dl->items[n].ptr->mp_flags == P_LOOSE; --n) { @@ -3893,7 +3893,7 @@ static void refund_loose(MDBX_txn *txn) { dpl_setlen(dl, n); if (dl->sorted != dl->length) { - const unsigned refunded = dl->sorted - dl->length; + const size_t refunded = dl->sorted - dl->length; dl->sorted = dl->length; txn->tw.loose_count -= refunded; txn->tw.dirtyroom += refunded; @@ -3931,7 +3931,7 @@ static bool txn_refund(MDBX_txn *txn) { refund_loose(txn); while (true) { - if (MDBX_PNL_SIZE(txn->tw.reclaimed_pglist) == 0 || + if (MDBX_PNL_GETSIZE(txn->tw.reclaimed_pglist) == 0 || MDBX_PNL_MOST(txn->tw.reclaimed_pglist) != txn->mt_next_pgno - 1) break; @@ -3963,7 +3963,7 @@ static __inline bool txn_refund(MDBX_txn *txn) { #endif /* MDBX_ENABLE_REFUND */ __cold static void kill_page(MDBX_txn *txn, MDBX_page *mp, pgno_t pgno, - unsigned npages) { + pgno_t npages) { MDBX_env *const env = txn->mt_env; DEBUG("kill %u page(s) %" PRIaPGNO, npages, pgno); eASSERT(env, pgno >= NUM_METAS && npages); @@ -3977,8 +3977,7 @@ __cold static void kill_page(MDBX_txn *txn, MDBX_page *mp, pgno_t pgno, struct iovec iov[MDBX_COMMIT_PAGES]; iov[0].iov_len = env->me_psize; iov[0].iov_base = (char *)env->me_pbuf + env->me_psize; - size_t iov_off = pgno2bytes(env, pgno); - unsigned n = 1; + size_t iov_off = pgno2bytes(env, pgno), n = 1; while (--npages) { iov[n] = iov[0]; if (++n == MDBX_COMMIT_PAGES) { @@ -3992,8 +3991,8 @@ __cold static void kill_page(MDBX_txn *txn, MDBX_page *mp, pgno_t pgno, } /* Remove page from dirty list */ -static __inline void page_wash(MDBX_txn *txn, const unsigned di, - MDBX_page *const mp, const unsigned npages) { +static __inline void page_wash(MDBX_txn *txn, const size_t di, + MDBX_page *const mp, const pgno_t npages) { tASSERT(txn, di && di <= txn->tw.dirtylist->length && txn->tw.dirtylist->items[di].ptr == mp); dpl_remove_ex(txn, di, npages); @@ -4039,7 +4038,8 @@ static int 
page_retire_ex(MDBX_cursor *mc, const pgno_t pgno, * requires support the list of dirty pages and avoid explicit spilling. * So for flexibility and avoid extra internal dependencies we just * fallback to reading if dirty list was not allocated yet. */ - unsigned di = 0, si = 0, npages = 1; + size_t di = 0, si = 0; + pgno_t npages = 1; bool is_frozen = false, is_spilled = false, is_shadowed = false; if (unlikely(!mp)) { if (ASSERT_ENABLED() && pageflags) { @@ -4331,7 +4331,7 @@ typedef struct iov_ctx { } iov_ctx_t; __must_check_result static int iov_init(MDBX_txn *const txn, iov_ctx_t *ctx, - unsigned items, pgno_t npages) { + size_t items, size_t npages) { ctx->env = txn->mt_env; ctx->ior = &txn->mt_env->me_ioring; ctx->err = osal_ioring_reserve(ctx->ior, items, @@ -4418,7 +4418,7 @@ __must_check_result static int iov_write(iov_ctx_t *ctx) { } __must_check_result static int iov_page(MDBX_txn *txn, iov_ctx_t *ctx, - MDBX_page *dp, unsigned npages) { + MDBX_page *dp, pgno_t npages) { MDBX_env *const env = txn->mt_env; tASSERT(txn, ctx->err == MDBX_SUCCESS); tASSERT(txn, dp->mp_pgno >= MIN_PAGENO && dp->mp_pgno < txn->mt_next_pgno); @@ -4471,7 +4471,7 @@ __must_check_result static int iov_page(MDBX_txn *txn, iov_ctx_t *ctx, } static int spill_page(MDBX_txn *txn, iov_ctx_t *ctx, MDBX_page *dp, - const unsigned npages) { + const pgno_t npages) { #if !MDBX_AVOID_MSYNC tASSERT(txn, !(txn->mt_flags & MDBX_WRITEMAP)); #endif /* MDBX_AVOID_MSYNC */ @@ -4488,13 +4488,13 @@ static int spill_page(MDBX_txn *txn, iov_ctx_t *ctx, MDBX_page *dp, /* Set unspillable LRU-label for dirty pages watched by txn. * Returns the number of pages marked as unspillable. 
*/ -static unsigned cursor_keep(MDBX_txn *txn, MDBX_cursor *mc) { - unsigned keep = 0; +static size_t cursor_keep(MDBX_txn *txn, MDBX_cursor *mc) { + size_t keep = 0; while (mc->mc_flags & C_INITIALIZED) { - for (unsigned i = 0; i < mc->mc_snum; ++i) { + for (size_t i = 0; i < mc->mc_snum; ++i) { const MDBX_page *mp = mc->mc_pg[i]; if (IS_MODIFIABLE(txn, mp) && !IS_SUBP(mp)) { - unsigned const n = dpl_search(txn, mp->mp_pgno); + size_t const n = dpl_search(txn, mp->mp_pgno); if (txn->tw.dirtylist->items[n].pgno == mp->mp_pgno && dpl_age(txn, n)) { txn->tw.dirtylist->items[n].lru = txn->tw.dirtylru; @@ -4509,9 +4509,9 @@ static unsigned cursor_keep(MDBX_txn *txn, MDBX_cursor *mc) { return keep; } -static unsigned txn_keep(MDBX_txn *txn, MDBX_cursor *m0) { - unsigned keep = m0 ? cursor_keep(txn, m0) : 0; - for (unsigned i = FREE_DBI; i < txn->mt_numdbs; ++i) +static size_t txn_keep(MDBX_txn *txn, MDBX_cursor *m0) { + size_t keep = m0 ? cursor_keep(txn, m0) : 0; + for (size_t i = FREE_DBI; i < txn->mt_numdbs; ++i) if (F_ISSET(txn->mt_dbistate[i], DBI_DIRTY | DBI_VALID) && txn->mt_dbs[i].md_root != P_INVALID) for (MDBX_cursor *mc = txn->mt_cursors[i]; mc; mc = mc->mc_next) @@ -4524,7 +4524,7 @@ static unsigned txn_keep(MDBX_txn *txn, MDBX_cursor *m0) { * 0 = should be spilled; * ... * > 255 = must not be spilled. */ -static unsigned spill_prio(const MDBX_txn *txn, const unsigned i, +static unsigned spill_prio(const MDBX_txn *txn, const size_t i, const uint32_t reciprocal) { MDBX_dpl *const dl = txn->tw.dirtylist; const uint32_t age = dpl_age(txn, i); @@ -4604,22 +4604,22 @@ static unsigned spill_prio(const MDBX_txn *txn, const unsigned i, * the child hasn't committed yet, and we'd have no way to undo it if * the child aborted. 
*/ static int txn_spill(MDBX_txn *const txn, MDBX_cursor *const m0, - const unsigned need) { + const size_t need) { #if xMDBX_DEBUG_SPILLING != 1 /* production mode */ if (likely(txn->tw.dirtyroom + txn->tw.loose_count >= need)) return MDBX_SUCCESS; - unsigned wanna_spill = need - txn->tw.dirtyroom; + size_t wanna_spill = need - txn->tw.dirtyroom; #else /* debug mode: spill at least one page if xMDBX_DEBUG_SPILLING == 1 */ - unsigned wanna_spill = + size_t wanna_spill = (need > txn->tw.dirtyroom) ? need - txn->tw.dirtyroom : 1; #endif /* xMDBX_DEBUG_SPILLING */ int rc = MDBX_SUCCESS; #if !MDBX_AVOID_MSYNC if (txn->mt_flags & MDBX_WRITEMAP) { - NOTICE("%s-spilling of %u dirty-entries (have %u dirty-room, need %u)", + NOTICE("%s-spilling of %zu dirty-entries (have %zu dirty-room, need %zu)", "msync", wanna_spill, txn->tw.dirtyroom, need); tASSERT(txn, txn->tw.spill_pages == nullptr); const MDBX_env *env = txn->mt_env; @@ -4639,12 +4639,12 @@ static int txn_spill(MDBX_txn *const txn, MDBX_cursor *const m0, } #endif /* MDBX_AVOID_MSYNC */ - const unsigned dirty = txn->tw.dirtylist->length; - const unsigned spill_min = + const size_t dirty = txn->tw.dirtylist->length; + const size_t spill_min = txn->mt_env->me_options.spill_min_denominator ? dirty / txn->mt_env->me_options.spill_min_denominator : 0; - const unsigned spill_max = + const size_t spill_max = dirty - (txn->mt_env->me_options.spill_max_denominator ? 
dirty / txn->mt_env->me_options.spill_max_denominator : 0); @@ -4653,8 +4653,8 @@ static int txn_spill(MDBX_txn *const txn, MDBX_cursor *const m0, if (!wanna_spill) return MDBX_SUCCESS; - NOTICE("%s-spilling %u dirty-entries (have %u dirty-room, need %u)", "pwrite", - wanna_spill, txn->tw.dirtyroom, need); + NOTICE("%s-spilling %zu dirty-entries (have %zu dirty-room, need %zu)", + "pwrite", wanna_spill, txn->tw.dirtyroom, need); tASSERT(txn, txn->tw.dirtylist->length >= wanna_spill); if (!MDBX_AVOID_MSYNC || !(txn->mt_flags & MDBX_WRITEMAP)) { if (!txn->tw.spill_pages) { @@ -4680,13 +4680,13 @@ static int txn_spill(MDBX_txn *const txn, MDBX_cursor *const m0, MDBX_dpl *const dl = dpl_sort(txn); /* Preserve pages which may soon be dirtied again */ - const unsigned unspillable = txn_keep(txn, m0); + const size_t unspillable = txn_keep(txn, m0); if (unspillable + txn->tw.loose_count >= dl->length) { #if xMDBX_DEBUG_SPILLING == 1 /* avoid false failure in debug mode */ if (likely(txn->tw.dirtyroom + txn->tw.loose_count >= need)) return MDBX_SUCCESS; #endif /* xMDBX_DEBUG_SPILLING */ - ERROR("all %u dirty pages are unspillable since referenced " + ERROR("all %zu dirty pages are unspillable since referenced " "by a cursor(s), use fewer cursors or increase " "MDBX_opt_txn_dp_limit", unspillable); @@ -4718,7 +4718,7 @@ static int txn_spill(MDBX_txn *const txn, MDBX_cursor *const m0, /* get min/max of LRU-labels */ uint32_t age_max = 0; - for (unsigned i = 1; i <= dl->length; ++i) { + for (size_t i = 1; i <= dl->length; ++i) { const uint32_t age = dpl_age(txn, i); age_max = (age_max >= age) ? 
age_max : age; } @@ -4729,7 +4729,7 @@ static int txn_spill(MDBX_txn *const txn, MDBX_cursor *const m0, pgno_t radix_counters[256], spillable = 0; memset(&radix_counters, 0, sizeof(radix_counters)); const uint32_t reciprocal = (UINT32_C(255) << 24) / (age_max + 1); - for (unsigned i = 1; i <= dl->length; ++i) { + for (size_t i = 1; i <= dl->length; ++i) { unsigned prio = spill_prio(txn, i, reciprocal); if (prio < 256) { radix_counters[prio] += 1; @@ -4738,8 +4738,8 @@ static int txn_spill(MDBX_txn *const txn, MDBX_cursor *const m0, } if (likely(spillable > 0)) { - unsigned prio2spill = 0, prio2adjacent = 128, amount = radix_counters[0]; - for (unsigned i = 1; i < 256; i++) { + size_t prio2spill = 0, prio2adjacent = 128, amount = radix_counters[0]; + for (size_t i = 1; i < 256; i++) { if (amount < wanna_spill) { prio2spill = i; prio2adjacent = i + (257 - i) / 2; @@ -4752,8 +4752,8 @@ static int txn_spill(MDBX_txn *const txn, MDBX_cursor *const m0, break; } - VERBOSE("prio2spill %u, prio2adjacent %u, spillable %u," - " wanna-spill %u, amount %u", + VERBOSE("prio2spill %zu, prio2adjacent %zu, spillable %u," + " wanna-spill %zu, amount %zu", prio2spill, prio2adjacent, spillable, wanna_spill, amount); tASSERT(txn, prio2spill < prio2adjacent && prio2adjacent <= 256); @@ -4764,8 +4764,8 @@ static int txn_spill(MDBX_txn *const txn, MDBX_cursor *const m0, if (unlikely(rc != MDBX_SUCCESS)) goto bailout; - unsigned prev_prio = 256; - unsigned r, w, prio; + unsigned prev_prio = 256, prio; + size_t r, w; pgno_t spilled_entries = 0, spilled_npages = 0; for (w = 0, r = 1; r <= dl->length && spilled_entries < wanna_spill; prev_prio = prio, ++r) { @@ -4840,26 +4840,27 @@ static int txn_spill(MDBX_txn *const txn, MDBX_cursor *const m0, pnl_sort(txn->tw.spill_pages, (size_t)txn->mt_next_pgno << 1); txn->mt_flags |= MDBX_TXN_SPILLS; } - NOTICE("spilled %u dirty-entries, now have %u dirty-room", spilled_entries, + NOTICE("spilled %u dirty-entries, now have %zu dirty-room", 
spilled_entries, txn->tw.dirtyroom); } else { tASSERT(txn, rc == MDBX_SUCCESS); - for (unsigned i = 1; i <= dl->length; ++i) { + for (size_t i = 1; i <= dl->length; ++i) { MDBX_page *dp = dl->items[i].ptr; - NOTICE("dirtylist[%u]: pgno %u, npages %u, flags 0x%04X, age %u, prio %u", - i, dp->mp_pgno, dpl_npages(dl, i), dp->mp_flags, dpl_age(txn, i), - spill_prio(txn, i, reciprocal)); + NOTICE( + "dirtylist[%zu]: pgno %u, npages %u, flags 0x%04X, age %u, prio %u", + i, dp->mp_pgno, dpl_npages(dl, i), dp->mp_flags, dpl_age(txn, i), + spill_prio(txn, i, reciprocal)); } } #if xMDBX_DEBUG_SPILLING == 2 if (txn->tw.loose_count + txn->tw.dirtyroom <= need / 2 + 1) - ERROR("dirty-list length: before %u, after %u, parent %i, loose %u; " - "needed %u, spillable %u; " - "spilled %u dirty-entries, now have %u dirty-room", + ERROR("dirty-list length: before %zu, after %zu, parent %zi, loose %zu; " + "needed %zu, spillable %u; " + "spilled %u dirty-entries, now have %zu dirty-room", dl->length + spilled, dl->length, (txn->mt_parent && txn->mt_parent->tw.dirtylist) - ? (int)txn->mt_parent->tw.dirtylist->length + ? (intptr_t)txn->mt_parent->tw.dirtylist->length : -1, txn->tw.loose_count, need, spillable, spilled, txn->tw.dirtyroom); ENSURE(txn->mt_env, txn->tw.loose_count + txn->tw.dirtyroom > need / 2); @@ -4877,7 +4878,7 @@ static int cursor_spill(MDBX_cursor *mc, const MDBX_val *key, MDBX_txn *txn = mc->mc_txn; /* Estimate how much space this operation will take: */ /* 1) Max b-tree height, reasonable enough with including dups' sub-tree */ - unsigned need = CURSOR_STACK + 3; + size_t need = CURSOR_STACK + 3; /* 2) GC/FreeDB for any payload */ if (mc->mc_dbi > FREE_DBI) { need += txn->mt_dbs[FREE_DBI].md_depth + 3; @@ -5152,8 +5153,7 @@ static txnid_t recent_committed_txnid(const MDBX_env *env) { return (m0 > m1) ? ((m0 > m2) ? m0 : m2) : ((m1 > m2) ? 
m1 : m2); } -static __inline bool meta_eq(const meta_troika_t *troika, unsigned a, - unsigned b) { +static __inline bool meta_eq(const meta_troika_t *troika, size_t a, size_t b) { assert(a < NUM_METAS && b < NUM_METAS); return troika->txnid[a] == troika->txnid[b] && (((troika->fsm >> a) ^ (troika->fsm >> b)) & 1) == 0 && @@ -5253,11 +5253,11 @@ static txnid_t find_oldest_reader(MDBX_env *const env, const txnid_t steady) { atomic_load32(&lck->mti_readers_refresh_flag, mo_AcquireRelease)) { lck->mti_readers_refresh_flag.weak = nothing_changed; jitter4testing(false); - const unsigned snap_nreaders = + const size_t snap_nreaders = atomic_load32(&lck->mti_numreaders, mo_AcquireRelease); new_oldest = steady; - for (unsigned i = 0; i < snap_nreaders; ++i) { + for (size_t i = 0; i < snap_nreaders; ++i) { const uint32_t pid = atomic_load32(&lck->mti_readers[i].mr_pid, mo_AcquireRelease); if (!pid) @@ -5270,7 +5270,7 @@ static txnid_t find_oldest_reader(MDBX_env *const env, const txnid_t steady) { atomic_load32(&lck->mti_readers_refresh_flag, mo_AcquireRelease)) && safe64_reset_compare(&lck->mti_readers[i].mr_txnid, rtxn)) { - NOTICE("kick stuck reader[%u of %u].pid_%u %" PRIaTXN + NOTICE("kick stuck reader[%zu of %zu].pid_%u %" PRIaTXN " < prev-oldest %" PRIaTXN ", steady-txn %" PRIaTXN, i, snap_nreaders, pid, rtxn, prev_oldest, steady); } @@ -5304,9 +5304,9 @@ __cold static pgno_t find_largest_snapshot(const MDBX_env *env, MDBX_lockinfo *const lck = env->me_lck_mmap.lck; if (likely(lck != NULL /* check for exclusive without-lck mode */)) { retry:; - const unsigned snap_nreaders = + const size_t snap_nreaders = atomic_load32(&lck->mti_numreaders, mo_AcquireRelease); - for (unsigned i = 0; i < snap_nreaders; ++i) { + for (size_t i = 0; i < snap_nreaders; ++i) { if (atomic_load32(&lck->mti_readers[i].mr_pid, mo_AcquireRelease)) { /* jitter4testing(true); */ const pgno_t snap_pages = atomic_load32( @@ -5329,7 +5329,7 @@ __cold static pgno_t find_largest_snapshot(const MDBX_env 
*env, /* Add a page to the txn's dirty list */ __hot static int __must_check_result page_dirty(MDBX_txn *txn, MDBX_page *mp, - unsigned npages) { + pgno_t npages) { #if xMDBX_DEBUG_SPILLING == 2 txn->mt_env->debug_dirtied_act += 1; ENSURE(txn->mt_env, @@ -5346,7 +5346,7 @@ __hot static int __must_check_result page_dirty(MDBX_txn *txn, MDBX_page *mp, rc = pnl_insert_range(&txn->tw.reclaimed_pglist, loose->mp_pgno, 1); if (unlikely(rc != MDBX_SUCCESS)) goto bailout; - unsigned di = dpl_search(txn, loose->mp_pgno); + size_t di = dpl_search(txn, loose->mp_pgno); tASSERT(txn, txn->tw.dirtylist->items[di].ptr == loose); dpl_remove(txn, di); txn->tw.loose_pages = loose->mp_next; @@ -5355,7 +5355,7 @@ __hot static int __must_check_result page_dirty(MDBX_txn *txn, MDBX_page *mp, if (!(txn->mt_flags & MDBX_WRITEMAP)) dpage_free(txn->mt_env, loose, 1); } else { - ERROR("Dirtyroom is depleted, DPL length %u", txn->tw.dirtylist->length); + ERROR("Dirtyroom is depleted, DPL length %zu", txn->tw.dirtylist->length); if (!(txn->mt_flags & MDBX_WRITEMAP)) dpage_free(txn->mt_env, mp, npages); return MDBX_TXN_FULL; @@ -5605,11 +5605,11 @@ __cold static int map_resize(MDBX_env *env, const pgno_t used_pgno, } /* looking for readers from this process */ - const unsigned snap_nreaders = + const size_t snap_nreaders = atomic_load32(&lck->mti_numreaders, mo_AcquireRelease); eASSERT(env, !implicit); mresize_flags |= MDBX_MRESIZE_MAY_UNMAP | MDBX_MRESIZE_MAY_MOVE; - for (unsigned i = 0; i < snap_nreaders; ++i) { + for (size_t i = 0; i < snap_nreaders; ++i) { if (lck->mti_readers[i].mr_pid.weak == env->me_pid && lck->mti_readers[i].mr_tid.weak != osal_thread_self()) { /* the base address of the mapping can't be changed since @@ -5831,7 +5831,7 @@ __cold static int wipe_steady(MDBX_txn *txn, txnid_t last_steady) { //------------------------------------------------------------------------------ MDBX_MAYBE_UNUSED __hot static pgno_t * -scan4seq_fallback(pgno_t *range, const size_t len, const 
unsigned seq) { +scan4seq_fallback(pgno_t *range, const size_t len, const size_t seq) { assert(seq > 0 && len > seq); #if MDBX_PNL_ASCENDING assert(range[-1] == len); @@ -5896,10 +5896,10 @@ scan4seq_fallback(pgno_t *range, const size_t len, const unsigned seq) { } MDBX_MAYBE_UNUSED static const pgno_t *scan4range_checker(const MDBX_PNL pnl, - const unsigned seq) { - size_t begin = MDBX_PNL_ASCENDING ? 1 : MDBX_PNL_SIZE(pnl); + const size_t seq) { + size_t begin = MDBX_PNL_ASCENDING ? 1 : MDBX_PNL_GETSIZE(pnl); #if MDBX_PNL_ASCENDING - while (seq <= MDBX_PNL_SIZE(pnl) - begin) { + while (seq <= MDBX_PNL_GETSIZE(pnl) - begin) { if (pnl[begin + seq] - pnl[begin] == seq) return pnl + begin; ++begin; @@ -5977,7 +5977,7 @@ diffcmp2mask_sse2(const pgno_t *const ptr, const ptrdiff_t offset, } MDBX_MAYBE_UNUSED __hot MDBX_ATTRIBUTE_TARGET_SSE2 static pgno_t * -scan4seq_sse2(pgno_t *range, const size_t len, const unsigned seq) { +scan4seq_sse2(pgno_t *range, const size_t len, const size_t seq) { assert(seq > 0 && len > seq); #if MDBX_PNL_ASCENDING #error "FIXME: Not implemented" @@ -6039,7 +6039,7 @@ diffcmp2mask_avx2(const pgno_t *const ptr, const ptrdiff_t offset, } MDBX_MAYBE_UNUSED __hot MDBX_ATTRIBUTE_TARGET_AVX2 static pgno_t * -scan4seq_avx2(pgno_t *range, const size_t len, const unsigned seq) { +scan4seq_avx2(pgno_t *range, const size_t len, const size_t seq) { assert(seq > 0 && len > seq); #if MDBX_PNL_ASCENDING #error "FIXME: Not implemented" @@ -6107,7 +6107,7 @@ diffcmp2mask_avx512bw(const pgno_t *const ptr, const ptrdiff_t offset, } MDBX_MAYBE_UNUSED __hot MDBX_ATTRIBUTE_TARGET_AVX512BW static pgno_t * -scan4seq_avx512bw(pgno_t *range, const size_t len, const unsigned seq) { +scan4seq_avx512bw(pgno_t *range, const size_t len, const size_t seq) { assert(seq > 0 && len > seq); #if MDBX_PNL_ASCENDING #error "FIXME: Not implemented" @@ -6187,7 +6187,7 @@ static __always_inline size_t diffcmp2mask_neon(const pgno_t *const ptr, } __hot static pgno_t 
*scan4seq_neon(pgno_t *range, const size_t len, - const unsigned seq) { + const size_t seq) { assert(seq > 0 && len > seq); #if MDBX_PNL_ASCENDING #error "FIXME: Not implemented" @@ -6267,13 +6267,13 @@ __hot static pgno_t *scan4seq_neon(pgno_t *range, const size_t len, /* Selecting the most appropriate implementation at runtime, * depending on the available CPU features. */ static pgno_t *scan4seq_resolver(pgno_t *range, const size_t len, - const unsigned seq); + const size_t seq); static pgno_t *(*scan4seq)(pgno_t *range, const size_t len, - const unsigned seq) = scan4seq_resolver; + const size_t seq) = scan4seq_resolver; static pgno_t *scan4seq_resolver(pgno_t *range, const size_t len, - const unsigned seq) { - pgno_t *(*choice)(pgno_t * range, const size_t len, const unsigned seq) = + const size_t seq) { + pgno_t *(*choice)(pgno_t * range, const size_t len, const size_t seq) = nullptr; #if __has_builtin(__builtin_cpu_init) || defined(__BUILTIN_CPU_INIT__) || \ __GNUC_PREREQ(4, 8) @@ -6334,7 +6334,7 @@ static pgr_t page_alloc_slowpath(MDBX_cursor *mc, const pgno_t num, int flags) { if (likely(flags & MDBX_ALLOC_GC)) { flags |= env->me_flags & MDBX_LIFORECLAIM; if (txn->mt_dbs[FREE_DBI].md_branch_pages && - MDBX_PNL_SIZE(txn->tw.reclaimed_pglist) < coalesce_threshold) + MDBX_PNL_GETSIZE(txn->tw.reclaimed_pglist) < coalesce_threshold) flags |= MDBX_ALLOC_COALESCE; if (unlikely( /* If mc is updating the GC, then the retired-list cannot play @@ -6352,7 +6352,7 @@ static pgr_t page_alloc_slowpath(MDBX_cursor *mc, const pgno_t num, int flags) { eASSERT(env, pnl_check_allocated(txn->tw.reclaimed_pglist, txn->mt_next_pgno - MDBX_ENABLE_REFUND)); pgno_t pgno, *re_list = txn->tw.reclaimed_pglist; - unsigned re_len = MDBX_PNL_SIZE(re_list); + size_t re_len = MDBX_PNL_GETSIZE(re_list); pgno_t *range = nullptr; txnid_t detent = 0, last = 0; #if MDBX_ENABLE_PGOP_STAT @@ -6461,7 +6461,7 @@ static pgr_t page_alloc_slowpath(MDBX_cursor *mc, const pgno_t num, int flags) { /* skip 
IDs of records that already reclaimed */ if (txn->tw.lifo_reclaimed) { size_t i; - for (i = (size_t)MDBX_PNL_SIZE(txn->tw.lifo_reclaimed); i > 0; --i) + for (i = MDBX_PNL_GETSIZE(txn->tw.lifo_reclaimed); i > 0; --i) if (txn->tw.lifo_reclaimed[i] == last) break; if (i) @@ -6495,24 +6495,24 @@ static pgr_t page_alloc_slowpath(MDBX_cursor *mc, const pgno_t num, int flags) { ret.err = MDBX_CORRUPTED; goto fail; } - const unsigned gc_len = MDBX_PNL_SIZE(gc_pnl); - if (unlikely(/* list is too long already */ MDBX_PNL_SIZE( + const size_t gc_len = MDBX_PNL_GETSIZE(gc_pnl); + if (unlikely(/* list is too long already */ MDBX_PNL_GETSIZE( txn->tw.reclaimed_pglist) >= env->me_options.rp_augment_limit) && ((/* not a slot-request from gc-update */ (flags & MDBX_ALLOC_SLOT) == 0 && /* have enough unallocated space */ txn->mt_geo.upper >= txn->mt_next_pgno + (size_t)num) || - gc_len + MDBX_PNL_SIZE(txn->tw.reclaimed_pglist) >= + gc_len + MDBX_PNL_GETSIZE(txn->tw.reclaimed_pglist) >= MDBX_PGL_LIMIT)) { /* Stop reclaiming to avoid large/overflow the page list. * This is a rare case while search for a continuously multi-page region * in a large database. 
* todo4recovery://erased_by_github/libmdbx/issues/123 */ - NOTICE("stop reclaiming to avoid PNL overflow: %u (current) + %u " - "(chunk) -> %u", - MDBX_PNL_SIZE(txn->tw.reclaimed_pglist), gc_len, - gc_len + MDBX_PNL_SIZE(txn->tw.reclaimed_pglist)); + NOTICE("stop reclaiming to avoid PNL overflow: %zu (current) + %zu " + "(chunk) -> %zu", + MDBX_PNL_GETSIZE(txn->tw.reclaimed_pglist), gc_len, + gc_len + MDBX_PNL_GETSIZE(txn->tw.reclaimed_pglist)); flags &= ~(MDBX_ALLOC_GC | MDBX_ALLOC_COALESCE); break; } @@ -6530,9 +6530,9 @@ static pgr_t page_alloc_slowpath(MDBX_cursor *mc, const pgno_t num, int flags) { txn->tw.last_reclaimed = last; if (LOG_ENABLED(MDBX_LOG_EXTRA)) { - DEBUG_EXTRA("PNL read txn %" PRIaTXN " root %" PRIaPGNO " num %u, PNL", + DEBUG_EXTRA("PNL read txn %" PRIaTXN " root %" PRIaPGNO " len %zu, PNL", last, txn->mt_dbs[FREE_DBI].md_root, gc_len); - for (unsigned i = gc_len; i; i--) + for (size_t i = gc_len; i; i--) DEBUG_EXTRA_PRINT(" %" PRIaPGNO, gc_pnl[i]); DEBUG_EXTRA_PRINT("%s\n", "."); } @@ -6545,14 +6545,14 @@ static pgr_t page_alloc_slowpath(MDBX_cursor *mc, const pgno_t num, int flags) { } tASSERT(txn, dirtylist_check(txn)); - re_len = MDBX_PNL_SIZE(re_list); + re_len = MDBX_PNL_GETSIZE(re_list); tASSERT(txn, re_len == 0 || re_list[re_len] < txn->mt_next_pgno); if (MDBX_ENABLE_REFUND && re_len && unlikely(MDBX_PNL_MOST(re_list) == txn->mt_next_pgno - 1)) { /* Refund suitable pages into "unallocated" space */ txn_refund(txn); re_list = txn->tw.reclaimed_pglist; - re_len = MDBX_PNL_SIZE(re_list); + re_len = MDBX_PNL_GETSIZE(re_list); } /* Done for a kick-reclaim mode, actually no page needed */ @@ -6762,11 +6762,11 @@ done: for (const pgno_t *const end = re_list + re_len; ++range <= end;) range[-(ptrdiff_t)num] = *range; #endif - MDBX_PNL_SIZE(re_list) = re_len -= num; + MDBX_PNL_SETSIZE(re_list, re_len -= num); tASSERT(txn, pnl_check_allocated(txn->tw.reclaimed_pglist, txn->mt_next_pgno - MDBX_ENABLE_REFUND)); } else { - txn->mt_next_pgno = pgno 
+ num; + txn->mt_next_pgno = (pgno_t)(pgno + num); eASSERT(env, txn->mt_next_pgno <= txn->mt_end_pgno); } @@ -6778,7 +6778,7 @@ done: ret.page->mp_leaf2_ksize = 0; ret.page->mp_flags = 0; if ((ASSERT_ENABLED() || AUDIT_ENABLED()) && num > 1) { - ret.page->mp_pages = num; + ret.page->mp_pages = (pgno_t)num; ret.page->mp_flags = P_OVERFLOW; } ret.err = page_dirty(txn, ret.page, num); @@ -6818,12 +6818,12 @@ __hot static pgr_t page_alloc(MDBX_cursor *mc) { if (likely(!(mc->mc_flags & C_GCFREEZE))) { MDBX_PNL pnl = txn->tw.reclaimed_pglist; - const unsigned len = MDBX_PNL_SIZE(pnl); + const size_t len = MDBX_PNL_GETSIZE(pnl); if (likely(len > 0)) { - MDBX_PNL_SIZE(pnl) = len - 1; + MDBX_PNL_SETSIZE(pnl, len - 1); #if MDBX_PNL_ASCENDING const pgno_t pgno = pnl[1]; - for (unsigned i = 1; i < len; ++i) + for (size_t i = 1; i < len; ++i) pnl[i] = pnl[i + 1]; #else const pgno_t pgno = pnl[len]; @@ -6893,7 +6893,7 @@ static pgr_t __must_check_result page_unspill(MDBX_txn *const txn, pgr_t ret; do { tASSERT(txn, (scan->mt_flags & MDBX_TXN_SPILLS) != 0); - const unsigned si = search_spilled(scan, mp->mp_pgno); + const size_t si = search_spilled(scan, mp->mp_pgno); if (!si) continue; const unsigned npages = IS_OVERFLOW(mp) ? mp->mp_pages : 1; @@ -7328,7 +7328,7 @@ static int cursor_shadow(MDBX_txn *parent, MDBX_txn *nested) { * * Returns 0 on success, non-zero on failure. 
*/ static void cursors_eot(MDBX_txn *txn, const bool merge) { - for (int i = txn->mt_numdbs; --i >= 0;) { + for (intptr_t i = txn->mt_numdbs; --i >= 0;) { MDBX_cursor *next, *mc = txn->mt_cursors[i]; if (!mc) continue; @@ -7339,7 +7339,7 @@ static void cursors_eot(MDBX_txn *txn, const bool merge) { next = mc->mc_next; ENSURE(txn->mt_env, stage == MDBX_MC_LIVE || (stage == MDBX_MC_WAIT4EOT && bk)); - cASSERT(mc, mc->mc_dbi == (unsigned)i); + cASSERT(mc, mc->mc_dbi == (MDBX_dbi)i); if (bk) { MDBX_xcursor *mx = mc->mc_xcursor; cASSERT(mc, mx == bk->mc_xcursor); @@ -7383,9 +7383,9 @@ static void cursors_eot(MDBX_txn *txn, const bool merge) { static pgno_t find_largest_this(MDBX_env *env, pgno_t largest) { MDBX_lockinfo *const lck = env->me_lck_mmap.lck; if (likely(lck != NULL /* exclusive mode */)) { - const unsigned snap_nreaders = + const size_t snap_nreaders = atomic_load32(&lck->mti_numreaders, mo_AcquireRelease); - for (unsigned i = 0; i < snap_nreaders; ++i) { + for (size_t i = 0; i < snap_nreaders; ++i) { retry: if (atomic_load32(&lck->mti_readers[i].mr_pid, mo_AcquireRelease) == env->me_pid) { @@ -7491,7 +7491,7 @@ static bind_rslot_result bind_rslot(MDBX_env *env, const uintptr_t tid) { } result.err = MDBX_SUCCESS; - unsigned slot, nreaders; + size_t slot, nreaders; while (1) { nreaders = env->me_lck->mti_numreaders.weak; for (slot = 0; slot < nreaders; slot++) @@ -7520,7 +7520,7 @@ static bind_rslot_result bind_rslot(MDBX_env *env, const uintptr_t tid) { atomic_store32(&result.rslot->mr_pid, 0, mo_AcquireRelease); safe64_reset(&result.rslot->mr_txnid, true); if (slot == nreaders) - env->me_lck->mti_numreaders.weak = ++nreaders; + env->me_lck->mti_numreaders.weak = (uint32_t)++nreaders; result.rslot->mr_tid.weak = (env->me_flags & MDBX_NOTLS) ? 
0 : tid; atomic_store32(&result.rslot->mr_pid, env->me_pid, mo_AcquireRelease); osal_rdt_unlock(env); @@ -7809,7 +7809,7 @@ static int txn_renew(MDBX_txn *txn, const unsigned flags) { /* Seek & fetch the last meta */ uint64_t timestamp = 0; - unsigned loop = 0; + size_t loop = 0; meta_troika_t troika = meta_tap(env); while (1) { const meta_ptr_t head = @@ -7902,9 +7902,9 @@ static int txn_renew(MDBX_txn *txn, const unsigned flags) { MDBX_lockinfo *const lck = env->me_lck_mmap.lck; if (lck && (env->me_flags & MDBX_NOTLS) == 0 && (runtime_flags & MDBX_DBG_LEGACY_OVERLAP) == 0) { - const unsigned snap_nreaders = + const size_t snap_nreaders = atomic_load32(&lck->mti_numreaders, mo_AcquireRelease); - for (unsigned i = 0; i < snap_nreaders; ++i) { + for (size_t i = 0; i < snap_nreaders; ++i) { if (atomic_load32(&lck->mti_readers[i].mr_pid, mo_Relaxed) == env->me_pid && unlikely(atomic_load64(&lck->mti_readers[i].mr_tid, mo_Relaxed) == @@ -7960,12 +7960,12 @@ static int txn_renew(MDBX_txn *txn, const unsigned flags) { #if MDBX_ENABLE_REFUND txn->tw.loose_refund_wl = 0; #endif /* MDBX_ENABLE_REFUND */ - MDBX_PNL_SIZE(txn->tw.retired_pages) = 0; + MDBX_PNL_SETSIZE(txn->tw.retired_pages, 0); txn->tw.spill_pages = NULL; txn->tw.spill_least_removed = 0; txn->tw.last_reclaimed = 0; if (txn->tw.lifo_reclaimed) - MDBX_PNL_SIZE(txn->tw.lifo_reclaimed) = 0; + MDBX_PNL_SETSIZE(txn->tw.lifo_reclaimed, 0); env->me_txn = txn; txn->mt_numdbs = env->me_numdbs; memcpy(txn->mt_dbiseqs, env->me_dbiseqs, txn->mt_numdbs * sizeof(unsigned)); @@ -7984,7 +7984,7 @@ static int txn_renew(MDBX_txn *txn, const unsigned flags) { /* Setup db info */ osal_compiler_barrier(); memset(txn->mt_cursors, 0, sizeof(MDBX_cursor *) * txn->mt_numdbs); - for (unsigned i = CORE_DBS; i < txn->mt_numdbs; i++) { + for (size_t i = CORE_DBS; i < txn->mt_numdbs; i++) { const unsigned db_flags = env->me_dbflags[i]; txn->mt_dbs[i].md_flags = db_flags & DB_PERSISTENT_FLAGS; txn->mt_dbistate[i] = @@ -8136,7 +8136,7 @@ void 
*mdbx_txn_get_userctx(const MDBX_txn *txn) { int mdbx_txn_begin_ex(MDBX_env *env, MDBX_txn *parent, MDBX_txn_flags_t flags, MDBX_txn **ret, void *context) { MDBX_txn *txn; - unsigned size, tsize; + size_t size, tsize; if (unlikely(!ret)) return MDBX_EINVAL; @@ -8210,8 +8210,8 @@ int mdbx_txn_begin_ex(MDBX_env *env, MDBX_txn *parent, MDBX_txn_flags_t flags, txn->mt_geo = parent->mt_geo; rc = dpl_alloc(txn); if (likely(rc == MDBX_SUCCESS)) { - const unsigned len = - MDBX_PNL_SIZE(parent->tw.reclaimed_pglist) + parent->tw.loose_count; + const size_t len = MDBX_PNL_GETSIZE(parent->tw.reclaimed_pglist) + + parent->tw.loose_count; txn->tw.reclaimed_pglist = pnl_alloc((len > MDBX_PNL_INITIAL) ? len : MDBX_PNL_INITIAL); if (unlikely(!txn->tw.reclaimed_pglist)) @@ -8229,7 +8229,7 @@ int mdbx_txn_begin_ex(MDBX_env *env, MDBX_txn *parent, MDBX_txn_flags_t flags, if (parent->tw.loose_count) { do { MDBX_page *lp = parent->tw.loose_pages; - const unsigned di = dpl_exist(parent, lp->mp_pgno); + const size_t di = dpl_exist(parent, lp->mp_pgno); tASSERT(parent, di && parent->tw.dirtylist->items[di].ptr == lp); tASSERT(parent, lp->mp_flags == P_LOOSE); rc = pnl_insert_range(&parent->tw.reclaimed_pglist, lp->mp_pgno, 1); @@ -8253,7 +8253,7 @@ int mdbx_txn_begin_ex(MDBX_env *env, MDBX_txn *parent, MDBX_txn_flags_t flags, spill_purge(parent); tASSERT(txn, MDBX_PNL_ALLOCLEN(txn->tw.reclaimed_pglist) >= - MDBX_PNL_SIZE(parent->tw.reclaimed_pglist)); + MDBX_PNL_GETSIZE(parent->tw.reclaimed_pglist)); memcpy(txn->tw.reclaimed_pglist, parent->tw.reclaimed_pglist, MDBX_PNL_SIZEOF(parent->tw.reclaimed_pglist)); eASSERT(env, pnl_check_allocated( @@ -8267,12 +8267,12 @@ int mdbx_txn_begin_ex(MDBX_env *env, MDBX_txn *parent, MDBX_txn_flags_t flags, if (parent->tw.lifo_reclaimed) { txn->tw.lifo_reclaimed = parent->tw.lifo_reclaimed; parent->tw.lifo_reclaimed = - (void *)(intptr_t)MDBX_PNL_SIZE(parent->tw.lifo_reclaimed); + (void *)(intptr_t)MDBX_PNL_GETSIZE(parent->tw.lifo_reclaimed); } 
txn->tw.retired_pages = parent->tw.retired_pages; parent->tw.retired_pages = - (void *)(intptr_t)MDBX_PNL_SIZE(parent->tw.retired_pages); + (void *)(intptr_t)MDBX_PNL_GETSIZE(parent->tw.retired_pages); txn->mt_txnid = parent->mt_txnid; txn->mt_front = parent->mt_front + 1; @@ -8288,7 +8288,7 @@ int mdbx_txn_begin_ex(MDBX_env *env, MDBX_txn *parent, MDBX_txn_flags_t flags, memcpy(txn->mt_dbs, parent->mt_dbs, txn->mt_numdbs * sizeof(MDBX_db)); txn->tw.troika = parent->tw.troika; /* Copy parent's mt_dbistate, but clear DB_NEW */ - for (unsigned i = 0; i < txn->mt_numdbs; i++) + for (size_t i = 0; i < txn->mt_numdbs; i++) txn->mt_dbistate[i] = parent->mt_dbistate[i] & ~(DBI_FRESH | DBI_CREAT | DBI_DIRTY); tASSERT(parent, @@ -8390,9 +8390,9 @@ int mdbx_txn_info(const MDBX_txn *txn, MDBX_txn_info *info, bool scan_rlt) { if (scan_rlt && info->txn_reader_lag > 1 && lck) { /* find next more recent reader */ txnid_t next_reader = head.txnid; - const unsigned snap_nreaders = + const size_t snap_nreaders = atomic_load32(&lck->mti_numreaders, mo_AcquireRelease); - for (unsigned i = 0; i < snap_nreaders; ++i) { + for (size_t i = 0; i < snap_nreaders; ++i) { retry: if (atomic_load32(&lck->mti_readers[i].mr_pid, mo_AcquireRelease)) { jitter4testing(true); @@ -8428,8 +8428,8 @@ int mdbx_txn_info(const MDBX_txn *txn, MDBX_txn_info *info, bool scan_rlt) { info->txn_space_limit_soft = pgno2bytes(env, txn->mt_geo.now); info->txn_space_limit_hard = pgno2bytes(env, txn->mt_geo.upper); info->txn_space_retired = pgno2bytes( - env, txn->mt_child ? (unsigned)(uintptr_t)txn->tw.retired_pages - : MDBX_PNL_SIZE(txn->tw.retired_pages)); + env, txn->mt_child ? 
(size_t)txn->tw.retired_pages + : MDBX_PNL_GETSIZE(txn->tw.retired_pages)); info->txn_space_leftover = pgno2bytes(env, txn->tw.dirtyroom); info->txn_space_dirty = pgno2bytes(env, txn->mt_env->me_options.dp_limit - txn->tw.dirtyroom); @@ -8437,14 +8437,14 @@ int mdbx_txn_info(const MDBX_txn *txn, MDBX_txn_info *info, bool scan_rlt) { MDBX_lockinfo *const lck = env->me_lck_mmap.lck; if (scan_rlt && lck) { txnid_t oldest_snapshot = txn->mt_txnid; - const unsigned snap_nreaders = + const size_t snap_nreaders = atomic_load32(&lck->mti_numreaders, mo_AcquireRelease); if (snap_nreaders) { oldest_snapshot = txn_oldest_reader(txn); if (oldest_snapshot == txn->mt_txnid - 1) { /* check if there is at least one reader */ bool exists = false; - for (unsigned i = 0; i < snap_nreaders; ++i) { + for (size_t i = 0; i < snap_nreaders; ++i) { if (atomic_load32(&lck->mti_readers[i].mr_pid, mo_Relaxed) && txn->mt_txnid > safe64_read(&lck->mti_readers[i].mr_txnid)) { exists = true; @@ -8484,7 +8484,7 @@ int mdbx_txn_flags(const MDBX_txn *txn) { } /* Check for misused dbi handles */ -static __inline bool dbi_changed(MDBX_txn *txn, MDBX_dbi dbi) { +static __inline bool dbi_changed(MDBX_txn *txn, size_t dbi) { if (txn->mt_dbiseqs == txn->mt_env->me_dbiseqs) return false; if (likely( @@ -8495,15 +8495,15 @@ static __inline bool dbi_changed(MDBX_txn *txn, MDBX_dbi dbi) { return true; } -static __inline unsigned dbi_seq(const MDBX_env *const env, unsigned slot) { +static __inline unsigned dbi_seq(const MDBX_env *const env, size_t slot) { unsigned v = env->me_dbiseqs[slot].weak + 1; return v + (v == 0); } static void dbi_import_locked(MDBX_txn *txn) { const MDBX_env *const env = txn->mt_env; - unsigned n = env->me_numdbs; - for (unsigned i = CORE_DBS; i < n; ++i) { + size_t n = env->me_numdbs; + for (size_t i = CORE_DBS; i < n; ++i) { if (i >= txn->mt_numdbs) { txn->mt_cursors[i] = NULL; if (txn->mt_dbiseqs != env->me_dbiseqs) @@ -8538,7 +8538,7 @@ static void dbi_import_locked(MDBX_txn *txn) 
{ } ++n; } - txn->mt_numdbs = n; + txn->mt_numdbs = (MDBX_dbi)n; } /* Import DBI which opened after txn started into context */ @@ -8563,7 +8563,7 @@ static void dbi_update(MDBX_txn *txn, int keep) { bool locked = false; MDBX_env *const env = txn->mt_env; - for (unsigned i = n; --i >= CORE_DBS;) { + for (size_t i = n; --i >= CORE_DBS;) { if (likely((txn->mt_dbistate[i] & DBI_CREAT) == 0)) continue; if (!locked) { @@ -8608,19 +8608,19 @@ static void dbi_update(MDBX_txn *txn, int keep) { /* Filter-out pgno list from transaction's dirty-page list */ static void dpl_sift(MDBX_txn *const txn, MDBX_PNL pl, const bool spilled) { - if (MDBX_PNL_SIZE(pl) && txn->tw.dirtylist->length) { + if (MDBX_PNL_GETSIZE(pl) && txn->tw.dirtylist->length) { tASSERT(txn, pnl_check_allocated(pl, (size_t)txn->mt_next_pgno << spilled)); MDBX_dpl *dl = dpl_sort(txn); /* Scanning in ascend order */ - const int step = MDBX_PNL_ASCENDING ? 1 : -1; - const int begin = MDBX_PNL_ASCENDING ? 1 : MDBX_PNL_SIZE(pl); - const int end = MDBX_PNL_ASCENDING ? MDBX_PNL_SIZE(pl) + 1 : 0; + const intptr_t step = MDBX_PNL_ASCENDING ? 1 : -1; + const intptr_t begin = MDBX_PNL_ASCENDING ? 1 : MDBX_PNL_GETSIZE(pl); + const intptr_t end = MDBX_PNL_ASCENDING ? 
MDBX_PNL_GETSIZE(pl) + 1 : 0; tASSERT(txn, pl[begin] <= pl[end - step]); - unsigned r = dpl_search(txn, pl[begin] >> spilled); + size_t w, r = dpl_search(txn, pl[begin] >> spilled); tASSERT(txn, dl->sorted == dl->length); - for (int i = begin; r <= dl->length;) { /* scan loop */ + for (intptr_t i = begin; r <= dl->length;) { /* scan loop */ assert(i != end); tASSERT(txn, !spilled || (pl[i] & 1) == 0); pgno_t pl_pgno = pl[i] >> spilled; @@ -8635,7 +8635,8 @@ static void dpl_sift(MDBX_txn *const txn, MDBX_PNL pl, const bool spilled) { } /* update loop */ - unsigned npages, w = r; + unsigned npages; + w = r; remove_dl: npages = dpl_npages(dl, r); dl->pages_including_loose -= npages; @@ -8767,18 +8768,18 @@ static int txn_end(MDBX_txn *txn, const unsigned mode) { sizeof(meta_troika_t)) == 0); if (txn->tw.lifo_reclaimed) { - eASSERT(env, MDBX_PNL_SIZE(txn->tw.lifo_reclaimed) >= - (unsigned)(uintptr_t)parent->tw.lifo_reclaimed); - MDBX_PNL_SIZE(txn->tw.lifo_reclaimed) = - (unsigned)(uintptr_t)parent->tw.lifo_reclaimed; + eASSERT(env, MDBX_PNL_GETSIZE(txn->tw.lifo_reclaimed) >= + (uintptr_t)parent->tw.lifo_reclaimed); + MDBX_PNL_SETSIZE(txn->tw.lifo_reclaimed, + (uintptr_t)parent->tw.lifo_reclaimed); parent->tw.lifo_reclaimed = txn->tw.lifo_reclaimed; } if (txn->tw.retired_pages) { - eASSERT(env, MDBX_PNL_SIZE(txn->tw.retired_pages) >= - (unsigned)(uintptr_t)parent->tw.retired_pages); - MDBX_PNL_SIZE(txn->tw.retired_pages) = - (unsigned)(uintptr_t)parent->tw.retired_pages; + eASSERT(env, MDBX_PNL_GETSIZE(txn->tw.retired_pages) >= + (uintptr_t)parent->tw.retired_pages); + MDBX_PNL_SETSIZE(txn->tw.retired_pages, + (uintptr_t)parent->tw.retired_pages); parent->tw.retired_pages = txn->tw.retired_pages; } @@ -8882,20 +8883,19 @@ int mdbx_txn_abort(MDBX_txn *txn) { /* Count all the pages in each DB and in the GC and make sure * it matches the actual number of pages being used. 
*/ -__cold static int audit_ex(MDBX_txn *txn, unsigned retired_stored, +__cold static int audit_ex(MDBX_txn *txn, size_t retired_stored, bool dont_filter_gc) { - pgno_t pending = 0; - if ((txn->mt_flags & MDBX_TXN_RDONLY) == 0) { - pending = txn->tw.loose_count + MDBX_PNL_SIZE(txn->tw.reclaimed_pglist) + - (MDBX_PNL_SIZE(txn->tw.retired_pages) - retired_stored); - } + size_t pending = 0; + if ((txn->mt_flags & MDBX_TXN_RDONLY) == 0) + pending = txn->tw.loose_count + MDBX_PNL_GETSIZE(txn->tw.reclaimed_pglist) + + (MDBX_PNL_GETSIZE(txn->tw.retired_pages) - retired_stored); MDBX_cursor_couple cx; int rc = cursor_init(&cx.outer, txn, FREE_DBI); if (unlikely(rc != MDBX_SUCCESS)) return rc; - pgno_t gc = 0; + size_t gc = 0; MDBX_val key, data; while ((rc = mdbx_cursor_get(&cx.outer, &key, &data, MDBX_NEXT)) == 0) { if (!dont_filter_gc) { @@ -8903,7 +8903,7 @@ __cold static int audit_ex(MDBX_txn *txn, unsigned retired_stored, return MDBX_CORRUPTED; txnid_t id = unaligned_peek_u64(4, key.iov_base); if (txn->tw.lifo_reclaimed) { - for (unsigned i = 1; i <= MDBX_PNL_SIZE(txn->tw.lifo_reclaimed); ++i) + for (size_t i = 1; i <= MDBX_PNL_GETSIZE(txn->tw.lifo_reclaimed); ++i) if (id == txn->tw.lifo_reclaimed[i]) goto skip; } else if (id <= txn->tw.last_reclaimed) @@ -8915,11 +8915,11 @@ __cold static int audit_ex(MDBX_txn *txn, unsigned retired_stored, } tASSERT(txn, rc == MDBX_NOTFOUND); - for (MDBX_dbi i = FREE_DBI; i < txn->mt_numdbs; i++) + for (size_t i = FREE_DBI; i < txn->mt_numdbs; i++) txn->mt_dbistate[i] &= ~DBI_AUDITED; - pgno_t used = NUM_METAS; - for (MDBX_dbi i = FREE_DBI; i <= MAIN_DBI; i++) { + size_t used = NUM_METAS; + for (size_t i = FREE_DBI; i <= MAIN_DBI; i++) { if (!(txn->mt_dbistate[i] & DBI_VALID)) continue; rc = cursor_init(&cx.outer, txn, i); @@ -8936,7 +8936,7 @@ __cold static int audit_ex(MDBX_txn *txn, unsigned retired_stored, rc = page_search(&cx.outer, NULL, MDBX_PS_FIRST); while (rc == MDBX_SUCCESS) { MDBX_page *mp = 
cx.outer.mc_pg[cx.outer.mc_top]; - for (unsigned j = 0; j < page_numkeys(mp); j++) { + for (size_t j = 0; j < page_numkeys(mp); j++) { MDBX_node *node = page_node(mp, j); if (node_flags(node) == F_SUBDATA) { if (unlikely(node_ds(node) != sizeof(MDBX_db))) @@ -8966,7 +8966,7 @@ __cold static int audit_ex(MDBX_txn *txn, unsigned retired_stored, tASSERT(txn, rc == MDBX_NOTFOUND); } - for (MDBX_dbi i = FREE_DBI; i < txn->mt_numdbs; i++) { + for (size_t i = FREE_DBI; i < txn->mt_numdbs; i++) { if ((txn->mt_dbistate[i] & (DBI_VALID | DBI_AUDITED | DBI_STALE)) != DBI_VALID) continue; @@ -8979,7 +8979,7 @@ __cold static int audit_ex(MDBX_txn *txn, unsigned retired_stored, } if (!(txn->mt_dbistate[i] & DBI_AUDITED)) { WARNING("audit %s@%" PRIaTXN - ": unable account dbi %d / \"%*s\", state 0x%02x", + ": unable account dbi %zd / \"%*s\", state 0x%02x", txn->mt_parent ? "nested-" : "", txn->mt_txnid, i, (int)txn->mt_dbxs[i].md_name.iov_len, (const char *)txn->mt_dbxs[i].md_name.iov_base, @@ -8991,23 +8991,23 @@ __cold static int audit_ex(MDBX_txn *txn, unsigned retired_stored, return MDBX_SUCCESS; if ((txn->mt_flags & MDBX_TXN_RDONLY) == 0) - ERROR("audit @%" PRIaTXN ": %u(pending) = %u(loose) + " - "%u(reclaimed) + %u(retired-pending) - %u(retired-stored)", + ERROR("audit @%" PRIaTXN ": %zu(pending) = %zu(loose) + " + "%zu(reclaimed) + %zu(retired-pending) - %zu(retired-stored)", txn->mt_txnid, pending, txn->tw.loose_count, - MDBX_PNL_SIZE(txn->tw.reclaimed_pglist), - txn->tw.retired_pages ? MDBX_PNL_SIZE(txn->tw.retired_pages) : 0, + MDBX_PNL_GETSIZE(txn->tw.reclaimed_pglist), + txn->tw.retired_pages ? 
MDBX_PNL_GETSIZE(txn->tw.retired_pages) : 0, retired_stored); - ERROR("audit @%" PRIaTXN ": %" PRIaPGNO "(pending) + %" PRIaPGNO - "(gc) + %" PRIaPGNO "(count) = %" PRIaPGNO "(total) <> %" PRIaPGNO + ERROR("audit @%" PRIaTXN ": %zu(pending) + %zu" + "(gc) + %zu(count) = %zu(total) <> %zu" "(allocated)", txn->mt_txnid, pending, gc, used, pending + gc + used, - txn->mt_next_pgno); + (size_t)txn->mt_next_pgno); return MDBX_PROBLEM; } typedef struct gc_update_context { - unsigned retired_stored, loop; - unsigned settled, cleaned_slot, reused_slot, filled_slot; + size_t retired_stored, loop; + size_t settled, cleaned_slot, reused_slot, filled_slot; txnid_t cleaned_id, rid; bool lifo, dense; #if MDBX_ENABLE_BIGFOOT @@ -9025,8 +9025,8 @@ static __inline int gcu_context_init(MDBX_txn *txn, gcu_context_t *ctx) { return cursor_init(&ctx->cursor.outer, txn, FREE_DBI); } -static __always_inline unsigned gcu_backlog_size(MDBX_txn *txn) { - return MDBX_PNL_SIZE(txn->tw.reclaimed_pglist) + txn->tw.loose_count; +static __always_inline size_t gcu_backlog_size(MDBX_txn *txn) { + return MDBX_PNL_GETSIZE(txn->tw.reclaimed_pglist) + txn->tw.loose_count; } static int gcu_clean_stored_retired(MDBX_txn *txn, gcu_context_t *ctx) { @@ -9045,7 +9045,7 @@ static int gcu_clean_stored_retired(MDBX_txn *txn, gcu_context_t *ctx) { if (csr.err == MDBX_SUCCESS && csr.exact) { ctx->retired_stored = 0; err = mdbx_cursor_del(&ctx->cursor.outer, 0); - TRACE("== clear-4linear, backlog %u, err %d", gcu_backlog_size(txn), + TRACE("== clear-4linear, backlog %zu, err %d", gcu_backlog_size(txn), err); } } @@ -9062,12 +9062,12 @@ static int gcu_clean_stored_retired(MDBX_txn *txn, gcu_context_t *ctx) { * during a deleting, when GC tree is unbalanced. */ static int gcu_prepare_backlog(MDBX_txn *txn, gcu_context_t *ctx, const bool reserve4retired) { - const unsigned pages4retiredlist = + const size_t pages4retiredlist = reserve4retired ? 
number_of_ovpages( txn->mt_env, MDBX_PNL_SIZEOF(txn->tw.retired_pages)) : 0; - const unsigned backlog4cow = txn->mt_dbs[FREE_DBI].md_depth; - const unsigned backlog4rebalance = backlog4cow + 1; + const size_t backlog4cow = txn->mt_dbs[FREE_DBI].md_depth; + const size_t backlog4rebalance = backlog4cow + 1; if (likely(pages4retiredlist < 2 && gcu_backlog_size(txn) > (reserve4retired @@ -9075,9 +9075,10 @@ static int gcu_prepare_backlog(MDBX_txn *txn, gcu_context_t *ctx, : (backlog4cow + backlog4rebalance)))) return MDBX_SUCCESS; - TRACE(">> reserve4retired %c, backlog %u, 4list %u, 4cow %u, 4rebalance %u", - reserve4retired ? 'Y' : 'N', gcu_backlog_size(txn), pages4retiredlist, - backlog4cow, backlog4rebalance); + TRACE( + ">> reserve4retired %c, backlog %zu, 4list %zu, 4cow %zu, 4rebalance %zu", + reserve4retired ? 'Y' : 'N', gcu_backlog_size(txn), pages4retiredlist, + backlog4cow, backlog4rebalance); int err; if (unlikely(pages4retiredlist > 2)) { @@ -9092,19 +9093,19 @@ static int gcu_prepare_backlog(MDBX_txn *txn, gcu_context_t *ctx, ctx->cursor.outer.mc_flags &= ~C_RECLAIMING; err = cursor_touch(&ctx->cursor.outer); - TRACE("== after-touch, backlog %u, err %d", gcu_backlog_size(txn), err); + TRACE("== after-touch, backlog %zu, err %d", gcu_backlog_size(txn), err); if (unlikely(pages4retiredlist > 1) && - MDBX_PNL_SIZE(txn->tw.retired_pages) != ctx->retired_stored && + MDBX_PNL_GETSIZE(txn->tw.retired_pages) != ctx->retired_stored && err == MDBX_SUCCESS) { tASSERT(txn, reserve4retired); err = gcu_clean_stored_retired(txn, ctx); if (unlikely(err != MDBX_SUCCESS)) return err; - err = page_alloc_slowpath(&ctx->cursor.outer, pages4retiredlist, + err = page_alloc_slowpath(&ctx->cursor.outer, (pgno_t)pages4retiredlist, MDBX_ALLOC_GC | MDBX_ALLOC_FAKE) .err; - TRACE("== after-4linear, backlog %u, err %d", gcu_backlog_size(txn), err); + TRACE("== after-4linear, backlog %zu, err %d", gcu_backlog_size(txn), err); cASSERT(&ctx->cursor.outer, gcu_backlog_size(txn) >= 
pages4retiredlist || err != MDBX_SUCCESS); } @@ -9117,7 +9118,7 @@ static int gcu_prepare_backlog(MDBX_txn *txn, gcu_context_t *ctx, .err; ctx->cursor.outer.mc_flags |= C_RECLAIMING; - TRACE("<< backlog %u, err %d", gcu_backlog_size(txn), err); + TRACE("<< backlog %zu, err %d", gcu_backlog_size(txn), err); return (err != MDBX_NOTFOUND) ? err : MDBX_SUCCESS; } @@ -9161,7 +9162,7 @@ retry: txn->mt_next_pgno - MDBX_ENABLE_REFUND)); tASSERT(txn, dirtylist_check(txn)); if (unlikely(/* paranoia */ ctx->loop > ((MDBX_DEBUG > 0) ? 12 : 42))) { - ERROR("too more loops %u, bailout", ctx->loop); + ERROR("too more loops %zu, bailout", ctx->loop); rc = MDBX_PROBLEM; goto bailout; } @@ -9183,8 +9184,8 @@ retry: MDBX_val key, data; TRACE("%s", " >> continue"); - if (ctx->retired_stored != MDBX_PNL_SIZE(txn->tw.retired_pages) && - (MDBX_PNL_SIZE(txn->tw.retired_pages) > env->me_maxgc_ov1page || + if (ctx->retired_stored != MDBX_PNL_GETSIZE(txn->tw.retired_pages) && + (MDBX_PNL_GETSIZE(txn->tw.retired_pages) > env->me_maxgc_ov1page || ctx->retired_stored > env->me_maxgc_ov1page)) { rc = gcu_prepare_backlog(txn, ctx, true); if (unlikely(rc != MDBX_SUCCESS)) @@ -9195,7 +9196,7 @@ retry: txn->mt_next_pgno - MDBX_ENABLE_REFUND)); if (ctx->lifo) { if (ctx->cleaned_slot < (txn->tw.lifo_reclaimed - ? MDBX_PNL_SIZE(txn->tw.lifo_reclaimed) + ? 
MDBX_PNL_GETSIZE(txn->tw.lifo_reclaimed) : 0)) { ctx->settled = 0; ctx->cleaned_slot = 0; @@ -9220,13 +9221,13 @@ retry: goto bailout; } tASSERT(txn, ctx->cleaned_id <= env->me_lck->mti_oldest_reader.weak); - TRACE("%s: cleanup-reclaimed-id [%u]%" PRIaTXN, dbg_prefix_mode, + TRACE("%s: cleanup-reclaimed-id [%zu]%" PRIaTXN, dbg_prefix_mode, ctx->cleaned_slot, ctx->cleaned_id); tASSERT(txn, *txn->mt_cursors == &ctx->cursor.outer); rc = mdbx_cursor_del(&ctx->cursor.outer, 0); if (unlikely(rc != MDBX_SUCCESS)) goto bailout; - } while (ctx->cleaned_slot < MDBX_PNL_SIZE(txn->tw.lifo_reclaimed)); + } while (ctx->cleaned_slot < MDBX_PNL_GETSIZE(txn->tw.lifo_reclaimed)); txl_sort(txn->tw.lifo_reclaimed); } } else { @@ -9292,14 +9293,14 @@ retry: * The pages themselves remain in dirtylist. */ if (unlikely(!txn->tw.lifo_reclaimed && txn->tw.last_reclaimed < 1)) { if (txn->tw.loose_count > 0) { - TRACE("%s: try allocate gc-slot for %u loose-pages", dbg_prefix_mode, + TRACE("%s: try allocate gc-slot for %zu loose-pages", dbg_prefix_mode, txn->tw.loose_count); rc = page_alloc_slowpath(&ctx->cursor.outer, 0, MDBX_ALLOC_GC | MDBX_ALLOC_SLOT | MDBX_ALLOC_FAKE) .err; if (rc == MDBX_SUCCESS) { - TRACE("%s: retry since gc-slot for %u loose-pages available", + TRACE("%s: retry since gc-slot for %zu loose-pages available", dbg_prefix_mode, txn->tw.loose_count); continue; } @@ -9311,7 +9312,7 @@ retry: goto bailout; for (MDBX_page *mp = txn->tw.loose_pages; mp; mp = mp->mp_next) pnl_xappend(txn->tw.retired_pages, mp->mp_pgno); - TRACE("%s: append %u loose-pages to retired-pages", dbg_prefix_mode, + TRACE("%s: append %zu loose-pages to retired-pages", dbg_prefix_mode, txn->tw.loose_count); } } else { @@ -9322,23 +9323,23 @@ retry: MDBX_PNL loose = txn->tw.reclaimed_pglist + MDBX_PNL_ALLOCLEN(txn->tw.reclaimed_pglist) - txn->tw.loose_count - 1; - unsigned count = 0; + size_t count = 0; for (MDBX_page *mp = txn->tw.loose_pages; mp; mp = mp->mp_next) { tASSERT(txn, mp->mp_flags == P_LOOSE); 
loose[++count] = mp->mp_pgno; } tASSERT(txn, count == txn->tw.loose_count); - MDBX_PNL_SIZE(loose) = count; + MDBX_PNL_SETSIZE(loose, count); pnl_sort(loose, txn->mt_next_pgno); pnl_merge(txn->tw.reclaimed_pglist, loose); - TRACE("%s: append %u loose-pages to reclaimed-pages", dbg_prefix_mode, + TRACE("%s: append %zu loose-pages to reclaimed-pages", dbg_prefix_mode, txn->tw.loose_count); } /* filter-out list of dirty-pages from loose-pages */ MDBX_dpl *const dl = txn->tw.dirtylist; - unsigned w = 0; - for (unsigned r = w; ++r <= dl->length;) { + size_t w = 0; + for (size_t r = w; ++r <= dl->length;) { MDBX_page *dp = dl->items[r].ptr; tASSERT(txn, dp->mp_flags == P_LOOSE || IS_MODIFIABLE(txn, dp)); tASSERT(txn, dpl_endpgno(dl, r) <= txn->mt_next_pgno); @@ -9351,7 +9352,7 @@ retry: dpage_free(env, dp, 1); } } - TRACE("%s: filtered-out loose-pages from %u -> %u dirty-pages", + TRACE("%s: filtered-out loose-pages from %zu -> %zu dirty-pages", dbg_prefix_mode, dl->length, w); tASSERT(txn, txn->tw.loose_count == dl->length - w); dpl_setlen(dl, w); @@ -9368,9 +9369,9 @@ retry: #endif /* MDBX_ENABLE_REFUND */ } - const unsigned amount = (unsigned)MDBX_PNL_SIZE(txn->tw.reclaimed_pglist); + const size_t amount = MDBX_PNL_GETSIZE(txn->tw.reclaimed_pglist); /* handle retired-list - store ones into single gc-record */ - if (ctx->retired_stored < MDBX_PNL_SIZE(txn->tw.retired_pages)) { + if (ctx->retired_stored < MDBX_PNL_GETSIZE(txn->tw.retired_pages)) { if (unlikely(!ctx->retired_stored)) { /* Make sure last page of GC is touched and on retired-list */ ctx->cursor.outer.mc_flags &= ~C_RECLAIMING; @@ -9382,14 +9383,14 @@ retry: } #if MDBX_ENABLE_BIGFOOT - unsigned retired_pages_before; + size_t retired_pages_before; do { if (ctx->bigfoot > txn->mt_txnid) { rc = gcu_clean_stored_retired(txn, ctx); tASSERT(txn, ctx->bigfoot <= txn->mt_txnid); } - retired_pages_before = MDBX_PNL_SIZE(txn->tw.retired_pages); + retired_pages_before = MDBX_PNL_GETSIZE(txn->tw.retired_pages); rc = 
gcu_prepare_backlog(txn, ctx, true); if (unlikely(rc != MDBX_SUCCESS)) goto bailout; @@ -9400,9 +9401,9 @@ retry: do { key.iov_len = sizeof(txnid_t); key.iov_base = &ctx->bigfoot; - const unsigned left = (unsigned)MDBX_PNL_SIZE(txn->tw.retired_pages) - - ctx->retired_stored; - const unsigned chunk = + const size_t left = + MDBX_PNL_GETSIZE(txn->tw.retired_pages) - ctx->retired_stored; + const size_t chunk = (left > env->me_maxgc_ov1page && ctx->bigfoot < MAX_TXNID) ? env->me_maxgc_ov1page : left; @@ -9411,10 +9412,10 @@ retry: if (unlikely(rc != MDBX_SUCCESS)) goto bailout; - if (retired_pages_before == MDBX_PNL_SIZE(txn->tw.retired_pages)) { - const unsigned at = (ctx->lifo == MDBX_PNL_ASCENDING) - ? left - chunk - : ctx->retired_stored; + if (retired_pages_before == MDBX_PNL_GETSIZE(txn->tw.retired_pages)) { + const size_t at = (ctx->lifo == MDBX_PNL_ASCENDING) + ? left - chunk + : ctx->retired_stored; pgno_t *const begin = txn->tw.retired_pages + at; /* MDBX_PNL_ASCENDING == false && LIFO == false: * - the larger pgno is at the beginning of retired list @@ -9424,19 +9425,20 @@ retry: * and should be placed with the smaller txnid. 
*/ const pgno_t save = *begin; - *begin = chunk; + *begin = (pgno_t)chunk; memcpy(data.iov_base, begin, data.iov_len); *begin = save; TRACE("%s: put-retired/bigfoot @ %" PRIaTXN - " (slice #%u) #%u [%u..%u] of %u", + " (slice #%u) #%zu [%zu..%zu] of %zu", dbg_prefix_mode, ctx->bigfoot, (unsigned)(ctx->bigfoot - txn->mt_txnid), chunk, at, at + chunk, retired_pages_before); } ctx->retired_stored += chunk; - } while (ctx->retired_stored < MDBX_PNL_SIZE(txn->tw.retired_pages) && + } while (ctx->retired_stored < + MDBX_PNL_GETSIZE(txn->tw.retired_pages) && (++ctx->bigfoot, true)); - } while (retired_pages_before != MDBX_PNL_SIZE(txn->tw.retired_pages)); + } while (retired_pages_before != MDBX_PNL_GETSIZE(txn->tw.retired_pages)); #else /* Write to last page of GC */ key.iov_len = sizeof(txnid_t); @@ -9450,7 +9452,7 @@ retry: /* Retry if tw.retired_pages[] grew during the Put() */ } while (data.iov_len < MDBX_PNL_SIZEOF(txn->tw.retired_pages)); - ctx->retired_stored = (unsigned)MDBX_PNL_SIZE(txn->tw.retired_pages); + ctx->retired_stored = MDBX_PNL_GETSIZE(txn->tw.retired_pages); pnl_sort(txn->tw.retired_pages, txn->mt_next_pgno); eASSERT(env, data.iov_len == MDBX_PNL_SIZEOF(txn->tw.retired_pages)); memcpy(data.iov_base, txn->tw.retired_pages, data.iov_len); @@ -9459,17 +9461,17 @@ retry: ctx->retired_stored, txn->mt_txnid); #endif /* MDBX_ENABLE_BIGFOOT */ if (LOG_ENABLED(MDBX_LOG_EXTRA)) { - unsigned i = ctx->retired_stored; - DEBUG_EXTRA("txn %" PRIaTXN " root %" PRIaPGNO " num %u, retired-PNL", + size_t i = ctx->retired_stored; + DEBUG_EXTRA("txn %" PRIaTXN " root %" PRIaPGNO " num %zu, retired-PNL", txn->mt_txnid, txn->mt_dbs[FREE_DBI].md_root, i); for (; i; i--) DEBUG_EXTRA_PRINT(" %" PRIaPGNO, txn->tw.retired_pages[i]); DEBUG_EXTRA_PRINT("%s\n", "."); } - if (unlikely(amount != MDBX_PNL_SIZE(txn->tw.reclaimed_pglist) && + if (unlikely(amount != MDBX_PNL_GETSIZE(txn->tw.reclaimed_pglist) && ctx->settled)) { - TRACE("%s: reclaimed-list changed %u -> %u, retry", 
dbg_prefix_mode, - amount, (unsigned)MDBX_PNL_SIZE(txn->tw.reclaimed_pglist)); + TRACE("%s: reclaimed-list changed %zu -> %zu, retry", dbg_prefix_mode, + amount, MDBX_PNL_GETSIZE(txn->tw.reclaimed_pglist)); goto retry /* rare case, but avoids GC fragmentation and one cycle. */ ; @@ -9488,18 +9490,16 @@ retry: if (unlikely(rc != MDBX_SUCCESS)) goto bailout; } - const unsigned left = amount - ctx->settled; - TRACE("%s: amount %u, settled %d, left %d, lifo-reclaimed-slots %u, " - "reused-gc-slots %u", - dbg_prefix_mode, amount, ctx->settled, (int)left, - txn->tw.lifo_reclaimed - ? (unsigned)MDBX_PNL_SIZE(txn->tw.lifo_reclaimed) - : 0, + const size_t left = amount - ctx->settled; + TRACE("%s: amount %zu, settled %zd, left %zd, lifo-reclaimed-slots %zu, " + "reused-gc-slots %zu", + dbg_prefix_mode, amount, ctx->settled, left, + txn->tw.lifo_reclaimed ? MDBX_PNL_GETSIZE(txn->tw.lifo_reclaimed) : 0, ctx->reused_slot); - if (0 >= (int)left) + if (0 >= (intptr_t)left) break; - const unsigned prefer_max_scatter = 257; + const size_t prefer_max_scatter = 257; txnid_t reservation_gc_id; if (ctx->lifo) { if (txn->tw.lifo_reclaimed == nullptr) { @@ -9509,10 +9509,8 @@ retry: goto bailout; } } - if ((unsigned)MDBX_PNL_SIZE(txn->tw.lifo_reclaimed) < - prefer_max_scatter && - left > ((unsigned)MDBX_PNL_SIZE(txn->tw.lifo_reclaimed) - - ctx->reused_slot) * + if (MDBX_PNL_GETSIZE(txn->tw.lifo_reclaimed) < prefer_max_scatter && + left > (MDBX_PNL_GETSIZE(txn->tw.lifo_reclaimed) - ctx->reused_slot) * env->me_maxgc_ov1page && !ctx->dense) { /* LY: need just a txn-id for save page list. 
*/ @@ -9531,12 +9529,12 @@ retry: MDBX_PNL_LAST(txn->tw.lifo_reclaimed)); need_cleanup = true; } - } while (rc == MDBX_SUCCESS && - (unsigned)MDBX_PNL_SIZE(txn->tw.lifo_reclaimed) < - prefer_max_scatter && - left > ((unsigned)MDBX_PNL_SIZE(txn->tw.lifo_reclaimed) - - ctx->reused_slot) * - env->me_maxgc_ov1page); + } while ( + rc == MDBX_SUCCESS && + MDBX_PNL_GETSIZE(txn->tw.lifo_reclaimed) < prefer_max_scatter && + left > + (MDBX_PNL_GETSIZE(txn->tw.lifo_reclaimed) - ctx->reused_slot) * + env->me_maxgc_ov1page); ctx->cursor.outer.mc_flags |= C_RECLAIMING; if (likely(rc == MDBX_SUCCESS)) { @@ -9546,7 +9544,7 @@ retry: /* LY: some troubles... */ goto bailout; - if (MDBX_PNL_SIZE(txn->tw.lifo_reclaimed)) { + if (MDBX_PNL_GETSIZE(txn->tw.lifo_reclaimed)) { if (need_cleanup) { txl_sort(txn->tw.lifo_reclaimed); ctx->cleaned_slot = 0; @@ -9566,17 +9564,17 @@ retry: } /* LY: GC is empty, will look any free txn-id in high2low order. */ - while (MDBX_PNL_SIZE(txn->tw.lifo_reclaimed) < prefer_max_scatter && - left > ((unsigned)MDBX_PNL_SIZE(txn->tw.lifo_reclaimed) - + while (MDBX_PNL_GETSIZE(txn->tw.lifo_reclaimed) < prefer_max_scatter && + left > (MDBX_PNL_GETSIZE(txn->tw.lifo_reclaimed) - ctx->reused_slot) * env->me_maxgc_ov1page) { if (unlikely(ctx->rid <= MIN_TXNID)) { - if (unlikely(MDBX_PNL_SIZE(txn->tw.lifo_reclaimed) <= + if (unlikely(MDBX_PNL_GETSIZE(txn->tw.lifo_reclaimed) <= ctx->reused_slot)) { - NOTICE("** restart: reserve depleted (reused_gc_slot %u >= " - "lifo_reclaimed %u" PRIaTXN, + NOTICE("** restart: reserve depleted (reused_gc_slot %zu >= " + "lifo_reclaimed %zu" PRIaTXN, ctx->reused_slot, - (unsigned)MDBX_PNL_SIZE(txn->tw.lifo_reclaimed)); + MDBX_PNL_GETSIZE(txn->tw.lifo_reclaimed)); goto retry; } break; @@ -9627,7 +9625,7 @@ retry: 1 /* mark cleanup is not needed for added slot. 
*/; TRACE("%s: append @%" PRIaTXN - " to lifo-reclaimed, cleaned-gc-slot = %u", + " to lifo-reclaimed, cleaned-gc-slot = %zu", dbg_prefix_mode, ctx->rid, ctx->cleaned_slot); } @@ -9640,11 +9638,11 @@ retry: } } - const unsigned i = - (unsigned)MDBX_PNL_SIZE(txn->tw.lifo_reclaimed) - ctx->reused_slot; - tASSERT(txn, i > 0 && i <= MDBX_PNL_SIZE(txn->tw.lifo_reclaimed)); + const size_t i = + MDBX_PNL_GETSIZE(txn->tw.lifo_reclaimed) - ctx->reused_slot; + tASSERT(txn, i > 0 && i <= MDBX_PNL_GETSIZE(txn->tw.lifo_reclaimed)); reservation_gc_id = txn->tw.lifo_reclaimed[i]; - TRACE("%s: take @%" PRIaTXN " from lifo-reclaimed[%u]", dbg_prefix_mode, + TRACE("%s: take @%" PRIaTXN " from lifo-reclaimed[%zu]", dbg_prefix_mode, reservation_gc_id, i); } else { tASSERT(txn, txn->tw.lifo_reclaimed == NULL); @@ -9675,39 +9673,38 @@ retry: } ++ctx->reused_slot; - unsigned chunk = left; + size_t chunk = left; if (unlikely(chunk > env->me_maxgc_ov1page)) { - const unsigned avail_gc_slots = + const size_t avail_gc_slots = txn->tw.lifo_reclaimed - ? (unsigned)MDBX_PNL_SIZE(txn->tw.lifo_reclaimed) - - ctx->reused_slot + 1 - : (ctx->rid < INT16_MAX) ? (unsigned)ctx->rid + ? MDBX_PNL_GETSIZE(txn->tw.lifo_reclaimed) - ctx->reused_slot + 1 + : (ctx->rid < INT16_MAX) ? (size_t)ctx->rid : INT16_MAX; if (avail_gc_slots > 1) { if (chunk < env->me_maxgc_ov1page * 2) chunk /= 2; else { - const unsigned threshold = + const size_t threshold = env->me_maxgc_ov1page * ((avail_gc_slots < prefer_max_scatter) ? 
avail_gc_slots : prefer_max_scatter); if (left < threshold) chunk = env->me_maxgc_ov1page; else { - const unsigned tail = left - threshold + env->me_maxgc_ov1page + 1; - unsigned span = 1; - unsigned avail = (unsigned)((pgno2bytes(env, span) - PAGEHDRSZ) / - sizeof(pgno_t)) /* - 1 + span */; + const size_t tail = left - threshold + env->me_maxgc_ov1page + 1; + size_t span = 1; + size_t avail = ((pgno2bytes(env, span) - PAGEHDRSZ) / + sizeof(pgno_t)) /* - 1 + span */; if (tail > avail) { - for (unsigned i = amount - span; i > 0; --i) { + for (size_t i = amount - span; i > 0; --i) { if (MDBX_PNL_ASCENDING ? (txn->tw.reclaimed_pglist[i] + span) : (txn->tw.reclaimed_pglist[i] - span) == txn->tw.reclaimed_pglist[i + span]) { span += 1; - avail = (unsigned)((pgno2bytes(env, span) - PAGEHDRSZ) / - sizeof(pgno_t)) - - 1 + span; + avail = + ((pgno2bytes(env, span) - PAGEHDRSZ) / sizeof(pgno_t)) - + 1 + span; if (avail >= tail) break; } @@ -9725,11 +9722,11 @@ retry: } tASSERT(txn, chunk > 0); - TRACE("%s: gc_rid %" PRIaTXN ", reused_gc_slot %u, reservation-id " + TRACE("%s: gc_rid %" PRIaTXN ", reused_gc_slot %zu, reservation-id " "%" PRIaTXN, dbg_prefix_mode, ctx->rid, ctx->reused_slot, reservation_gc_id); - TRACE("%s: chunk %u, gc-per-ovpage %u", dbg_prefix_mode, chunk, + TRACE("%s: chunk %zu, gc-per-ovpage %u", dbg_prefix_mode, chunk, env->me_maxgc_ov1page); tASSERT(txn, reservation_gc_id <= env->me_lck->mti_oldest_reader.weak); @@ -9746,7 +9743,7 @@ retry: key.iov_len = sizeof(reservation_gc_id); key.iov_base = &reservation_gc_id; data.iov_len = (chunk + 1) * sizeof(pgno_t); - TRACE("%s: reserve %u [%u...%u) @%" PRIaTXN, dbg_prefix_mode, chunk, + TRACE("%s: reserve %zu [%zu...%zu) @%" PRIaTXN, dbg_prefix_mode, chunk, ctx->settled + 1, ctx->settled + chunk + 1, reservation_gc_id); gcu_prepare_backlog(txn, ctx, true); rc = mdbx_cursor_put(&ctx->cursor.outer, &key, &data, @@ -9758,42 +9755,43 @@ retry: gcu_clean_reserved(env, data); ctx->settled += chunk; - TRACE("%s: 
settled %u (+%u), continue", dbg_prefix_mode, ctx->settled, + TRACE("%s: settled %zu (+%zu), continue", dbg_prefix_mode, ctx->settled, chunk); if (txn->tw.lifo_reclaimed && - unlikely(amount < MDBX_PNL_SIZE(txn->tw.reclaimed_pglist)) && - (ctx->loop < 5 || MDBX_PNL_SIZE(txn->tw.reclaimed_pglist) - amount > + unlikely(amount < MDBX_PNL_GETSIZE(txn->tw.reclaimed_pglist)) && + (ctx->loop < 5 || MDBX_PNL_GETSIZE(txn->tw.reclaimed_pglist) - amount > env->me_maxgc_ov1page)) { - NOTICE("** restart: reclaimed-list growth %u -> %u", amount, - (unsigned)MDBX_PNL_SIZE(txn->tw.reclaimed_pglist)); + NOTICE("** restart: reclaimed-list growth %zu -> %zu", amount, + MDBX_PNL_GETSIZE(txn->tw.reclaimed_pglist)); goto retry; } continue; } - tASSERT(txn, ctx->cleaned_slot == (txn->tw.lifo_reclaimed - ? MDBX_PNL_SIZE(txn->tw.lifo_reclaimed) - : 0)); + tASSERT(txn, + ctx->cleaned_slot == (txn->tw.lifo_reclaimed + ? MDBX_PNL_GETSIZE(txn->tw.lifo_reclaimed) + : 0)); TRACE("%s", " >> filling"); /* Fill in the reserved records */ ctx->filled_slot = txn->tw.lifo_reclaimed - ? (unsigned)MDBX_PNL_SIZE(txn->tw.lifo_reclaimed) - ctx->reused_slot + ? 
MDBX_PNL_GETSIZE(txn->tw.lifo_reclaimed) - ctx->reused_slot : ctx->reused_slot; rc = MDBX_SUCCESS; tASSERT(txn, pnl_check_allocated(txn->tw.reclaimed_pglist, txn->mt_next_pgno - MDBX_ENABLE_REFUND)); tASSERT(txn, dirtylist_check(txn)); - if (MDBX_PNL_SIZE(txn->tw.reclaimed_pglist)) { + if (MDBX_PNL_GETSIZE(txn->tw.reclaimed_pglist)) { MDBX_val key, data; key.iov_len = data.iov_len = 0; /* avoid MSVC warning */ key.iov_base = data.iov_base = NULL; - const unsigned amount = MDBX_PNL_SIZE(txn->tw.reclaimed_pglist); - unsigned left = amount; + const size_t amount = MDBX_PNL_GETSIZE(txn->tw.reclaimed_pglist); + size_t left = amount; if (txn->tw.lifo_reclaimed == nullptr) { tASSERT(txn, ctx->lifo == 0); rc = cursor_first(&ctx->cursor.outer, &key, &data); @@ -9805,30 +9803,28 @@ retry: while (true) { txnid_t fill_gc_id; - TRACE("%s: left %u of %u", dbg_prefix_mode, left, - (unsigned)MDBX_PNL_SIZE(txn->tw.reclaimed_pglist)); + TRACE("%s: left %zu of %zu", dbg_prefix_mode, left, + MDBX_PNL_GETSIZE(txn->tw.reclaimed_pglist)); if (txn->tw.lifo_reclaimed == nullptr) { tASSERT(txn, ctx->lifo == 0); fill_gc_id = unaligned_peek_u64(4, key.iov_base); if (ctx->filled_slot-- == 0 || fill_gc_id > txn->tw.last_reclaimed) { NOTICE( - "** restart: reserve depleted (filled_slot %u, fill_id %" PRIaTXN + "** restart: reserve depleted (filled_slot %zu, fill_id %" PRIaTXN " > last_reclaimed %" PRIaTXN, ctx->filled_slot, fill_gc_id, txn->tw.last_reclaimed); goto retry; } } else { tASSERT(txn, ctx->lifo != 0); - if (++ctx->filled_slot > - (unsigned)MDBX_PNL_SIZE(txn->tw.lifo_reclaimed)) { - NOTICE("** restart: reserve depleted (filled_gc_slot %u > " - "lifo_reclaimed %u" PRIaTXN, - ctx->filled_slot, - (unsigned)MDBX_PNL_SIZE(txn->tw.lifo_reclaimed)); + if (++ctx->filled_slot > MDBX_PNL_GETSIZE(txn->tw.lifo_reclaimed)) { + NOTICE("** restart: reserve depleted (filled_gc_slot %zu > " + "lifo_reclaimed %zu" PRIaTXN, + ctx->filled_slot, MDBX_PNL_GETSIZE(txn->tw.lifo_reclaimed)); goto retry; } 
fill_gc_id = txn->tw.lifo_reclaimed[ctx->filled_slot]; - TRACE("%s: seek-reservation @%" PRIaTXN " at lifo_reclaimed[%u]", + TRACE("%s: seek-reservation @%" PRIaTXN " at lifo_reclaimed[%zu]", dbg_prefix_mode, fill_gc_id, ctx->filled_slot); key.iov_base = &fill_gc_id; key.iov_len = sizeof(fill_gc_id); @@ -9836,10 +9832,10 @@ retry: if (unlikely(rc != MDBX_SUCCESS)) goto bailout; } - tASSERT(txn, - ctx->cleaned_slot == (txn->tw.lifo_reclaimed - ? MDBX_PNL_SIZE(txn->tw.lifo_reclaimed) - : 0)); + tASSERT(txn, ctx->cleaned_slot == + (txn->tw.lifo_reclaimed + ? MDBX_PNL_GETSIZE(txn->tw.lifo_reclaimed) + : 0)); tASSERT(txn, fill_gc_id > 0 && fill_gc_id <= env->me_lck->mti_oldest_reader.weak); key.iov_base = &fill_gc_id; @@ -9847,9 +9843,9 @@ retry: tASSERT(txn, data.iov_len >= sizeof(pgno_t) * 2); ctx->cursor.outer.mc_flags |= C_GCFREEZE; - unsigned chunk = (unsigned)(data.iov_len / sizeof(pgno_t)) - 1; + size_t chunk = data.iov_len / sizeof(pgno_t) - 1; if (unlikely(chunk > left)) { - TRACE("%s: chunk %u > left %u, @%" PRIaTXN, dbg_prefix_mode, chunk, + TRACE("%s: chunk %zu > left %zu, @%" PRIaTXN, dbg_prefix_mode, chunk, left, fill_gc_id); if ((ctx->loop < 5 && chunk - left > ctx->loop / 2) || chunk - left > env->me_maxgc_ov1page) { @@ -9867,37 +9863,36 @@ retry: gcu_clean_reserved(env, data); if (unlikely(txn->tw.loose_count || - amount != MDBX_PNL_SIZE(txn->tw.reclaimed_pglist))) { - NOTICE("** restart: reclaimed-list growth (%u -> %u, loose +%u)", - amount, MDBX_PNL_SIZE(txn->tw.reclaimed_pglist), + amount != MDBX_PNL_GETSIZE(txn->tw.reclaimed_pglist))) { + NOTICE("** restart: reclaimed-list growth (%zu -> %zu, loose +%zu)", + amount, MDBX_PNL_GETSIZE(txn->tw.reclaimed_pglist), txn->tw.loose_count); goto retry; } if (unlikely(txn->tw.lifo_reclaimed ? 
ctx->cleaned_slot < - MDBX_PNL_SIZE(txn->tw.lifo_reclaimed) + MDBX_PNL_GETSIZE(txn->tw.lifo_reclaimed) : ctx->cleaned_id < txn->tw.last_reclaimed)) { NOTICE("%s", "** restart: reclaimed-slots changed"); goto retry; } if (unlikely(ctx->retired_stored != - MDBX_PNL_SIZE(txn->tw.retired_pages))) { + MDBX_PNL_GETSIZE(txn->tw.retired_pages))) { tASSERT(txn, - ctx->retired_stored < MDBX_PNL_SIZE(txn->tw.retired_pages)); - NOTICE("** restart: retired-list growth (%u -> %u)", - ctx->retired_stored, MDBX_PNL_SIZE(txn->tw.retired_pages)); + ctx->retired_stored < MDBX_PNL_GETSIZE(txn->tw.retired_pages)); + NOTICE("** restart: retired-list growth (%zu -> %zu)", + ctx->retired_stored, MDBX_PNL_GETSIZE(txn->tw.retired_pages)); goto retry; } pgno_t *dst = data.iov_base; - *dst++ = chunk; + *dst++ = (pgno_t)chunk; pgno_t *src = MDBX_PNL_BEGIN(txn->tw.reclaimed_pglist) + left - chunk; memcpy(dst, src, chunk * sizeof(pgno_t)); pgno_t *from = src, *to = src + chunk; - TRACE("%s: fill %u [ %u:%" PRIaPGNO "...%u:%" PRIaPGNO "] @%" PRIaTXN, - dbg_prefix_mode, chunk, (unsigned)(from - txn->tw.reclaimed_pglist), - from[0], (unsigned)(to - txn->tw.reclaimed_pglist), to[-1], - fill_gc_id); + TRACE("%s: fill %zu [ %zu:%" PRIaPGNO "...%zu:%" PRIaPGNO "] @%" PRIaTXN, + dbg_prefix_mode, chunk, from - txn->tw.reclaimed_pglist, from[0], + to - txn->tw.reclaimed_pglist, to[-1], fill_gc_id); left -= chunk; if (AUDIT_ENABLED()) { @@ -9923,29 +9918,30 @@ retry: tASSERT(txn, rc == MDBX_SUCCESS); if (unlikely(txn->tw.loose_count != 0)) { - NOTICE("** restart: got %u loose pages", txn->tw.loose_count); + NOTICE("** restart: got %zu loose pages", txn->tw.loose_count); goto retry; } if (unlikely(ctx->filled_slot != (txn->tw.lifo_reclaimed - ? (unsigned)MDBX_PNL_SIZE(txn->tw.lifo_reclaimed) + ? 
MDBX_PNL_GETSIZE(txn->tw.lifo_reclaimed) : 0))) { const bool will_retry = ctx->loop < 9; - NOTICE("** %s: reserve excess (filled-slot %u, loop %u)", + NOTICE("** %s: reserve excess (filled-slot %zu, loop %zu)", will_retry ? "restart" : "ignore", ctx->filled_slot, ctx->loop); if (will_retry) goto retry; } - tASSERT(txn, txn->tw.lifo_reclaimed == NULL || - ctx->cleaned_slot == MDBX_PNL_SIZE(txn->tw.lifo_reclaimed)); + tASSERT(txn, + txn->tw.lifo_reclaimed == NULL || + ctx->cleaned_slot == MDBX_PNL_GETSIZE(txn->tw.lifo_reclaimed)); bailout: txn->mt_cursors[FREE_DBI] = ctx->cursor.outer.mc_next; - MDBX_PNL_SIZE(txn->tw.reclaimed_pglist) = 0; - TRACE("<<< %u loops, rc = %d", ctx->loop, rc); + MDBX_PNL_SETSIZE(txn->tw.reclaimed_pglist, 0); + TRACE("<<< %zu loops, rc = %d", ctx->loop, rc); return rc; } @@ -9954,7 +9950,7 @@ static int txn_write(MDBX_txn *txn, iov_ctx_t *ctx) { if (MDBX_AVOID_MSYNC || !(txn->mt_flags & MDBX_WRITEMAP)) dl = dpl_sort(txn); int rc = MDBX_SUCCESS; - unsigned r, w; + size_t r, w; for (w = 0, r = 1; r <= dl->length; ++r) { MDBX_page *dp = dl->items[r].ptr; if (dp->mp_flags & P_LOOSE) { @@ -10004,13 +10000,13 @@ int mdbx_txn_commit(MDBX_txn *txn) { return __inline_mdbx_txn_commit(txn); } /* Merge child txn into parent */ static __inline void txn_merge(MDBX_txn *const parent, MDBX_txn *const txn, - const unsigned parent_retired_len) { + const size_t parent_retired_len) { MDBX_dpl *const src = dpl_sort(txn); /* Remove refunded pages from parent's dirty list */ MDBX_dpl *const dst = dpl_sort(parent); if (MDBX_ENABLE_REFUND) { - unsigned n = dst->length; + size_t n = dst->length; while (n && dst->items[n].pgno >= parent->mt_next_pgno) { if (!(txn->mt_env->me_flags & MDBX_WRITEMAP)) { unsigned npages = dpl_npages(dst, n); @@ -10031,12 +10027,12 @@ static __inline void txn_merge(MDBX_txn *const parent, MDBX_txn *const txn, dpl_sift(parent, reclaimed_list, false); /* Move retired pages from parent's dirty & spilled list to reclaimed */ - unsigned r, w, 
d, s, l; + size_t r, w, d, s, l; for (r = w = parent_retired_len; - ++r <= MDBX_PNL_SIZE(parent->tw.retired_pages);) { + ++r <= MDBX_PNL_GETSIZE(parent->tw.retired_pages);) { const pgno_t pgno = parent->tw.retired_pages[r]; - const unsigned di = dpl_exist(parent, pgno); - const unsigned si = !di ? search_spilled(parent, pgno) : 0; + const size_t di = dpl_exist(parent, pgno); + const size_t si = !di ? search_spilled(parent, pgno) : 0; unsigned npages; const char *kind; if (di) { @@ -10059,7 +10055,7 @@ static __inline void txn_merge(MDBX_txn *const parent, MDBX_txn *const txn, /* Список retired страниц не сортирован, но для ускорения сортировки * дополняется в соответствии с MDBX_PNL_ASCENDING */ #if MDBX_PNL_ASCENDING - const unsigned len = MDBX_PNL_SIZE(parent->tw.retired_pages); + const size_t len = MDBX_PNL_GETSIZE(parent->tw.retired_pages); while (r < len && parent->tw.retired_pages[r + 1] == pgno + l) { ++r; if (++l == npages) @@ -10083,46 +10079,46 @@ static __inline void txn_merge(MDBX_txn *const parent, MDBX_txn *const txn, continue; } - DEBUG("reclaim retired parent's %u->%u %s page %" PRIaPGNO, npages, l, kind, - pgno); + DEBUG("reclaim retired parent's %u -> %zu %s page %" PRIaPGNO, npages, l, + kind, pgno); int err = pnl_insert_range(&parent->tw.reclaimed_pglist, pgno, l); ENSURE(txn->mt_env, err == MDBX_SUCCESS); } - MDBX_PNL_SIZE(parent->tw.retired_pages) = w; + MDBX_PNL_SETSIZE(parent->tw.retired_pages, w); /* Filter-out parent spill list */ - if (parent->tw.spill_pages && MDBX_PNL_SIZE(parent->tw.spill_pages) > 0) { + if (parent->tw.spill_pages && MDBX_PNL_GETSIZE(parent->tw.spill_pages) > 0) { const MDBX_PNL sl = spill_purge(parent); - unsigned len = MDBX_PNL_SIZE(sl); + size_t len = MDBX_PNL_GETSIZE(sl); if (len) { /* Remove refunded pages from parent's spill list */ if (MDBX_ENABLE_REFUND && MDBX_PNL_MOST(sl) >= (parent->mt_next_pgno << 1)) { #if MDBX_PNL_ASCENDING - unsigned i = MDBX_PNL_SIZE(sl); + size_t i = MDBX_PNL_GETSIZE(sl); 
assert(MDBX_PNL_MOST(sl) == MDBX_PNL_LAST(sl)); do { if ((sl[i] & 1) == 0) DEBUG("refund parent's spilled page %" PRIaPGNO, sl[i] >> 1); i -= 1; } while (i && sl[i] >= (parent->mt_next_pgno << 1)); - MDBX_PNL_SIZE(sl) = i; + MDBX_PNL_SETSIZE(sl, i); #else assert(MDBX_PNL_MOST(sl) == MDBX_PNL_FIRST(sl)); - unsigned i = 0; + size_t i = 0; do { ++i; if ((sl[i] & 1) == 0) DEBUG("refund parent's spilled page %" PRIaPGNO, sl[i] >> 1); } while (i < len && sl[i + 1] >= (parent->mt_next_pgno << 1)); - MDBX_PNL_SIZE(sl) = len -= i; + MDBX_PNL_SETSIZE(sl, len -= i); memmove(sl + 1, sl + 1 + i, len * sizeof(sl[0])); #endif } tASSERT(txn, pnl_check_allocated(sl, (size_t)parent->mt_next_pgno << 1)); /* Remove reclaimed pages from parent's spill list */ - s = MDBX_PNL_SIZE(sl), r = MDBX_PNL_SIZE(reclaimed_list); + s = MDBX_PNL_GETSIZE(sl), r = MDBX_PNL_GETSIZE(reclaimed_list); /* Scanning from end to begin */ while (s && r) { if (sl[s] & 1) { @@ -10146,10 +10142,10 @@ static __inline void txn_merge(MDBX_txn *const parent, MDBX_txn *const txn, /* Remove anything in our dirty list from parent's spill list */ /* Scanning spill list in descend order */ - const int step = MDBX_PNL_ASCENDING ? -1 : 1; - s = MDBX_PNL_ASCENDING ? MDBX_PNL_SIZE(sl) : 1; + const intptr_t step = MDBX_PNL_ASCENDING ? -1 : 1; + s = MDBX_PNL_ASCENDING ? MDBX_PNL_GETSIZE(sl) : 1; d = src->length; - while (d && (MDBX_PNL_ASCENDING ? 
s > 0 : s <= MDBX_PNL_GETSIZE(sl))) { if (sl[s] & 1) { s += step; continue; @@ -10249,7 +10245,7 @@ static __inline void txn_merge(MDBX_txn *const parent, MDBX_txn *const txn, } ++w; } - NOTICE("squash to begin for extending-merge %u -> %u", d, w - 1); + NOTICE("squash to begin for extending-merge %zu -> %zu", d, w - 1); d = w - 1; continue; } @@ -10291,7 +10287,7 @@ static __inline void txn_merge(MDBX_txn *const parent, MDBX_txn *const txn, } --w; } - NOTICE("squash to end for shrinking-merge %u -> %u", d, w + 1); + NOTICE("squash to end for shrinking-merge %zu -> %zu", d, w + 1); d = w + 1; continue; } @@ -10351,7 +10347,7 @@ static __inline void txn_merge(MDBX_txn *const parent, MDBX_txn *const txn, if (parent->tw.spill_pages) { assert(pnl_check_allocated(parent->tw.spill_pages, (size_t)parent->mt_next_pgno << 1)); - if (MDBX_PNL_SIZE(parent->tw.spill_pages)) + if (MDBX_PNL_GETSIZE(parent->tw.spill_pages)) parent->mt_flags |= MDBX_TXN_SPILLS; } } @@ -10423,8 +10419,8 @@ int mdbx_txn_commit_ex(MDBX_txn *txn, MDBX_commit_latency *latency) { sizeof(parent->mt_geo)) == 0); tASSERT(txn, memcmp(&parent->mt_canary, &txn->mt_canary, sizeof(parent->mt_canary)) == 0); - tASSERT(txn, - !txn->tw.spill_pages || MDBX_PNL_SIZE(txn->tw.spill_pages) == 0); + tASSERT(txn, !txn->tw.spill_pages || + MDBX_PNL_GETSIZE(txn->tw.spill_pages) == 0); tASSERT(txn, txn->tw.loose_count == 0); /* fast completion of pure nested transaction */ @@ -10434,11 +10430,10 @@ int mdbx_txn_commit_ex(MDBX_txn *txn, MDBX_commit_latency *latency) { /* Preserve space for spill list to avoid parent's state corruption * if allocation fails. 
*/ - const unsigned parent_retired_len = - (unsigned)(uintptr_t)parent->tw.retired_pages; - tASSERT(txn, parent_retired_len <= MDBX_PNL_SIZE(txn->tw.retired_pages)); - const unsigned retired_delta = - MDBX_PNL_SIZE(txn->tw.retired_pages) - parent_retired_len; + const size_t parent_retired_len = (uintptr_t)parent->tw.retired_pages; + tASSERT(txn, parent_retired_len <= MDBX_PNL_GETSIZE(txn->tw.retired_pages)); + const size_t retired_delta = + MDBX_PNL_GETSIZE(txn->tw.retired_pages) - parent_retired_len; if (retired_delta) { rc = pnl_need(&txn->tw.reclaimed_pglist, retired_delta); if (unlikely(rc != MDBX_SUCCESS)) @@ -10448,7 +10443,7 @@ int mdbx_txn_commit_ex(MDBX_txn *txn, MDBX_commit_latency *latency) { if (txn->tw.spill_pages) { if (parent->tw.spill_pages) { rc = pnl_need(&parent->tw.spill_pages, - MDBX_PNL_SIZE(txn->tw.spill_pages)); + MDBX_PNL_GETSIZE(txn->tw.spill_pages)); if (unlikely(rc != MDBX_SUCCESS)) goto fail; } @@ -10494,12 +10489,12 @@ int mdbx_txn_commit_ex(MDBX_txn *txn, MDBX_commit_latency *latency) { /* Update parent's DBs array */ memcpy(parent->mt_dbs, txn->mt_dbs, txn->mt_numdbs * sizeof(MDBX_db)); parent->mt_numdbs = txn->mt_numdbs; - for (unsigned i = 0; i < txn->mt_numdbs; i++) { + for (size_t i = 0; i < txn->mt_numdbs; i++) { /* preserve parent's status */ const uint8_t state = txn->mt_dbistate[i] | (parent->mt_dbistate[i] & (DBI_CREAT | DBI_FRESH | DBI_DIRTY)); - DEBUG("db %u dbi-state %s 0x%02x -> 0x%02x", i, + DEBUG("dbi %zu dbi-state %s 0x%02x -> 0x%02x", i, (parent->mt_dbistate[i] != state) ? 
"update" : "still", parent->mt_dbistate[i], state); parent->mt_dbistate[i] = state; @@ -10520,7 +10515,7 @@ int mdbx_txn_commit_ex(MDBX_txn *txn, MDBX_commit_latency *latency) { tASSERT(parent, lp->mp_pgno < parent->tw.loose_refund_wl && lp->mp_pgno + 1 < parent->mt_next_pgno); /* Check parent's reclaimed pages not suitable for refund */ - if (MDBX_PNL_SIZE(parent->tw.reclaimed_pglist)) + if (MDBX_PNL_GETSIZE(parent->tw.reclaimed_pglist)) tASSERT(parent, MDBX_PNL_MOST(parent->tw.reclaimed_pglist) + 1 < parent->mt_next_pgno); } @@ -10542,7 +10537,7 @@ int mdbx_txn_commit_ex(MDBX_txn *txn, MDBX_commit_latency *latency) { if (txn->tw.dirtylist->length == 0 && (txn->mt_flags & (MDBX_TXN_DIRTY | MDBX_TXN_SPILLS)) == 0) { - for (int i = txn->mt_numdbs; --i >= 0;) + for (intptr_t i = txn->mt_numdbs; --i >= 0;) tASSERT(txn, (txn->mt_dbistate[i] & DBI_DIRTY) == 0); #if defined(MDBX_NOSUCCESS_EMPTY_COMMIT) && MDBX_NOSUCCESS_EMPTY_COMMIT rc = txn_end(txn, end_mode); @@ -10607,7 +10602,7 @@ int mdbx_txn_commit_ex(MDBX_txn *txn, MDBX_commit_latency *latency) { ts_2 = latency ? 
osal_monotime() : 0; if (AUDIT_ENABLED()) { - rc = audit_ex(txn, MDBX_PNL_SIZE(txn->tw.retired_pages), true); + rc = audit_ex(txn, MDBX_PNL_GETSIZE(txn->tw.retired_pages), true); const uint64_t audit_end = osal_monotime(); audit_duration = osal_monotime_to_16dot16(audit_end - ts_2); ts_2 = audit_end; @@ -10650,7 +10645,7 @@ int mdbx_txn_commit_ex(MDBX_txn *txn, MDBX_commit_latency *latency) { meta.mm_extra_pagehdr = head.ptr_c->mm_extra_pagehdr; unaligned_poke_u64(4, meta.mm_pages_retired, unaligned_peek_u64(4, head.ptr_c->mm_pages_retired) + - MDBX_PNL_SIZE(txn->tw.retired_pages)); + MDBX_PNL_GETSIZE(txn->tw.retired_pages)); meta.mm_geo = txn->mt_geo; meta.mm_dbs[FREE_DBI] = txn->mt_dbs[FREE_DBI]; meta.mm_dbs[MAIN_DBI] = txn->mt_dbs[MAIN_DBI]; @@ -10660,8 +10655,8 @@ int mdbx_txn_commit_ex(MDBX_txn *txn, MDBX_commit_latency *latency) { #if MDBX_ENABLE_BIGFOOT if (gcu_ctx.bigfoot > txn->mt_txnid) { commit_txnid = gcu_ctx.bigfoot; - TRACE("use @%" PRIaTXN " (+%u) for commit bigfoot-txn", commit_txnid, - (unsigned)(commit_txnid - txn->mt_txnid)); + TRACE("use @%" PRIaTXN " (+%zu) for commit bigfoot-txn", commit_txnid, + (size_t)(commit_txnid - txn->mt_txnid)); } #endif meta_set_txnid(env, &meta, commit_txnid); @@ -11057,7 +11052,7 @@ __cold static int read_header(MDBX_env *env, MDBX_meta *dest, } __cold static MDBX_page *meta_model(const MDBX_env *env, MDBX_page *model, - unsigned num) { + size_t num) { ENSURE(env, is_powerof2(env->me_psize)); ENSURE(env, env->me_psize >= MIN_PAGESIZE); ENSURE(env, env->me_psize <= MAX_PAGESIZE); @@ -11067,7 +11062,7 @@ __cold static MDBX_page *meta_model(const MDBX_env *env, MDBX_page *model, ENSURE(env, env->me_dbgeo.now <= env->me_dbgeo.upper); memset(model, 0, env->me_psize); - model->mp_pgno = num; + model->mp_pgno = (pgno_t)num; model->mp_flags = P_META; MDBX_meta *const model_meta = page_meta(model); unaligned_poke_u64(4, model_meta->mm_magic_and_version, MDBX_DATA_MAGIC); @@ -11505,7 +11500,7 @@ fail: } static void 
recalculate_merge_threshold(MDBX_env *env) { - const unsigned bytes = page_space(env); + const size_t bytes = page_space(env); env->me_merge_threshold = (uint16_t)(bytes - (bytes * env->me_options.merge_threshold_16dot16_percent >> @@ -12904,7 +12899,7 @@ static uint32_t merge_sync_flags(const uint32_t a, const uint32_t b) { } __cold static int __must_check_result override_meta(MDBX_env *env, - unsigned target, + size_t target, txnid_t txnid, const MDBX_meta *shape) { int rc = alloc_page_buf(env); @@ -12917,7 +12912,7 @@ __cold static int __must_check_result override_meta(MDBX_env *env, eASSERT(env, coherency_check_meta(env, model, true)); if (shape) { if (txnid && unlikely(!coherency_check_meta(env, shape, false))) { - ERROR("bailout overriding meta-%u since model failed " + ERROR("bailout overriding meta-%zu since model failed " "freedb/maindb %s-check for txnid #%" PRIaTXN, target, "pre", constmeta_txnid(shape)); return MDBX_PROBLEM; @@ -12941,7 +12936,7 @@ __cold static int __must_check_result override_meta(MDBX_env *env, memcpy(&model->mm_magic_and_version, &shape->mm_magic_and_version, sizeof(model->mm_magic_and_version)); if (unlikely(!coherency_check_meta(env, model, false))) { - ERROR("bailout overriding meta-%u since model failed " + ERROR("bailout overriding meta-%zu since model failed " "freedb/maindb %s-check for txnid #%" PRIaTXN, target, "post", txnid); return MDBX_PROBLEM; @@ -12949,7 +12944,7 @@ __cold static int __must_check_result override_meta(MDBX_env *env, } } unaligned_poke_u64(4, model->mm_sign, meta_sign(model)); - rc = validate_meta(env, model, page, target, nullptr); + rc = validate_meta(env, model, page, (pgno_t)target, nullptr); if (unlikely(MDBX_IS_ERROR(rc))) return MDBX_PROBLEM; @@ -13007,11 +13002,11 @@ __cold int mdbx_env_turn_for_recovery(MDBX_env *env, unsigned target) { const MDBX_meta *target_meta = METAPAGE(env, target); txnid_t new_txnid = safe64_txnid_next(constmeta_txnid(target_meta)); - for (unsigned n = 0; n < NUM_METAS; 
++n) { + for (size_t n = 0; n < NUM_METAS; ++n) { if (n == target) continue; MDBX_meta meta = *METAPAGE(env, target); - if (validate_meta(env, &meta, pgno2page(env, n), n, nullptr) != + if (validate_meta(env, &meta, pgno2page(env, n), (pgno_t)n, nullptr) != MDBX_SUCCESS) { int err = override_meta(env, n, 0, nullptr); if (unlikely(err != MDBX_SUCCESS)) @@ -13644,7 +13639,7 @@ __cold static int env_close(MDBX_env *env) { } if (env->me_dbxs) { - for (unsigned i = env->me_numdbs; --i >= CORE_DBS;) + for (size_t i = env->me_numdbs; --i >= CORE_DBS;) osal_free(env->me_dbxs[i].md_name.iov_base); osal_free(env->me_dbxs); env->me_dbxs = nullptr; @@ -13867,25 +13862,25 @@ static bool unsure_equal(MDBX_cmp_func cmp, const MDBX_val *a, __hot static struct node_result node_search(MDBX_cursor *mc, const MDBX_val *key) { MDBX_page *mp = mc->mc_pg[mc->mc_top]; - const int nkeys = page_numkeys(mp); + const intptr_t nkeys = page_numkeys(mp); DKBUF_DEBUG; - DEBUG("searching %u keys in %s %spage %" PRIaPGNO, nkeys, + DEBUG("searching %zu keys in %s %spage %" PRIaPGNO, nkeys, IS_LEAF(mp) ? "leaf" : "branch", IS_SUBP(mp) ? "sub-" : "", mp->mp_pgno); struct node_result ret; ret.exact = false; STATIC_ASSERT(P_BRANCH == 1); - int low = mp->mp_flags & P_BRANCH; - int high = nkeys - 1; + intptr_t low = mp->mp_flags & P_BRANCH; + intptr_t high = nkeys - 1; if (unlikely(high < low)) { mc->mc_ki[mc->mc_top] = 0; ret.node = NULL; return ret; } - int i; + intptr_t i; MDBX_cmp_func *cmp = mc->mc_dbx->md_cmp; MDBX_val nodekey; if (unlikely(IS_LEAF2(mp))) { @@ -13897,7 +13892,7 @@ __hot static struct node_result node_search(MDBX_cursor *mc, cASSERT(mc, (char *)mp + mc->mc_txn->mt_env->me_psize >= (char *)nodekey.iov_base + nodekey.iov_len); int cr = cmp(key, &nodekey); - DEBUG("found leaf index %u [%s], rc = %i", i, DKEY_DEBUG(&nodekey), cr); + DEBUG("found leaf index %zu [%s], rc = %i", i, DKEY_DEBUG(&nodekey), cr); if (cr > 0) /* Found entry is less than the key. 
*/ /* Skip to get the smallest entry larger than key. */ @@ -13933,9 +13928,9 @@ __hot static struct node_result node_search(MDBX_cursor *mc, (char *)nodekey.iov_base + nodekey.iov_len); int cr = cmp(key, &nodekey); if (IS_LEAF(mp)) - DEBUG("found leaf index %u [%s], rc = %i", i, DKEY_DEBUG(&nodekey), cr); + DEBUG("found leaf index %zu [%s], rc = %i", i, DKEY_DEBUG(&nodekey), cr); else - DEBUG("found branch index %u [%s -> %" PRIaPGNO "], rc = %i", i, + DEBUG("found branch index %zu [%s -> %" PRIaPGNO "], rc = %i", i, DKEY_DEBUG(&nodekey), node_pgno(node), cr); if (cr > 0) /* Found entry is less than the key. */ @@ -14024,7 +14019,8 @@ __hot static __always_inline int page_get_checker_lite(const uint16_t ILL, if (unlikely(page->mp_upper < page->mp_lower || ((page->mp_lower | page->mp_upper) & 1) || PAGEHDRSZ + page->mp_upper > txn->mt_env->me_psize)) - return bad_page(page, "invalid page' lower(%u)/upper(%u) with limit %u\n", + return bad_page(page, + "invalid page' lower(%u)/upper(%u) with limit %zu\n", page->mp_lower, page->mp_upper, page_space(txn->mt_env)); } else if ((ILL & P_OVERFLOW) == 0) { @@ -14085,8 +14081,8 @@ __hot static __always_inline pgr_t page_get_inline(const uint16_t ILL, search_spilled(spiller, pgno)) break; - const unsigned i = dpl_search(spiller, pgno); - tASSERT(txn, (int)i > 0); + const size_t i = dpl_search(spiller, pgno); + tASSERT(txn, (intptr_t)i > 0); if (spiller->tw.dirtylist->items[i].pgno == pgno) { spiller->tw.dirtylist->items[i].lru = txn->tw.dirtylru++; r.page = spiller->tw.dirtylist->items[i].ptr; @@ -14127,9 +14123,9 @@ __hot __noinline static int page_search_root(MDBX_cursor *mc, while (IS_BRANCH(mp)) { MDBX_node *node; - int i; + intptr_t i; - DEBUG("branch page %" PRIaPGNO " has %u keys", mp->mp_pgno, + DEBUG("branch page %" PRIaPGNO " has %zu keys", mp->mp_pgno, page_numkeys(mp)); /* Don't assert on branch pages in the GC. 
We can get here * while in the process of rebalancing a GC branch page; we must @@ -14156,7 +14152,7 @@ __hot __noinline static int page_search_root(MDBX_cursor *mc, i = mc->mc_ki[mc->mc_top] + nsr.exact - 1; else i = page_numkeys(mp) - 1; - DEBUG("following index %u for key [%s]", i, DKEY_DEBUG(key)); + DEBUG("following index %zu for key [%s]", i, DKEY_DEBUG(key)); } cASSERT(mc, i >= 0 && i < (int)page_numkeys(mp)); @@ -14208,7 +14204,7 @@ static int setup_dbx(MDBX_dbx *const dbx, const MDBX_db *const db, ? 4 /* sizeof(uint32_t) */ : ((db->md_flags & MDBX_DUPFIXED) ? 1 : 0); dbx->md_vlen_max = valsize_max(pagesize, db->md_flags); - assert(dbx->md_vlen_max != (unsigned)-1); + assert(dbx->md_vlen_max != (size_t)-1); if ((db->md_flags & (MDBX_DUPFIXED | MDBX_INTEGERDUP)) != 0 && db->md_xsize) { if (!MDBX_DISABLE_VALIDATION && unlikely(db->md_xsize < dbx->md_vlen_min || @@ -14222,10 +14218,10 @@ static int setup_dbx(MDBX_dbx *const dbx, const MDBX_db *const db, return MDBX_SUCCESS; } -static int fetch_sdb(MDBX_txn *txn, MDBX_dbi dbi) { +static int fetch_sdb(MDBX_txn *txn, size_t dbi) { MDBX_cursor_couple couple; if (unlikely(dbi_changed(txn, dbi))) { - NOTICE("dbi %u was changed for txn %" PRIaTXN, dbi, txn->mt_txnid); + NOTICE("dbi %zu was changed for txn %" PRIaTXN, dbi, txn->mt_txnid); return MDBX_BAD_DBI; } int rc = cursor_init(&couple.outer, txn, MAIN_DBI); @@ -14236,7 +14232,7 @@ static int fetch_sdb(MDBX_txn *txn, MDBX_dbi dbi) { rc = page_search(&couple.outer, &dbx->md_name, 0); if (unlikely(rc != MDBX_SUCCESS)) { notfound: - NOTICE("dbi %u refs to inaccessible subDB `%*s` for txn %" PRIaTXN + NOTICE("dbi %zu refs to inaccessible subDB `%*s` for txn %" PRIaTXN " (err %d)", dbi, (int)dbx->md_name.iov_len, (const char *)dbx->md_name.iov_base, txn->mt_txnid, rc); @@ -14250,7 +14246,7 @@ static int fetch_sdb(MDBX_txn *txn, MDBX_dbi dbi) { goto notfound; } if (unlikely((node_flags(nsr.node) & (F_DUPDATA | F_SUBDATA)) != F_SUBDATA)) { - NOTICE("dbi %u refs to not a 
named subDB `%*s` for txn %" PRIaTXN " (%s)", + NOTICE("dbi %zu refs to not a named subDB `%*s` for txn %" PRIaTXN " (%s)", dbi, (int)dbx->md_name.iov_len, (const char *)dbx->md_name.iov_base, txn->mt_txnid, "wrong flags"); return MDBX_INCOMPATIBLE; /* not a named DB */ @@ -14262,7 +14258,7 @@ static int fetch_sdb(MDBX_txn *txn, MDBX_dbi dbi) { return rc; if (unlikely(data.iov_len != sizeof(MDBX_db))) { - NOTICE("dbi %u refs to not a named subDB `%*s` for txn %" PRIaTXN " (%s)", + NOTICE("dbi %zu refs to not a named subDB `%*s` for txn %" PRIaTXN " (%s)", dbi, (int)dbx->md_name.iov_len, (const char *)dbx->md_name.iov_base, txn->mt_txnid, "wrong rec-size"); return MDBX_INCOMPATIBLE; /* not a named DB */ @@ -14273,7 +14269,7 @@ static int fetch_sdb(MDBX_txn *txn, MDBX_dbi dbi) { * have dropped and recreated the DB with other flags. */ MDBX_db *const db = &txn->mt_dbs[dbi]; if (unlikely((db->md_flags & DB_PERSISTENT_FLAGS) != md_flags)) { - NOTICE("dbi %u refs to the re-created subDB `%*s` for txn %" PRIaTXN + NOTICE("dbi %zu refs to the re-created subDB `%*s` for txn %" PRIaTXN " with different flags (present 0x%X != wanna 0x%X)", dbi, (int)dbx->md_name.iov_len, (const char *)dbx->md_name.iov_base, txn->mt_txnid, db->md_flags & DB_PERSISTENT_FLAGS, md_flags); @@ -14635,9 +14631,9 @@ static int cursor_next(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data, goto skip; } - int ki = mc->mc_ki[mc->mc_top]; + intptr_t ki = mc->mc_ki[mc->mc_top]; mc->mc_ki[mc->mc_top] = (indx_t)++ki; - const int numkeys = page_numkeys(mp); + const intptr_t numkeys = page_numkeys(mp); if (unlikely(ki >= numkeys)) { DEBUG("%s", "=====> move to next sibling page"); mc->mc_ki[mc->mc_top] = (indx_t)(numkeys - 1); @@ -14652,7 +14648,7 @@ static int cursor_next(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data, } skip: - DEBUG("==> cursor points to page %" PRIaPGNO " with %u keys, key index %u", + DEBUG("==> cursor points to page %" PRIaPGNO " with %zu keys, key index %u", mp->mp_pgno, page_numkeys(mp), 
mc->mc_ki[mc->mc_top]); if (!MDBX_DISABLE_VALIDATION && unlikely(!CHECK_LEAF_TYPE(mc, mp))) { @@ -14742,7 +14738,7 @@ static int cursor_prev(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data, DEBUG("prev page is %" PRIaPGNO ", key index %u", mp->mp_pgno, mc->mc_ki[mc->mc_top]); } - DEBUG("==> cursor points to page %" PRIaPGNO " with %u keys, key index %u", + DEBUG("==> cursor points to page %" PRIaPGNO " with %zu keys, key index %u", mp->mp_pgno, page_numkeys(mp), mc->mc_ki[mc->mc_top]); if (!MDBX_DISABLE_VALIDATION && unlikely(!CHECK_LEAF_TYPE(mc, mp))) { @@ -14850,7 +14846,7 @@ cursor_set(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data, MDBX_cursor_op op) { goto got_node; } if (cmp > 0) { - const unsigned nkeys = page_numkeys(mp); + const size_t nkeys = page_numkeys(mp); if (nkeys > 1) { if (IS_LEAF2(mp)) { nodekey.iov_base = page_leaf2key(mp, nkeys - 1, nodekey.iov_len); @@ -14895,7 +14891,7 @@ cursor_set(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data, MDBX_cursor_op op) { } /* If any parents have right-sibs, search. * Otherwise, there's nothing further. 
*/ - unsigned i; + size_t i; for (i = 0; i < mc->mc_top; i++) if (mc->mc_ki[i] < page_numkeys(mc->mc_pg[i]) - 1) break; @@ -15182,7 +15178,7 @@ __hot int mdbx_cursor_get(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data, mp->mp_pgno, mp->mp_flags); return MDBX_CORRUPTED; } - const unsigned nkeys = page_numkeys(mp); + const size_t nkeys = page_numkeys(mp); if (unlikely(mc->mc_ki[mc->mc_top] >= nkeys)) { cASSERT(mc, nkeys <= UINT16_MAX); if (mc->mc_flags & C_EOF) @@ -15408,14 +15404,14 @@ static int cursor_next_batch(MDBX_cursor *mc) { MDBX_page *mp = mc->mc_pg[mc->mc_top]; if (unlikely(mc->mc_flags & C_EOF)) { - if ((unsigned)mc->mc_ki[mc->mc_top] + 1 >= page_numkeys(mp)) + if ((size_t)mc->mc_ki[mc->mc_top] + 1 >= page_numkeys(mp)) return MDBX_NOTFOUND; mc->mc_flags ^= C_EOF; } - int ki = mc->mc_ki[mc->mc_top]; + intptr_t ki = mc->mc_ki[mc->mc_top]; mc->mc_ki[mc->mc_top] = (indx_t)++ki; - const int numkeys = page_numkeys(mp); + const intptr_t numkeys = page_numkeys(mp); if (likely(ki >= numkeys)) { DEBUG("%s", "=====> move to next sibling page"); mc->mc_ki[mc->mc_top] = (indx_t)(numkeys - 1); @@ -15479,8 +15475,8 @@ int mdbx_cursor_get_batch(MDBX_cursor *mc, size_t *count, MDBX_val *pairs, mp->mp_pgno, mp->mp_flags); return MDBX_CORRUPTED; } - const unsigned nkeys = page_numkeys(mp); - unsigned i = mc->mc_ki[mc->mc_top], n = 0; + const size_t nkeys = page_numkeys(mp); + size_t i = mc->mc_ki[mc->mc_top], n = 0; if (unlikely(i >= nkeys)) { cASSERT(mc, op == MDBX_GET_CURRENT); cASSERT(mc, mdbx_cursor_on_last(mc) == MDBX_RESULT_TRUE); @@ -15888,7 +15884,7 @@ __hot int mdbx_cursor_put(MDBX_cursor *mc, const MDBX_val *key, MDBX_val *data, /* there's only a key anyway, so this is a no-op */ if (IS_LEAF2(mc->mc_pg[mc->mc_top])) { char *ptr; - unsigned ksize = mc->mc_db->md_xsize; + size_t ksize = mc->mc_db->md_xsize; if (unlikely(key->iov_len != ksize)) return MDBX_BAD_VALSIZE; ptr = page_leaf2key(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top], ksize); @@ -15897,7 +15893,7 @@ 
__hot int mdbx_cursor_put(MDBX_cursor *mc, const MDBX_val *key, MDBX_val *data, /* if overwriting slot 0 of leaf, need to * update branch key if there is a parent page */ if (mc->mc_top && !mc->mc_ki[mc->mc_top]) { - unsigned dtop = 1; + size_t dtop = 1; mc->mc_top--; /* slot 0 is always an empty key, find real slot */ while (mc->mc_top && !mc->mc_ki[mc->mc_top]) { @@ -16012,7 +16008,7 @@ __hot int mdbx_cursor_put(MDBX_cursor *mc, const MDBX_val *key, MDBX_val *data, * fp: old sub-page or a header faking it. * mp: new (sub-)page. offset: growth in page size. * xdata: node data with new page or DB. */ - unsigned i; + size_t i; size_t offset = 0; MDBX_page *mp = fp = xdata.iov_base = env->me_pbuf; mp->mp_pgno = mc->mc_pg[mc->mc_top]->mp_pgno; @@ -16224,7 +16220,7 @@ new_sub:; if (likely(rc == 0)) { /* Adjust other cursors pointing to mp */ const MDBX_dbi dbi = mc->mc_dbi; - const unsigned i = mc->mc_top; + const size_t i = mc->mc_top; MDBX_page *const mp = mc->mc_pg[i]; for (MDBX_cursor *m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2 = m2->mc_next) { @@ -16279,9 +16275,9 @@ new_sub:; /* Adjust other cursors pointing to mp */ MDBX_cursor *m2; MDBX_xcursor *mx = mc->mc_xcursor; - unsigned i = mc->mc_top; + size_t i = mc->mc_top; MDBX_page *mp = mc->mc_pg[i]; - const int nkeys = page_numkeys(mp); + const intptr_t nkeys = page_numkeys(mp); for (m2 = mc->mc_txn->mt_cursors[mc->mc_dbi]; m2; m2 = m2->mc_next) { if (m2 == mc || m2->mc_snum < mc->mc_snum) @@ -16525,20 +16521,20 @@ static pgr_t page_new_large(MDBX_cursor *mc, const unsigned npages) { } __hot static int __must_check_result node_add_leaf2(MDBX_cursor *mc, - unsigned indx, + size_t indx, const MDBX_val *key) { MDBX_page *mp = mc->mc_pg[mc->mc_top]; DKBUF_DEBUG; - DEBUG("add to leaf2-%spage %" PRIaPGNO " index %i, " + DEBUG("add to leaf2-%spage %" PRIaPGNO " index %zi, " " key size %" PRIuPTR " [%s]", IS_SUBP(mp) ? "sub-" : "", mp->mp_pgno, indx, key ? 
key->iov_len : 0, DKEY_DEBUG(key)); cASSERT(mc, key); cASSERT(mc, PAGETYPE_COMPAT(mp) == (P_LEAF | P_LEAF2)); - const unsigned ksize = mc->mc_db->md_xsize; + const size_t ksize = mc->mc_db->md_xsize; cASSERT(mc, ksize == key->iov_len); - const unsigned nkeys = page_numkeys(mp); + const size_t nkeys = page_numkeys(mp); /* Just using these for counting */ const intptr_t lower = mp->mp_lower + sizeof(indx_t); @@ -16552,7 +16548,7 @@ __hot static int __must_check_result node_add_leaf2(MDBX_cursor *mc, char *const ptr = page_leaf2key(mp, indx, ksize); cASSERT(mc, nkeys >= indx); - const unsigned diff = nkeys - indx; + const size_t diff = nkeys - indx; if (likely(diff > 0)) /* Move higher keys up one slot. */ memmove(ptr + ksize, ptr, diff * ksize); @@ -16561,12 +16557,12 @@ __hot static int __must_check_result node_add_leaf2(MDBX_cursor *mc, return MDBX_SUCCESS; } -static int __must_check_result node_add_branch(MDBX_cursor *mc, unsigned indx, +static int __must_check_result node_add_branch(MDBX_cursor *mc, size_t indx, const MDBX_val *key, pgno_t pgno) { MDBX_page *mp = mc->mc_pg[mc->mc_top]; DKBUF_DEBUG; - DEBUG("add to branch-%spage %" PRIaPGNO " index %i, node-pgno %" PRIaPGNO + DEBUG("add to branch-%spage %" PRIaPGNO " index %zi, node-pgno %" PRIaPGNO " key size %" PRIuPTR " [%s]", IS_SUBP(mp) ? "sub-" : "", mp->mp_pgno, indx, pgno, key ? key->iov_len : 0, DKEY_DEBUG(key)); @@ -16575,9 +16571,9 @@ static int __must_check_result node_add_branch(MDBX_cursor *mc, unsigned indx, STATIC_ASSERT(NODESIZE % 2 == 0); /* Move higher pointers up one slot. */ - const unsigned nkeys = page_numkeys(mp); + const size_t nkeys = page_numkeys(mp); cASSERT(mc, nkeys >= indx); - for (unsigned i = nkeys; i > indx; --i) + for (size_t i = nkeys; i > indx; --i) mp->mp_ptrs[i] = mp->mp_ptrs[i - 1]; /* Adjust free space offsets. 
*/ @@ -16604,14 +16600,13 @@ static int __must_check_result node_add_branch(MDBX_cursor *mc, unsigned indx, return MDBX_SUCCESS; } -__hot static int __must_check_result node_add_leaf(MDBX_cursor *mc, - unsigned indx, +__hot static int __must_check_result node_add_leaf(MDBX_cursor *mc, size_t indx, const MDBX_val *key, MDBX_val *data, unsigned flags) { MDBX_page *mp = mc->mc_pg[mc->mc_top]; DKBUF_DEBUG; - DEBUG("add to leaf-%spage %" PRIaPGNO " index %i, data size %" PRIuPTR + DEBUG("add to leaf-%spage %" PRIaPGNO " index %zi, data size %" PRIuPTR " key size %" PRIuPTR " [%s]", IS_SUBP(mp) ? "sub-" : "", mp->mp_pgno, indx, data ? data->iov_len : 0, key ? key->iov_len : 0, DKEY_DEBUG(key)); @@ -16656,9 +16651,9 @@ __hot static int __must_check_result node_add_leaf(MDBX_cursor *mc, cASSERT(mc, node_bytes == leaf_size(mc->mc_txn->mt_env, key, data)); /* Move higher pointers up one slot. */ - const unsigned nkeys = page_numkeys(mp); + const size_t nkeys = page_numkeys(mp); cASSERT(mc, nkeys >= indx); - for (unsigned i = nkeys; i > indx; --i) + for (size_t i = nkeys; i > indx; --i) mp->mp_ptrs[i] = mp->mp_ptrs[i - 1]; /* Adjust free space offsets. */ @@ -16703,16 +16698,16 @@ __hot static int __must_check_result node_add_leaf(MDBX_cursor *mc, * part of a MDBX_DUPFIXED database. */ __hot static void node_del(MDBX_cursor *mc, size_t ksize) { MDBX_page *mp = mc->mc_pg[mc->mc_top]; - const unsigned hole = mc->mc_ki[mc->mc_top]; - const unsigned nkeys = page_numkeys(mp); + const size_t hole = mc->mc_ki[mc->mc_top]; + const size_t nkeys = page_numkeys(mp); - DEBUG("delete node %u on %s page %" PRIaPGNO, hole, + DEBUG("delete node %zu on %s page %" PRIaPGNO, hole, IS_LEAF(mp) ? 
"leaf" : "branch", mp->mp_pgno); cASSERT(mc, hole < nkeys); if (IS_LEAF2(mp)) { cASSERT(mc, ksize >= sizeof(indx_t)); - unsigned diff = nkeys - 1 - hole; + size_t diff = nkeys - 1 - hole; char *base = page_leaf2key(mp, hole, ksize); if (diff) memmove(base, base + ksize, diff * ksize); @@ -16732,7 +16727,7 @@ __hot static void node_del(MDBX_cursor *mc, size_t ksize) { hole_size = EVEN(hole_size); const indx_t hole_offset = mp->mp_ptrs[hole]; - unsigned r, w; + size_t r, w; for (r = w = 0; r < nkeys; r++) if (r != hole) mp->mp_ptrs[w++] = (mp->mp_ptrs[r] < hole_offset) @@ -16759,12 +16754,12 @@ __hot static void node_del(MDBX_cursor *mc, size_t ksize) { /* Compact the main page after deleting a node on a subpage. * [in] mp The main page to operate on. * [in] indx The index of the subpage on the main page. */ -static void node_shrink(MDBX_page *mp, unsigned indx) { +static void node_shrink(MDBX_page *mp, size_t indx) { MDBX_node *node; MDBX_page *sp, *xp; char *base; size_t nsize, delta, len, ptr; - int i; + intptr_t i; node = page_node(mp, indx); sp = (MDBX_page *)node_data(node); @@ -16975,13 +16970,13 @@ static int cursor_xinit2(MDBX_cursor *mc, MDBX_xcursor *src_mx, return MDBX_SUCCESS; } -static __inline int couple_init(MDBX_cursor_couple *couple, const MDBX_dbi dbi, +static __inline int couple_init(MDBX_cursor_couple *couple, const size_t dbi, MDBX_txn *const txn, MDBX_db *const db, MDBX_dbx *const dbx, uint8_t *const dbstate) { couple->outer.mc_signature = MDBX_MC_LIVE; couple->outer.mc_next = NULL; couple->outer.mc_backup = NULL; - couple->outer.mc_dbi = dbi; + couple->outer.mc_dbi = (MDBX_dbi)dbi; couple->outer.mc_txn = txn; couple->outer.mc_db = db; couple->outer.mc_dbx = dbx; @@ -17021,7 +17016,7 @@ static __inline int couple_init(MDBX_cursor_couple *couple, const MDBX_dbi dbi, } /* Initialize a cursor for a given transaction and database. 
*/ -static int cursor_init(MDBX_cursor *mc, MDBX_txn *txn, MDBX_dbi dbi) { +static int cursor_init(MDBX_cursor *mc, MDBX_txn *txn, size_t dbi) { STATIC_ASSERT(offsetof(MDBX_cursor_couple, outer) == 0); return couple_init(container_of(mc, MDBX_cursor_couple, outer), dbi, txn, &txn->mt_dbs[dbi], &txn->mt_dbxs[dbi], @@ -17179,7 +17174,7 @@ again: dest->mc_flags ^= (dest->mc_flags ^ src->mc_flags) & ~C_UNTRACK; dest->mc_top = src->mc_top; dest->mc_snum = src->mc_snum; - for (unsigned i = 0; i < src->mc_snum; ++i) { + for (size_t i = 0; i < src->mc_snum; ++i) { dest->mc_ki[i] = src->mc_ki[i]; dest->mc_pg[i] = src->mc_pg[i]; } @@ -17292,7 +17287,7 @@ static int update_key(MDBX_cursor *mc, const MDBX_val *key) { char *base; size_t len; ptrdiff_t delta, ksize, oksize; - int ptr, i, nkeys, indx; + intptr_t ptr, i, nkeys, indx; DKBUF_DEBUG; cASSERT(mc, cursor_is_tracked(mc)); @@ -17304,8 +17299,8 @@ static int update_key(MDBX_cursor *mc, const MDBX_val *key) { MDBX_val k2; k2.iov_base = node_key(node); k2.iov_len = node_ks(node); - DEBUG("update key %u (offset %u) [%s] to [%s] on page %" PRIaPGNO, indx, ptr, - DVAL_DEBUG(&k2), DKEY_DEBUG(key), mp->mp_pgno); + DEBUG("update key %zi (offset %zu) [%s] to [%s] on page %" PRIaPGNO, indx, + ptr, DVAL_DEBUG(&k2), DKEY_DEBUG(key), mp->mp_pgno); #endif /* MDBX_DEBUG */ /* Sizes must be 2-byte aligned. 
*/ @@ -17379,7 +17374,7 @@ static int node_move(MDBX_cursor *csrc, MDBX_cursor *cdst, bool fromleft) { key4move.iov_base = node_key(srcnode); if (csrc->mc_ki[csrc->mc_top] == 0) { - const unsigned snum = csrc->mc_snum; + const size_t snum = csrc->mc_snum; cASSERT(csrc, snum > 0); /* must find the lowest key below src */ rc = page_search_lowest(csrc); @@ -17411,7 +17406,7 @@ static int node_move(MDBX_cursor *csrc, MDBX_cursor *cdst, bool fromleft) { } if (cdst->mc_ki[cdst->mc_top] == 0) { - const unsigned snum = cdst->mc_snum; + const size_t snum = cdst->mc_snum; cASSERT(csrc, snum > 0); MDBX_cursor mn; cursor_copy(cdst, &mn); @@ -17682,11 +17677,11 @@ static int page_merge(MDBX_cursor *csrc, MDBX_cursor *cdst) { const int pagetype = PAGETYPE_WHOLE(psrc); /* Move all nodes from src to dst */ - const unsigned dst_nkeys = page_numkeys(pdst); - const unsigned src_nkeys = page_numkeys(psrc); + const size_t dst_nkeys = page_numkeys(pdst); + const size_t src_nkeys = page_numkeys(psrc); cASSERT(cdst, dst_nkeys + src_nkeys >= (IS_LEAF(psrc) ? 1u : 2u)); if (likely(src_nkeys)) { - unsigned j = dst_nkeys; + size_t j = dst_nkeys; if (unlikely(pagetype & P_LEAF2)) { /* Mark dst as dirty. 
*/ if (unlikely(rc = page_touch(cdst))) @@ -17694,7 +17689,7 @@ static int page_merge(MDBX_cursor *csrc, MDBX_cursor *cdst) { key.iov_len = csrc->mc_db->md_xsize; key.iov_base = page_data(psrc); - unsigned i = 0; + size_t i = 0; do { rc = node_add_leaf2(cdst, j++, &key); if (unlikely(rc != MDBX_SUCCESS)) @@ -17738,7 +17733,7 @@ static int page_merge(MDBX_cursor *csrc, MDBX_cursor *cdst) { if (unlikely(rc = page_touch(cdst))) return rc; - unsigned i = 0; + size_t i = 0; while (true) { if (pagetype & P_LEAF) { MDBX_val data; @@ -17761,7 +17756,7 @@ static int page_merge(MDBX_cursor *csrc, MDBX_cursor *cdst) { } pdst = cdst->mc_pg[cdst->mc_top]; - DEBUG("dst page %" PRIaPGNO " now has %u keys (%.1f%% filled)", + DEBUG("dst page %" PRIaPGNO " now has %zu keys (%.1f%% filled)", pdst->mp_pgno, page_numkeys(pdst), page_fill(cdst->mc_txn->mt_env, pdst)); @@ -17789,7 +17784,7 @@ static int page_merge(MDBX_cursor *csrc, MDBX_cursor *cdst) { /* Adjust other cursors pointing to mp */ MDBX_cursor *m2, *m3; const MDBX_dbi dbi = csrc->mc_dbi; - const unsigned top = csrc->mc_top; + const size_t top = csrc->mc_top; for (m2 = csrc->mc_txn->mt_cursors[dbi]; m2; m2 = m2->mc_next) { m3 = (csrc->mc_flags & C_SUB) ? &m2->mc_xcursor->mx_cursor : m2; @@ -17914,7 +17909,7 @@ static void cursor_restore(const MDBX_cursor *csrc, MDBX_cursor *cdst) { cdst->mc_flags = csrc->mc_flags; cdst->mc_checking = csrc->mc_checking; - for (unsigned i = 0; i < csrc->mc_snum; i++) { + for (size_t i = 0; i < csrc->mc_snum; i++) { cdst->mc_pg[i] = csrc->mc_pg[i]; cdst->mc_ki[i] = csrc->mc_ki[i]; } @@ -17948,30 +17943,30 @@ static int rebalance(MDBX_cursor *mc) { const int pagetype = PAGETYPE_WHOLE(mc->mc_pg[mc->mc_top]); STATIC_ASSERT(P_BRANCH == 1); - const unsigned minkeys = (pagetype & P_BRANCH) + 1; + const size_t minkeys = (pagetype & P_BRANCH) + 1; /* Pages emptier than this are candidates for merging. */ - unsigned room_threshold = likely(mc->mc_dbi != FREE_DBI) - ? 
mc->mc_txn->mt_env->me_merge_threshold - : mc->mc_txn->mt_env->me_merge_threshold_gc; + size_t room_threshold = likely(mc->mc_dbi != FREE_DBI) + ? mc->mc_txn->mt_env->me_merge_threshold + : mc->mc_txn->mt_env->me_merge_threshold_gc; const MDBX_page *const tp = mc->mc_pg[mc->mc_top]; - const unsigned numkeys = page_numkeys(tp); - const unsigned room = page_room(tp); + const size_t numkeys = page_numkeys(tp); + const size_t room = page_room(tp); DEBUG("rebalancing %s page %" PRIaPGNO - " (has %u keys, full %.1f%%, used %u, room %u bytes )", + " (has %zu keys, full %.1f%%, used %zu, room %zu bytes )", (pagetype & P_LEAF) ? "leaf" : "branch", tp->mp_pgno, numkeys, page_fill(mc->mc_txn->mt_env, tp), page_used(mc->mc_txn->mt_env, tp), room); if (unlikely(numkeys < minkeys)) { - DEBUG("page %" PRIaPGNO " must be merged due keys < %u threshold", + DEBUG("page %" PRIaPGNO " must be merged due keys < %zu threshold", tp->mp_pgno, minkeys); } else if (unlikely(room > room_threshold)) { - DEBUG("page %" PRIaPGNO " should be merged due room %u > %u threshold", + DEBUG("page %" PRIaPGNO " should be merged due room %zu > %zu threshold", tp->mp_pgno, room, room_threshold); } else { - DEBUG("no need to rebalance page %" PRIaPGNO ", room %u < %u threshold", + DEBUG("no need to rebalance page %" PRIaPGNO ", room %zu < %zu threshold", tp->mp_pgno, room, room_threshold); cASSERT(mc, mc->mc_db->md_entries > 0); return MDBX_SUCCESS; @@ -17980,7 +17975,7 @@ static int rebalance(MDBX_cursor *mc) { int rc; if (mc->mc_snum < 2) { MDBX_page *const mp = mc->mc_pg[0]; - const unsigned nkeys = page_numkeys(mp); + const size_t nkeys = page_numkeys(mp); cASSERT(mc, (mc->mc_db->md_entries == 0) == (nkeys == 0)); if (IS_SUBP(mp)) { DEBUG("%s", "Can't rebalance a subpage, ignoring"); @@ -18062,7 +18057,7 @@ static int rebalance(MDBX_cursor *mc) { /* The parent (branch page) must have at least 2 pointers, * otherwise the tree is invalid. 
*/ - const unsigned pre_top = mc->mc_top - 1; + const size_t pre_top = mc->mc_top - 1; cASSERT(mc, IS_BRANCH(mc->mc_pg[pre_top])); cASSERT(mc, !IS_SUBP(mc->mc_pg[0])); cASSERT(mc, page_numkeys(mc->mc_pg[pre_top]) > 1); @@ -18094,14 +18089,14 @@ static int rebalance(MDBX_cursor *mc) { } cASSERT(mc, left || right); - const unsigned ki_top = mc->mc_ki[mc->mc_top]; - const unsigned ki_pre_top = mn.mc_ki[pre_top]; - const unsigned nkeys = page_numkeys(mn.mc_pg[mn.mc_top]); + const size_t ki_top = mc->mc_ki[mc->mc_top]; + const size_t ki_pre_top = mn.mc_ki[pre_top]; + const size_t nkeys = page_numkeys(mn.mc_pg[mn.mc_top]); - const unsigned left_room = left ? page_room(left) : 0; - const unsigned right_room = right ? page_room(right) : 0; - const unsigned left_nkeys = left ? page_numkeys(left) : 0; - const unsigned right_nkeys = right ? page_numkeys(right) : 0; + const size_t left_room = left ? page_room(left) : 0; + const size_t right_room = right ? page_room(right) : 0; + const size_t left_nkeys = left ? page_numkeys(left) : 0; + const size_t right_nkeys = right ? page_numkeys(right) : 0; retry: if (left_room > room_threshold && left_room >= right_room) { /* try merge with left */ @@ -18110,7 +18105,7 @@ retry: mn.mc_ki[mn.mc_top - 1] = (indx_t)(ki_pre_top - 1); mn.mc_ki[mn.mc_top] = (indx_t)(left_nkeys - 1); mc->mc_ki[mc->mc_top] = 0; - const unsigned new_ki = ki_top + left_nkeys; + const size_t new_ki = ki_top + left_nkeys; mn.mc_ki[mn.mc_top] += mc->mc_ki[mn.mc_top] + 1; /* We want rebalance to find mn when doing fixups */ WITH_CURSOR_TRACKING(mn, rc = page_merge(mc, &mn)); @@ -18176,7 +18171,7 @@ retry: goto retry; } ERROR("Unable to merge/rebalance %s page %" PRIaPGNO - " (has %u keys, full %.1f%%, used %u, room %u bytes )", + " (has %zu keys, full %.1f%%, used %zu, room %zu bytes )", (pagetype & P_LEAF) ? 
"leaf" : "branch", tp->mp_pgno, numkeys, page_fill(mc->mc_txn->mt_env, tp), page_used(mc->mc_txn->mt_env, tp), room); @@ -18259,24 +18254,24 @@ __cold static int page_check(MDBX_cursor *const mc, const MDBX_page *const mp) { if (unlikely(mp->mp_upper < mp->mp_lower || ((mp->mp_lower | mp->mp_upper) & 1) || PAGEHDRSZ + mp->mp_upper > env->me_psize)) - rc = bad_page(mp, "invalid page lower(%u)/upper(%u) with limit %u\n", + rc = bad_page(mp, "invalid page lower(%u)/upper(%u) with limit %zu\n", mp->mp_lower, mp->mp_upper, page_space(env)); char *const end_of_page = (char *)mp + env->me_psize; - const unsigned nkeys = page_numkeys(mp); + const size_t nkeys = page_numkeys(mp); STATIC_ASSERT(P_BRANCH == 1); if (unlikely(nkeys <= (uint8_t)(mp->mp_flags & P_BRANCH))) { if ((!(mc->mc_flags & C_SUB) || mc->mc_db->md_entries) && (!(mc->mc_checking & CC_UPDATING) || !(IS_MODIFIABLE(mc->mc_txn, mp) || (mp->mp_flags & P_SUBP)))) rc = - bad_page(mp, "%s-page nkeys (%u) < %u\n", + bad_page(mp, "%s-page nkeys (%zu) < %u\n", IS_BRANCH(mp) ? 
"branch" : "leaf", nkeys, 1 + IS_BRANCH(mp)); } if (!IS_LEAF2(mp) && unlikely(PAGEHDRSZ + mp->mp_upper + nkeys * sizeof(MDBX_node) + nkeys - 1 > env->me_psize)) - rc = bad_page(mp, "invalid page upper (%u) for nkeys %u with limit %u\n", + rc = bad_page(mp, "invalid page upper (%u) for nkeys %zu with limit %zu\n", mp->mp_upper, nkeys, page_space(env)); const size_t ksize_max = keysize_max(env->me_psize, 0); @@ -18291,7 +18286,7 @@ __cold static int page_check(MDBX_cursor *const mc, const MDBX_page *const mp) { } MDBX_val here, prev = {0, 0}; - for (unsigned i = 0; i < nkeys; ++i) { + for (size_t i = 0; i < nkeys; ++i) { if (IS_LEAF2(mp)) { char *const key = page_leaf2key(mp, i, leaf2_ksize); if (unlikely(end_of_page < key + leaf2_ksize)) { @@ -18313,7 +18308,7 @@ __cold static int page_check(MDBX_cursor *const mc, const MDBX_page *const mp) { here.iov_len = leaf2_ksize; here.iov_base = key; if (prev.iov_base && unlikely(mc->mc_dbx->md_cmp(&prev, &here) >= 0)) - rc = bad_page(mp, "leaf2-key #%u wrong order (%s >= %s)\n", i, + rc = bad_page(mp, "leaf2-key #%zu wrong order (%s >= %s)\n", i, DKEY(&prev), DVAL(&here)); prev = here; } @@ -18321,16 +18316,16 @@ __cold static int page_check(MDBX_cursor *const mc, const MDBX_page *const mp) { const MDBX_node *const node = page_node(mp, i); const char *node_end = (char *)node + NODESIZE; if (unlikely(node_end > end_of_page)) { - rc = bad_page(mp, "node[%u] (%zu) beyond page-end\n", i, + rc = bad_page(mp, "node[%zu] (%zu) beyond page-end\n", i, node_end - end_of_page); continue; } const size_t ksize = node_ks(node); if (unlikely(ksize > ksize_max)) - rc = bad_page(mp, "node[%u] too long key (%zu)\n", i, ksize); + rc = bad_page(mp, "node[%zu] too long key (%zu)\n", i, ksize); char *key = node_key(node); if (unlikely(end_of_page < key + ksize)) { - rc = bad_page(mp, "node[%u] key (%zu) beyond page-end\n", i, + rc = bad_page(mp, "node[%zu] key (%zu) beyond page-end\n", i, key + ksize - end_of_page); continue; } @@ -18338,13 
+18333,13 @@ __cold static int page_check(MDBX_cursor *const mc, const MDBX_page *const mp) { if (unlikely(ksize < mc->mc_dbx->md_klen_min || ksize > mc->mc_dbx->md_klen_max)) rc = bad_page( - mp, "node[%u] key size (%zu) <> min/max key-length (%zu/%zu)\n", + mp, "node[%zu] key size (%zu) <> min/max key-length (%zu/%zu)\n", i, ksize, mc->mc_dbx->md_klen_min, mc->mc_dbx->md_klen_max); if ((mc->mc_checking & CC_SKIPORD) == 0) { here.iov_base = key; here.iov_len = ksize; if (prev.iov_base && unlikely(mc->mc_dbx->md_cmp(&prev, &here) >= 0)) - rc = bad_page(mp, "node[%u] key wrong order (%s >= %s)\n", i, + rc = bad_page(mp, "node[%zu] key wrong order (%s >= %s)\n", i, DKEY(&prev), DVAL(&here)); prev = here; } @@ -18352,23 +18347,24 @@ __cold static int page_check(MDBX_cursor *const mc, const MDBX_page *const mp) { if (IS_BRANCH(mp)) { if ((mc->mc_checking & CC_UPDATING) == 0 && i == 0 && unlikely(ksize != 0)) - rc = bad_page(mp, "branch-node[%u] wrong 0-node key-length (%zu)\n", + rc = bad_page(mp, "branch-node[%zu] wrong 0-node key-length (%zu)\n", i, ksize); const pgno_t ref = node_pgno(node); if (unlikely(ref < MIN_PAGENO) || (unlikely(ref >= mc->mc_txn->mt_next_pgno) && (unlikely(ref >= mc->mc_txn->mt_geo.now) || !(mc->mc_checking & CC_RETIRING)))) - rc = bad_page(mp, "branch-node[%u] wrong pgno (%u)\n", i, ref); + rc = bad_page(mp, "branch-node[%zu] wrong pgno (%u)\n", i, ref); if (unlikely(node_flags(node))) - rc = bad_page(mp, "branch-node[%u] wrong flags (%u)\n", i, + rc = bad_page(mp, "branch-node[%zu] wrong flags (%u)\n", i, node_flags(node)); continue; } switch (node_flags(node)) { default: - rc = bad_page(mp, "invalid node[%u] flags (%u)\n", i, node_flags(node)); + rc = + bad_page(mp, "invalid node[%zu] flags (%u)\n", i, node_flags(node)); break; case F_BIGDATA /* data on large-page */: case 0 /* usual */: @@ -18383,7 +18379,7 @@ __cold static int page_check(MDBX_cursor *const mc, const MDBX_page *const mp) { if (node_flags(node) & F_BIGDATA) { if 
(unlikely(end_of_page < data + sizeof(pgno_t))) { rc = bad_page( - mp, "node-%s(%u of %u, %zu bytes) beyond (%zu) page-end\n", + mp, "node-%s(%zu of %zu, %zu bytes) beyond (%zu) page-end\n", "bigdata-pgno", i, nkeys, dsize, data + dsize - end_of_page); continue; } @@ -18419,9 +18415,9 @@ __cold static int page_check(MDBX_cursor *const mc, const MDBX_page *const mp) { } if (unlikely(end_of_page < data + dsize)) { - rc = - bad_page(mp, "node-%s(%u of %u, %zu bytes) beyond (%zu) page-end\n", - "data", i, nkeys, dsize, data + dsize - end_of_page); + rc = bad_page(mp, + "node-%s(%zu of %zu, %zu bytes) beyond (%zu) page-end\n", + "data", i, nkeys, dsize, data + dsize - end_of_page); continue; } @@ -18469,7 +18465,7 @@ __cold static int page_check(MDBX_cursor *const mc, const MDBX_page *const mp) { } const char *const end_of_subpage = data + dsize; - const int nsubkeys = page_numkeys(sp); + const intptr_t nsubkeys = page_numkeys(sp); if (unlikely(nsubkeys == 0) && !(mc->mc_checking & CC_UPDATING) && mc->mc_db->md_entries) rc = bad_page(mp, "no keys on a %s-page\n", @@ -18577,7 +18573,7 @@ __cold static int cursor_check(MDBX_cursor *mc) { for (int n = 0; n < (int)mc->mc_snum; ++n) { MDBX_page *mp = mc->mc_pg[n]; - const unsigned nkeys = page_numkeys(mp); + const size_t nkeys = page_numkeys(mp); const bool expect_branch = (n < mc->mc_db->md_depth - 1) ? true : false; const bool expect_nested_leaf = (n + 1 == mc->mc_db->md_depth - 1) ? 
true : false; @@ -18602,7 +18598,7 @@ __cold static int cursor_check(MDBX_cursor *mc) { if (unlikely(err != MDBX_SUCCESS)) return err; - for (unsigned i = 0; i < nkeys; ++i) { + for (size_t i = 0; i < nkeys; ++i) { if (branch) { MDBX_node *node = page_node(mp, i); cASSERT(mc, node_flags(node) == 0); @@ -18640,7 +18636,7 @@ static int cursor_del(MDBX_cursor *mc) { int rc; MDBX_page *mp; indx_t ki; - unsigned nkeys; + size_t nkeys; MDBX_dbi dbi = mc->mc_dbi; cASSERT(mc, cursor_is_tracked(mc)); @@ -18826,25 +18822,25 @@ static int page_split(MDBX_cursor *mc, const MDBX_val *const newkey, const unsigned naf) { unsigned flags; int rc = MDBX_SUCCESS, foliage = 0; - unsigned i, ptop; + size_t i, ptop; MDBX_env *const env = mc->mc_txn->mt_env; MDBX_val sepkey, rkey, xdata; MDBX_page *tmp_ki_copy = NULL; DKBUF; MDBX_page *const mp = mc->mc_pg[mc->mc_top]; - const unsigned newindx = mc->mc_ki[mc->mc_top]; - unsigned nkeys = page_numkeys(mp); + const size_t newindx = mc->mc_ki[mc->mc_top]; + size_t nkeys = page_numkeys(mp); if (AUDIT_ENABLED()) { rc = cursor_check_updating(mc); if (unlikely(rc != MDBX_SUCCESS)) return rc; } STATIC_ASSERT(P_BRANCH == 1); - const unsigned minkeys = (mp->mp_flags & P_BRANCH) + 1; + const size_t minkeys = (mp->mp_flags & P_BRANCH) + 1; DEBUG(">> splitting %s-page %" PRIaPGNO - " and adding %zu+%zu [%s] at %i, nkeys %i", + " and adding %zu+%zu [%s] at %i, nkeys %zi", IS_LEAF(mp) ? "leaf" : "branch", mp->mp_pgno, newkey->iov_len, newdata ? newdata->iov_len : 0, DKEY_DEBUG(newkey), mc->mc_ki[mc->mc_top], nkeys); @@ -18913,7 +18909,7 @@ static int page_split(MDBX_cursor *mc, const MDBX_val *const newkey, mn.mc_ki[mn.mc_top] = 0; mn.mc_ki[ptop] = mc->mc_ki[ptop] + 1; - unsigned split_indx = + size_t split_indx = (newindx < nkeys) ? /* split at the middle */ (nkeys + 1) >> 1 : /* split at the end (i.e. 
like append-mode ) */ nkeys - minkeys + 1; @@ -18965,9 +18961,9 @@ static int page_split(MDBX_cursor *mc, const MDBX_val *const newkey, } else { if (IS_LEAF2(sister)) { char *split, *ins; - unsigned lsize, rsize, ksize; + size_t lsize, rsize, ksize; /* Move half of the keys to the right sibling */ - const int distance = mc->mc_ki[mc->mc_top] - split_indx; + const intptr_t distance = mc->mc_ki[mc->mc_top] - split_indx; ksize = mc->mc_db->md_xsize; split = page_leaf2key(mp, split_indx, ksize); rsize = (nkeys - split_indx) * ksize; @@ -19022,7 +19018,7 @@ static int page_split(MDBX_cursor *mc, const MDBX_val *const newkey, goto done; } - const unsigned max_space = page_space(env); + const size_t max_space = page_space(env); const size_t new_size = IS_LEAF(mp) ? leaf_size(env, newkey, newdata) : branch_size(env, newkey); @@ -19058,18 +19054,18 @@ static int page_split(MDBX_cursor *mc, const MDBX_val *const newkey, split_indx += mp->mp_flags & P_BRANCH; } eASSERT(env, split_indx >= minkeys && split_indx <= nkeys + 1 - minkeys); - const unsigned dim_nodes = + const size_t dim_nodes = (newindx >= split_indx) ? split_indx : nkeys - split_indx; - const unsigned dim_used = (sizeof(indx_t) + NODESIZE + 1) * dim_nodes; + const size_t dim_used = (sizeof(indx_t) + NODESIZE + 1) * dim_nodes; if (new_size >= dim_used) { /* Search for best acceptable split point */ i = (newindx < split_indx) ? 0 : nkeys; - int dir = (newindx < split_indx) ? 1 : -1; + intptr_t dir = (newindx < split_indx) ? 
1 : -1; size_t before = 0, after = new_size + page_used(env, mp); - unsigned best_split = split_indx; - unsigned best_shift = INT_MAX; + size_t best_split = split_indx; + size_t best_shift = INT_MAX; - TRACE("seek separator from %u, step %i, default %u, new-idx %u, " + TRACE("seek separator from %zu, step %zi, default %zu, new-idx %zu, " "new-size %zu", i, dir, split_indx, newindx, new_size); do { @@ -19087,13 +19083,13 @@ static int page_split(MDBX_cursor *mc, const MDBX_val *const newkey, before += size; after -= size; - TRACE("step %u, size %zu, before %zu, after %zu, max %u", i, size, + TRACE("step %zu, size %zu, before %zu, after %zu, max %zu", i, size, before, after, max_space); if (before <= max_space && after <= max_space) { - const unsigned split = i + (dir > 0); + const size_t split = i + (dir > 0); if (split >= minkeys && split <= nkeys + 1 - minkeys) { - const unsigned shift = branchless_abs(split_indx - split); + const size_t shift = branchless_abs(split_indx - split); if (shift >= best_shift) break; best_shift = shift; @@ -19106,7 +19102,7 @@ static int page_split(MDBX_cursor *mc, const MDBX_val *const newkey, } while (i < nkeys); split_indx = best_split; - TRACE("chosen %u", split_indx); + TRACE("chosen %zu", split_indx); } eASSERT(env, split_indx >= minkeys && split_indx <= nkeys + 1 - minkeys); @@ -19120,7 +19116,7 @@ static int page_split(MDBX_cursor *mc, const MDBX_val *const newkey, } } } - DEBUG("separator is %d [%s]", split_indx, DKEY_DEBUG(&sepkey)); + DEBUG("separator is %zd [%s]", split_indx, DKEY_DEBUG(&sepkey)); bool did_split_parent = false; /* Copy separator key to the parent. 
*/ @@ -19256,9 +19252,9 @@ static int page_split(MDBX_cursor *mc, const MDBX_val *const newkey, /* Move nodes */ mc->mc_pg[mc->mc_top] = sister; i = split_indx; - unsigned n = 0; + size_t n = 0; do { - TRACE("i %u, nkeys %u => n %u, rp #%u", i, nkeys, n, sister->mp_pgno); + TRACE("i %zu, nkeys %zu => n %zu, rp #%u", i, nkeys, n, sister->mp_pgno); pgno_t pgno = 0; MDBX_val *rdata = NULL; if (i == newindx) { @@ -19315,7 +19311,7 @@ static int page_split(MDBX_cursor *mc, const MDBX_val *const newkey, } } while (i != split_indx); - TRACE("i %u, nkeys %u, n %u, pgno #%u", i, nkeys, n, + TRACE("i %zu, nkeys %zu, n %zu, pgno #%u", i, nkeys, n, mc->mc_pg[mc->mc_top]->mp_pgno); nkeys = page_numkeys(tmp_ki_copy); @@ -19398,7 +19394,7 @@ static int page_split(MDBX_cursor *mc, const MDBX_val *const newkey, if (XCURSOR_INITED(m3) && IS_LEAF(mp)) XCURSOR_REFRESH(m3, m3->mc_pg[mc->mc_top], m3->mc_ki[mc->mc_top]); } - TRACE("mp #%u left: %d, sister #%u left: %d", mp->mp_pgno, page_room(mp), + TRACE("mp #%u left: %zd, sister #%u left: %zd", mp->mp_pgno, page_room(mp), sister->mp_pgno, page_room(sister)); done: @@ -19566,7 +19562,7 @@ static int compacting_put_bytes(mdbx_compacting_ctx *ctx, const void *src, size_t bytes, pgno_t pgno, pgno_t npages) { assert(pgno == 0 || bytes > PAGEHDRSZ); while (bytes > 0) { - const unsigned side = ctx->mc_head & 1; + const size_t side = ctx->mc_head & 1; const size_t left = (size_t)MDBX_ENVCOPY_WRITEBUF - ctx->mc_wlen[side]; if (left < (pgno ? 
PAGEHDRSZ : 1)) { int err = compacting_toggle_write_buffers(ctx); @@ -19645,7 +19641,7 @@ __cold static int compacting_walk_tree(mdbx_compacting_ctx *ctx, return MDBX_ENOMEM; char *ptr = buf; - for (unsigned i = 0; i < mc->mc_top; i++) { + for (size_t i = 0; i < mc->mc_top; i++) { page_copy((MDBX_page *)ptr, mc->mc_pg[i], ctx->mc_env->me_psize); mc->mc_pg[i] = (MDBX_page *)ptr; ptr += ctx->mc_env->me_psize; @@ -19655,12 +19651,12 @@ __cold static int compacting_walk_tree(mdbx_compacting_ctx *ctx, while (mc->mc_snum > 0) { MDBX_page *mp = mc->mc_pg[mc->mc_top]; - unsigned n = page_numkeys(mp); + size_t n = page_numkeys(mp); if (IS_LEAF(mp)) { if (!(mc->mc_flags & C_SUB) /* may have nested F_SUBDATA or F_BIGDATA nodes */) { - for (unsigned i = 0; i < n; i++) { + for (size_t i = 0; i < n; i++) { MDBX_node *node = page_node(mp, i); if (node_flags(node) == F_BIGDATA) { /* Need writable leaf */ @@ -19884,7 +19880,7 @@ __cold static int env_compact(MDBX_env *env, MDBX_txn *read_txn, data.iov_len < MDBX_PNL_SIZEOF(pnl) || !(pnl_check(pnl, read_txn->mt_next_pgno)))) return MDBX_CORRUPTED; - gc += MDBX_PNL_SIZE(pnl); + gc += MDBX_PNL_GETSIZE(pnl); } if (unlikely(rc != MDBX_NOTFOUND)) return rc; @@ -20423,7 +20419,7 @@ __cold static int stat_acc(const MDBX_txn *txn, MDBX_stat *st, size_t bytes) { err = page_search(&cx.outer, NULL, MDBX_PS_FIRST); while (err == MDBX_SUCCESS) { const MDBX_page *mp = cx.outer.mc_pg[cx.outer.mc_top]; - for (unsigned i = 0; i < page_numkeys(mp); i++) { + for (size_t i = 0; i < page_numkeys(mp); i++) { const MDBX_node *node = page_node(mp, i); if (node_flags(node) != F_SUBDATA) continue; @@ -20690,7 +20686,7 @@ __cold static int fetch_envinfo_ex(const MDBX_env *env, const MDBX_txn *txn, arg->mi_self_latter_reader_txnid = arg->mi_latter_reader_txnid = arg->mi_recent_txnid; if (env->me_lck_mmap.lck) { - for (unsigned i = 0; i < arg->mi_numreaders; ++i) { + for (size_t i = 0; i < arg->mi_numreaders; ++i) { const uint32_t pid = 
atomic_load32(&lck->mti_readers[i].mr_pid, mo_AcquireRelease); if (pid) { @@ -21090,11 +21086,11 @@ static int dbi_close_locked(MDBX_env *env, MDBX_dbi dbi) { osal_free(ptr); if (env->me_numdbs == dbi + 1) { - unsigned i = env->me_numdbs; + size_t i = env->me_numdbs; do --i; while (i > CORE_DBS && !env->me_dbxs[i - 1].md_name.iov_base); - env->me_numdbs = i; + env->me_numdbs = (MDBX_dbi)i; } return MDBX_SUCCESS; @@ -21165,10 +21161,10 @@ static int drop_tree(MDBX_cursor *mc, const bool may_have_subDBs) { cursor_copy(mc, &mx); while (mc->mc_snum > 0) { MDBX_page *const mp = mc->mc_pg[mc->mc_top]; - const unsigned nkeys = page_numkeys(mp); + const size_t nkeys = page_numkeys(mp); if (IS_LEAF(mp)) { cASSERT(mc, mc->mc_snum == mc->mc_db->md_depth); - for (unsigned i = 0; i < nkeys; i++) { + for (size_t i = 0; i < nkeys; i++) { MDBX_node *node = page_node(mp, i); if (node_flags(node) & F_BIGDATA) { rc = page_retire_ex(mc, node_largedata_pgno(node), nullptr, 0); @@ -21195,7 +21191,7 @@ static int drop_tree(MDBX_cursor *mc, const bool may_have_subDBs) { const unsigned pagetype = (IS_FROZEN(txn, mp) ? P_FROZEN : 0) + ((mc->mc_snum + 1 == mc->mc_db->md_depth) ? 
P_LEAF : P_BRANCH); - for (unsigned i = 0; i < nkeys; i++) { + for (size_t i = 0; i < nkeys; i++) { MDBX_node *node = page_node(mp, i); tASSERT(txn, (node_flags(node) & (F_BIGDATA | F_SUBDATA | F_DUPDATA)) == 0); @@ -21219,7 +21215,7 @@ static int drop_tree(MDBX_cursor *mc, const bool may_have_subDBs) { pop: cursor_pop(mc); mc->mc_ki[0] = 0; - for (unsigned i = 1; i < mc->mc_snum; i++) { + for (size_t i = 1; i < mc->mc_snum; i++) { mc->mc_ki[i] = 0; mc->mc_pg[i] = mx.mc_pg[i]; } @@ -21327,9 +21323,9 @@ __cold int mdbx_reader_list(const MDBX_env *env, MDBX_reader_list_func *func, int serial = 0; MDBX_lockinfo *const lck = env->me_lck_mmap.lck; if (likely(lck)) { - const unsigned snap_nreaders = + const size_t snap_nreaders = atomic_load32(&lck->mti_numreaders, mo_AcquireRelease); - for (unsigned i = 0; i < snap_nreaders; i++) { + for (size_t i = 0; i < snap_nreaders; i++) { const MDBX_reader *r = lck->mti_readers + i; retry_reader:; const uint32_t pid = atomic_load32(&r->mr_pid, mo_AcquireRelease); @@ -21377,8 +21373,8 @@ __cold int mdbx_reader_list(const MDBX_env *env, MDBX_reader_list_func *func, reader_pages_retired)) : 0; } - rc = func(ctx, ++serial, i, pid, (mdbx_tid_t)tid, txnid, lag, bytes_used, - bytes_retained); + rc = func(ctx, ++serial, (unsigned)i, pid, (mdbx_tid_t)tid, txnid, lag, + bytes_used, bytes_retained); if (unlikely(rc != MDBX_SUCCESS)) break; } @@ -21391,13 +21387,13 @@ __cold int mdbx_reader_list(const MDBX_env *env, MDBX_reader_list_func *func, * return -1 if already present. 
*/ __cold static bool pid_insert(uint32_t *ids, uint32_t pid) { /* binary search of pid in list */ - unsigned base = 0; - unsigned cursor = 1; + size_t base = 0; + size_t cursor = 1; int val = 0; - unsigned n = ids[0]; + size_t n = ids[0]; while (n > 0) { - unsigned pivot = n >> 1; + size_t pivot = n >> 1; cursor = base + pivot + 1; val = pid - ids[cursor]; @@ -21447,7 +21443,7 @@ __cold MDBX_INTERNAL_FUNC int cleanup_dead_readers(MDBX_env *env, return MDBX_SUCCESS; } - const unsigned snap_nreaders = + const size_t snap_nreaders = atomic_load32(&lck->mti_numreaders, mo_AcquireRelease); uint32_t pidsbuf_onstask[142]; uint32_t *const pids = @@ -21459,7 +21455,7 @@ __cold MDBX_INTERNAL_FUNC int cleanup_dead_readers(MDBX_env *env, pids[0] = 0; int count = 0; - for (unsigned i = 0; i < snap_nreaders; i++) { + for (size_t i = 0; i < snap_nreaders; i++) { const uint32_t pid = atomic_load32(&lck->mti_readers[i].mr_pid, mo_AcquireRelease); if (pid == 0) @@ -21508,7 +21504,7 @@ __cold MDBX_INTERNAL_FUNC int cleanup_dead_readers(MDBX_env *env, } /* clean it */ - for (unsigned j = i; j < snap_nreaders; j++) { + for (size_t j = i; j < snap_nreaders; j++) { if (lck->mti_readers[j].mr_pid.weak == pid) { DEBUG("clear stale reader pid %" PRIuPTR " txn %" PRIaTXN, (size_t)pid, lck->mti_readers[j].mr_txnid.weak); @@ -21584,7 +21580,7 @@ __cold static txnid_t kick_longlived_readers(MDBX_env *env, MDBX_reader *stucked = nullptr; uint64_t hold_retired = 0; - for (unsigned i = 0; i < lck->mti_numreaders.weak; ++i) { + for (size_t i = 0; i < lck->mti_numreaders.weak; ++i) { const uint64_t snap_retired = atomic_load64( &lck->mti_readers[i].mr_snapshot_pages_retired, mo_Relaxed); const txnid_t rtxn = safe64_read(&lck->mti_readers[i].mr_txnid); @@ -21741,7 +21737,7 @@ __cold static int walk_tree(mdbx_walk_ctx_t *ctx, const pgno_t pgno, int err = page_get(ctx->mw_cursor, pgno, &mp, parent_txnid); MDBX_page_type_t type = walk_page_type(mp); - const unsigned nentries = mp ? 
page_numkeys(mp) : 0; + const size_t nentries = mp ? page_numkeys(mp) : 0; unsigned npages = 1; size_t pagesize = pgno2bytes(ctx->mw_txn->mt_env, npages); size_t header_size = @@ -21751,7 +21747,7 @@ __cold static int walk_tree(mdbx_walk_ctx_t *ctx, const pgno_t pgno, (mp ? page_room(mp) : pagesize - header_size) - payload_size; size_t align_bytes = 0; - for (unsigned i = 0; err == MDBX_SUCCESS && i < nentries; + for (size_t i = 0; err == MDBX_SUCCESS && i < nentries; align_bytes += ((payload_size + align_bytes) & 1), ++i) { if (type == MDBX_page_dupfixed_leaf) { /* LEAF2 pages have no mp_ptrs[] or node headers */ @@ -21822,7 +21818,7 @@ __cold static int walk_tree(mdbx_walk_ctx_t *ctx, const pgno_t pgno, } MDBX_page *sp = node_data(node); - const unsigned nsubkeys = page_numkeys(sp); + const size_t nsubkeys = page_numkeys(sp); size_t subheader_size = IS_LEAF2(sp) ? PAGEHDRSZ : PAGEHDRSZ + sp->mp_lower; size_t subunused_size = page_room(sp); @@ -21843,7 +21839,7 @@ __cold static int walk_tree(mdbx_walk_ctx_t *ctx, const pgno_t pgno, err = MDBX_CORRUPTED; } - for (unsigned j = 0; err == MDBX_SUCCESS && j < nsubkeys; + for (size_t j = 0; err == MDBX_SUCCESS && j < nsubkeys; subalign_bytes += ((subpayload_size + subalign_bytes) & 1), ++j) { if (subtype == MDBX_subpage_dupfixed_leaf) { @@ -21884,7 +21880,7 @@ __cold static int walk_tree(mdbx_walk_ctx_t *ctx, const pgno_t pgno, if (unlikely(rc != MDBX_SUCCESS)) return (rc == MDBX_RESULT_TRUE) ? MDBX_SUCCESS : rc; - for (unsigned i = 0; err == MDBX_SUCCESS && i < nentries; ++i) { + for (size_t i = 0; err == MDBX_SUCCESS && i < nentries; ++i) { if (type == MDBX_page_dupfixed_leaf) continue; @@ -22050,7 +22046,7 @@ int mdbx_cursor_on_first(const MDBX_cursor *mc) { if (!(mc->mc_flags & C_INITIALIZED)) return mc->mc_db->md_entries ? 
MDBX_RESULT_FALSE : MDBX_RESULT_TRUE; - for (unsigned i = 0; i < mc->mc_snum; ++i) { + for (size_t i = 0; i < mc->mc_snum; ++i) { if (mc->mc_ki[i]) return MDBX_RESULT_FALSE; } @@ -22069,8 +22065,8 @@ int mdbx_cursor_on_last(const MDBX_cursor *mc) { if (!(mc->mc_flags & C_INITIALIZED)) return mc->mc_db->md_entries ? MDBX_RESULT_FALSE : MDBX_RESULT_TRUE; - for (unsigned i = 0; i < mc->mc_snum; ++i) { - unsigned nkeys = page_numkeys(mc->mc_pg[i]); + for (size_t i = 0; i < mc->mc_snum; ++i) { + size_t nkeys = page_numkeys(mc->mc_pg[i]); if (mc->mc_ki[i] < nkeys - 1) return MDBX_RESULT_FALSE; } @@ -22097,8 +22093,8 @@ int mdbx_cursor_eof(const MDBX_cursor *mc) { struct diff_result { ptrdiff_t diff; - unsigned level; - int root_nkeys; + size_t level; + ptrdiff_t root_nkeys; }; /* calculates: r = x - y */ @@ -22132,18 +22128,18 @@ __hot static int cursor_diff(const MDBX_cursor *const __restrict x, while (likely(r->level < y->mc_snum && r->level < x->mc_snum)) { if (unlikely(y->mc_pg[r->level] != x->mc_pg[r->level])) { - ERROR("Mismatch cursors's pages at %u level", r->level); + ERROR("Mismatch cursors's pages at %zu level", r->level); return MDBX_PROBLEM; } - int nkeys = page_numkeys(y->mc_pg[r->level]); + intptr_t nkeys = page_numkeys(y->mc_pg[r->level]); assert(nkeys > 0); if (r->level == 0) r->root_nkeys = nkeys; - const int limit_ki = nkeys - 1; - const int x_ki = x->mc_ki[r->level]; - const int y_ki = y->mc_ki[r->level]; + const intptr_t limit_ki = nkeys - 1; + const intptr_t x_ki = x->mc_ki[r->level]; + const intptr_t y_ki = y->mc_ki[r->level]; r->diff = ((x_ki < limit_ki) ? x_ki : limit_ki) - ((y_ki < limit_ki) ? 
y_ki : limit_ki); if (r->diff == 0) { @@ -23312,7 +23308,7 @@ __cold void global_ctor(void) { bootid = osal_bootid(); #if MDBX_DEBUG - for (unsigned i = 0; i < 2 * 2 * 2 * 3 * 3 * 3; ++i) { + for (size_t i = 0; i < 2 * 2 * 2 * 3 * 3 * 3; ++i) { const bool s0 = (i >> 0) & 1; const bool s1 = (i >> 1) & 1; const bool s2 = (i >> 2) & 1; @@ -23359,9 +23355,9 @@ __cold void global_ctor(void) { #endif /* MDBX_DEBUG*/ #if 0 /* debug */ - for (unsigned i = 0; i < 65536; ++i) { + for (size_t i = 0; i < 65536; ++i) { size_t pages = pv2pages(i); - unsigned x = pages2pv(pages); + size_t x = pages2pv(pages); size_t xp = pv2pages(x); if (!(x == i || (x % 2 == 0 && x < 65536)) || pages != xp) printf("%u => %zu => %u => %zu\n", i, pages, x, xp); diff --git a/src/internals.h b/src/internals.h index 05f7393f..061e5cba 100644 --- a/src/internals.h +++ b/src/internals.h @@ -570,7 +570,7 @@ typedef struct MDBX_page { : PAGETYPE_WHOLE(p)) /* Size of the page header, excluding dynamic data at the end */ -#define PAGEHDRSZ ((unsigned)offsetof(MDBX_page, mp_ptrs)) +#define PAGEHDRSZ offsetof(MDBX_page, mp_ptrs) #pragma pack(pop) @@ -860,7 +860,7 @@ typedef struct MDBX_dp { MDBX_page *ptr; pgno_t pgno; union { - unsigned extra; + uint32_t extra; __anonymous_struct_extension__ struct { unsigned multi : 1; unsigned lru : 31; @@ -870,10 +870,10 @@ typedef struct MDBX_dp { /* An DPL (dirty-page list) is a sorted array of MDBX_DPs. */ typedef struct MDBX_dpl { - unsigned sorted; - unsigned length; - unsigned pages_including_loose; /* number of pages, but not an entries. */ - unsigned detent; /* allocated size excluding the MDBX_DPL_RESERVE_GAP */ + size_t sorted; + size_t length; + size_t pages_including_loose; /* number of pages, but not an entries. 
*/ + size_t detent; /* allocated size excluding the MDBX_DPL_RESERVE_GAP */ #if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || \ (!defined(__cplusplus) && defined(_MSC_VER)) MDBX_dp items[] /* dynamic size with holes at zero and after the last */; @@ -892,11 +892,17 @@ typedef struct MDBX_dpl { ((1u << 17) - 2 - MDBX_ASSUME_MALLOC_OVERHEAD / sizeof(txnid_t)) #define MDBX_PNL_ALLOCLEN(pl) ((pl)[-1]) -#define MDBX_PNL_SIZE(pl) ((pl)[0]) +#define MDBX_PNL_GETSIZE(pl) ((size_t)((pl)[0])) +#define MDBX_PNL_SETSIZE(pl, size) \ + do { \ + const size_t __size = size; \ + assert(__size < INT_MAX); \ + (pl)[0] = (pgno_t)__size; \ + } while (0) #define MDBX_PNL_FIRST(pl) ((pl)[1]) -#define MDBX_PNL_LAST(pl) ((pl)[MDBX_PNL_SIZE(pl)]) +#define MDBX_PNL_LAST(pl) ((pl)[MDBX_PNL_GETSIZE(pl)]) #define MDBX_PNL_BEGIN(pl) (&(pl)[1]) -#define MDBX_PNL_END(pl) (&(pl)[MDBX_PNL_SIZE(pl) + 1]) +#define MDBX_PNL_END(pl) (&(pl)[MDBX_PNL_GETSIZE(pl) + 1]) #if MDBX_PNL_ASCENDING #define MDBX_PNL_LEAST(pl) MDBX_PNL_FIRST(pl) @@ -906,8 +912,8 @@ typedef struct MDBX_dpl { #define MDBX_PNL_MOST(pl) MDBX_PNL_FIRST(pl) #endif -#define MDBX_PNL_SIZEOF(pl) ((MDBX_PNL_SIZE(pl) + 1) * sizeof(pgno_t)) -#define MDBX_PNL_IS_EMPTY(pl) (MDBX_PNL_SIZE(pl) == 0) +#define MDBX_PNL_SIZEOF(pl) ((MDBX_PNL_GETSIZE(pl) + 1) * sizeof(pgno_t)) +#define MDBX_PNL_IS_EMPTY(pl) (MDBX_PNL_GETSIZE(pl) == 0) /*----------------------------------------------------------------------------*/ /* Internal structures */ @@ -1013,13 +1019,13 @@ struct MDBX_txn { #if MDBX_ENABLE_REFUND pgno_t loose_refund_wl /* FIXME: describe */; #endif /* MDBX_ENABLE_REFUND */ + /* a sequence to spilling dirty page with LRU policy */ + unsigned dirtylru; /* dirtylist room: Dirty array size - dirty pages visible to this txn. * Includes ancestor txns' dirty pages not hidden by other txns' * dirty/spilled pages. Thus commit(nested txn) has room to merge * dirtylist into mt_parent after freeing hidden mt_parent pages. 
*/ - unsigned dirtyroom; - /* a sequence to spilling dirty page with LRU policy */ - unsigned dirtylru; + size_t dirtyroom; /* For write txns: Modified pages. Sorted when not MDBX_WRITEMAP. */ MDBX_dpl *dirtylist; /* The list of reclaimed txns from GC */ @@ -1030,8 +1036,8 @@ struct MDBX_txn { * in this transaction, linked through `mp_next`. */ MDBX_page *loose_pages; /* Number of loose pages (tw.loose_pages) */ - unsigned loose_count; - unsigned spill_least_removed; + size_t loose_count; + size_t spill_least_removed; /* The sorted list of dirty pages we temporarily wrote to disk * because the dirty list was full. page numbers in here are * shifted left by 1, deleted slots have the LSB set. */ diff --git a/src/osal.c b/src/osal.c index 34aeb62d..f8600cb8 100644 --- a/src/osal.c +++ b/src/osal.c @@ -583,7 +583,7 @@ static size_t osal_iov_max; MDBX_INTERNAL_FUNC int osal_ioring_create(osal_ioring_t *ior, #if defined(_WIN32) || defined(_WIN64) - unsigned flags, + uint8_t flags, #endif /* Windows */ mdbx_filehandle_t fd) { memset(ior, 0, sizeof(osal_ioring_t)); @@ -1480,14 +1480,14 @@ MDBX_INTERNAL_FUNC int osal_write(mdbx_filehandle_t fd, const void *buf, } } -int osal_pwritev(mdbx_filehandle_t fd, struct iovec *iov, int sgvcnt, +int osal_pwritev(mdbx_filehandle_t fd, struct iovec *iov, size_t sgvcnt, uint64_t offset) { size_t expected = 0; - for (int i = 0; i < sgvcnt; ++i) + for (size_t i = 0; i < sgvcnt; ++i) expected += iov[i].iov_len; #if !MDBX_HAVE_PWRITEV size_t written = 0; - for (int i = 0; i < sgvcnt; ++i) { + for (size_t i = 0; i < sgvcnt; ++i) { int rc = osal_pwrite(fd, iov[i].iov_base, iov[i].iov_len, offset); if (unlikely(rc != MDBX_SUCCESS)) return rc; diff --git a/src/osal.h b/src/osal.h index 568ae9c0..7d4b37b3 100644 --- a/src/osal.h +++ b/src/osal.h @@ -352,7 +352,7 @@ typedef struct osal_ioring { /* Actually this is not ioring for now, but on the way. 
*/ MDBX_INTERNAL_FUNC int osal_ioring_create(osal_ioring_t *, #if defined(_WIN32) || defined(_WIN64) - unsigned flags, + uint8_t flags, #endif /* Windows */ mdbx_filehandle_t fd); MDBX_INTERNAL_FUNC int osal_ioring_resize(osal_ioring_t *, size_t items); @@ -380,11 +380,11 @@ static inline unsigned osal_ioring_used(const osal_ioring_t *ior) { return ior->allocated - ior->slots_left; } -static inline int osal_ioring_reserve(osal_ioring_t *ior, unsigned items, +static inline int osal_ioring_reserve(osal_ioring_t *ior, size_t items, size_t bytes) { items = (items > 32) ? items : 32; #if defined(_WIN32) || defined(_WIN64) - const unsigned npages = (unsigned)(bytes >> ior->pagesize_ln2); + const size_t npages = bytes >> ior->pagesize_ln2; items = (items > npages) ? items : npages; #else (void)bytes; @@ -468,7 +468,7 @@ MDBX_INTERNAL_FUNC int osal_fastmutex_release(osal_fastmutex_t *fastmutex); MDBX_INTERNAL_FUNC int osal_fastmutex_destroy(osal_fastmutex_t *fastmutex); MDBX_INTERNAL_FUNC int osal_pwritev(mdbx_filehandle_t fd, struct iovec *iov, - int sgvcnt, uint64_t offset); + size_t sgvcnt, uint64_t offset); MDBX_INTERNAL_FUNC int osal_pread(mdbx_filehandle_t fd, void *buf, size_t count, uint64_t offset); MDBX_INTERNAL_FUNC int osal_pwrite(mdbx_filehandle_t fd, const void *buf,