Mirror of https://github.com/isar/libmdbx.git (synced 2025-09-06 13:42:21 +08:00)
mdbx: rename xyz_align2os_xyz() to xyz_ceil2sp_xyz() (cosmetics).
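Here "xyz" stands for the three helpers touched by this commit: bytes_align2os_bytes() becomes bytes_ceil2sp_bytes(), pgno_align2os_bytes() becomes pgno_ceil2sp_bytes(), and pgno_align2os_pgno() becomes pgno_ceil2sp_pgno(). As the hunks below show, only the identifiers change (plus a short comment added to the declarations); each helper still rounds a size up ("ceil") to the system-page ("sp") granularity.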
@@ -169,7 +169,7 @@ __cold int mdbx_env_warmup(const MDBX_env *env, const MDBX_txn *txn, MDBX_warmup
     const troika_t troika = meta_tap(env);
     used_pgno = meta_recent(env, &troika).ptr_v->geometry.first_unallocated;
   }
-  const size_t used_range = pgno_align2os_bytes(env, used_pgno);
+  const size_t used_range = pgno_ceil2sp_bytes(env, used_pgno);
   const pgno_t mlock_pgno = bytes2pgno(env, used_range);

   int rc = MDBX_SUCCESS;
@@ -316,8 +316,8 @@ __cold static void compacting_fixup_meta(MDBX_env *env, meta_t *meta) {
     meta->geometry.now = meta->geometry.first_unallocated;
     const size_t aligner = pv2pages(meta->geometry.grow_pv ? meta->geometry.grow_pv : meta->geometry.shrink_pv);
     if (aligner) {
-      const pgno_t aligned = pgno_align2os_pgno(env, meta->geometry.first_unallocated + aligner -
-                                                         meta->geometry.first_unallocated % aligner);
+      const pgno_t aligned = pgno_ceil2sp_pgno(env, meta->geometry.first_unallocated + aligner -
+                                                        meta->geometry.first_unallocated % aligner);
       meta->geometry.now = aligned;
     }
   }
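For reference, the `first_unallocated + aligner - first_unallocated % aligner` expression in the hunk above rounds the used page count up to a multiple of the grow/shrink step before the system-page rounding of pgno_ceil2sp_pgno() is applied. A minimal standalone sketch of that arithmetic (illustrative names only, not libmdbx code):

#include <stdio.h>

/* Round `pages` up to a multiple of `step` (the grow/shrink step in pages),
 * using the same arithmetic as the hunk above; note that a value which is
 * already a multiple of `step` is bumped to the next multiple. */
static unsigned round_up_to_step(unsigned pages, unsigned step) {
  return pages + step - pages % step;
}

int main(void) {
  printf("%u\n", round_up_to_step(1000, 256)); /* prints 1024 */
  printf("%u\n", round_up_to_step(1024, 256)); /* prints 1280 */
  return 0;
}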
@@ -558,7 +558,7 @@ retry_snap_meta:
       meta_sign_as_steady(headcopy);

     /* Copy the data */
-    const size_t whole_size = pgno_align2os_bytes(env, txn->geo.end_pgno);
+    const size_t whole_size = pgno_ceil2sp_bytes(env, txn->geo.end_pgno);
     const size_t used_size = pgno2bytes(env, txn->geo.first_unallocated);
     jitter4testing(false);

@@ -691,7 +691,7 @@ __cold static int copy2fd(MDBX_txn *txn, mdbx_filehandle_t fd, MDBX_copy_flags_t

   MDBX_env *const env = txn->env;
   const size_t buffer_size =
-      pgno_align2os_bytes(env, NUM_METAS) +
+      pgno_ceil2sp_bytes(env, NUM_METAS) +
       ceil_powerof2(((flags & MDBX_CP_COMPACT) ? 2 * (size_t)MDBX_ENVCOPY_WRITEBUF : (size_t)MDBX_ENVCOPY_WRITEBUF),
                     globals.sys_pagesize);

@@ -1222,9 +1222,9 @@ __cold int mdbx_env_set_geometry(MDBX_env *env, intptr_t size_lower, intptr_t si
     new_geo.shrink_pv = pages2pv(bytes2pgno(env, shrink_threshold));
     new_geo.first_unallocated = current_geo->first_unallocated;

-    ENSURE(env, pgno_align2os_bytes(env, new_geo.lower) == (size_t)size_lower);
-    ENSURE(env, pgno_align2os_bytes(env, new_geo.upper) == (size_t)size_upper);
-    ENSURE(env, pgno_align2os_bytes(env, new_geo.now) == (size_t)size_now);
+    ENSURE(env, pgno_ceil2sp_bytes(env, new_geo.lower) == (size_t)size_lower);
+    ENSURE(env, pgno_ceil2sp_bytes(env, new_geo.upper) == (size_t)size_upper);
+    ENSURE(env, pgno_ceil2sp_bytes(env, new_geo.now) == (size_t)size_now);
     ENSURE(env, new_geo.grow_pv == pages2pv(pv2pages(new_geo.grow_pv)));
     ENSURE(env, new_geo.shrink_pv == pages2pv(pv2pages(new_geo.shrink_pv)));

@@ -173,7 +173,7 @@ void env_options_adjust_defaults(MDBX_env *env) {
                   : basis >> factor;
     threshold =
         (threshold < env->geo_in_bytes.shrink || !env->geo_in_bytes.shrink) ? threshold : env->geo_in_bytes.shrink;
-    env->madv_threshold = bytes2pgno(env, bytes_align2os_bytes(env, threshold));
+    env->madv_threshold = bytes2pgno(env, bytes_ceil2sp_bytes(env, threshold));
   }

   //------------------------------------------------------------------------------
src/cogs.c: 16 changed lines
@@ -74,16 +74,16 @@ __cold bool pv2pages_verify(void) {

 /*----------------------------------------------------------------------------*/

-MDBX_NOTHROW_PURE_FUNCTION size_t bytes_align2os_bytes(const MDBX_env *env, size_t bytes) {
+MDBX_NOTHROW_PURE_FUNCTION size_t bytes_ceil2sp_bytes(const MDBX_env *env, size_t bytes) {
   return ceil_powerof2(bytes, (env->ps > globals.sys_pagesize) ? env->ps : globals.sys_pagesize);
 }

-MDBX_NOTHROW_PURE_FUNCTION size_t pgno_align2os_bytes(const MDBX_env *env, size_t pgno) {
+MDBX_NOTHROW_PURE_FUNCTION size_t pgno_ceil2sp_bytes(const MDBX_env *env, size_t pgno) {
   return ceil_powerof2(pgno2bytes(env, pgno), globals.sys_pagesize);
 }

-MDBX_NOTHROW_PURE_FUNCTION pgno_t pgno_align2os_pgno(const MDBX_env *env, size_t pgno) {
-  return bytes2pgno(env, pgno_align2os_bytes(env, pgno));
+MDBX_NOTHROW_PURE_FUNCTION pgno_t pgno_ceil2sp_pgno(const MDBX_env *env, size_t pgno) {
+  return bytes2pgno(env, pgno_ceil2sp_bytes(env, pgno));
 }

 /*----------------------------------------------------------------------------*/
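The renamed helpers above keep their bodies: each rounds a byte count up via ceil_powerof2(), to the system page size (or to the larger of the DB page size and the system page size in bytes_ceil2sp_bytes()). A minimal standalone sketch of that kind of rounding, assuming a 4 KiB system page and using illustrative names only (not libmdbx code):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Round `value` up to a multiple of `granularity`, which must be a power of
 * two (e.g. the system page size). */
static size_t round_up_pow2(size_t value, size_t granularity) {
  assert(granularity && (granularity & (granularity - 1)) == 0);
  return (value + granularity - 1) & ~(granularity - 1);
}

int main(void) {
  const size_t sys_pagesize = 4096; /* assumed for this example */
  printf("%zu\n", round_up_pow2(1, sys_pagesize));    /* prints 4096 */
  printf("%zu\n", round_up_pow2(4096, sys_pagesize)); /* prints 4096 */
  printf("%zu\n", round_up_pow2(4097, sys_pagesize)); /* prints 8192 */
  return 0;
}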
@@ -227,8 +227,8 @@ int cmp_equal_or_wrong(const MDBX_val *a, const MDBX_val *b) { return eq_fast(a,
 __cold void update_mlcnt(const MDBX_env *env, const pgno_t new_aligned_mlocked_pgno, const bool lock_not_release) {
   for (;;) {
     const pgno_t mlock_pgno_before = atomic_load32(&env->mlocked_pgno, mo_AcquireRelease);
-    eASSERT(env, pgno_align2os_pgno(env, mlock_pgno_before) == mlock_pgno_before);
-    eASSERT(env, pgno_align2os_pgno(env, new_aligned_mlocked_pgno) == new_aligned_mlocked_pgno);
+    eASSERT(env, pgno_ceil2sp_pgno(env, mlock_pgno_before) == mlock_pgno_before);
+    eASSERT(env, pgno_ceil2sp_pgno(env, new_aligned_mlocked_pgno) == new_aligned_mlocked_pgno);
     if (lock_not_release ? (mlock_pgno_before >= new_aligned_mlocked_pgno)
                          : (mlock_pgno_before <= new_aligned_mlocked_pgno))
       break;
@@ -282,9 +282,7 @@ __cold void munlock_after(const MDBX_env *env, const pgno_t aligned_pgno, const
   }
 }

-__cold void munlock_all(const MDBX_env *env) {
-  munlock_after(env, 0, bytes_align2os_bytes(env, env->dxb_mmap.current));
-}
+__cold void munlock_all(const MDBX_env *env) { munlock_after(env, 0, bytes_ceil2sp_bytes(env, env->dxb_mmap.current)); }

 /*----------------------------------------------------------------------------*/

@@ -220,11 +220,10 @@ MDBX_NOTHROW_PURE_FUNCTION static inline pgno_t bytes2pgno(const MDBX_env *env,
   return (pgno_t)(bytes >> env->ps2ln);
 }

-MDBX_NOTHROW_PURE_FUNCTION MDBX_INTERNAL size_t bytes_align2os_bytes(const MDBX_env *env, size_t bytes);
-
-MDBX_NOTHROW_PURE_FUNCTION MDBX_INTERNAL size_t pgno_align2os_bytes(const MDBX_env *env, size_t pgno);
-
-MDBX_NOTHROW_PURE_FUNCTION MDBX_INTERNAL pgno_t pgno_align2os_pgno(const MDBX_env *env, size_t pgno);
+/* align to system page size */
+MDBX_NOTHROW_PURE_FUNCTION MDBX_INTERNAL size_t bytes_ceil2sp_bytes(const MDBX_env *env, size_t bytes);
+MDBX_NOTHROW_PURE_FUNCTION MDBX_INTERNAL size_t pgno_ceil2sp_bytes(const MDBX_env *env, size_t pgno);
+MDBX_NOTHROW_PURE_FUNCTION MDBX_INTERNAL pgno_t pgno_ceil2sp_pgno(const MDBX_env *env, size_t pgno);

 MDBX_NOTHROW_PURE_FUNCTION static inline pgno_t largechunk_npages(const MDBX_env *env, size_t bytes) {
   return bytes2pgno(env, PAGEHDRSZ - 1 + bytes) + 1;
src/dxb.c: 38 changed lines
@@ -138,8 +138,8 @@ __cold int dxb_resize(MDBX_env *const env, const pgno_t used_pgno, const pgno_t
      * by other process. Avoids remapping until it necessary. */
     limit_pgno = prev_limit_pgno;
   }
-  const size_t limit_bytes = pgno_align2os_bytes(env, limit_pgno);
-  const size_t size_bytes = pgno_align2os_bytes(env, size_pgno);
+  const size_t limit_bytes = pgno_ceil2sp_bytes(env, limit_pgno);
+  const size_t size_bytes = pgno_ceil2sp_bytes(env, size_pgno);
   const void *const prev_map = env->dxb_mmap.base;

   VERBOSE("resize(env-flags 0x%x, mode %d) datafile/mapping: "
@@ -220,7 +220,7 @@ __cold int dxb_resize(MDBX_env *const env, const pgno_t used_pgno, const pgno_t
 #if MDBX_ENABLE_PGOP_STAT
       env->lck->pgops.msync.weak += 1;
 #endif /* MDBX_ENABLE_PGOP_STAT */
-      rc = osal_msync(&env->dxb_mmap, 0, pgno_align2os_bytes(env, used_pgno), MDBX_SYNC_NONE);
+      rc = osal_msync(&env->dxb_mmap, 0, pgno_ceil2sp_bytes(env, used_pgno), MDBX_SYNC_NONE);
       if (unlikely(rc != MDBX_SUCCESS))
         goto bailout;
     }
@@ -405,10 +405,10 @@ __cold int dxb_set_readahead(const MDBX_env *env, const pgno_t edge, const bool
   const bool toggle = force_whole || ((enable ^ env->lck->readahead_anchor) & 1) || !env->lck->readahead_anchor;
   const pgno_t prev_edge = env->lck->readahead_anchor >> 1;
   const size_t limit = env->dxb_mmap.limit;
-  size_t offset = toggle ? 0 : pgno_align2os_bytes(env, (prev_edge < edge) ? prev_edge : edge);
+  size_t offset = toggle ? 0 : pgno_ceil2sp_bytes(env, (prev_edge < edge) ? prev_edge : edge);
   offset = (offset < limit) ? offset : limit;

-  size_t length = pgno_align2os_bytes(env, (prev_edge < edge) ? edge : prev_edge);
+  size_t length = pgno_ceil2sp_bytes(env, (prev_edge < edge) ? edge : prev_edge);
   length = (length < limit) ? length : limit;
   length -= offset;

@@ -594,10 +594,10 @@ __cold int dxb_setup(MDBX_env *env, const int lck_rc, const mdbx_mode_t mode_bit
        *  - upper or lower limit changes
        *  - shrink threshold or growth step
        * But ignore change just a 'now/current' size. */
-      if (bytes_align2os_bytes(env, env->geo_in_bytes.upper) != pgno2bytes(env, header.geometry.upper) ||
-          bytes_align2os_bytes(env, env->geo_in_bytes.lower) != pgno2bytes(env, header.geometry.lower) ||
-          bytes_align2os_bytes(env, env->geo_in_bytes.shrink) != pgno2bytes(env, pv2pages(header.geometry.shrink_pv)) ||
-          bytes_align2os_bytes(env, env->geo_in_bytes.grow) != pgno2bytes(env, pv2pages(header.geometry.grow_pv))) {
+      if (bytes_ceil2sp_bytes(env, env->geo_in_bytes.upper) != pgno2bytes(env, header.geometry.upper) ||
+          bytes_ceil2sp_bytes(env, env->geo_in_bytes.lower) != pgno2bytes(env, header.geometry.lower) ||
+          bytes_ceil2sp_bytes(env, env->geo_in_bytes.shrink) != pgno2bytes(env, pv2pages(header.geometry.shrink_pv)) ||
+          bytes_ceil2sp_bytes(env, env->geo_in_bytes.grow) != pgno2bytes(env, pv2pages(header.geometry.grow_pv))) {

         if (env->geo_in_bytes.shrink && env->geo_in_bytes.now > used_bytes)
           /* pre-shrink if enabled */
@@ -613,7 +613,7 @@ __cold int dxb_setup(MDBX_env *env, const int lck_rc, const mdbx_mode_t mode_bit
         }

         /* altering fields to match geometry given from user */
-        expected_filesize = pgno_align2os_bytes(env, header.geometry.now);
+        expected_filesize = pgno_ceil2sp_bytes(env, header.geometry.now);
         header.geometry.now = bytes2pgno(env, env->geo_in_bytes.now);
         header.geometry.lower = bytes2pgno(env, env->geo_in_bytes.lower);
         header.geometry.upper = bytes2pgno(env, env->geo_in_bytes.upper);
@@ -627,7 +627,7 @@ __cold int dxb_setup(MDBX_env *env, const int lck_rc, const mdbx_mode_t mode_bit
                 pv2pages(header.geometry.shrink_pv), unaligned_peek_u64(4, header.txnid_a), durable_caption(&header));
       } else {
         /* fetch back 'now/current' size, since it was ignored during comparison and may differ. */
-        env->geo_in_bytes.now = pgno_align2os_bytes(env, header.geometry.now);
+        env->geo_in_bytes.now = pgno_ceil2sp_bytes(env, header.geometry.now);
       }
       ENSURE(env, header.geometry.now >= header.geometry.first_unallocated);
     } else {
@@ -639,7 +639,7 @@ __cold int dxb_setup(MDBX_env *env, const int lck_rc, const mdbx_mode_t mode_bit
       env->geo_in_bytes.shrink = pgno2bytes(env, pv2pages(header.geometry.shrink_pv));
     }

-    ENSURE(env, pgno_align2os_bytes(env, header.geometry.now) == env->geo_in_bytes.now);
+    ENSURE(env, pgno_ceil2sp_bytes(env, header.geometry.now) == env->geo_in_bytes.now);
     ENSURE(env, env->geo_in_bytes.now >= used_bytes);
     if (!expected_filesize)
       expected_filesize = env->geo_in_bytes.now;
@@ -693,7 +693,7 @@ __cold int dxb_setup(MDBX_env *env, const int lck_rc, const mdbx_mode_t mode_bit
 #endif /* MADV_DONTDUMP */
 #if defined(MADV_DODUMP)
   if (globals.runtime_flags & MDBX_DBG_DUMP) {
-    const size_t meta_length_aligned2os = pgno_align2os_bytes(env, NUM_METAS);
+    const size_t meta_length_aligned2os = pgno_ceil2sp_bytes(env, NUM_METAS);
     err = madvise(env->dxb_mmap.base, meta_length_aligned2os, MADV_DODUMP) ? ignore_enosys_and_eagain(errno)
                                                                            : MDBX_SUCCESS;
     if (unlikely(MDBX_IS_ERROR(err)))
@@ -1018,15 +1018,15 @@ int dxb_sync_locked(MDBX_env *env, unsigned flags, meta_t *const pending, troika
 #endif /* ENABLE_MEMCHECK || __SANITIZE_ADDRESS__ */

 #if defined(MADV_DONTNEED) || defined(POSIX_MADV_DONTNEED)
-    const size_t discard_edge_pgno = pgno_align2os_pgno(env, largest_pgno);
+    const size_t discard_edge_pgno = pgno_ceil2sp_pgno(env, largest_pgno);
     if (prev_discarded_pgno >= discard_edge_pgno + env->madv_threshold) {
-      const size_t prev_discarded_bytes = pgno_align2os_bytes(env, prev_discarded_pgno);
+      const size_t prev_discarded_bytes = pgno_ceil2sp_bytes(env, prev_discarded_pgno);
       const size_t discard_edge_bytes = pgno2bytes(env, discard_edge_pgno);
       /* due to the alignment, prev_discarded_bytes and discard_edge_bytes
        * may be equal */
       if (prev_discarded_bytes > discard_edge_bytes) {
         NOTICE("shrink-MADV_%s %zu..%zu", "DONTNEED", discard_edge_pgno, prev_discarded_pgno);
-        munlock_after(env, discard_edge_pgno, bytes_align2os_bytes(env, env->dxb_mmap.current));
+        munlock_after(env, discard_edge_pgno, bytes_ceil2sp_bytes(env, env->dxb_mmap.current));
         const uint32_t munlocks_before = atomic_load32(&env->lck->mlcnt[1], mo_Relaxed);
 #if defined(MADV_DONTNEED)
         int advise = MADV_DONTNEED;
@@ -1073,7 +1073,7 @@ int dxb_sync_locked(MDBX_env *env, unsigned flags, meta_t *const pending, troika
           pending->geometry.grow_pv ? /* grow_step */ pv2pages(pending->geometry.grow_pv) : shrink_step;
       const pgno_t with_stockpile_gap = largest_pgno + stockpile_gap;
       const pgno_t aligned =
-          pgno_align2os_pgno(env, (size_t)with_stockpile_gap + aligner - with_stockpile_gap % aligner);
+          pgno_ceil2sp_pgno(env, (size_t)with_stockpile_gap + aligner - with_stockpile_gap % aligner);
       const pgno_t bottom = (aligned > pending->geometry.lower) ? aligned : pending->geometry.lower;
       if (pending->geometry.now > bottom) {
         if (TROIKA_HAVE_STEADY(troika))
@@ -1120,7 +1120,7 @@ int dxb_sync_locked(MDBX_env *env, unsigned flags, meta_t *const pending, troika
 #else
       (void)sync_op;
 #endif /* MDBX_ENABLE_PGOP_STAT */
-      rc = osal_msync(&env->dxb_mmap, 0, pgno_align2os_bytes(env, pending->geometry.first_unallocated), mode_bits);
+      rc = osal_msync(&env->dxb_mmap, 0, pgno_ceil2sp_bytes(env, pending->geometry.first_unallocated), mode_bits);
     } else {
 #if MDBX_ENABLE_PGOP_STAT
       env->lck->pgops.fsync.weak += sync_op;
@@ -1243,7 +1243,7 @@ int dxb_sync_locked(MDBX_env *env, unsigned flags, meta_t *const pending, troika
 #if MDBX_ENABLE_PGOP_STAT
       env->lck->pgops.msync.weak += 1;
 #endif /* MDBX_ENABLE_PGOP_STAT */
-      rc = osal_msync(&env->dxb_mmap, 0, pgno_align2os_bytes(env, NUM_METAS),
+      rc = osal_msync(&env->dxb_mmap, 0, pgno_ceil2sp_bytes(env, NUM_METAS),
                       (flags & MDBX_NOMETASYNC) ? MDBX_SYNC_NONE : MDBX_SYNC_DATA | MDBX_SYNC_IODQ);
     } else {
 #if MDBX_ENABLE_PGOP_STAT
@@ -128,7 +128,7 @@ retry:;
     if (unlikely(err != MDBX_SUCCESS))
       return err;
 #endif
-    const size_t usedbytes = pgno_align2os_bytes(env, head.ptr_c->geometry.first_unallocated);
+    const size_t usedbytes = pgno_ceil2sp_bytes(env, head.ptr_c->geometry.first_unallocated);
     err = osal_msync(&env->dxb_mmap, 0, usedbytes, MDBX_SYNC_DATA);
 #if defined(_WIN32) || defined(_WIN64)
     imports.srwl_ReleaseShared(&env->remap_guard);
@@ -1263,7 +1263,7 @@ no_gc:

   eASSERT(env, newnext > txn->geo.end_pgno);
   const size_t grow_step = pv2pages(txn->geo.grow_pv);
-  size_t aligned = pgno_align2os_pgno(env, (pgno_t)(newnext + grow_step - newnext % grow_step));
+  size_t aligned = pgno_ceil2sp_pgno(env, (pgno_t)(newnext + grow_step - newnext % grow_step));

   if (aligned > txn->geo.upper)
     aligned = txn->geo.upper;
@@ -235,7 +235,7 @@ __cold int meta_wipe_steady(MDBX_env *env, txnid_t inclusive_upto) {
   if (err == MDBX_RESULT_TRUE) {
     err = MDBX_SUCCESS;
     if (!MDBX_AVOID_MSYNC && (env->flags & MDBX_WRITEMAP)) {
-      err = osal_msync(&env->dxb_mmap, 0, pgno_align2os_bytes(env, NUM_METAS), MDBX_SYNC_DATA | MDBX_SYNC_IODQ);
+      err = osal_msync(&env->dxb_mmap, 0, pgno_ceil2sp_bytes(env, NUM_METAS), MDBX_SYNC_DATA | MDBX_SYNC_IODQ);
 #if MDBX_ENABLE_PGOP_STAT
       env->lck->pgops.msync.weak += 1;
 #endif /* MDBX_ENABLE_PGOP_STAT */
@@ -267,7 +267,7 @@ int meta_sync(const MDBX_env *env, const meta_ptr_t head) {
   int rc = MDBX_RESULT_TRUE;
   if (env->flags & MDBX_WRITEMAP) {
     if (!MDBX_AVOID_MSYNC) {
-      rc = osal_msync(&env->dxb_mmap, 0, pgno_align2os_bytes(env, NUM_METAS), MDBX_SYNC_DATA | MDBX_SYNC_IODQ);
+      rc = osal_msync(&env->dxb_mmap, 0, pgno_ceil2sp_bytes(env, NUM_METAS), MDBX_SYNC_DATA | MDBX_SYNC_IODQ);
 #if MDBX_ENABLE_PGOP_STAT
       env->lck->pgops.msync.weak += 1;
 #endif /* MDBX_ENABLE_PGOP_STAT */
@@ -405,7 +405,7 @@ __cold int __must_check_result meta_override(MDBX_env *env, size_t target, txnid
 #if MDBX_ENABLE_PGOP_STAT
     env->lck->pgops.msync.weak += 1;
 #endif /* MDBX_ENABLE_PGOP_STAT */
-    rc = osal_msync(&env->dxb_mmap, 0, pgno_align2os_bytes(env, model->geometry.first_unallocated),
+    rc = osal_msync(&env->dxb_mmap, 0, pgno_ceil2sp_bytes(env, model->geometry.first_unallocated),
                     MDBX_SYNC_DATA | MDBX_SYNC_IODQ);
     if (unlikely(rc != MDBX_SUCCESS))
       return rc;
@@ -417,7 +417,7 @@ __cold int __must_check_result meta_override(MDBX_env *env, size_t target, txnid
 #if MDBX_ENABLE_PGOP_STAT
     env->lck->pgops.msync.weak += 1;
 #endif /* MDBX_ENABLE_PGOP_STAT */
-    rc = osal_msync(&env->dxb_mmap, 0, pgno_align2os_bytes(env, target + 1), MDBX_SYNC_DATA | MDBX_SYNC_IODQ);
+    rc = osal_msync(&env->dxb_mmap, 0, pgno_ceil2sp_bytes(env, target + 1), MDBX_SYNC_DATA | MDBX_SYNC_IODQ);
   } else {
 #if MDBX_ENABLE_PGOP_STAT
     env->lck->pgops.wops.weak += 1;
@@ -10,7 +10,7 @@ int iov_init(MDBX_txn *const txn, iov_ctx_t *ctx, size_t items, size_t npages, m
   ctx->fd = fd;
   ctx->coherency_timestamp =
       (check_coherence || txn->env->lck->pgops.incoherence.weak) ? 0 : UINT64_MAX /* do not perform the coherency check */;
-  ctx->err = osal_ioring_prepare(ctx->ior, items, pgno_align2os_bytes(txn->env, npages));
+  ctx->err = osal_ioring_prepare(ctx->ior, items, pgno_ceil2sp_bytes(txn->env, npages));
   if (likely(ctx->err == MDBX_SUCCESS)) {
 #if MDBX_NEED_WRITTEN_RANGE
     ctx->flush_begin = MAX_PAGENO;
@@ -199,7 +199,7 @@ __cold int spill_slowpath(MDBX_txn *const txn, MDBX_cursor *const m0, const intp
     NOTICE("%s-spilling %zu dirty-entries, %zu dirty-npages", "msync", dirty_entries, dirty_npages);
     const MDBX_env *env = txn->env;
     tASSERT(txn, txn->wr.spilled.list == nullptr);
-    rc = osal_msync(&txn->env->dxb_mmap, 0, pgno_align2os_bytes(env, txn->geo.first_unallocated), MDBX_SYNC_KICK);
+    rc = osal_msync(&txn->env->dxb_mmap, 0, pgno_ceil2sp_bytes(env, txn->geo.first_unallocated), MDBX_SYNC_KICK);
     if (unlikely(rc != MDBX_SUCCESS))
       goto bailout;
 #if MDBX_AVOID_MSYNC