mdbx: add dp_reserve_limit with 1024 default.

More for https://github.com/erthink/libmdbx/issues/128

Change-Id: Ia92b1aea58640f183202495e3f6e2d531057afd2
Leonid Yuriev 2020-11-21 16:21:57 +03:00
parent 8f60050991
commit 42019e0b8d
2 changed files with 17 additions and 8 deletions
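The change caps the per-environment reserve of spare single-page buffers: mdbx_page_malloc() pops pages from the renamed me_dp_reserve list, mdbx_dpage_free() pushes a page back only while me_dp_reserve_len is below me_options.dp_reserve_limit (default 1024, set in mdbx_env_create()), and mdbx_env_close_ex() drains whatever remains. Below is a minimal standalone sketch of the same capped free-list pattern, not libmdbx code: the names page_t, pool_t, pool_alloc, pool_free, pool_destroy and the fixed 4 KiB payload are invented for illustration.

/* Illustrative sketch of a size-capped reserve of spare single-page
 * buffers, mirroring the me_dp_reserve / me_dp_reserve_len /
 * dp_reserve_limit logic in this commit. Not libmdbx code. */
#include <stdio.h>
#include <stdlib.h>

typedef struct page {
  struct page *next;  /* plays the role of mp_next in the diff */
  char payload[4096]; /* stand-in for the real page size */
} page_t;

typedef struct {
  page_t *reserve;        /* head of the spare-page list (me_dp_reserve) */
  unsigned reserve_len;   /* current list length (me_dp_reserve_len) */
  unsigned reserve_limit; /* cap on the list length (dp_reserve_limit) */
} pool_t;

/* Take a page: reuse a cached one if available, otherwise malloc. */
static page_t *pool_alloc(pool_t *p) {
  if (p->reserve) {
    page_t *np = p->reserve;
    p->reserve = np->next;
    p->reserve_len -= 1;
    return np;
  }
  return malloc(sizeof(page_t)); /* may return NULL; fine for a sketch */
}

/* Return a page: cache it only while the list is below the limit,
 * otherwise free it at once (the effect of the new
 * me_dp_reserve_len < dp_reserve_limit check). */
static void pool_free(pool_t *p, page_t *dp) {
  if (p->reserve_len < p->reserve_limit) {
    dp->next = p->reserve;
    p->reserve = dp;
    p->reserve_len += 1;
  } else {
    free(dp);
  }
}

/* Drain the cache on shutdown, like the loop in mdbx_env_close_ex(). */
static void pool_destroy(pool_t *p) {
  while (p->reserve) {
    page_t *dp = p->reserve;
    p->reserve = dp->next;
    p->reserve_len -= 1;
    free(dp);
  }
}

int main(void) {
  pool_t pool = {NULL, 0, 1024}; /* 1024 matches the new default */
  page_t *a = pool_alloc(&pool);
  pool_free(&pool, a);           /* cached: reserve_len becomes 1 */
  page_t *b = pool_alloc(&pool); /* reuses the cached page */
  pool_free(&pool, b);
  pool_destroy(&pool);
  printf("reserve_len after destroy: %u\n", pool.reserve_len);
  return 0;
}

Capping the list bounds the memory an idle environment retains while still avoiding a malloc/free pair on the hot path for the common single-page case.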


@@ -3647,13 +3647,15 @@ int mdbx_dcmp(const MDBX_txn *txn, MDBX_dbi dbi, const MDBX_val *a,
  * Set MDBX_TXN_ERROR on failure. */
 static MDBX_page *mdbx_page_malloc(MDBX_txn *txn, unsigned num) {
   MDBX_env *env = txn->mt_env;
-  MDBX_page *np = env->me_dpages;
+  MDBX_page *np = env->me_dp_reserve;
   size_t size = env->me_psize;
   if (likely(num == 1 && np)) {
+    mdbx_assert(env, env->me_dp_reserve_len > 0);
     ASAN_UNPOISON_MEMORY_REGION(np, size);
     VALGRIND_MEMPOOL_ALLOC(env, np, size);
     VALGRIND_MAKE_MEM_DEFINED(&np->mp_next, sizeof(np->mp_next));
-    env->me_dpages = np->mp_next;
+    env->me_dp_reserve = np->mp_next;
+    env->me_dp_reserve_len -= 1;
   } else {
     size = pgno2bytes(env, num);
     np = mdbx_malloc(size);
@@ -3687,10 +3689,11 @@ static void mdbx_dpage_free(MDBX_env *env, MDBX_page *dp, unsigned pages) {
 #if MDBX_DEBUG
   dp->mp_pgno = MAX_PAGENO + 1;
 #endif
-  if (pages == 1) {
-    dp->mp_next = env->me_dpages;
+  if (pages == 1 && env->me_dp_reserve_len < env->me_options.dp_reserve_limit) {
+    dp->mp_next = env->me_dp_reserve;
     VALGRIND_MEMPOOL_FREE(env, dp);
-    env->me_dpages = dp;
+    env->me_dp_reserve = dp;
+    env->me_dp_reserve_len += 1;
   } else {
     /* large pages just get freed directly */
     VALGRIND_MEMPOOL_FREE(env, dp);
@@ -9230,6 +9233,8 @@ __cold int mdbx_env_create(MDBX_env **penv) {
   env->me_pid = mdbx_getpid();
   env->me_stuck_meta = -1;
+  env->me_options.dp_reserve_limit = 1024;
   int rc;
   const size_t os_psize = mdbx_syspagesize();
   if (unlikely(!is_powerof2(os_psize) || os_psize < MIN_PAGESIZE)) {
@@ -11070,10 +11075,10 @@ __cold int mdbx_env_close_ex(MDBX_env *env, bool dont_sync) {
   mdbx_ensure(env, mdbx_ipclock_destroy(&env->me_lckless_stub.wlock) == 0);
 #endif /* MDBX_LOCKING */
-  while ((dp = env->me_dpages) != NULL) {
+  while ((dp = env->me_dp_reserve) != NULL) {
     ASAN_UNPOISON_MEMORY_REGION(&dp->mp_next, sizeof(dp->mp_next));
     VALGRIND_MAKE_MEM_DEFINED(&dp->mp_next, sizeof(dp->mp_next));
-    env->me_dpages = dp->mp_next;
+    env->me_dp_reserve = dp->mp_next;
     mdbx_free(dp);
   }
   VALGRIND_DESTROY_MEMPOOL(env);


@@ -975,7 +975,7 @@ struct MDBX_env {
   uint16_t *me_dbflags;        /* array of flags from MDBX_db.md_flags */
   unsigned *me_dbiseqs;        /* array of dbi sequence numbers */
   volatile txnid_t *me_oldest; /* ID of oldest reader last time we looked */
-  MDBX_page *me_dpages;        /* list of malloc'd blocks for re-use */
+  MDBX_page *me_dp_reserve;    /* list of malloc'd blocks for re-use */
   /* PNL of pages that became unused in a write txn */
   MDBX_PNL me_retired_pages;
   /* MDBX_DP of pages written during a write txn. */
@@ -992,6 +992,10 @@ struct MDBX_env {
   volatile pgno_t *me_discarded_tail;
   volatile uint32_t *me_meta_sync_txnid;
   MDBX_hsr_func *me_hsr_callback; /* Callback for kicking laggard readers */
+  unsigned me_dp_reserve_len;
+  struct {
+    unsigned dp_reserve_limit;
+  } me_options;
   struct {
 #if MDBX_LOCKING > 0
     mdbx_ipclock_t wlock;