Compare commits

...

25 Commits

Author SHA1 Message Date
Leonid Yuriev
fcb8cd2145 mdbx: alter DB-format signature and change version to v0.11.x (not a release).
Related to https://github.com/erthink/libmdbx/issues/238

Signed-off-by: Leonid Yuriev <leo@yuriev.ru>
2021-10-21 15:17:18 +03:00
Leonid Yuriev
514910621e mdbx: return MDBX_CORRUPTED instead of MDBX_PAGE_NOTFOUND for invalid pages. 2021-10-15 01:11:20 +03:00
Leonid Yuriev
7251f47d5b mdbx: fix typo which led any error to be converted to 1. 2021-10-14 20:03:37 +03:00
Leonid Yuriev
edda9515d6 mdbx: release v0.10.5 (hotfix).
Acknowledgements:
-----------------
 - [Noel Kuntze](https://github.com/Thermi) for immediate bug reporting.

Fixes:
------
 - Fixed an unaligned-access regression introduced by the `#pragma pack` fix for modern compilers.
 - Added a UBSAN test to CI to avoid regressions similar to the one just fixed.
 - Fixed possible clashing of meta-pages after manually switching to a particular meta-page using the `mdbx_chk` utility.

Minors:
-------
 - Refined handling of weak or invalid meta-pages while opening a DB.
 - Refined reporting of the last committed modification transaction's ID for the @MAIN and @GC sub-databases.

Signed-off-by: Leonid Yuriev <leo@yuriev.ru>
2021-10-13 16:35:26 +03:00
Leonid Yuriev
4632df5661 mdbx: cleanup non-persistent flags from meta-model. 2021-10-13 16:34:53 +03:00
Leonid Yuriev
11a5c50591 mdbx: fix turn_for_recovery() to avoid possible clashing of meta-pages after turning to a particular meta-page. 2021-10-13 16:34:53 +03:00
Leonid Yuriev
3092078709 mdbx: refine handling of weak or invalid meta-pages while opening a DB. 2021-10-13 16:34:53 +03:00
Leonid Yuriev
cbb71058ca mdbx: refine reporting of the last committed modification transaction's ID for the @MAIN and @GC sub-databases. 2021-10-13 16:34:25 +03:00
Leonid Yuriev
9dd15a4415 mdbx-ci: use Circle-CI for UBSAN-test pass. 2021-10-13 16:13:30 +03:00
Leonid Yuriev
30745e0621 mdbx: refix #pragma pack for modern compilers and alignment-required arches (hotfix).
Fixes a regression introduced by the fix for https://github.com/erthink/libmdbx/issues/235
2021-10-13 16:13:22 +03:00
Leonid Yuriev
c9659e1aca mdbx: fix minor ChangeLog typo. 2021-10-11 17:18:06 +03:00
Leonid Yuriev
1fed51ac0d mdbx: update patch for buildroot (old versions). 2021-10-10 15:45:07 +03:00
Leonid Yuriev
590b225fcc mdbx: release v0.10.4
Acknowledgements:
-----------------
 - [Artem Vorotnikov](https://github.com/vorot93) for supporting the [Rust wrapper](https://github.com/vorot93/mdbx-rs).
 - [Andrew Ashikhmin](https://github.com/yperbasis) for contributing to C++ API.

Fixes:
------
 - Fixed a possible infinite loop in the GC update during transaction commit (no public issue, since the problem was discovered inside [Positive Technologies](https://www.ptsecurity.ru)).
 - Fixed `#pragma pack` to avoid provoking some compilers to generate code with [unaligned access](https://github.com/erthink/libmdbx/issues/235).
 - Fixed `noexcept` for potentially throwing `txn::put()` of C++ API.

Minors:
-------
 - Added a stochastic test script for checking small-transaction cases.
 - Removed an extra transaction commit/restart inside the test framework.
 - Fixed the too-small (single-page) default DB shrink threshold in debugging builds.
2021-10-10 13:31:33 +03:00
Leonid Yuriev
2f8a429f91 mdbx: update ChangeLog. 2021-10-10 00:51:38 +03:00
Leonid Yuriev
64e6fa93fd mdbx: fix #pragma pack to avoid misalignment for some compilers.
Fixes https://github.com/erthink/libmdbx/issues/235.
2021-10-09 12:36:40 +03:00
Leonid Yuriev
ee917209fe mdbx-test: add stochastic-small script. 2021-10-09 12:30:39 +03:00
Leonid Yuriev
fa0a38c1ac mdbx: avoid single-page (too small) shrink threshold by default when MDBX_DEBUG > 0. 2021-10-09 12:30:39 +03:00
Leonid Yuriev
f936217309 mdbx-test: avoid extra transaction restart. 2021-10-09 12:30:35 +03:00
Leonid Yuriev
fe0ec8ceca mdbx: fix and refine mdbx_update_gc() to avoid infinite looping possibility (squashed). 2021-10-09 12:29:10 +03:00
Leonid Yuriev
6737d304a6 mdbx-ci: MDBX_FORCE_ASSERTION=1 for CI-build. 2021-10-09 12:29:05 +03:00
Leonid Yuriev
fe7186d549 mdbx: reflow comment (cosmetic). 2021-09-30 16:38:07 +03:00
Leonid Yuriev
c81226906a mdbx: update ChangeLog and Contributors list. 2021-09-28 00:37:33 +03:00
Leonid Yuriev
699361c5d0 mdbx: update bindings/wrappers list. 2021-09-27 20:39:19 +03:00
Leonid Yuriev
903bcd2466 mdbx-ci: switch github action from Ubuntu 16.04 to 18.04 2021-09-27 03:18:36 +03:00
yperbasis
c714ee9b55 mdbx++: remove noexcept from potentially throwing txn::put(). 2021-09-03 23:10:22 +03:00
19 changed files with 575 additions and 116 deletions

View File

@@ -11,8 +11,7 @@ jobs:
- TESTLOG: /tmp/test.log
steps:
- checkout
- run: make all
- run: ulimit -c unlimited && make check
- run: ulimit -c unlimited && MDBX_BUILD_OPTIONS="-DNDEBUG=1 -DMDBX_FORCE_ASSERTIONS=1" make test-ubsan
- run:
command: |
mkdir -p /tmp/artifacts

View File

@@ -3,4 +3,4 @@ freebsd_instance:
task:
install_script: pkg install -y gmake bash git
script: git fetch --tags --force && gmake check
script: git fetch --tags --force && gmake MDBX_BUILD_OPTIONS="-DNDEBUG=1 -DMDBX_FORCE_ASSERTIONS=1" check

View File

@@ -61,6 +61,7 @@ ARMEL
ARMT
Artem
asan
Ashikhmin
asis
asm
asprintf
@@ -1962,6 +1963,7 @@ xsize
yml
Yota
Yotta
yperbasis
Yq
yuriev
Zano

View File

@@ -27,11 +27,11 @@ jobs:
strategy:
matrix:
#, windows-latest
os: [ubuntu-latest, macos-latest, ubuntu-16.04]
os: [ubuntu-latest, macos-latest, ubuntu-18.04]
steps:
- uses: actions/checkout@v2
- name: fetch tags
run: git fetch --unshallow --tags --prune --force
- name: make check
run: make --keep-going all && MALLOC_CHECK_=7 MALLOC_PERTURB_=42 make --keep-going check
run: make MDBX_BUILD_OPTIONS="-DNDEBUG=1 -DMDBX_FORCE_ASSERTIONS=1" --keep-going all && MALLOC_CHECK_=7 MALLOC_PERTURB_=42 make MDBX_BUILD_OPTIONS="-DNDEBUG=1 -DMDBX_FORCE_ASSERTIONS=1" --keep-going check
shell: bash

View File

@@ -2,6 +2,7 @@ Contributors
============
- Alexey Naumov <alexey.naumov@gmail.com>
- Andrew Ashikhmin <andrey.ashikhmin@gmail.com>
- Chris Mikkelson <cmikk@qwest.net>
- Claude Brisson <claude.brisson@gmail.com>
- David Barbour <dmbarbour@gmail.com>
@@ -16,7 +17,7 @@ Contributors
- John Hewson <john@jahewson.com>
- Klaus Malorny <klaus.malorny@knipp.de>
- Kurt Zeilenga <kurt.zeilenga@isode.com>
- Leonid Yuriev <leo@yuriev.ru>, <lyuryev@ptsecurity.com>
- Leonid Yuriev <leo@yuriev.ru>, <lyuryev@ptsecurity.ru>
- Lorenz Bauer <lmb@cloudflare.com>
- Luke Yeager <lyeager@nvidia.com>
- Martin Hedenfalk <martin@bzero.se>

View File

@@ -20,6 +20,44 @@ ChangeLog
- Packages for [Astra Linux](https://astralinux.ru/), [ALT Linux](https://www.altlinux.org/), [ROSA Linux](https://www.rosalinux.ru/), etc.
## v0.10.5 at 2021-10-13
Acknowledgements:
- [Noel Kuntze](https://github.com/Thermi) for immediate bug reporting.
Fixes:
- Fixed an unaligned-access regression introduced by the `#pragma pack` fix for modern compilers.
- Added a UBSAN test to CI to avoid regressions similar to the one just fixed.
- Fixed possible clashing of meta-pages after manually switching to a particular meta-page using the `mdbx_chk` utility.
Minors:
- Refined handling of weak or invalid meta-pages while opening a DB.
- Refined reporting of the last committed modification transaction's ID for the @MAIN and @GC sub-databases.
## v0.10.4 at 2021-10-10
Acknowledgements:
- [Artem Vorotnikov](https://github.com/vorot93) for supporting the [Rust wrapper](https://github.com/vorot93/mdbx-rs).
- [Andrew Ashikhmin](https://github.com/yperbasis) for contributing to C++ API.
Fixes:
- Fixed a possible infinite loop in the GC update during transaction commit (no public issue, since the problem was discovered inside [Positive Technologies](https://www.ptsecurity.ru)).
- Fixed `#pragma pack` to avoid provoking some compilers to generate code with [unaligned access](https://github.com/erthink/libmdbx/issues/235).
- Fixed `noexcept` for potentially throwing `txn::put()` of C++ API.
Minors:
- Added a stochastic test script for checking small-transaction cases.
- Removed an extra transaction commit/restart inside the test framework.
- Fixed the too-small (single-page) default DB shrink threshold in debugging builds.
## v0.10.3 at 2021-08-27
Acknowledgements:

View File

@@ -571,9 +571,9 @@ Bindings
| Ruby | [ruby-mdbx](https://rubygems.org/gems/mdbx/) | [Mahlon E. Smith](https://github.com/mahlonsmith) |
| Go | [mdbx-go](https://github.com/torquem-ch/mdbx-go) | [Alex Sharov](https://github.com/AskAlexSharov) |
| [Nim](https://en.wikipedia.org/wiki/Nim_(programming_language)) | [NimDBX](https://github.com/snej/nimdbx) | [Jens Alfke](https://github.com/snej)
| Rust | [heed](https://github.com/Kerollmops/heed), [mdbx-rs](https://github.com/Kerollmops/mdbx-rs) | [Clément Renault](https://github.com/Kerollmops) |
| Java | [mdbxjni](https://github.com/castortech/mdbxjni) | [Castor Technologies](https://castortech.com/) |
| .NET | [mdbx.NET](https://github.com/wangjia184/mdbx.NET) | [Jerry Wang](https://github.com/wangjia184) |
| Rust | [mdbx-rs](https://github.com/vorot93/mdbx-rs) | [Artem Vorotnikov](https://github.com/vorot93) |
| Java (obsolete) | [mdbxjni](https://github.com/castortech/mdbxjni) | [Castor Technologies](https://castortech.com/) |
| .NET (obsolete) | [mdbx.NET](https://github.com/wangjia184/mdbx.NET) | [Jerry Wang](https://github.com/wangjia184) |
<!-- section-end -->

View File

@@ -1,4 +1,4 @@
version: 0.10.3.{build}
version: 0.11.0.{build}
environment:
matrix:

mdbx.h
View File

@@ -568,9 +568,9 @@ typedef mode_t mdbx_mode_t;
extern "C" {
#endif
/* MDBX version 0.10.x */
/* MDBX version 0.11.x */
#define MDBX_VERSION_MAJOR 0
#define MDBX_VERSION_MINOR 10
#define MDBX_VERSION_MINOR 11
#ifndef LIBMDBX_API
#if defined(LIBMDBX_EXPORTS)
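The hunk above bumps `MDBX_VERSION_MINOR` from 10 to 11 alongside the DB-format signature change from commit fcb8cd2145. As a minimal illustration (not part of libmdbx itself), downstream code can guard at compile time against building with an older header, using only the macros shown above:

```cpp
/* Illustrative compile-time guard; assumes nothing beyond the
 * MDBX_VERSION_MAJOR / MDBX_VERSION_MINOR macros shown in the hunk above. */
#include "mdbx.h"

#if MDBX_VERSION_MAJOR == 0 && MDBX_VERSION_MINOR < 11
#error "libmdbx >= 0.11 is required (new DB-format signature)"
#endif
```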

View File

@@ -3606,8 +3606,7 @@ public:
inline MDBX_error_t put(map_handle map, const slice &key, slice *value,
MDBX_put_flags_t flags) noexcept;
inline void put(map_handle map, const slice &key, slice value,
put_mode mode) noexcept;
inline void put(map_handle map, const slice &key, slice value, put_mode mode);
inline void insert(map_handle map, const slice &key, slice value);
inline value_result try_insert(map_handle map, const slice &key, slice value);
inline slice insert_reserve(map_handle map, const slice &key,
@@ -5166,7 +5165,7 @@ inline MDBX_error_t txn::put(map_handle map, const slice &key, slice *value,
}
inline void txn::put(map_handle map, const slice &key, slice value,
put_mode mode) noexcept {
put_mode mode) {
error::success_or_throw(put(map, key, &value, MDBX_put_flags_t(mode)));
}
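The change above drops `noexcept` from the `put(map, key, value, put_mode)` overload because its body calls `error::success_or_throw()`, which may throw. A standalone sketch (not libmdbx code) of why a `noexcept` wrapper around a throwing call is dangerous:

```cpp
#include <cstdio>
#include <stdexcept>

void do_put(bool fail) {            // stand-in for a call that may throw
  if (fail)
    throw std::runtime_error("key rejected");
}

void wrapper(bool fail) noexcept {  // promises it cannot throw...
  do_put(fail);                     // ...so a throw here calls std::terminate()
}

int main() {
  try {
    wrapper(true);                  // terminates; the catch below is never reached
  } catch (const std::exception &e) {
    std::printf("unreachable: %s\n", e.what());
  }
}
```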

View File

@@ -1,9 +1,9 @@
From 0c0df833879b3f815959c8bd9b6cc27cb9d71b9e Mon Sep 17 00:00:00 2001
From ce7b70a572cf77d8f37e5047d5d9c8361b2675ae Mon Sep 17 00:00:00 2001
From: Leonid Yuriev <leo@yuriev.ru>
Date: Tue, 3 Aug 2021 00:55:27 +0300
Date: Sun, 10 Oct 2021 15:31:40 +0300
Subject: [PATCH] package/libmdbx: new package (library/database).
This patch adds libmdbx v0.10.2:
This patch adds libmdbx v0.10.4:
- libmdbx is one of the fastest compact embeddable key-value ACID database.
- libmdbx has a specific set of properties and capabilities,
focused on creating unique lightweight solutions.
@@ -103,18 +103,18 @@ index 0000000000..d13f73938f
+ !BR2_TOOLCHAIN_GCC_AT_LEAST_4_4
diff --git a/package/libmdbx/libmdbx.hash b/package/libmdbx/libmdbx.hash
new file mode 100644
index 0000000000..c8a28ada34
index 0000000000..326cf57bb6
--- /dev/null
+++ b/package/libmdbx/libmdbx.hash
@@ -0,0 +1,5 @@
+# Hashes from: https://github.com/erthink/libmdbx/releases/
+sha256 745555704df76626a6612ad0c6bc6b1a66bfab98b9245b07dfb82640aa46d6fa libmdbx-amalgamated-0.10.2.tar.gz
+sha256 e11d5339a1e1cc34407898933b62a208936fd761a2cc31e11244d581d1d2b5d0 libmdbx-amalgamated-0.10.4.tar.gz
+
+# Locally calculated
+sha256 310fe25c858a9515fc8c8d7d1f24a67c9496f84a91e0a0e41ea9975b1371e569 LICENSE
diff --git a/package/libmdbx/libmdbx.mk b/package/libmdbx/libmdbx.mk
new file mode 100644
index 0000000000..60c5148625
index 0000000000..f38e4a533b
--- /dev/null
+++ b/package/libmdbx/libmdbx.mk
@@ -0,0 +1,42 @@
@@ -124,7 +124,7 @@ index 0000000000..60c5148625
+#
+################################################################################
+
+LIBMDBX_VERSION = 0.10.2
+LIBMDBX_VERSION = 0.10.4
+LIBMDBX_SOURCE = libmdbx-amalgamated-$(LIBMDBX_VERSION).tar.gz
+LIBMDBX_SITE = https://github.com/erthink/libmdbx/releases/download/v$(LIBMDBX_VERSION)
+LIBMDBX_SUPPORTS_IN_SOURCE_BUILD = NO
@@ -161,5 +161,5 @@ index 0000000000..60c5148625
+
+$(eval $(cmake-package))
--
2.32.0
2.33.0

View File

@@ -4848,8 +4848,8 @@ status_done:
* могла быть выделена, а затем пролита в одной из родительских
* транзакций. Поэтому пока помещаем её в retired-список, который будет
* фильтроваться относительно dirty- и spilled-списков родительских
* транзакций при коммите
* дочерних транзакций, либо же будет записан в GC в неизменном виде. */
* транзакций при коммите дочерних транзакций, либо же будет записан
* в GC в неизменном виде. */
goto retire;
}
@@ -8682,7 +8682,7 @@ retry_noaccount:
mdbx_pnl_check4assert(txn->tw.reclaimed_pglist,
txn->mt_next_pgno - MDBX_ENABLE_REFUND));
mdbx_tassert(txn, mdbx_dirtylist_check(txn));
if (unlikely(/* paranoia */ loop > ((MDBX_DEBUG > 0) ? 9 : 99))) {
if (unlikely(/* paranoia */ loop > ((MDBX_DEBUG > 0) ? 12 : 42))) {
mdbx_error("too more loops %u, bailout", loop);
rc = MDBX_PROBLEM;
goto bailout;
@@ -8743,8 +8743,6 @@ retry_noaccount:
/* If using records from GC which we have not yet deleted,
* now delete them and any we reserved for tw.reclaimed_pglist. */
while (cleaned_gc_id <= txn->tw.last_reclaimed) {
gc_rid = cleaned_gc_id;
settled = 0;
rc = mdbx_cursor_first(&couple.outer, &key, NULL);
if (unlikely(rc != MDBX_SUCCESS)) {
if (rc == MDBX_NOTFOUND)
@@ -8756,6 +8754,9 @@ retry_noaccount:
rc = MDBX_CORRUPTED;
goto bailout;
}
gc_rid = cleaned_gc_id;
settled = 0;
reused_gc_slot = 0;
cleaned_gc_id = unaligned_peek_u64(4, key.iov_base);
if (!MDBX_DISABLE_PAGECHECKS &&
unlikely(cleaned_gc_id < MIN_TXNID || cleaned_gc_id > MAX_TXNID)) {
@@ -9121,6 +9122,7 @@ retry_noaccount:
} else if (rc != MDBX_NOTFOUND)
goto bailout;
txn->tw.last_reclaimed = gc_rid;
cleaned_gc_id = gc_rid + 1;
}
reservation_gc_id = gc_rid--;
mdbx_trace("%s: take @%" PRIaTXN " from head-gc-id", dbg_prefix_mode,
@@ -9198,7 +9200,7 @@ retry_noaccount:
key.iov_len = sizeof(reservation_gc_id);
key.iov_base = &reservation_gc_id;
data.iov_len = (chunk + 1) * sizeof(pgno_t);
mdbx_trace("%s.reserve: %u [%u...%u] @%" PRIaTXN, dbg_prefix_mode, chunk,
mdbx_trace("%s.reserve: %u [%u...%u) @%" PRIaTXN, dbg_prefix_mode, chunk,
settled + 1, settled + chunk + 1, reservation_gc_id);
mdbx_prep_backlog(txn, &couple.outer, data.iov_len);
rc = mdbx_cursor_put(&couple.outer, &key, &data,
@@ -9374,15 +9376,21 @@ retry_noaccount:
}
mdbx_tassert(txn, rc == MDBX_SUCCESS);
if (unlikely(txn->tw.loose_count != 0 ||
filled_gc_slot !=
(txn->tw.lifo_reclaimed
? (unsigned)MDBX_PNL_SIZE(txn->tw.lifo_reclaimed)
: 0))) {
mdbx_notice("** restart: reserve excess (filled-slot %u, loose-count %u)",
filled_gc_slot, txn->tw.loose_count);
if (unlikely(txn->tw.loose_count != 0)) {
mdbx_notice("** restart: got %u loose pages", txn->tw.loose_count);
goto retry;
}
if (unlikely(filled_gc_slot !=
(txn->tw.lifo_reclaimed
? (unsigned)MDBX_PNL_SIZE(txn->tw.lifo_reclaimed)
: 0))) {
const bool will_retry = loop < 9;
mdbx_notice("** %s: reserve excess (filled-slot %u, loop %u)",
will_retry ? "restart" : "ignore", filled_gc_slot, loop);
if (will_retry)
goto retry;
}
mdbx_tassert(txn,
txn->tw.lifo_reclaimed == NULL ||
@@ -10049,9 +10057,6 @@ int mdbx_txn_commit_ex(MDBX_txn *txn, MDBX_commit_latency *latency) {
ts_3 = latency ? mdbx_osal_monotime() : 0;
if (likely(rc == MDBX_SUCCESS)) {
if (txn->mt_dbs[MAIN_DBI].md_flags & DBI_DIRTY)
txn->mt_dbs[MAIN_DBI].md_mod_txnid = txn->mt_txnid;
txn->mt_dbs[FREE_DBI].md_mod_txnid = txn->mt_txnid;
MDBX_meta meta, *head = mdbx_meta_head(env);
memcpy(meta.mm_magic_and_version, head->mm_magic_and_version, 8);
@@ -10064,7 +10069,15 @@ int mdbx_txn_commit_ex(MDBX_txn *txn, MDBX_commit_latency *latency) {
meta.mm_geo = txn->mt_geo;
meta.mm_dbs[FREE_DBI] = txn->mt_dbs[FREE_DBI];
meta.mm_dbs[FREE_DBI].md_mod_txnid =
(txn->mt_dbistate[FREE_DBI] & DBI_DIRTY)
? txn->mt_txnid
: txn->mt_dbs[FREE_DBI].md_mod_txnid;
meta.mm_dbs[MAIN_DBI] = txn->mt_dbs[MAIN_DBI];
meta.mm_dbs[MAIN_DBI].md_mod_txnid =
(txn->mt_dbistate[MAIN_DBI] & DBI_DIRTY)
? txn->mt_txnid
: txn->mt_dbs[MAIN_DBI].md_mod_txnid;
meta.mm_canary = txn->mt_canary;
mdbx_meta_set_txnid(env, &meta, txn->mt_txnid);
@@ -10111,7 +10124,8 @@ static int mdbx_validate_meta(MDBX_env *env, MDBX_meta *const meta,
const uint64_t magic_and_version =
unaligned_peek_u64(4, &meta->mm_magic_and_version);
if (unlikely(magic_and_version != MDBX_DATA_MAGIC &&
magic_and_version != MDBX_DATA_MAGIC_DEVEL)) {
magic_and_version != MDBX_DATA_MAGIC_LEGACY_COMPAT &&
magic_and_version != MDBX_DATA_MAGIC_LEGACY_DEVEL)) {
mdbx_error("meta[%u] has invalid magic/version %" PRIx64, meta_number,
magic_and_version);
return ((magic_and_version >> 8) != MDBX_MAGIC) ? MDBX_INVALID
@@ -10423,10 +10437,9 @@ __cold static int mdbx_read_header(MDBX_env *env, MDBX_meta *dest,
}
if (dest->mm_psize == 0 ||
((env->me_stuck_meta < 0)
? (!META_IS_STEADY(dest) &&
!meta_weak_acceptable(env, dest, lck_exclusive))
: false)) {
(env->me_stuck_meta < 0 &&
!(META_IS_STEADY(dest) ||
meta_weak_acceptable(env, dest, lck_exclusive)))) {
mdbx_error("%s", "no usable meta-pages, database is corrupted");
if (rc == MDBX_SUCCESS) {
/* TODO: try to restore the database by fully checking b-tree structure
@@ -10475,8 +10488,8 @@ __cold static MDBX_page *mdbx_meta_model(const MDBX_env *env, MDBX_page *model,
pages2pv(pv2pages(model_meta->mm_geo.shrink_pv)));
model_meta->mm_psize = env->me_psize;
model_meta->mm_flags = (uint16_t)env->me_flags;
model_meta->mm_flags |=
model_meta->mm_flags = (uint16_t)env->me_flags & DB_PERSISTENT_FLAGS;
model_meta->mm_flags =
MDBX_INTEGERKEY; /* this is mm_dbs[FREE_DBI].md_flags */
model_meta->mm_dbs[FREE_DBI].md_root = P_INVALID;
model_meta->mm_dbs[MAIN_DBI].md_root = P_INVALID;
@@ -11050,11 +11063,12 @@ mdbx_env_set_geometry(MDBX_env *env, intptr_t size_lower, intptr_t size_now,
(env->me_txn0 && env->me_txn0->mt_owner == mdbx_thread_self());
#if MDBX_DEBUG
if (growth_step < 0)
if (growth_step < 0) {
growth_step = 1;
if (shrink_threshold < 0)
shrink_threshold = 1;
#endif
if (shrink_threshold < 0)
shrink_threshold = 1;
}
#endif /* MDBX_DEBUG */
intptr_t reasonable_maxsize = 0;
bool need_unlock = false;
@@ -11699,65 +11713,80 @@ __cold static int mdbx_setup_dxb(MDBX_env *env, const int lck_rc,
mdbx_assert(env, lck_rc == MDBX_RESULT_TRUE);
/* exclusive mode */
MDBX_meta clone;
MDBX_meta const *const steady = mdbx_meta_steady(env);
const txnid_t steady_txnid = mdbx_meta_txnid_fluid(env, steady);
MDBX_meta steady_clone;
err = mdbx_validate_meta_copy(env, steady, &steady_clone);
if (unlikely(err != MDBX_SUCCESS)) {
mdbx_error("meta[%u] with %s txnid %" PRIaTXN
" is corrupted, %s needed",
bytes2pgno(env, (uint8_t *)steady - env->me_map), "steady",
steady_txnid, "manual recovery");
return MDBX_CORRUPTED;
}
MDBX_meta const *const head = mdbx_meta_head(env);
if (steady == head)
break;
const txnid_t steady_txnid = mdbx_meta_txnid_fluid(env, steady);
if (META_IS_STEADY(steady)) {
err = mdbx_validate_meta_copy(env, steady, &clone);
if (unlikely(err != MDBX_SUCCESS)) {
mdbx_error("meta[%u] with %s txnid %" PRIaTXN
" is corrupted, %s needed",
bytes2pgno(env, (uint8_t *)steady - env->me_map), "steady",
steady_txnid, "manual recovery");
return MDBX_CORRUPTED;
}
if (steady == head)
break;
}
const pgno_t pgno = bytes2pgno(env, (uint8_t *)head - env->me_map);
const txnid_t head_txnid = mdbx_meta_txnid_fluid(env, head);
MDBX_meta head_clone;
const bool head_valid =
mdbx_validate_meta_copy(env, head, &head_clone) == MDBX_SUCCESS;
mdbx_validate_meta_copy(env, head, &clone) == MDBX_SUCCESS;
mdbx_assert(env, !META_IS_STEADY(steady) || head_txnid != steady_txnid);
if (unlikely(!head_valid)) {
mdbx_error("meta[%u] with %s txnid %" PRIaTXN
" is corrupted, %s needed",
pgno, "last", head_txnid, "rollback");
if (unlikely(!META_IS_STEADY(steady))) {
mdbx_error("%s for open or automatic rollback, %s",
"there are no suitable meta-pages",
"manual recovery is required");
return MDBX_CORRUPTED;
}
mdbx_warning("meta[%u] with last txnid %" PRIaTXN
" is corrupted, rollback needed",
pgno, head_txnid);
goto purge_meta_head;
}
mdbx_assert(env, head_txnid != head_txnid);
if (head_txnid == steady_txnid)
break;
mdbx_assert(env, META_IS_STEADY(steady) && !META_IS_STEADY(head));
if (meta_bootid_match(head)) {
mdbx_warning(
"opening after an unclean shutdown, but boot-id(%016" PRIx64
"-%016" PRIx64
") is MATCH: rollback NOT needed, steady-sync NEEDED%s",
bootid.x, bootid.y,
(env->me_flags & MDBX_RDONLY) ? ", but unable in read-only mode"
: "");
if (env->me_flags & MDBX_RDONLY)
if (env->me_flags & MDBX_RDONLY) {
mdbx_error("%s, but boot-id(%016" PRIx64 "-%016" PRIx64 ") is MATCH: "
"rollback NOT needed, steady-sync NEEDED%s",
"opening after an unclean shutdown", bootid.x, bootid.y,
", but unable in read-only mode");
return MDBX_WANNA_RECOVERY;
meta = head_clone;
}
mdbx_warning("%s, but boot-id(%016" PRIx64 "-%016" PRIx64 ") is MATCH: "
"rollback NOT needed, steady-sync NEEDED%s",
"opening after an unclean shutdown", bootid.x, bootid.y,
"");
meta = clone;
atomic_store32(&env->me_lck->mti_unsynced_pages, meta.mm_geo.next,
mo_Relaxed);
break;
}
if (unlikely(!META_IS_STEADY(steady))) {
mdbx_error("%s, but %s for automatic rollback: %s",
"opening after an unclean shutdown",
"there are no suitable meta-pages",
"manual recovery is required");
return MDBX_CORRUPTED;
}
if (env->me_flags & MDBX_RDONLY) {
mdbx_error("rollback needed: (from head %" PRIaTXN
" to steady %" PRIaTXN "), but unable in read-only mode",
head_txnid, steady_txnid);
mdbx_error("%s and rollback needed: (from head %" PRIaTXN
" to steady %" PRIaTXN ")%s",
"opening after an unclean shutdown", head_txnid,
steady_txnid, ", but unable in read-only mode");
return MDBX_WANNA_RECOVERY;
}
purge_meta_head:
mdbx_notice("rollback: purge%s meta[%u] with%s txnid %" PRIaTXN,
mdbx_notice("%s and doing automatic rollback: "
"purge%s meta[%u] with%s txnid %" PRIaTXN,
"opening after an unclean shutdown",
head_valid ? "" : " invalid", pgno, head_valid ? " weak" : "",
head_txnid);
mdbx_ensure(env, META_IS_STEADY(steady));
err = mdbx_override_meta(env, pgno, 0, head_valid ? head : steady);
if (err) {
mdbx_error("rollback: overwrite meta[%u] with txnid %" PRIaTXN
@@ -12226,7 +12255,7 @@ __cold int mdbx_env_turn_for_recovery(MDBX_env *env, unsigned target) {
} else {
txnid_t txnid = mdbx_meta_txnid_stable(env, &meta);
if (new_txnid <= txnid)
safe64_txnid_next(new_txnid);
new_txnid = safe64_txnid_next(txnid);
}
}
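The fix above assigns the result of `safe64_txnid_next()` instead of discarding it, which was the actual bug. A small sketch, unrelated to libmdbx internals, showing how `[[nodiscard]]` turns this class of mistake into a compiler diagnostic (the helper name is hypothetical):

```cpp
#include <cstdint>

// Marking a pure "compute the next value" helper [[nodiscard]] makes a bare
// call such as `txnid_next(candidate);` (the original bug pattern) warn.
[[nodiscard]] constexpr std::uint64_t txnid_next(std::uint64_t id) {
  return id + 1;
}

std::uint64_t bump(std::uint64_t current, std::uint64_t candidate) {
  if (candidate <= current)
    candidate = txnid_next(current);  // use the returned value
  return candidate;
}
```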
@@ -13088,10 +13117,11 @@ mdbx_page_get_ex(MDBX_cursor *const mc, const pgno_t pgno,
mdbx_tassert(txn, front <= txn->mt_front);
if (unlikely(pgno >= txn->mt_next_pgno)) {
mdbx_error("page #%" PRIaPGNO " beyond next-pgno", pgno);
notfound:
ret.page = nullptr;
corrupted:
mc->mc_txn->mt_flags |= MDBX_TXN_ERROR;
ret.err = MDBX_PAGE_NOTFOUND;
bailout:
mc->mc_txn->mt_flags |= MDBX_TXN_ERROR;
return ret;
}
@@ -13131,33 +13161,36 @@ dirty:
"mismatch actual pgno (%" PRIaPGNO ") != expected (%" PRIaPGNO
")\n",
ret.page->mp_pgno, pgno);
goto corrupted;
goto notfound;
}
#if !MDBX_DISABLE_PAGECHECKS
if (unlikely(ret.page->mp_flags & P_ILL_BITS)) {
bad_page(ret.page, "invalid page's flags (%u)\n", ret.page->mp_flags);
goto corrupted;
ret.err =
bad_page(ret.page, "invalid page's flags (%u)\n", ret.page->mp_flags);
goto bailout;
}
if (unlikely(ret.page->mp_txnid > front) &&
(ret.page->mp_txnid > txn->mt_front || front < txn->mt_txnid)) {
bad_page(ret.page,
"invalid page txnid (%" PRIaTXN ") for %s' txnid (%" PRIaTXN ")\n",
ret.page->mp_txnid,
(front == txn->mt_front && front != txn->mt_txnid) ? "front-txn"
: "parent-page",
front);
goto corrupted;
unlikely(ret.page->mp_txnid > txn->mt_front || front < txn->mt_txnid)) {
ret.err = bad_page(
ret.page,
"invalid page txnid (%" PRIaTXN ") for %s' txnid (%" PRIaTXN ")\n",
ret.page->mp_txnid,
(front == txn->mt_front && front != txn->mt_txnid) ? "front-txn"
: "parent-page",
front);
goto bailout;
}
if (unlikely((ret.page->mp_upper < ret.page->mp_lower ||
((ret.page->mp_lower | ret.page->mp_upper) & 1) ||
PAGEHDRSZ + ret.page->mp_upper > env->me_psize) &&
!IS_OVERFLOW(ret.page))) {
bad_page(ret.page, "invalid page lower(%u)/upper(%u) with limit (%u)\n",
ret.page->mp_lower, ret.page->mp_upper, page_space(env));
goto corrupted;
ret.err =
bad_page(ret.page, "invalid page lower(%u)/upper(%u) with limit (%u)\n",
ret.page->mp_lower, ret.page->mp_upper, page_space(env));
goto bailout;
}
#endif /* !MDBX_DISABLE_PAGECHECKS */
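In the hunk above the result of `bad_page()` is captured into `ret.err`, so a corrupted page is reported with a specific code rather than the generic `MDBX_PAGE_NOTFOUND` (see commit 514910621e). A hedged sketch of the pattern with invented names, not the real libmdbx functions:

```cpp
#include <cstdio>

// Hypothetical error codes and reporting helper -- only the pattern matters:
// the diagnostic helper both logs and returns the precise error code,
// so callers propagate it instead of a one-size-fits-all value.
enum Err { OK = 0, PAGE_NOTFOUND = -1, CORRUPTED = -2 };

static Err report_bad_page(const char *reason) {
  std::fprintf(stderr, "bad page: %s\n", reason);
  return CORRUPTED;
}

Err get_page(bool header_ok) {
  if (!header_ok)
    return report_bad_page("invalid page flags");  // was: log, then return PAGE_NOTFOUND
  return OK;
}
```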
@@ -13395,12 +13428,14 @@ __hot static int mdbx_page_search(MDBX_cursor *mc, const MDBX_val *key,
: mc->mc_txn->mt_txnid;
MDBX_txn *scan = mc->mc_txn;
do
if (scan->mt_dbistate[mc->mc_dbi] & DBI_DIRTY) {
if ((scan->mt_flags & MDBX_TXN_DIRTY) &&
(mc->mc_dbi == MAIN_DBI ||
(scan->mt_dbistate[mc->mc_dbi] & DBI_DIRTY))) {
pp_txnid = scan->mt_front;
break;
}
while ((scan = scan->mt_parent) != nullptr);
if (unlikely((rc = mdbx_page_get(mc, root, &mc->mc_pg[0], pp_txnid) != 0)))
while (unlikely((scan = scan->mt_parent) != nullptr));
if (unlikely((rc = mdbx_page_get(mc, root, &mc->mc_pg[0], pp_txnid)) != 0))
return rc;
}
@@ -19240,7 +19275,7 @@ __cold int mdbx_dbi_dupsort_depthmask(MDBX_txn *txn, MDBX_dbi dbi,
break;
case F_DUPDATA | F_SUBDATA:
/* sub-tree */
*mask |= 1 << unaligned_peek_u16(1, &db->md_depth);
*mask |= 1 << UNALIGNED_PEEK_16(db, MDBX_db, md_depth);
break;
default:
mdbx_error("wrong node-flags %u", flags);
@@ -19686,7 +19721,7 @@ static int dbi_open(MDBX_txn *txn, const char *table_name, unsigned user_flags,
/* Got info, register DBI in this txn */
memset(txn->mt_dbxs + slot, 0, sizeof(MDBX_dbx));
txn->mt_dbs[slot] = *(MDBX_db *)data.iov_base;
memcpy(&txn->mt_dbs[slot], data.iov_base, sizeof(MDBX_db));
env->me_dbflags[slot] = 0;
rc = mdbx_dbi_bind(txn, slot, user_flags, keycmp, datacmp);
if (unlikely(rc != MDBX_SUCCESS)) {
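The last hunk replaces the cast-and-assign `*(MDBX_db *)data.iov_base` with `memcpy()`. Dereferencing a pointer cast over a possibly unaligned buffer is undefined behaviour and can trap on strict-alignment CPUs; copying with `memcpy` into a properly aligned object is the portable idiom. A standalone sketch (the `Record` type is hypothetical):

```cpp
#include <cstdint>
#include <cstring>

struct Record {                     // stand-in for a value stored inside a page
  std::uint64_t txnid;
  std::uint32_t flags;
};

// UNSAFE: `bytes` may point anywhere inside a mapped page, with no alignment
// guarantee, so this cast-and-dereference is undefined behaviour.
Record load_unsafe(const unsigned char *bytes) {
  return *reinterpret_cast<const Record *>(bytes);
}

// SAFE: memcpy copies byte-wise into a correctly aligned object; compilers
// emit a plain load where alignment is provably fine.
Record load_safe(const unsigned char *bytes) {
  Record r;
  std::memcpy(&r, bytes, sizeof r);
  return r;
}
```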

View File

@@ -384,7 +384,7 @@ MDBX_MAYBE_UNUSED static
#define MDBX_MAGIC UINT64_C(/* 56-bit prime */ 0x59659DBDEF4C11)
/* FROZEN: The version number for a database's datafile format. */
#define MDBX_DATA_VERSION 2
#define MDBX_DATA_VERSION 3
/* The version number for a database's lockfile format. */
#define MDBX_LOCK_VERSION 4
@@ -437,7 +437,7 @@ typedef uint16_t indx_t;
/*----------------------------------------------------------------------------*/
/* Core structures for database and shared memory (i.e. format definition) */
#pragma pack(push, 1)
#pragma pack(push, 4)
/* Information about a single database in the environment. */
typedef struct MDBX_db {
@@ -517,6 +517,8 @@ typedef struct MDBX_meta {
} MDBX_meta;
#pragma pack(1)
/* Common header for all page types. The page type depends on mp_flags.
*
* P_BRANCH and P_LEAF pages have unsorted 'MDBX_node's at the end, with
@@ -787,7 +789,11 @@ typedef struct MDBX_lockinfo {
#define MDBX_DATA_MAGIC \
((MDBX_MAGIC << 8) + MDBX_PNL_ASCENDING * 64 + MDBX_DATA_VERSION)
#define MDBX_DATA_MAGIC_DEVEL ((MDBX_MAGIC << 8) + 255)
#define MDBX_DATA_MAGIC_LEGACY_COMPAT \
((MDBX_MAGIC << 8) + MDBX_PNL_ASCENDING * 64 + 2)
#define MDBX_DATA_MAGIC_LEGACY_DEVEL ((MDBX_MAGIC << 8) + 255)
#define MDBX_LOCK_MAGIC ((MDBX_MAGIC << 8) + MDBX_LOCK_VERSION)
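This header bumps `MDBX_DATA_VERSION` to 3, relaxes the packing of on-disk structures from `#pragma pack(push, 1)` to `pack(push, 4)`, and adds the `*_LEGACY_*` magic constants so files written with the previous signature are still recognized. A standalone sketch of what the pack value changes (the structs are hypothetical, not the real MDBX layouts):

```cpp
#include <cstddef>
#include <cstdint>

// With pack(1) every member can land at any byte offset and the compiler must
// assume unaligned access; with pack(4) members keep up to 4-byte alignment,
// so ordinary aligned loads can be emitted.
#pragma pack(push, 1)
struct HeaderPacked1 {
  std::uint16_t flags;
  std::uint64_t txnid;              // offset 2: misaligned for a 64-bit load
};
#pragma pack(pop)

#pragma pack(push, 4)
struct HeaderPacked4 {
  std::uint16_t flags;
  std::uint64_t txnid;              // offset 4: padded to a 4-byte boundary
};
#pragma pack(pop)

static_assert(offsetof(HeaderPacked1, txnid) == 2, "byte-packed layout");
static_assert(offsetof(HeaderPacked4, txnid) == 4, "4-byte-packed layout");
```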

View File

@@ -19,7 +19,7 @@
namespace chrono {
#pragma pack(push, 1)
#pragma pack(push, 4)
typedef union time {
uint64_t fixedpoint;

View File

@@ -135,8 +135,6 @@ inline bool parse_option_intptr(int argc, char *const argv[], int &narg,
//-----------------------------------------------------------------------------
#pragma pack(push, 1)
struct keygen_params_pod {
/* Параметры генератора пар key-value. Также может быть полезным описание
* алгоритма генерации в keygen.h
@@ -307,8 +305,6 @@ struct actor_config_pod {
wait4id(wait4id) {}
};
#pragma pack(pop)
extern const struct option_verb mode_bits[];
extern const struct option_verb table_bits[];
void dump(const char *title = "config-dump: ");

View File

@@ -128,6 +128,8 @@ void testcase_nested::push_txn() {
std::move(speculum_snapshot));
log_verbose("begin level#%zu txn #%" PRIu64 ", flags 0x%x, serial %" PRIu64,
stack.size(), mdbx_txn_id(nested_txn), flags, serial);
if (!dbi && stack.size() == 1)
dbi = db_table_open(true);
}
bool testcase_nested::pop_txn(bool abort) {
@@ -139,6 +141,9 @@ bool testcase_nested::pop_txn(bool abort) {
log_verbose(
"abort level#%zu txn #%" PRIu64 ", undo serial %" PRIu64 " <- %" PRIu64,
stack.size(), mdbx_txn_id(txn), serial, std::get<1>(stack.top()));
if (dbi > 0 && stack.size() == 1 &&
is_handle_created_in_current_txn(dbi, txn))
dbi = 0;
int err = mdbx_txn_abort(txn);
if (unlikely(err != MDBX_SUCCESS))
failure_perror("mdbx_txn_abort()", err);

test/stochastic_small.sh Executable file
View File

@@ -0,0 +1,365 @@
#!/usr/bin/env bash
if ! which make cc c++ tee >/dev/null; then
echo "Please install the following prerequisites: make cc c++ tee banner" >&2
exit 1
fi
LIST=--hill
FROM=1
UPTO=9999
MONITOR=
LOOPS=
SKIP_MAKE=no
BANNER="$(which banner 2>/dev/null | echo echo)"
UNAME="$(uname -s 2>/dev/null || echo Unknown)"
DB_UPTO_MB=17408
while [ -n "$1" ]
do
case "$1" in
--help)
echo "--multi Engage multi-process test scenario (default)"
echo "--single Execute series of single-process tests (for QEMU, etc)"
echo "--with-valgrind Run tests under Valgrind's memcheck tool"
echo "--skip-make Don't (re)build libmdbx and test's executable"
echo "--from NN Start iterating from the NN ops per test case"
echo "--upto NN Don't run tests with more than NN ops per test case"
echo "--loops NN Stop after the NN loops"
echo "--dir PATH Specifies directory for test DB and other files (it will be cleared)"
echo "--db-upto-mb NN Limits upper size of test DB to the NN megabytes"
echo "--help Print this usage help and exit"
exit -2
;;
--multi)
LIST=basic
;;
--single)
LIST="--nested --hill --append --ttl --copy"
;;
--with-valgrind)
echo " NOTE: Valgrind could produce some false-positive warnings"
echo " in multi-process environment with shared memory."
echo " For instance, when the process 'A' explicitly marks a memory"
echo " region as 'undefined', the process 'B' fill it,"
echo " and after this process 'A' read such region, etc."
MONITOR="valgrind --trace-children=yes --log-file=valgrind-%p.log --leak-check=full --track-origins=yes --error-exitcode=42 --suppressions=test/valgrind_suppress.txt"
rm -f valgrind-*.log
;;
--skip-make)
SKIP_MAKE=yes
;;
--from)
FROM=$(($2))
if [ -z "$FROM" -o "$FROM" -lt 1 ]; then
echo "Invalid value '$FROM' for --from option"
exit -2
fi
shift
;;
--upto)
UPTO=$(($2))
if [ -z "$UPTO" -o "$UPTO" -lt 1 ]; then
echo "Invalid value '$UPTO' for --upto option"
exit -2
fi
shift
;;
--loops)
LOOPS=$(($2))
if [ -z "$LOOPS" -o "$LOOPS" -lt 1 -o "$LOOPS" -gt 99 ]; then
echo "Invalid value '$LOOPS' for --loops option"
exit -2
fi
shift
;;
--dir)
TESTDB_DIR="$2"
if [ -z "$TESTDB_DIR" ]; then
echo "Invalid value '$TESTDB_DIR' for --dir option"
exit -2
fi
shift
;;
--db-upto-mb)
DB_UPTO_MB=$(($2))
if [ -z "$DB_UPTO_MB" -o "$DB_UPTO_MB" -lt 1 -o "$DB_UPTO_MB" -gt 4194304 ]; then
echo "Invalid value '$DB_UPTO_MB' for --db-upto-mb option"
exit -2
fi
shift
;;
*)
echo "Unknown option '$1'"
exit -2
;;
esac
shift
done
set -euo pipefail
if [ -z "$MONITOR" ]; then
if which time >/dev/null 2>/dev/null; then
MONITOR=$(which time)
if $MONITOR -o /dev/stdout true >/dev/null 2>/dev/null; then
MONITOR="$MONITOR -o /dev/stdout"
fi
fi
export MALLOC_CHECK_=7 MALLOC_PERTURB_=42
fi
###############################################################################
# 1. clean data from prev runs and examine available RAM
WANNA_MOUNT=0
case ${UNAME} in
Linux)
MAKE=make
if [ -z "${TESTDB_DIR:-}" ]; then
for old_test_dir in $(ls -d /dev/shm/mdbx-test.[0-9]* 2>/dev/null); do
rm -rf $old_test_dir
done
TESTDB_DIR="/dev/shm/mdbx-test.$$"
fi
mkdir -p $TESTDB_DIR && rm -f $TESTDB_DIR/*
if LC_ALL=C free | grep -q -i available; then
ram_avail_mb=$(($(LC_ALL=C free | grep -i Mem: | tr -s [:blank:] ' ' | cut -d ' ' -f 7) / 1024))
else
ram_avail_mb=$(($(LC_ALL=C free | grep -i Mem: | tr -s [:blank:] ' ' | cut -d ' ' -f 4) / 1024))
fi
;;
FreeBSD)
MAKE=gmake
if [ -z "${TESTDB_DIR:-}" ]; then
for old_test_dir in $(ls -d /tmp/mdbx-test.[0-9]* 2>/dev/null); do
umount $old_test_dir && rm -r $old_test_dir
done
TESTDB_DIR="/tmp/mdbx-test.$$"
rm -rf $TESTDB_DIR && mkdir -p $TESTDB_DIR
WANNA_MOUNT=1
else
mkdir -p $TESTDB_DIR && rm -f $TESTDB_DIR/*
fi
ram_avail_mb=$(($(LC_ALL=C vmstat -s | grep -ie '[0-9] pages free$' | cut -d p -f 1) * ($(LC_ALL=C vmstat -s | grep -ie '[0-9] bytes per page$' | cut -d b -f 1) / 1024) / 1024))
;;
Darwin)
MAKE=make
if [ -z "${TESTDB_DIR:-}" ]; then
for vol in $(ls -d /Volumes/mdx[0-9]*[0-9]tst 2>/dev/null); do
disk=$(mount | grep $vol | cut -d ' ' -f 1)
echo "umount: volume $vol disk $disk"
hdiutil unmount $vol -force
hdiutil detach $disk
done
TESTDB_DIR="/Volumes/mdx$$tst"
WANNA_MOUNT=1
else
mkdir -p $TESTDB_DIR && rm -f $TESTDB_DIR/*
fi
pagesize=$(($(LC_ALL=C vm_stat | grep -o 'page size of [0-9]\+ bytes' | cut -d' ' -f 4) / 1024))
freepages=$(LC_ALL=C vm_stat | grep '^Pages free:' | grep -o '[0-9]\+\.$' | cut -d'.' -f 1)
ram_avail_mb=$((pagesize * freepages / 1024))
echo "pagesize ${pagesize}K, freepages ${freepages}, ram_avail_mb ${ram_avail_mb}"
;;
*)
echo "FIXME: ${UNAME} not supported by this script"
exit 2
;;
esac
rm -f ${TESTDB_DIR}/*
###############################################################################
# 2. estimate reasonable RAM space for test-db
echo "=== ${ram_avail_mb}M RAM available"
ram_reserve4logs_mb=1234
if [ $ram_avail_mb -lt $ram_reserve4logs_mb ]; then
echo "=== At least ${ram_reserve4logs_mb}Mb RAM required"
exit 3
fi
#
# В режимах отличных от MDBX_WRITEMAP изменения до записи в файл
# будут накапливаться в памяти, что может потребовать свободной
# памяти размером с БД. Кроме этого, в тест входит сценарий
# создания копия БД на ходу. Поэтому БД не может быть больше 1/3
# от доступной памяти. Однако, следует учесть что malloc() будет
# не сразу возвращать выделенную память системе, а также
# предусмотреть места для логов.
#
# In non-MDBX_WRITEMAP modes, updates (dirty pages) will
# accumulate in memory before writing to the disk, which may
# require a free memory up to the size of a whole database. In
# addition, the test includes a script create a copy of the
# database on the go. Therefore, the database cannot be more 1/3
# of available memory. Moreover, should be taken into account
# that malloc() will not return the allocated memory to the
# system immediately, as well some space is required for logs.
#
db_size_mb=$(((ram_avail_mb - ram_reserve4logs_mb) / 4))
if [ $db_size_mb -gt $DB_UPTO_MB ]; then
db_size_mb=$DB_UPTO_MB
fi
echo "=== use ${db_size_mb}M for DB"
###############################################################################
# 3. Create test-directory in ramfs/tmpfs, i.e. create/format/mount if required
case ${UNAME} in
Linux)
;;
FreeBSD)
if [[ WANNA_MOUNT ]]; then
mount -t tmpfs tmpfs $TESTDB_DIR
fi
;;
Darwin)
if [[ WANNA_MOUNT ]]; then
ramdisk_size_mb=$((42 + db_size_mb * 2 + ram_reserve4logs_mb))
number_of_sectors=$((ramdisk_size_mb * 2048))
ramdev=$(hdiutil attach -nomount ram://${number_of_sectors})
diskutil erasevolume ExFAT "mdx$$tst" ${ramdev}
fi
;;
*)
echo "FIXME: ${UNAME} not supported by this script"
exit 2
;;
esac
###############################################################################
# 4. build the test executables
if [ "$SKIP_MAKE" != "yes" ]; then
${MAKE} -j$(which nproc >/dev/null 2>/dev/null && nproc || echo 2) build-test
fi
###############################################################################
# 5. run stochastic iterations
if which lz4 >/dev/null; then
function logger {
lz4 > ${TESTDB_DIR}/long.log.lz4
}
elif which gzip >/dev/null; then
function logger {
gzip > ${TESTDB_DIR}/long.log.gz
}
else
function logger {
cat > ${TESTDB_DIR}/long.log
}
fi
syncmodes=("" ,+nosync-safe ,+nosync-utterly)
options=(writemap coalesce lifo notls perturb)
function join { local IFS="$1"; shift; echo "$*"; }
function bits2options {
local bits=$1
local i
local list=()
for ((i = 0; i < ${#options[@]}; ++i)); do
list[$i]=$( (( (bits & (1 << i)) != 0 )) && echo -n '+' || echo -n '-'; echo ${options[$i]})
done
join , ${list[@]}
}
function failed {
echo "FAILED" >&2
exit 1
}
function check_deep {
if [ "$case" = "basic" -o "$case" = "--hill" ]; then
tee >(logger) | grep -e reach -e achieve
else
logger
fi
}
function probe {
echo "----------------------------------------------- $(date)"
echo "${caption}"
rm -f ${TESTDB_DIR}/* || failed
for case in $LIST
do
echo "Run ./mdbx_test ${speculum} --random-writemap=no --ignore-dbfull --repeat=1 --pathname=${TESTDB_DIR}/long.db --cleanup-after=no $@ $case"
${MONITOR} ./mdbx_test ${speculum} --random-writemap=no --ignore-dbfull --repeat=1 --pathname=${TESTDB_DIR}/long.db --cleanup-before=yes --cleanup-after=no "$@" $case | check_deep \
&& ${MONITOR} ./mdbx_chk ${TESTDB_DIR}/long.db | tee ${TESTDB_DIR}/long-chk.log \
&& ([ ! -e ${TESTDB_DIR}/long.db-copy ] || ${MONITOR} ./mdbx_chk ${TESTDB_DIR}/long.db-copy | tee ${TESTDB_DIR}/long-chk-copy.log) \
|| failed
done
}
#------------------------------------------------------------------------------
count=0
loop=0
cases='?'
for ((wbatch=FROM; wbatch<=UPTO; ++wbatch)); do
if [ -n "$LOOPS" ] && [ $loop -ge "$LOOPS" ]; then echo "The '--loops $LOOPS' limit reached"; break; fi
echo "======================================================================="
speculum=$([ $wbatch -le 1000 ] && echo '--speculum' || true)
nops=$((wbatch/7 + 1))
for ((rep=1; rep < 11; ++rep)); do
echo "======================================================================="
${BANNER} "$nops / $wbatch, repeat $rep"
subcase=0
for ((bits=2**${#options[@]}; --bits >= 0; )); do
seed=$(($(date +%s) + RANDOM))
split=30
caption="Probe #$((++count)) int-key,int-data, split=${split}, case $((++subcase)) of ${cases}" probe \
--pagesize=4K --size-upper-upto=${db_size_mb}M --table=+key.integer,+data.integer --keygen.split=${split} --keylen.min=min --keylen.max=max --datalen.min=min --datalen.max=max \
--nops=$nops --batch.write=$wbatch --mode=$(bits2options $bits)${syncmodes[count%3]} \
--keygen.seed=${seed}
split=24
caption="Probe #$((++count)) int-key,int-data, split=${split}, case $((++subcase)) of ${cases}" probe \
--pagesize=4K --size-upper-upto=${db_size_mb}M --table=+key.integer,+data.integer --keygen.split=${split} --keylen.min=min --keylen.max=max --datalen.min=min --datalen.max=max \
--nops=$nops --batch.write=$wbatch --mode=$(bits2options $bits)${syncmodes[count%3]} \
--keygen.seed=${seed}
split=16
caption="Probe #$((++count)) int-key,w/o-dups, split=${split}, case $((++subcase)) of ${cases}" probe \
--pagesize=4K --size-upper-upto=${db_size_mb}M --table=+key.integer,-data.dups --keygen.split=${split} --keylen.min=min --keylen.max=max --datalen.min=min --datalen.max=1111 \
--nops=$nops --batch.write=$wbatch --mode=$(bits2options $bits)${syncmodes[count%3]} \
--keygen.seed=${seed}
caption="Probe #$((++count)) int-key,int-data, split=${split}, case $((++subcase)) of ${cases}" probe \
--pagesize=4K --size-upper-upto=${db_size_mb}M --table=+key.integer,+data.integer --keygen.split=${split} --keylen.min=min --keylen.max=max --datalen.min=min --datalen.max=max \
--nops=$nops --batch.write=$wbatch --mode=$(bits2options $bits)${syncmodes[count%3]} \
--keygen.seed=${seed}
caption="Probe #$((++count)) w/o-dups, split=${split}, case $((++subcase)) of ${cases}" probe \
--pagesize=4K --size-upper-upto=${db_size_mb}M --table=-data.dups --keygen.split=${split} --keylen.min=min --keylen.max=max --datalen.min=min --datalen.max=1111 \
--nops=$nops --batch.write=$wbatch --mode=$(bits2options $bits)${syncmodes[count%3]} \
--keygen.seed=${seed}
split=4
caption="Probe #$((++count)) int-key,w/o-dups, split=${split}, case $((++subcase)) of ${cases}" probe \
--pagesize=4K --size-upper-upto=${db_size_mb}M --table=+key.integer,-data.dups --keygen.split=${split} --keylen.min=min --keylen.max=max --datalen.min=min --datalen.max=1111 \
--nops=$nops --batch.write=$wbatch --mode=$(bits2options $bits)${syncmodes[count%3]} \
--keygen.seed=${seed}
caption="Probe #$((++count)) int-key,int-data, split=${split}, case $((++subcase)) of ${cases}" probe \
--pagesize=4K --size-upper-upto=${db_size_mb}M --table=+key.integer,+data.integer --keygen.split=${split} --keylen.min=min --keylen.max=max --datalen.min=min --datalen.max=max \
--nops=$nops --batch.write=$wbatch --mode=$(bits2options $bits)${syncmodes[count%3]} \
--keygen.seed=${seed}
caption="Probe #$((++count)) w/o-dups, split=${split}, case $((++subcase)) of ${cases}" probe \
--pagesize=4K --size-upper-upto=${db_size_mb}M --table=-data.dups --keygen.split=${split} --keylen.min=min --keylen.max=max --datalen.min=min --datalen.max=1111 \
--nops=$nops --batch.write=$wbatch --mode=$(bits2options $bits)${syncmodes[count%3]} \
--keygen.seed=${seed}
done # options
cases="${subcase}"
done # repeats
loop=$((loop + 1))
if [ -n "$LOOPS" ] && [ $loop -ge "$LOOPS" ]; then break; fi
done # wbatch
echo "=== ALL DONE ====================== $(date)"

View File

@@ -477,6 +477,15 @@ void testcase::update_canary(uint64_t increment) {
log_trace("<< update_canary: sequence = %" PRIu64, canary_now.y);
}
bool testcase::is_handle_created_in_current_txn(const MDBX_dbi handle,
MDBX_txn *txn) {
unsigned flags, state;
int err = mdbx_dbi_flags_ex(txn, handle, &flags, &state);
if (unlikely(err != MDBX_SUCCESS))
failure_perror("mdbx_dbi_flags_ex()", err);
return (state & MDBX_DBI_CREAT) != 0;
}
int testcase::db_open__begin__table_create_open_clean(MDBX_dbi &handle) {
db_open();
@@ -484,6 +493,9 @@ int testcase::db_open__begin__table_create_open_clean(MDBX_dbi &handle) {
for (;;) {
txn_begin(false);
handle = db_table_open(true);
if (is_handle_created_in_current_txn(handle, txn_guard.get()))
return MDBX_SUCCESS;
db_table_clear(handle);
err = breakable_commit();
if (likely(err == MDBX_SUCCESS)) {

View File

@@ -268,6 +268,7 @@ protected:
void db_table_clear(MDBX_dbi handle, MDBX_txn *txn = nullptr);
void db_table_close(MDBX_dbi handle);
int db_open__begin__table_create_open_clean(MDBX_dbi &handle);
bool is_handle_created_in_current_txn(const MDBX_dbi handle, MDBX_txn *txn);
bool wait4start();
void report(size_t nops_done);