mdbx: интенсивное использование __restrict, в том числе при определении элементов структур.

This commit is contained in:
Леонид Юрьев (Leonid Yuriev) 2023-11-02 23:15:38 +03:00
parent 96504bf338
commit 903d964f4d
2 changed files with 36 additions and 30 deletions

View File

@@ -2265,7 +2265,7 @@ static void pnl_free(MDBX_PNL pl) {
}
/* Shrink the PNL to the default size if it has grown larger */
static void pnl_shrink(MDBX_PNL *ppl) {
static void pnl_shrink(MDBX_PNL __restrict *__restrict ppl) {
assert(pnl_bytes2size(pnl_size2bytes(MDBX_PNL_INITIAL)) >= MDBX_PNL_INITIAL &&
pnl_bytes2size(pnl_size2bytes(MDBX_PNL_INITIAL)) <
MDBX_PNL_INITIAL * 3 / 2);
@@ -2288,7 +2288,8 @@ static void pnl_shrink(MDBX_PNL *ppl) {
}
/* Grow the PNL to the size growed to at least given size */
static int pnl_reserve(MDBX_PNL *ppl, const size_t wanna) {
static int pnl_reserve(MDBX_PNL __restrict *__restrict ppl,
const size_t wanna) {
const size_t allocated = MDBX_PNL_ALLOCLEN(*ppl);
assert(MDBX_PNL_GETSIZE(*ppl) <= MDBX_PGL_LIMIT &&
MDBX_PNL_ALLOCLEN(*ppl) >= MDBX_PNL_GETSIZE(*ppl));
@@ -2318,8 +2319,8 @@ static int pnl_reserve(MDBX_PNL *ppl, const size_t wanna) {
}
/* Make room for num additional elements in an PNL */
static __always_inline int __must_check_result pnl_need(MDBX_PNL *ppl,
size_t num) {
static __always_inline int __must_check_result
pnl_need(MDBX_PNL __restrict *__restrict ppl, size_t num) {
assert(MDBX_PNL_GETSIZE(*ppl) <= MDBX_PGL_LIMIT &&
MDBX_PNL_ALLOCLEN(*ppl) >= MDBX_PNL_GETSIZE(*ppl));
assert(num <= MDBX_PGL_LIMIT);
@@ -2328,7 +2329,7 @@ static __always_inline int __must_check_result pnl_need(MDBX_PNL *ppl,
: pnl_reserve(ppl, wanna);
}
static __always_inline void pnl_xappend(MDBX_PNL pl, pgno_t pgno) {
static __always_inline void pnl_xappend(__restrict MDBX_PNL pl, pgno_t pgno) {
assert(MDBX_PNL_GETSIZE(pl) < MDBX_PNL_ALLOCLEN(pl));
if (AUDIT_ENABLED()) {
for (size_t i = MDBX_PNL_GETSIZE(pl); i > 0; --i)
@@ -2339,10 +2340,8 @@ static __always_inline void pnl_xappend(MDBX_PNL pl, pgno_t pgno) {
}
/* Append an pgno range onto an unsorted PNL */
__always_inline static int __must_check_result pnl_append_range(bool spilled,
MDBX_PNL *ppl,
pgno_t pgno,
size_t n) {
__always_inline static int __must_check_result pnl_append_range(
bool spilled, __restrict MDBX_PNL *ppl, pgno_t pgno, size_t n) {
assert(n > 0);
int rc = pnl_need(ppl, n);
if (unlikely(rc != MDBX_SUCCESS))
@@ -2369,7 +2368,7 @@ __always_inline static int __must_check_result pnl_append_range(bool spilled,
}
/* Append an pgno range into the sorted PNL */
__hot static int __must_check_result pnl_insert_range(MDBX_PNL *ppl,
__hot static int __must_check_result pnl_insert_range(__restrict MDBX_PNL *ppl,
pgno_t pgno, size_t n) {
assert(n > 0);
int rc = pnl_need(ppl, n);
@@ -2673,7 +2672,8 @@ static void txl_free(MDBX_TXL tl) {
osal_free(tl - 1);
}
static int txl_reserve(MDBX_TXL *ptl, const size_t wanna) {
static int txl_reserve(MDBX_TXL __restrict *__restrict ptl,
const size_t wanna) {
const size_t allocated = (size_t)MDBX_PNL_ALLOCLEN(*ptl);
assert(MDBX_PNL_GETSIZE(*ptl) <= MDBX_TXL_MAX &&
MDBX_PNL_ALLOCLEN(*ptl) >= MDBX_PNL_GETSIZE(*ptl));
@@ -2702,8 +2702,8 @@ static int txl_reserve(MDBX_TXL *ptl, const size_t wanna) {
return MDBX_ENOMEM;
}
static __always_inline int __must_check_result txl_need(MDBX_TXL *ptl,
size_t num) {
static __always_inline int __must_check_result
txl_need(MDBX_TXL __restrict *__restrict ptl, size_t num) {
assert(MDBX_PNL_GETSIZE(*ptl) <= MDBX_TXL_MAX &&
MDBX_PNL_ALLOCLEN(*ptl) >= MDBX_PNL_GETSIZE(*ptl));
assert(num <= MDBX_PGL_LIMIT);
@@ -2712,7 +2712,7 @@ static __always_inline int __must_check_result txl_need(MDBX_TXL *ptl,
: txl_reserve(ptl, wanna);
}
static __always_inline void txl_xappend(MDBX_TXL tl, txnid_t id) {
static __always_inline void txl_xappend(MDBX_TXL __restrict tl, txnid_t id) {
assert(MDBX_PNL_GETSIZE(tl) < MDBX_PNL_ALLOCLEN(tl));
tl[0] += 1;
MDBX_PNL_LAST(tl) = id;
@@ -2724,7 +2724,8 @@ static void txl_sort(MDBX_TXL tl) {
txnid_sort(MDBX_PNL_BEGIN(tl), MDBX_PNL_END(tl));
}
static int __must_check_result txl_append(MDBX_TXL *ptl, txnid_t id) {
static int __must_check_result txl_append(MDBX_TXL __restrict *ptl,
txnid_t id) {
if (unlikely(MDBX_PNL_GETSIZE(*ptl) == MDBX_PNL_ALLOCLEN(*ptl))) {
int rc = txl_need(ptl, MDBX_TXL_GRANULATE);
if (unlikely(rc != MDBX_SUCCESS))
@@ -4582,7 +4583,8 @@ static void refund_loose(MDBX_txn *txn) {
/* Filter-out loose chain & dispose refunded pages. */
unlink_loose:
for (MDBX_page **link = &txn->tw.loose_pages; *link;) {
for (MDBX_page *__restrict *__restrict link = &txn->tw.loose_pages;
*link;) {
MDBX_page *dp = *link;
tASSERT(txn, dp->mp_flags == P_LOOSE);
MDBX_ASAN_UNPOISON_MEMORY_REGION(&mp_next(dp), sizeof(MDBX_page *));

View File

@@ -93,6 +93,10 @@
disable : 5105) /* winbase.h(9531): warning C5105: macro expansion \
producing 'defined' has undefined behavior */
#endif
#if _MSC_VER < 1920
/* avoid "error C2219: syntax error: type qualifier must be after '*'" */
#define __restrict
#endif
#if _MSC_VER > 1930
#pragma warning(disable : 6235) /* <expression> is always a constant */
#pragma warning(disable : 6237) /* <expression> is never evaluated and might \
@@ -1193,7 +1197,7 @@ struct MDBX_txn {
MDBX_db *mt_dbs;
#if MDBX_ENABLE_DBI_SPARSE
unsigned *mt_dbi_sparse;
unsigned *__restrict mt_dbi_sparse;
#endif /* MDBX_ENABLE_DBI_SPARSE */
/* Non-shared DBI state flags inside transaction */
@@ -1205,10 +1209,10 @@ struct MDBX_txn {
#define DBI_OLDEN 0x40 /* Handle was closed/reopened outside txn */
#define DBI_LINDO 0x80 /* Lazy initialization done for DBI-slot */
/* Array of non-shared txn's flags of DBI */
uint8_t *mt_dbi_state;
uint8_t *__restrict mt_dbi_state;
/* Array of sequence numbers for each DB handle. */
uint32_t *mt_dbi_seqs;
uint32_t *__restrict mt_dbi_seqs;
MDBX_cursor **mt_cursors;
MDBX_canary mt_canary;
@@ -1222,8 +1226,8 @@ struct MDBX_txn {
struct {
meta_troika_t troika;
/* In write txns, array of cursors for each DB */
MDBX_PNL relist; /* Reclaimed GC pages */
txnid_t last_reclaimed; /* ID of last used record */
MDBX_PNL __restrict relist; /* Reclaimed GC pages */
txnid_t last_reclaimed; /* ID of last used record */
#if MDBX_ENABLE_REFUND
pgno_t loose_refund_wl /* FIXME: describe */;
#endif /* MDBX_ENABLE_REFUND */
@@ -1235,14 +1239,14 @@ struct MDBX_txn {
* dirtylist into mt_parent after freeing hidden mt_parent pages. */
size_t dirtyroom;
/* For write txns: Modified pages. Sorted when not MDBX_WRITEMAP. */
MDBX_dpl *dirtylist;
MDBX_dpl *__restrict dirtylist;
/* The list of reclaimed txns from GC */
MDBX_TXL lifo_reclaimed;
MDBX_TXL __restrict lifo_reclaimed;
/* The list of pages that became unused during this transaction. */
MDBX_PNL retired_pages;
MDBX_PNL __restrict retired_pages;
/* The list of loose pages that became unused and may be reused
* in this transaction, linked through `mp_next`. */
MDBX_page *loose_pages;
MDBX_page *__restrict loose_pages;
/* Number of loose pages (tw.loose_pages) */
size_t loose_count;
union {
@@ -1251,7 +1255,7 @@ struct MDBX_txn {
/* The sorted list of dirty pages we temporarily wrote to disk
* because the dirty list was full. page numbers in here are
* shifted left by 1, deleted slots have the LSB set. */
MDBX_PNL list;
MDBX_PNL __restrict list;
} spilled;
size_t writemap_dirty_npages;
size_t writemap_spilled_npages;
@@ -1295,7 +1299,7 @@ struct MDBX_cursor {
/* The database auxiliary record for this cursor */
MDBX_dbx *mc_dbx;
/* The mt_dbi_state[] for this DBI */
uint8_t *mc_dbi_state;
uint8_t *__restrict mc_dbi_state;
uint8_t mc_snum; /* number of pushed pages */
uint8_t mc_top; /* index of top page, normally mc_snum-1 */
@@ -1401,7 +1405,7 @@ struct MDBX_env {
MDBX_txn *me_txn0; /* preallocated write transaction */
MDBX_dbx *me_dbxs; /* array of static DB info */
uint16_t *me_db_flags; /* array of flags from MDBX_db.md_flags */
uint16_t *__restrict me_db_flags; /* array of flags from MDBX_db.md_flags */
MDBX_atomic_uint32_t *me_dbi_seqs; /* array of dbi sequence numbers */
unsigned
me_maxgc_ov1page; /* Number of pgno_t fit in a single overflow page */
@@ -1468,10 +1472,10 @@ struct MDBX_env {
unsigned me_numdbs; /* number of DBs opened */
unsigned me_dp_reserve_len;
MDBX_page *me_dp_reserve; /* list of malloc'ed blocks for re-use */
MDBX_page *__restrict me_dp_reserve; /* list of malloc'ed blocks for re-use */
/* PNL of pages that became unused in a write txn */
MDBX_PNL me_retired_pages;
MDBX_PNL __restrict me_retired_pages;
osal_ioring_t me_ioring;
#if defined(_WIN32) || defined(_WIN64)