mdbx: avoid "unused static function" warnings.

Change-Id: I438a9fa3fa099ad96bca5c1af1f528da4a01ddff
Author: Leonid Yuriev
Date: 2019-09-19 13:23:47 +03:00
Parent: 90a3ed992e
Commit: d15a6b935c
3 changed files with 21 additions and 19 deletions
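The change annotates header-defined static inline helpers with __maybe_unused, so translation units that include the headers but never call a given helper no longer emit "unused static function" diagnostics. For context, a marker like this is commonly a thin wrapper over the GCC/Clang "unused" attribute; the sketch below is only an illustration under that assumption, not libmdbx's actual definition (which lives elsewhere in the tree and is not part of this diff):

/* Sketch only: a typical definition of a "maybe unused" marker, assuming a
 * GCC/Clang-compatible compiler; other toolchains would need their own
 * spelling, and some may simply keep warning. */
#if defined(__GNUC__) || defined(__clang__)
#define __maybe_unused __attribute__((__unused__))
#else
#define __maybe_unused /* nothing */
#endif

/* A static inline helper marked this way no longer triggers
 * -Wunused-function in files that include the header but never call it. */
static __maybe_unused inline unsigned add_one(unsigned x) { return x + 1; }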

File 1 of 3:

@@ -153,7 +153,7 @@
 #if !defined(__noop) && !defined(_MSC_VER)
 # ifdef __cplusplus
-static inline void __noop_consume_args() {}
+static __maybe_unused inline void __noop_consume_args() {}
 template <typename First, typename... Rest>
 static inline void
 __noop_consume_args(const First &first, const Rest &... rest) {
@@ -161,7 +161,7 @@
 }
 # define __noop(...) __noop_consume_args(__VA_ARGS__)
 # elif defined(__GNUC__) && (!defined(__STRICT_ANSI__) || !__STRICT_ANSI__)
-static __inline void __noop_consume_args(void* anchor, ...) {
+static __maybe_unused __inline void __noop_consume_args(void* anchor, ...) {
   (void) anchor;
 }
 # define __noop(...) __noop_consume_args(0, ##__VA_ARGS__)

File 2 of 3:

@@ -1319,17 +1319,17 @@ typedef struct MDBX_node {
 /* Do not spill pages to disk if txn is getting full, may fail instead */
 #define MDBX_NOSPILL 0x8000
-static __inline pgno_t pgno_add(pgno_t base, pgno_t augend) {
+static __maybe_unused __inline pgno_t pgno_add(pgno_t base, pgno_t augend) {
   assert(base <= MAX_PAGENO);
   return (augend < MAX_PAGENO - base) ? base + augend : MAX_PAGENO;
 }
-static __inline pgno_t pgno_sub(pgno_t base, pgno_t subtrahend) {
+static __maybe_unused __inline pgno_t pgno_sub(pgno_t base, pgno_t subtrahend) {
   assert(base >= MIN_PAGENO);
   return (subtrahend < base - MIN_PAGENO) ? base - subtrahend : MIN_PAGENO;
 }
-static __inline void mdbx_jitter4testing(bool tiny) {
+static __maybe_unused __inline void mdbx_jitter4testing(bool tiny) {
 #if MDBX_DEBUG
   if (MDBX_DBG_JITTER & mdbx_runtime_flags)
     mdbx_osal_jitter(tiny);

File 3 of 3:

@@ -358,7 +358,7 @@ typedef pthread_mutex_t mdbx_fastmutex_t;
 /*----------------------------------------------------------------------------*/
 /* Memory/Compiler barriers, cache coherence */
-static __inline void mdbx_compiler_barrier(void) {
+static __maybe_unused __inline void mdbx_compiler_barrier(void) {
 #if defined(__clang__) || defined(__GNUC__)
   __asm__ __volatile__("" ::: "memory");
 #elif defined(_MSC_VER)
@@ -386,7 +386,7 @@ static __inline void mdbx_compiler_barrier(void) {
 #endif
 }
-static __inline void mdbx_memory_barrier(void) {
+static __maybe_unused __inline void mdbx_memory_barrier(void) {
 #if __has_extension(c_atomic) || __has_extension(cxx_atomic)
   __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
 #elif defined(__ATOMIC_SEQ_CST)
@@ -465,8 +465,8 @@ static __inline void mdbx_memory_barrier(void) {
 #endif
 #endif /* ndef MDBX_CPU_CACHE_MMAP_NONCOHERENT */
-static __inline void mdbx_invalidate_mmap_noncoherent_cache(void *addr,
-                                                            size_t nbytes) {
+static __maybe_unused __inline void
+mdbx_invalidate_mmap_noncoherent_cache(void *addr, size_t nbytes) {
 #if MDBX_CPU_CACHE_MMAP_NONCOHERENT
 #ifdef DCACHE
   /* MIPS has cache coherency issues.
@@ -511,7 +511,7 @@ MDBX_INTERNAL_VAR uint32_t mdbx_linux_kernel_version;
 /* Get the size of a memory page for the system.
  * This is the basic size that the platform's memory manager uses, and is
  * fundamental to the use of memory-mapped files. */
-static __inline size_t mdbx_syspagesize(void) {
+static __maybe_unused __inline size_t mdbx_syspagesize(void) {
 #if defined(_WIN32) || defined(_WIN64)
   SYSTEM_INFO si;
   GetSystemInfo(&si);
@@ -525,7 +525,7 @@ static __inline size_t mdbx_syspagesize(void) {
 LIBMDBX_API char *mdbx_strdup(const char *str);
 #endif
-static __inline int mdbx_get_errno(void) {
+static __maybe_unused __inline int mdbx_get_errno(void) {
 #if defined(_WIN32) || defined(_WIN64)
   DWORD rc = GetLastError();
 #else
@@ -624,7 +624,7 @@ MDBX_INTERNAL_FUNC int mdbx_msync(mdbx_mmap_t *map, size_t offset,
                                   size_t length, int async);
 MDBX_INTERNAL_FUNC int mdbx_check4nonlocal(mdbx_filehandle_t handle, int flags);
-static __inline mdbx_pid_t mdbx_getpid(void) {
+static __maybe_unused __inline mdbx_pid_t mdbx_getpid(void) {
 #if defined(_WIN32) || defined(_WIN64)
   return GetCurrentProcessId();
 #else
@@ -632,7 +632,7 @@ static __inline mdbx_pid_t mdbx_getpid(void) {
 #endif
 }
-static __inline mdbx_tid_t mdbx_thread_self(void) {
+static __maybe_unused __inline mdbx_tid_t mdbx_thread_self(void) {
 #if defined(_WIN32) || defined(_WIN64)
 return GetCurrentThreadId();
 #else
@@ -860,7 +860,8 @@ MDBX_INTERNAL_VAR MDBX_DiscardVirtualMemory mdbx_DiscardVirtualMemory;
 #error FIXME atomic-ops
 #endif
-static __inline uint32_t mdbx_atomic_add32(volatile uint32_t *p, uint32_t v) {
+static __maybe_unused __inline uint32_t mdbx_atomic_add32(volatile uint32_t *p,
+                                                          uint32_t v) {
 #if !defined(__cplusplus) && defined(ATOMIC_VAR_INIT)
   assert(atomic_is_lock_free(p));
   return atomic_fetch_add((_Atomic uint32_t *)p, v);
@@ -877,7 +878,8 @@ static __inline uint32_t mdbx_atomic_add32(volatile uint32_t *p, uint32_t v) {
 #endif
 }
-static __inline uint64_t mdbx_atomic_add64(volatile uint64_t *p, uint64_t v) {
+static __maybe_unused __inline uint64_t mdbx_atomic_add64(volatile uint64_t *p,
+                                                          uint64_t v) {
 #if !defined(__cplusplus) && defined(ATOMIC_VAR_INIT)
   assert(atomic_is_lock_free(p));
   return atomic_fetch_add((_Atomic uint64_t *)p, v);
@@ -900,8 +902,8 @@ static __inline uint64_t mdbx_atomic_add64(volatile uint64_t *p, uint64_t v) {
 #define mdbx_atomic_sub32(p, v) mdbx_atomic_add32(p, 0 - (v))
 #define mdbx_atomic_sub64(p, v) mdbx_atomic_add64(p, 0 - (v))
-static __inline bool mdbx_atomic_compare_and_swap32(volatile uint32_t *p,
-                                                    uint32_t c, uint32_t v) {
+static __maybe_unused __inline bool
+mdbx_atomic_compare_and_swap32(volatile uint32_t *p, uint32_t c, uint32_t v) {
 #if !defined(__cplusplus) && defined(ATOMIC_VAR_INIT)
   assert(atomic_is_lock_free(p));
   return atomic_compare_exchange_strong((_Atomic uint32_t *)p, &c, v);
@@ -918,8 +920,8 @@ static __inline bool mdbx_atomic_compare_and_swap32(volatile uint32_t *p,
 #endif
 }
-static __inline bool mdbx_atomic_compare_and_swap64(volatile uint64_t *p,
-                                                    uint64_t c, uint64_t v) {
+static __maybe_unused __inline bool
+mdbx_atomic_compare_and_swap64(volatile uint64_t *p, uint64_t c, uint64_t v) {
 #if !defined(__cplusplus) && defined(ATOMIC_VAR_INIT)
   assert(atomic_is_lock_free(p));
   return atomic_compare_exchange_strong((_Atomic uint64_t *)p, &c, v);