Merge branch 'master' into preupdate_hook

This commit is contained in:
Midas Lambrichts 2021-04-08 21:51:44 +02:00
commit 55dc6e1979
34 changed files with 7548 additions and 4924 deletions

View File

@ -1,6 +1,6 @@
[package]
name = "rusqlite"
version = "0.24.2"
version = "0.25.0"
authors = ["The rusqlite developers"]
edition = "2018"
description = "Ergonomic wrapper for SQLite"
@ -12,7 +12,6 @@ license = "MIT"
categories = ["database"]
[badges]
travis-ci = { repository = "rusqlite/rusqlite" }
appveyor = { repository = "rusqlite/rusqlite" }
codecov = { repository = "rusqlite/rusqlite" }
maintenance = { status = "actively-developed" }
@ -110,7 +109,7 @@ fallible-iterator = "0.2"
fallible-streaming-iterator = "0.1"
memchr = "2.3"
uuid = { version = "0.8", optional = true }
smallvec = "1.0"
smallvec = "1.6.1"
[dev-dependencies]
doc-comment = "0.3"
@ -125,7 +124,7 @@ bencher = "0.1"
[dependencies.libsqlite3-sys]
path = "libsqlite3-sys"
version = "0.21.0"
version = "0.22.0"
[[test]]
name = "config_log"

View File

@ -124,11 +124,11 @@ You can adjust this behavior in a number of ways:
* If you use the `bundled` feature, `libsqlite3-sys` will use the
[cc](https://crates.io/crates/cc) crate to compile SQLite from source and
link against that. This source is embedded in the `libsqlite3-sys` crate and
is currently SQLite 3.34.0 (as of `rusqlite` 0.24.1 / `libsqlite3-sys`
0.21.0). This is probably the simplest solution to any build problems. You can enable this by adding the following in your `Cargo.toml` file:
is currently SQLite 3.35.4 (as of `rusqlite` 0.25.0 / `libsqlite3-sys`
0.22.0). This is probably the simplest solution to any build problems. You can enable this by adding the following in your `Cargo.toml` file:
```toml
[dependencies.rusqlite]
version = "0.24.2"
version = "0.25.0"
features = ["bundled"]
```
* When using the `bundled` feature, the build script will honor `SQLITE_MAX_VARIABLE_NUMBER` and `SQLITE_MAX_EXPR_DEPTH` variables. It will also honor a `LIBSQLITE3_FLAGS` variable, which can have a format like `"-USQLITE_ALPHA -DSQLITE_BETA SQLITE_GAMMA ..."`. That would disable the `SQLITE_ALPHA` flag, and set the `SQLITE_BETA` and `SQLITE_GAMMA` flags. (The initial `-D` can be omitted, as on the last one.)

View File

@ -1,6 +1,6 @@
[package]
name = "libsqlite3-sys"
version = "0.21.0"
version = "0.22.0"
authors = ["The rusqlite developers"]
edition = "2018"
repository = "https://github.com/rusqlite/rusqlite"
@ -37,7 +37,7 @@ wasm32-wasi-vfs = []
winsqlite3 = ["min_sqlite_version_3_7_16"]
[build-dependencies]
bindgen = { version = "0.57", optional = true, default-features = false, features = ["runtime"] }
bindgen = { version = "0.58", optional = true, default-features = false, features = ["runtime"] }
pkg-config = { version = "0.3", optional = true }
cc = { version = "1.0", optional = true }

1
libsqlite3-sys/README.md Symbolic link
View File

@ -0,0 +1 @@
../README.md

View File

@ -393,32 +393,32 @@ mod bindings {
if cfg!(all(windows, feature = "winsqlite3")) {
bindings = bindings
.clang_arg("-DBINDGEN_USE_WINSQLITE3")
.blacklist_item("NTDDI_.+")
.blacklist_item("WINAPI_FAMILY.*")
.blacklist_item("_WIN32_.+")
.blacklist_item("_VCRT_COMPILER_PREPROCESSOR")
.blacklist_item("_SAL_VERSION")
.blacklist_item("__SAL_H_VERSION")
.blacklist_item("_USE_DECLSPECS_FOR_SAL")
.blacklist_item("_USE_ATTRIBUTES_FOR_SAL")
.blacklist_item("_CRT_PACKING")
.blacklist_item("_HAS_EXCEPTIONS")
.blacklist_item("_STL_LANG")
.blacklist_item("_HAS_CXX17")
.blacklist_item("_HAS_CXX20")
.blacklist_item("_HAS_NODISCARD")
.blacklist_item("WDK_NTDDI_VERSION")
.blacklist_item("OSVERSION_MASK")
.blacklist_item("SPVERSION_MASK")
.blacklist_item("SUBVERSION_MASK")
.blacklist_item("WINVER")
.blacklist_item("__security_cookie")
.blacklist_type("size_t")
.blacklist_type("__vcrt_bool")
.blacklist_type("wchar_t")
.blacklist_function("__security_init_cookie")
.blacklist_function("__report_gsfailure")
.blacklist_function("__va_start");
.blocklist_item("NTDDI_.+")
.blocklist_item("WINAPI_FAMILY.*")
.blocklist_item("_WIN32_.+")
.blocklist_item("_VCRT_COMPILER_PREPROCESSOR")
.blocklist_item("_SAL_VERSION")
.blocklist_item("__SAL_H_VERSION")
.blocklist_item("_USE_DECLSPECS_FOR_SAL")
.blocklist_item("_USE_ATTRIBUTES_FOR_SAL")
.blocklist_item("_CRT_PACKING")
.blocklist_item("_HAS_EXCEPTIONS")
.blocklist_item("_STL_LANG")
.blocklist_item("_HAS_CXX17")
.blocklist_item("_HAS_CXX20")
.blocklist_item("_HAS_NODISCARD")
.blocklist_item("WDK_NTDDI_VERSION")
.blocklist_item("OSVERSION_MASK")
.blocklist_item("SPVERSION_MASK")
.blocklist_item("SUBVERSION_MASK")
.blocklist_item("WINVER")
.blocklist_item("__security_cookie")
.blocklist_type("size_t")
.blocklist_type("__vcrt_bool")
.blocklist_type("wchar_t")
.blocklist_function("__security_init_cookie")
.blocklist_function("__report_gsfailure")
.blocklist_function("__va_start");
}
// When cross compiling unless effort is taken to fix the issue, bindgen
@ -440,14 +440,14 @@ mod bindings {
if generating_bundled_bindings() || is_cross_compiling {
// Get rid of va_list, as it's not
bindings = bindings
.blacklist_function("sqlite3_vmprintf")
.blacklist_function("sqlite3_vsnprintf")
.blacklist_function("sqlite3_str_vappendf")
.blacklist_type("va_list")
.blacklist_type("__builtin_va_list")
.blacklist_type("__gnuc_va_list")
.blacklist_type("__va_list_tag")
.blacklist_item("__GNUC_VA_LIST");
.blocklist_function("sqlite3_vmprintf")
.blocklist_function("sqlite3_vsnprintf")
.blocklist_function("sqlite3_str_vappendf")
.blocklist_type("va_list")
.blocklist_type("__builtin_va_list")
.blocklist_type("__gnuc_va_list")
.blocklist_type("__va_list_tag")
.blocklist_item("__GNUC_VA_LIST");
}
bindings

View File

@ -1,9 +1,9 @@
/* automatically generated by rust-bindgen 0.56.0 */
/* automatically generated by rust-bindgen 0.57.0 */
pub const SQLITE_VERSION: &'static [u8; 7usize] = b"3.34.0\0";
pub const SQLITE_VERSION_NUMBER: i32 = 3034000;
pub const SQLITE_VERSION: &'static [u8; 7usize] = b"3.35.4\0";
pub const SQLITE_VERSION_NUMBER: i32 = 3035004;
pub const SQLITE_SOURCE_ID: &'static [u8; 85usize] =
b"2020-12-01 16:14:00 a26b6597e3ae272231b96f9982c3bcc17ddec2f2b6eb4df06a224b91089fed5b\0";
b"2021-04-02 15:20:15 5d4c65779dab868b285519b19e4cf9d451d50c6048f06f653aa701ec212df45e\0";
pub const SQLITE_OK: i32 = 0;
pub const SQLITE_ERROR: i32 = 1;
pub const SQLITE_INTERNAL: i32 = 2;
@ -388,7 +388,8 @@ pub const SQLITE_TESTCTRL_RESULT_INTREAL: i32 = 27;
pub const SQLITE_TESTCTRL_PRNG_SEED: i32 = 28;
pub const SQLITE_TESTCTRL_EXTRA_SCHEMA_CHECKS: i32 = 29;
pub const SQLITE_TESTCTRL_SEEK_COUNT: i32 = 30;
pub const SQLITE_TESTCTRL_LAST: i32 = 30;
pub const SQLITE_TESTCTRL_TRACEFLAGS: i32 = 31;
pub const SQLITE_TESTCTRL_LAST: i32 = 31;
pub const SQLITE_STATUS_MEMORY_USED: i32 = 0;
pub const SQLITE_STATUS_PAGECACHE_USED: i32 = 1;
pub const SQLITE_STATUS_PAGECACHE_OVERFLOW: i32 = 2;

File diff suppressed because it is too large Load Diff

View File

@ -123,9 +123,9 @@ extern "C" {
** [sqlite3_libversion_number()], [sqlite3_sourceid()],
** [sqlite_version()] and [sqlite_source_id()].
*/
#define SQLITE_VERSION "3.34.0"
#define SQLITE_VERSION_NUMBER 3034000
#define SQLITE_SOURCE_ID "2020-12-01 16:14:00 a26b6597e3ae272231b96f9982c3bcc17ddec2f2b6eb4df06a224b91089fed5b"
#define SQLITE_VERSION "3.35.4"
#define SQLITE_VERSION_NUMBER 3035004
#define SQLITE_SOURCE_ID "2021-04-02 15:20:15 5d4c65779dab868b285519b19e4cf9d451d50c6048f06f653aa701ec212df45e"
/*
** CAPI3REF: Run-Time Library Version Numbers
@ -2115,7 +2115,13 @@ struct sqlite3_mem_methods {
** The second parameter is a pointer to an integer into which
** is written 0 or 1 to indicate whether triggers are disabled or enabled
** following this call. The second parameter may be a NULL pointer, in
** which case the trigger setting is not reported back. </dd>
** which case the trigger setting is not reported back.
**
** <p>Originally this option disabled all triggers. ^(However, since
** SQLite version 3.35.0, TEMP triggers are still allowed even if
** this option is off. So, in other words, this option now only disables
** triggers in the main database schema or in the schemas of ATTACH-ed
** databases.)^ </dd>
**
** [[SQLITE_DBCONFIG_ENABLE_VIEW]]
** <dt>SQLITE_DBCONFIG_ENABLE_VIEW</dt>
@ -2126,7 +2132,13 @@ struct sqlite3_mem_methods {
** The second parameter is a pointer to an integer into which
** is written 0 or 1 to indicate whether views are disabled or enabled
** following this call. The second parameter may be a NULL pointer, in
** which case the view setting is not reported back. </dd>
** which case the view setting is not reported back.
**
** <p>Originally this option disabled all views. ^(However, since
** SQLite version 3.35.0, TEMP views are still allowed even if
** this option is off. So, in other words, this option now only disables
** views in the main database schema or in the schemas of ATTACH-ed
** databases.)^ </dd>
**
** [[SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER]]
** <dt>SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER</dt>
@ -3499,6 +3511,7 @@ SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*);
** that uses dot-files in place of posix advisory locking.
** <tr><td> file:data.db?mode=readonly <td>
** An error. "readonly" is not a valid option for the "mode" parameter.
** Use "ro" instead: "file:data.db?mode=ro".
** </table>
**
** ^URI hexadecimal escape sequences (%HH) are supported within the path and
@ -3697,7 +3710,7 @@ SQLITE_API sqlite3_file *sqlite3_database_file_object(const char*);
** If the Y parameter to sqlite3_free_filename(Y) is anything other
** than a NULL pointer or a pointer previously acquired from
** sqlite3_create_filename(), then bad things such as heap
** corruption or segfaults may occur. The value Y should be
** corruption or segfaults may occur. The value Y should not be
** used again after sqlite3_free_filename(Y) has been called. This means
** that if the [sqlite3_vfs.xOpen()] method of a VFS has been called using Y,
** then the corresponding [sqlite3_module.xClose() method should also be
@ -7765,7 +7778,8 @@ SQLITE_API int sqlite3_test_control(int op, ...);
#define SQLITE_TESTCTRL_PRNG_SEED 28
#define SQLITE_TESTCTRL_EXTRA_SCHEMA_CHECKS 29
#define SQLITE_TESTCTRL_SEEK_COUNT 30
#define SQLITE_TESTCTRL_LAST 30 /* Largest TESTCTRL */
#define SQLITE_TESTCTRL_TRACEFLAGS 31
#define SQLITE_TESTCTRL_LAST 31 /* Largest TESTCTRL */
/*
** CAPI3REF: SQL Keyword Checking
@ -10438,6 +10452,14 @@ SQLITE_API int sqlite3session_patchset(
*/
SQLITE_API int sqlite3session_isempty(sqlite3_session *pSession);
/*
** CAPI3REF: Query for the amount of heap memory used by a session object.
**
** This API returns the total amount of heap memory in bytes currently
** used by the session object passed as the only argument.
*/
SQLITE_API sqlite3_int64 sqlite3session_memory_used(sqlite3_session *pSession);
/*
** CAPI3REF: Create An Iterator To Traverse A Changeset
** CONSTRUCTOR: sqlite3_changeset_iter
@ -10540,18 +10562,23 @@ SQLITE_API int sqlite3changeset_next(sqlite3_changeset_iter *pIter);
** call to [sqlite3changeset_next()] must have returned [SQLITE_ROW]. If this
** is not the case, this function returns [SQLITE_MISUSE].
**
** If argument pzTab is not NULL, then *pzTab is set to point to a
** nul-terminated utf-8 encoded string containing the name of the table
** affected by the current change. The buffer remains valid until either
** sqlite3changeset_next() is called on the iterator or until the
** conflict-handler function returns. If pnCol is not NULL, then *pnCol is
** set to the number of columns in the table affected by the change. If
** pbIndirect is not NULL, then *pbIndirect is set to true (1) if the change
** Arguments pOp, pnCol and pzTab may not be NULL. Upon return, three
** outputs are set through these pointers:
**
** *pOp is set to one of [SQLITE_INSERT], [SQLITE_DELETE] or [SQLITE_UPDATE],
** depending on the type of change that the iterator currently points to;
**
** *pnCol is set to the number of columns in the table affected by the change; and
**
** *pzTab is set to point to a nul-terminated utf-8 encoded string containing
** the name of the table affected by the current change. The buffer remains
** valid until either sqlite3changeset_next() is called on the iterator
** or until the conflict-handler function returns.
**
** If pbIndirect is not NULL, then *pbIndirect is set to true (1) if the change
** is an indirect change, or false (0) otherwise. See the documentation for
** [sqlite3session_indirect()] for a description of direct and indirect
** changes. Finally, if pOp is not NULL, then *pOp is set to one of
** [SQLITE_INSERT], [SQLITE_DELETE] or [SQLITE_UPDATE], depending on the
** type of change that the iterator currently points to.
** changes.
**
** If no error occurs, SQLITE_OK is returned. If an error does occur, an
** SQLite error code is returned. The values of the output variables may not

View File

@ -23,7 +23,7 @@ pub enum ErrorCode {
/// Operation terminated by sqlite3_interrupt()
OperationInterrupted,
/// Some kind of disk I/O error occurred
SystemIOFailure,
SystemIoFailure,
/// The database disk image is malformed
DatabaseCorrupt,
/// Unknown opcode in sqlite3_file_control()
@ -43,7 +43,7 @@ pub enum ErrorCode {
/// Data type mismatch
TypeMismatch,
/// Library used incorrectly
APIMisuse,
ApiMisuse,
/// Uses OS features not supported on host
NoLargeFileSupport,
/// Authorization denied
@ -73,7 +73,7 @@ impl Error {
super::SQLITE_NOMEM => ErrorCode::OutOfMemory,
super::SQLITE_READONLY => ErrorCode::ReadOnly,
super::SQLITE_INTERRUPT => ErrorCode::OperationInterrupted,
super::SQLITE_IOERR => ErrorCode::SystemIOFailure,
super::SQLITE_IOERR => ErrorCode::SystemIoFailure,
super::SQLITE_CORRUPT => ErrorCode::DatabaseCorrupt,
super::SQLITE_NOTFOUND => ErrorCode::NotFound,
super::SQLITE_FULL => ErrorCode::DiskFull,
@ -83,7 +83,7 @@ impl Error {
super::SQLITE_TOOBIG => ErrorCode::TooBig,
super::SQLITE_CONSTRAINT => ErrorCode::ConstraintViolation,
super::SQLITE_MISMATCH => ErrorCode::TypeMismatch,
super::SQLITE_MISUSE => ErrorCode::APIMisuse,
super::SQLITE_MISUSE => ErrorCode::ApiMisuse,
super::SQLITE_NOLFS => ErrorCode::NoLargeFileSupport,
super::SQLITE_AUTH => ErrorCode::AuthorizationForStatementDenied,
super::SQLITE_RANGE => ErrorCode::ParameterOutOfRange,

View File

@ -18,6 +18,7 @@ pub fn SQLITE_TRANSIENT() -> sqlite3_destructor_type {
/// Run-Time Limit Categories
#[repr(i32)]
#[non_exhaustive]
#[allow(clippy::upper_case_acronyms)]
pub enum Limit {
/// The maximum size of any string or BLOB or table row, in bytes.
SQLITE_LIMIT_LENGTH = SQLITE_LIMIT_LENGTH,

View File

@ -6,8 +6,8 @@ cd "$SCRIPT_DIR" || { echo "fatal error"; exit 1; }
export SQLITE3_LIB_DIR=$SCRIPT_DIR/sqlite3
# Download and extract amalgamation
SQLITE=sqlite-amalgamation-3340000
curl -O https://sqlite.org/2020/$SQLITE.zip
SQLITE=sqlite-amalgamation-3350400
curl -O https://sqlite.org/2021/$SQLITE.zip
unzip -p "$SQLITE.zip" "$SQLITE/sqlite3.c" > "$SQLITE3_LIB_DIR/sqlite3.c"
unzip -p "$SQLITE.zip" "$SQLITE/sqlite3.h" > "$SQLITE3_LIB_DIR/sqlite3.h"
unzip -p "$SQLITE.zip" "$SQLITE/sqlite3ext.h" > "$SQLITE3_LIB_DIR/sqlite3ext.h"

View File

@ -3,9 +3,10 @@
//! To create a [`Backup`], you must have two distinct [`Connection`]s - one
//! for the source (which can be used while the backup is running) and one for
//! the destination (which cannot). A [`Backup`] handle exposes three methods:
//! [`step`](Backup::step) will attempt to back up a specified number of pages, [`progress`](Backup::progress) gets
//! the current progress of the backup as of the last call to [`step`](Backup::step), and
//! [`run_to_completion`](Backup::run_to_completion) will attempt to back up the entire source database,
//! [`step`](Backup::step) will attempt to back up a specified number of pages,
//! [`progress`](Backup::progress) gets the current progress of the backup as of
//! the last call to [`step`](Backup::step), and [`run_to_completion`](Backup::run_to_completion)
//! will attempt to back up the entire source database,
//! allowing you to specify how many pages are backed up at a time and how long
//! the thread should sleep between chunks of pages.
//!
@ -130,7 +131,8 @@ impl Connection {
}
}
/// `feature = "backup"` Possible successful results of calling [`Backup::step`].
/// `feature = "backup"` Possible successful results of calling
/// [`Backup::step`].
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[non_exhaustive]
pub enum StepResult {
@ -152,9 +154,10 @@ pub enum StepResult {
/// `feature = "backup"` Struct specifying the progress of a backup. The
/// percentage completion can be calculated as `(pagecount - remaining) /
/// pagecount`. The progress of a backup is as of the last call to [`step`](Backup::step) - if
/// the source database is modified after a call to [`step`](Backup::step), the progress value
/// will become outdated and potentially incorrect.
/// pagecount`. The progress of a backup is as of the last call to
/// [`step`](Backup::step) - if the source database is modified after a call to
/// [`step`](Backup::step), the progress value will become outdated and
/// potentially incorrect.
#[derive(Copy, Clone, Debug)]
pub struct Progress {
/// Number of pages in the source database that still need to be backed up.
@ -225,7 +228,8 @@ impl Backup<'_, '_> {
})
}
/// Gets the progress of the backup as of the last call to [`step`](Backup::step).
/// Gets the progress of the backup as of the last call to
/// [`step`](Backup::step).
#[inline]
pub fn progress(&self) -> Progress {
unsafe {
@ -240,7 +244,8 @@ impl Backup<'_, '_> {
/// negative, will attempt to back up all remaining pages. This will hold a
/// lock on the source database for the duration, so it is probably not
/// what you want for databases that are currently active (see
/// [`run_to_completion`](Backup::run_to_completion) for a better alternative).
/// [`run_to_completion`](Backup::run_to_completion) for a better
/// alternative).
///
/// # Failure
///
@ -262,12 +267,12 @@ impl Backup<'_, '_> {
}
}
/// Attempts to run the entire backup. Will call [`step(pages_per_step)`](Backup::step) as
/// many times as necessary, sleeping for `pause_between_pages` between
/// each call to give the source database time to process any pending
/// queries. This is a direct implementation of "Example 2: Online Backup
/// of a Running Database" from [SQLite's Online Backup API
/// documentation](https://www.sqlite.org/backup.html).
/// Attempts to run the entire backup. Will call
/// [`step(pages_per_step)`](Backup::step) as many times as necessary,
/// sleeping for `pause_between_pages` between each call to give the
/// source database time to process any pending queries. This is a
/// direct implementation of "Example 2: Online Backup of a Running
/// Database" from [SQLite's Online Backup API documentation](https://www.sqlite.org/backup.html).
///
/// If `progress` is not `None`, it will be called after each step with the
/// current progress of the backup. Note that is possible the progress may
@ -276,7 +281,8 @@ impl Backup<'_, '_> {
///
/// # Failure
///
/// Will return `Err` if any of the calls to [`step`](Backup::step) return `Err`.
/// Will return `Err` if any of the calls to [`step`](Backup::step) return
/// `Err`.
pub fn run_to_completion(
&self,
pages_per_step: c_int,

View File

@ -19,11 +19,11 @@ impl Connection {
///
/// There can only be a single busy handler for a particular database
/// connection at any given moment. If another busy handler was defined
/// (using [`busy_handler`](Connection::busy_handler)) prior to calling this routine, that other
/// busy handler is cleared.
/// (using [`busy_handler`](Connection::busy_handler)) prior to calling this
/// routine, that other busy handler is cleared.
///
/// Newly created connections currently have a default busy timeout of 5000ms, but this may be
/// subject to change.
/// Newly created connections currently have a default busy timeout of
/// 5000ms, but this may be subject to change.
pub fn busy_timeout(&self, timeout: Duration) -> Result<()> {
let ms: i32 = timeout
.as_secs()
@ -48,12 +48,13 @@ impl Connection {
///
/// There can only be a single busy handler defined for each database
/// connection. Setting a new busy handler clears any previously set
/// handler. Note that calling [`busy_timeout()`](Connection::busy_timeout) or evaluating `PRAGMA
/// busy_timeout=N` will change the busy handler and thus
/// clear any previously set busy handler.
/// handler. Note that calling [`busy_timeout()`](Connection::busy_timeout)
/// or evaluating `PRAGMA busy_timeout=N` will change the busy handler
/// and thus clear any previously set busy handler.
///
/// Newly created connections default to a [`busy_timeout()`](Connection::busy_timeout) handler
/// with a timeout of 5000ms, although this is subject to change.
/// Newly created connections default to a
/// [`busy_timeout()`](Connection::busy_timeout) handler with a timeout
/// of 5000ms, although this is subject to change.
pub fn busy_handler(&self, callback: Option<fn(i32) -> bool>) -> Result<()> {
unsafe extern "C" fn busy_handler_callback(p_arg: *mut c_void, count: c_int) -> c_int {
let handler_fn: fn(i32) -> bool = mem::transmute(p_arg);

View File

@ -63,7 +63,8 @@ pub struct StatementCache(RefCell<LruCache<Arc<str>, RawStatement>>);
/// Cacheable statement.
///
/// Statement will return automatically to the cache by default.
/// If you want the statement to be discarded, call [`discard()`](CachedStatement::discard) on it.
/// If you want the statement to be discarded, call
/// [`discard()`](CachedStatement::discard) on it.
pub struct CachedStatement<'conn> {
stmt: Option<Statement<'conn>>,
cache: &'conn StatementCache,

View File

@ -97,7 +97,8 @@ impl InnerConnection {
)
};
let res = self.decode_result(r);
// The xDestroy callback is not called if the sqlite3_create_collation_v2() function fails.
// The xDestroy callback is not called if the sqlite3_create_collation_v2()
// function fails.
if res.is_err() {
drop(unsafe { Box::from_raw(boxed_f) });
}
@ -109,6 +110,7 @@ impl InnerConnection {
x_coll_needed: fn(&Connection, &str) -> Result<()>,
) -> Result<()> {
use std::mem;
#[allow(clippy::needless_return)]
unsafe extern "C" fn collation_needed_callback(
arg1: *mut c_void,
arg2: *mut ffi::sqlite3,
@ -128,7 +130,7 @@ impl InnerConnection {
let conn = Connection::from_handle(arg2).unwrap();
let collation_name = {
let c_slice = CStr::from_ptr(arg3).to_bytes();
str::from_utf8(c_slice).expect("illegal coallation sequence name")
str::from_utf8(c_slice).expect("illegal collation sequence name")
};
callback(&conn, collation_name)
});

View File

@ -25,6 +25,10 @@ impl Column<'_> {
impl Statement<'_> {
/// Get all the column names in the result set of the prepared statement.
///
/// If associated DB schema can be altered concurrently, you should make
/// sure that current statement has already been stepped once before
/// calling this method.
pub fn column_names(&self) -> Vec<&str> {
let n = self.column_count();
let mut cols = Vec::with_capacity(n as usize);
@ -37,11 +41,34 @@ impl Statement<'_> {
/// Return the number of columns in the result set returned by the prepared
/// statement.
///
/// If associated DB schema can be altered concurrently, you should make
/// sure that current statement has already been stepped once before
/// calling this method.
#[inline]
pub fn column_count(&self) -> usize {
self.stmt.column_count()
}
/// Check that column name reference lifetime is limited:
/// https://www.sqlite.org/c3ref/column_name.html
/// > The returned string pointer is valid...
///
/// `column_name` reference can become invalid if `stmt` is reprepared
/// (because of schema change) when `query_row` is called. So we assert
/// that a compilation error happens if this reference is kept alive:
/// ```compile_fail
/// use rusqlite::{Connection, Result};
/// fn main() -> Result<()> {
/// let db = Connection::open_in_memory()?;
/// let mut stmt = db.prepare("SELECT 1 as x")?;
/// let column_name = stmt.column_name(0)?;
/// let x = stmt.query_row([], |r| r.get::<_, i64>(0))?; // E0502
/// assert_eq!(1, x);
/// assert_eq!("x", column_name);
/// Ok(())
/// }
/// ```
#[inline]
pub(super) fn column_name_unwrap(&self, col: usize) -> &str {
// Just panic if the bounds are wrong for now, we never call this
@ -52,6 +79,10 @@ impl Statement<'_> {
/// Returns the name assigned to a particular column in the result set
/// returned by the prepared statement.
///
/// If associated DB schema can be altered concurrently, you should make
/// sure that current statement has already been stepped once before
/// calling this method.
///
/// ## Failure
///
/// Returns an `Error::InvalidColumnIndex` if `idx` is outside the valid
@ -73,6 +104,10 @@ impl Statement<'_> {
/// If there is no AS clause then the name of the column is unspecified and
/// may change from one release of SQLite to the next.
///
/// If associated DB schema can be altered concurrently, you should make
/// sure that current statement has already been stepped once before
/// calling this method.
///
/// # Failure
///
/// Will return an `Error::InvalidColumnName` when there is no column with
@ -92,6 +127,10 @@ impl Statement<'_> {
}
/// Returns a slice describing the columns of the result of the query.
///
/// If associated DB schema can be altered concurrently, you should make
/// sure that current statement has already been stepped once before
/// calling this method.
#[cfg(feature = "column_decltype")]
pub fn columns(&self) -> Vec<Column> {
let n = self.column_count();
@ -234,4 +273,24 @@ mod test {
}
Ok(())
}
/// `column_name` reference should stay valid until `stmt` is reprepared (or
/// reset) even if DB schema is altered (SQLite documentation is
/// ambiguous here because it says reference "is valid until (...) the next
/// call to sqlite3_column_name() or sqlite3_column_name16() on the same
/// column.". We assume that reference is valid if only `sqlite3_column_name()` is used):
#[test]
#[cfg(feature = "modern_sqlite")]
fn test_column_name_reference() -> Result<()> {
let db = Connection::open_in_memory()?;
db.execute_batch("CREATE TABLE y (x);")?;
let stmt = db.prepare("SELECT x FROM y;")?;
let column_name = stmt.column_name(0)?;
assert_eq!("x", column_name);
db.execute_batch("ALTER TABLE y RENAME COLUMN x TO z;")?;
// column name is not refreshed until statement is re-prepared
let same_column_name = stmt.column_name(0)?;
assert_eq!(same_column_name, column_name);
Ok(())
}
}

View File

@ -10,6 +10,7 @@ use crate::{Connection, Result};
#[repr(i32)]
#[allow(non_snake_case, non_camel_case_types)]
#[non_exhaustive]
#[allow(clippy::upper_case_acronyms)]
pub enum DbConfig {
//SQLITE_DBCONFIG_MAINDBNAME = 1000, /* const char* */
//SQLITE_DBCONFIG_LOOKASIDE = 1001, /* void* int int */

View File

@ -43,7 +43,8 @@ pub enum Error {
/// Error converting a file path to a string.
InvalidPath(PathBuf),
/// Error returned when an [`execute`](crate::Connection::execute) call returns rows.
/// Error returned when an [`execute`](crate::Connection::execute) call
/// returns rows.
ExecuteReturnedResults,
/// Error when a query that was expected to return at least one row (e.g.,
@ -67,12 +68,13 @@ pub enum Error {
/// any or insert many.
StatementChangedRows(usize),
/// Error returned by [`functions::Context::get`](crate::functions::Context::get) when the function argument
/// cannot be converted to the requested type.
/// Error returned by
/// [`functions::Context::get`](crate::functions::Context::get) when the
/// function argument cannot be converted to the requested type.
#[cfg(feature = "functions")]
InvalidFunctionParameterType(usize, Type),
/// Error returned by [`vtab::Values::get`](crate::vtab::Values::get) when the filter argument cannot
/// be converted to the requested type.
/// Error returned by [`vtab::Values::get`](crate::vtab::Values::get) when
/// the filter argument cannot be converted to the requested type.
#[cfg(feature = "vtab")]
InvalidFilterParameterType(usize, Type),
@ -82,7 +84,8 @@ pub enum Error {
#[allow(dead_code)]
UserFunctionError(Box<dyn error::Error + Send + Sync + 'static>),
/// Error available for the implementors of the [`ToSql`](crate::types::ToSql) trait.
/// Error available for the implementors of the
/// [`ToSql`](crate::types::ToSql) trait.
ToSqlConversionFailure(Box<dyn error::Error + Send + Sync + 'static>),
/// Error when the SQL is not a `SELECT`, is not read-only.
@ -98,8 +101,10 @@ pub enum Error {
#[cfg(feature = "functions")]
UnwindingPanic,
/// An error returned when [`Context::get_aux`](crate::functions::Context::get_aux) attempts to retrieve data
/// of a different type than what had been stored using [`Context::set_aux`](crate::functions::Context::set_aux).
/// An error returned when
/// [`Context::get_aux`](crate::functions::Context::get_aux) attempts to
/// retrieve data of a different type than what had been stored using
/// [`Context::set_aux`](crate::functions::Context::set_aux).
#[cfg(feature = "functions")]
GetAuxWrongType,

View File

@ -128,7 +128,8 @@ impl Context<'_> {
///
/// # Failure
///
/// Will panic if `idx` is greater than or equal to [`self.len()`](Context::len).
/// Will panic if `idx` is greater than or equal to
/// [`self.len()`](Context::len).
///
/// Will return Err if the underlying SQLite type cannot be converted to a
/// `T`.
@ -158,7 +159,8 @@ impl Context<'_> {
///
/// # Failure
///
/// Will panic if `idx` is greater than or equal to [`self.len()`](Context::len).
/// Will panic if `idx` is greater than or equal to
/// [`self.len()`](Context::len).
#[inline]
pub fn get_raw(&self, idx: usize) -> ValueRef<'_> {
let arg = self.args[idx];
@ -167,7 +169,8 @@ impl Context<'_> {
/// Fetch or insert the auxilliary data associated with a particular
/// parameter. This is intended to be an easier-to-use way of fetching it
/// compared to calling [`get_aux`](Context::get_aux) and [`set_aux`](Context::set_aux) separately.
/// compared to calling [`get_aux`](Context::get_aux) and
/// [`set_aux`](Context::set_aux) separately.
///
/// See `https://www.sqlite.org/c3ref/get_auxdata.html` for a discussion of
/// this feature, or the unit tests of this module for an example.
@ -208,9 +211,9 @@ impl Context<'_> {
}
/// Gets the auxilliary data that was associated with a given parameter via
/// [`set_aux`](Context::set_aux). Returns `Ok(None)` if no data has been associated, and
/// Ok(Some(v)) if it has. Returns an error if the requested type does not
/// match.
/// [`set_aux`](Context::set_aux). Returns `Ok(None)` if no data has been
/// associated, and Ok(Some(v)) if it has. Returns an error if the
/// requested type does not match.
pub fn get_aux<T: Send + Sync + 'static>(&self, arg: c_int) -> Result<Option<Arc<T>>> {
let p = unsafe { ffi::sqlite3_get_auxdata(self.ctx, arg) as *const AuxInner };
if p.is_null() {
@ -268,8 +271,9 @@ where
T: ToSql,
{
/// Initializes the aggregation context. Will be called prior to the first
/// call to [`step()`](Aggregate::step) to set up the context for an invocation of the
/// function. (Note: `init()` will not be called if there are no rows.)
/// call to [`step()`](Aggregate::step) to set up the context for an
/// invocation of the function. (Note: `init()` will not be called if
/// there are no rows.)
fn init(&self, _: &mut Context<'_>) -> Result<A>;
/// "step" function called once for each row in an aggregate group. May be
@ -277,10 +281,12 @@ where
fn step(&self, _: &mut Context<'_>, _: &mut A) -> Result<()>;
/// Computes and returns the final result. Will be called exactly once for
/// each invocation of the function. If [`step()`](Aggregate::step) was called at least
/// once, will be given `Some(A)` (the same `A` as was created by
/// [`init`](Aggregate::init) and given to [`step`](Aggregate::step)); if [`step()`](Aggregate::step) was not called (because
/// the function is running against 0 rows), will be given `None`.
/// each invocation of the function. If [`step()`](Aggregate::step) was
/// called at least once, will be given `Some(A)` (the same `A` as was
/// created by [`init`](Aggregate::init) and given to
/// [`step`](Aggregate::step)); if [`step()`](Aggregate::step) was not
/// called (because the function is running against 0 rows), will be
/// given `None`.
///
/// The passed context will have no arguments.
fn finalize(&self, _: &mut Context<'_>, _: Option<A>) -> Result<T>;
@ -344,7 +350,8 @@ impl Connection {
/// given the same input, `deterministic` should be `true`.
///
/// The function will remain available until the connection is closed or
/// until it is explicitly removed via [`remove_function`](Connection::remove_function).
/// until it is explicitly removed via
/// [`remove_function`](Connection::remove_function).
///
/// # Example
///
@ -440,7 +447,8 @@ impl Connection {
/// database connection.
///
/// `fn_name` and `n_arg` should match the name and number of arguments
/// given to [`create_scalar_function`](Connection::create_scalar_function) or [`create_aggregate_function`](Connection::create_aggregate_function).
/// given to [`create_scalar_function`](Connection::create_scalar_function)
/// or [`create_aggregate_function`](Connection::create_aggregate_function).
///
/// # Failure
///

View File

@ -8,6 +8,7 @@ use crate::ffi;
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(i32)]
#[non_exhaustive]
#[allow(clippy::upper_case_acronyms)]
pub enum Action {
/// Unsupported / unexpected action
UNKNOWN = -1,

View File

@ -433,18 +433,14 @@ fn ensure_safe_sqlite_threading_mode() -> Result<()> {
}
unsafe {
let msg = "\
Could not ensure safe initialization of SQLite.
To fix this, either:
* Upgrade SQLite to at least version 3.7.0
* Ensure that SQLite has been initialized in Multi-thread or Serialized mode and call
rusqlite::bypass_sqlite_initialization() prior to your first connection attempt.";
if ffi::sqlite3_config(ffi::SQLITE_CONFIG_MULTITHREAD) != ffi::SQLITE_OK {
panic!(msg);
}
if ffi::sqlite3_initialize() != ffi::SQLITE_OK {
panic!(msg);
if ffi::sqlite3_config(ffi::SQLITE_CONFIG_MULTITHREAD) != ffi::SQLITE_OK || ffi::sqlite3_initialize() != ffi::SQLITE_OK {
panic!(
"Could not ensure safe initialization of SQLite.\n\
To fix this, either:\n\
* Upgrade SQLite to at least version 3.7.0\n\
* Ensure that SQLite has been initialized in Multi-thread or Serialized mode and call\n\
rusqlite::bypass_sqlite_initialization() prior to your first connection attempt."
);
}
}
});

View File

@ -1921,4 +1921,17 @@ mod test {
}
Ok(())
}
#[test]
#[cfg(feature = "bundled")] // SQLite >= 3.35.0
fn test_returning() -> Result<()> {
    // RETURNING support landed in SQLite 3.35.0, guaranteed by `bundled`.
    let db = checked_memory_handle();
    db.execute_batch("CREATE TABLE foo(x INTEGER PRIMARY KEY)")?;
    // An INSERT with a RETURNING clause behaves like a query: the generated
    // rowid comes back as the single column of the single result row.
    let returned: i64 =
        db.query_row("INSERT INTO foo DEFAULT VALUES RETURNING ROWID", [], |row| {
            row.get(0)
        })?;
    assert_eq!(returned, 1);
    Ok(())
}
}

View File

@ -170,6 +170,7 @@ impl RawStatement {
r
}
// does not work for PRAGMA
#[inline]
#[cfg(all(feature = "extra_check", feature = "modern_sqlite"))] // 3.7.4
pub fn readonly(&self) -> bool {

View File

@ -29,7 +29,8 @@ impl<'stmt> Rows<'stmt> {
/// This interface is not compatible with Rust's `Iterator` trait, because
/// the lifetime of the returned row is tied to the lifetime of `self`.
/// This is a fallible "streaming iterator". For a more natural interface,
/// consider using [`query_map`](crate::Statement::query_map) or [`query_and_then`](crate::Statement::query_and_then) instead, which
/// consider using [`query_map`](crate::Statement::query_map) or
/// [`query_and_then`](crate::Statement::query_and_then) instead, which
/// return types that implement `Iterator`.
#[allow(clippy::should_implement_trait)] // cannot implement Iterator
#[inline]
@ -331,8 +332,8 @@ impl<'stmt> Row<'stmt> {
///
/// ## Failure
///
/// Panics if calling [`row.get_ref(idx)`](Row::get_ref) would return an error,
/// including:
/// Panics if calling [`row.get_ref(idx)`](Row::get_ref) would return an
/// error, including:
///
/// * If `idx` is outside the range of columns in the returned query.
/// * If `idx` is not a valid column name for this row.

View File

@ -411,7 +411,8 @@ impl Drop for ChangesetIter<'_> {
}
/// `feature = "session"` An item passed to a conflict-handler by
/// [`Connection::apply`](crate::Connection::apply), or an item generated by [`ChangesetIter::next`](ChangesetIter::next).
/// [`Connection::apply`](crate::Connection::apply), or an item generated by
/// [`ChangesetIter::next`](ChangesetIter::next).
// TODO enum ? Delete, Insert, Update, ...
pub struct ChangesetItem {
it: *mut ffi::sqlite3_changeset_iter,
@ -667,6 +668,7 @@ impl Connection {
#[repr(i32)]
#[derive(Debug, PartialEq)]
#[non_exhaustive]
#[allow(clippy::upper_case_acronyms)]
pub enum ConflictType {
UNKNOWN = -1,
SQLITE_CHANGESET_DATA = ffi::SQLITE_CHANGESET_DATA,
@ -694,6 +696,7 @@ impl From<i32> for ConflictType {
#[repr(i32)]
#[derive(Debug, PartialEq)]
#[non_exhaustive]
#[allow(clippy::upper_case_acronyms)]
pub enum ConflictAction {
SQLITE_CHANGESET_OMIT = ffi::SQLITE_CHANGESET_OMIT,
SQLITE_CHANGESET_REPLACE = ffi::SQLITE_CHANGESET_REPLACE,

View File

@ -114,11 +114,11 @@ impl Statement<'_> {
///
/// # Note
///
/// This function is a convenience wrapper around [`execute()`](Statement::execute) intended for
/// queries that insert a single item. It is possible to misuse this
/// function in a way that it cannot detect, such as by calling it on a
/// statement which _updates_ a single
/// item rather than inserting one. Please don't do that.
/// This function is a convenience wrapper around
/// [`execute()`](Statement::execute) intended for queries that insert a
/// single item. It is possible to misuse this function in a way that it
/// cannot detect, such as by calling it on a statement which _updates_
/// a single item rather than inserting one. Please don't do that.
///
/// # Failure
///
@ -136,8 +136,9 @@ impl Statement<'_> {
/// rows.
///
/// Due to lifetime restricts, the rows handle returned by `query` does not
/// implement the `Iterator` trait. Consider using [`query_map`](Statement::query_map) or
/// [`query_and_then`](Statement::query_and_then) instead, which do.
/// implement the `Iterator` trait. Consider using
/// [`query_map`](Statement::query_map) or [`query_and_then`](Statement::query_and_then)
/// instead, which do.
///
/// ## Example
///
@ -220,7 +221,6 @@ impl Statement<'_> {
/// Will return `Err` if binding parameters fails.
#[inline]
pub fn query<P: Params>(&mut self, params: P) -> Result<Rows<'_>> {
self.check_readonly()?;
params.__bind_in(self)?;
Ok(Rows::new(self))
}
@ -397,8 +397,9 @@ impl Statement<'_> {
/// iterator over the result of calling the mapping function over the
/// query's rows.
///
/// Note: This function is deprecated in favor of [`Statement::query_and_then`],
/// which can now take named parameters directly.
/// Note: This function is deprecated in favor of
/// [`Statement::query_and_then`], which can now take named parameters
/// directly.
///
/// If any parameters that were in the prepared statement are not included
/// in `params`, they will continue to use the most-recently bound value
@ -437,9 +438,10 @@ impl Statement<'_> {
/// ignored.
///
/// Returns `Err(QueryReturnedNoRows)` if no results are returned. If the
/// query truly is optional, you can call [`.optional()`](crate::OptionalExtension::optional) on the result of
/// this to get a `Result<Option<T>>` (requires that the trait `rusqlite::OptionalExtension`
/// is imported).
/// query truly is optional, you can call
/// [`.optional()`](crate::OptionalExtension::optional) on the result of
/// this to get a `Result<Option<T>>` (requires that the trait
/// `rusqlite::OptionalExtension` is imported).
///
/// # Failure
///
@ -457,16 +459,18 @@ impl Statement<'_> {
/// Convenience method to execute a query with named parameter(s) that is
/// expected to return a single row.
///
/// Note: This function is deprecated in favor of [`Statement::query_and_then`],
/// which can now take named parameters directly.
/// Note: This function is deprecated in favor of
/// [`Statement::query_and_then`], which can now take named parameters
/// directly.
///
/// If the query returns more than one row, all rows except the first are
/// ignored.
///
/// Returns `Err(QueryReturnedNoRows)` if no results are returned. If the
/// query truly is optional, you can call [`.optional()`](crate::OptionalExtension::optional) on the result of
/// this to get a `Result<Option<T>>` (requires that the trait `rusqlite::OptionalExtension`
/// is imported).
/// query truly is optional, you can call
/// [`.optional()`](crate::OptionalExtension::optional) on the result of
/// this to get a `Result<Option<T>>` (requires that the trait
/// `rusqlite::OptionalExtension` is imported).
///
/// # Failure
///
@ -718,21 +722,6 @@ impl Statement<'_> {
self.conn.decode_result(stmt.finalize())
}
#[cfg(not(feature = "modern_sqlite"))]
#[inline]
fn check_readonly(&self) -> Result<()> {
Ok(())
}
#[cfg(feature = "modern_sqlite")]
#[inline]
fn check_readonly(&self) -> Result<()> {
/*if !self.stmt.readonly() { does not work for PRAGMA
return Err(Error::InvalidQuery);
}*/
Ok(())
}
#[cfg(all(feature = "modern_sqlite", feature = "extra_check"))]
#[inline]
fn check_update(&self) -> Result<()> {
@ -755,6 +744,7 @@ impl Statement<'_> {
#[cfg(not(feature = "extra_check"))]
#[inline]
#[allow(clippy::unnecessary_wraps)]
fn check_update(&self) -> Result<()> {
Ok(())
}
@ -793,6 +783,7 @@ impl Statement<'_> {
#[cfg(not(feature = "extra_check"))]
#[inline]
#[allow(clippy::unnecessary_wraps)]
pub(crate) fn check_no_tail(&self) -> Result<()> {
Ok(())
}

View File

@ -377,8 +377,9 @@ impl Connection {
/// Begin a new transaction with the default behavior (DEFERRED).
///
/// The transaction defaults to rolling back when it is dropped. If you
/// want the transaction to commit, you must call [`commit`](Transaction::commit) or
/// [`set_drop_behavior(DropBehavior::Commit)`](Transaction::set_drop_behavior).
/// want the transaction to commit, you must call
/// [`commit`](Transaction::commit) or [`set_drop_behavior(DropBehavior:
/// :Commit)`](Transaction::set_drop_behavior).
///
/// ## Example
///
@ -458,7 +459,8 @@ impl Connection {
///
/// The savepoint defaults to rolling back when it is dropped. If you want
/// the savepoint to commit, you must call [`commit`](Savepoint::commit) or
/// [`set_drop_behavior(DropBehavior::Commit)`](Savepoint::set_drop_behavior).
/// [`set_drop_behavior(DropBehavior::Commit)`](Savepoint::set_drop_behavior).
///
/// ## Example
///

View File

@ -101,9 +101,9 @@ impl FromSql for DateTime<Utc> {
let s = value.as_str()?;
let fmt = if s.len() >= 11 && s.as_bytes()[10] == b'T' {
"%FT%T%.f%:z"
"%FT%T%.f%#z"
} else {
"%F %T%.f%:z"
"%F %T%.f%#z"
};
if let Ok(dt) = DateTime::parse_from_str(s, fmt) {
@ -127,7 +127,10 @@ impl FromSql for DateTime<Local> {
#[cfg(test)]
mod test {
use crate::{Connection, Result};
use crate::{
types::{FromSql, ValueRef},
Connection, Result,
};
use chrono::{DateTime, Duration, Local, NaiveDate, NaiveDateTime, NaiveTime, TimeZone, Utc};
fn checked_memory_handle() -> Result<Connection> {
@ -261,4 +264,10 @@ mod test {
assert!(result.is_ok());
Ok(())
}
#[test]
fn test_lenient_parse_timezone() {
    // Both a "Z" suffix and a bare hour-only "+00" offset must be accepted
    // when decoding a TEXT column into a `DateTime<Utc>`.
    let zulu = DateTime::<Utc>::column_result(ValueRef::Text(b"1970-01-01T00:00:00Z"));
    assert!(zulu.is_ok());
    let short_offset = DateTime::<Utc>::column_result(ValueRef::Text(b"1970-01-01T00:00:00+00"));
    assert!(short_offset.is_ok());
}
}

View File

@ -15,9 +15,11 @@
//! [`FromSql`] has different behaviour depending on the SQL and Rust types, and
//! the value.
//!
//! * `INTEGER` to integer: returns an [`Error::IntegralValueOutOfRange`](crate::Error::IntegralValueOutOfRange) error if
//! the value does not fit in the Rust type.
//! * `REAL` to integer: always returns an [`Error::InvalidColumnType`](crate::Error::InvalidColumnType) error.
//! * `INTEGER` to integer: returns an
//! [`Error::IntegralValueOutOfRange`](crate::Error::IntegralValueOutOfRange)
//! error if the value does not fit in the Rust type.
//! * `REAL` to integer: always returns an
//! [`Error::InvalidColumnType`](crate::Error::InvalidColumnType) error.
//! * `INTEGER` to float: casts using `as` operator. Never fails.
//! * `REAL` to float: casts using `as` operator. Never fails.
//!
@ -62,8 +64,8 @@ impl ToSql for DateTimeSql {
"##
)]
//! [`ToSql`] and [`FromSql`] are also implemented for `Option<T>` where `T`
//! implements [`ToSql`] or [`FromSql`] for the cases where you want to know if a
//! value was NULL (which gets translated to `None`).
//! implements [`ToSql`] or [`FromSql`] for the cases where you want to know if
//! a value was NULL (which gets translated to `None`).
pub use self::from_sql::{FromSql, FromSqlError, FromSqlResult};
pub use self::to_sql::{ToSql, ToSqlOutput};

View File

@ -3,7 +3,8 @@ use super::{Null, Type};
/// Owning [dynamic type value](http://sqlite.org/datatype3.html). Value's type is typically
/// dictated by SQLite (not by the caller).
///
/// See [`ValueRef`](crate::types::ValueRef) for a non-owning dynamic type value.
/// See [`ValueRef`](crate::types::ValueRef) for a non-owning dynamic type
/// value.
#[derive(Clone, Debug, PartialEq)]
pub enum Value {
/// The value is a `NULL` value.

View File

@ -35,7 +35,8 @@ impl ValueRef<'_> {
impl<'a> ValueRef<'a> {
/// If `self` is case `Integer`, returns the integral value. Otherwise,
/// returns [`Err(Error::InvalidColumnType)`](crate::Error::InvalidColumnType).
/// returns [`Err(Error::InvalidColumnType)`](crate::Error::InvalidColumnType).
#[inline]
pub fn as_i64(&self) -> FromSqlResult<i64> {
match *self {
@ -45,7 +46,8 @@ impl<'a> ValueRef<'a> {
}
/// If `self` is case `Real`, returns the floating point value. Otherwise,
/// returns [`Err(Error::InvalidColumnType)`](crate::Error::InvalidColumnType).
/// returns [`Err(Error::InvalidColumnType)`](crate::Error::InvalidColumnType).
#[inline]
pub fn as_f64(&self) -> FromSqlResult<f64> {
match *self {

View File

@ -48,12 +48,12 @@ use crate::{Connection, Error, Result};
/// ```
pub fn load_module(conn: &Connection) -> Result<()> {
let aux: Option<()> = None;
conn.create_module("csv", read_only_module::<CSVTab>(), aux)
conn.create_module("csv", read_only_module::<CsvTab>(), aux)
}
/// An instance of the CSV virtual table
#[repr(C)]
struct CSVTab {
struct CsvTab {
/// Base class. Must be first
base: ffi::sqlite3_vtab,
/// Name of the CSV file
@ -65,7 +65,7 @@ struct CSVTab {
offset_first_row: csv::Position,
}
impl CSVTab {
impl CsvTab {
fn reader(&self) -> Result<csv::Reader<File>, csv::Error> {
csv::ReaderBuilder::new()
.has_headers(self.has_headers)
@ -96,20 +96,20 @@ impl CSVTab {
}
}
unsafe impl<'vtab> VTab<'vtab> for CSVTab {
unsafe impl<'vtab> VTab<'vtab> for CsvTab {
type Aux = ();
type Cursor = CSVTabCursor<'vtab>;
type Cursor = CsvTabCursor<'vtab>;
fn connect(
_: &mut VTabConnection,
_aux: Option<&()>,
args: &[&[u8]],
) -> Result<(String, CSVTab)> {
) -> Result<(String, CsvTab)> {
if args.len() < 4 {
return Err(Error::ModuleError("no CSV file specified".to_owned()));
}
let mut vtab = CSVTab {
let mut vtab = CsvTab {
base: ffi::sqlite3_vtab::default(),
filename: "".to_owned(),
has_headers: false,
@ -122,7 +122,7 @@ unsafe impl<'vtab> VTab<'vtab> for CSVTab {
let args = &args[3..];
for c_slice in args {
let (param, value) = CSVTab::parameter(c_slice)?;
let (param, value) = CsvTab::parameter(c_slice)?;
match param {
"filename" => {
if !Path::new(value).exists() {
@ -166,7 +166,7 @@ unsafe impl<'vtab> VTab<'vtab> for CSVTab {
}
}
"delimiter" => {
if let Some(b) = CSVTab::parse_byte(value) {
if let Some(b) = CsvTab::parse_byte(value) {
vtab.delimiter = b;
} else {
return Err(Error::ModuleError(format!(
@ -176,7 +176,7 @@ unsafe impl<'vtab> VTab<'vtab> for CSVTab {
}
}
"quote" => {
if let Some(b) = CSVTab::parse_byte(value) {
if let Some(b) = CsvTab::parse_byte(value) {
if b == b'0' {
vtab.quote = 0;
} else {
@ -259,16 +259,16 @@ unsafe impl<'vtab> VTab<'vtab> for CSVTab {
Ok(())
}
fn open(&self) -> Result<CSVTabCursor<'_>> {
Ok(CSVTabCursor::new(self.reader()?))
fn open(&self) -> Result<CsvTabCursor<'_>> {
Ok(CsvTabCursor::new(self.reader()?))
}
}
impl CreateVTab<'_> for CSVTab {}
impl CreateVTab<'_> for CsvTab {}
/// A cursor for the CSV virtual table
#[repr(C)]
struct CSVTabCursor<'vtab> {
struct CsvTabCursor<'vtab> {
/// Base class. Must be first
base: ffi::sqlite3_vtab_cursor,
/// The CSV reader object
@ -278,12 +278,12 @@ struct CSVTabCursor<'vtab> {
/// Values of the current row
cols: csv::StringRecord,
eof: bool,
phantom: PhantomData<&'vtab CSVTab>,
phantom: PhantomData<&'vtab CsvTab>,
}
impl CSVTabCursor<'_> {
fn new<'vtab>(reader: csv::Reader<File>) -> CSVTabCursor<'vtab> {
CSVTabCursor {
impl CsvTabCursor<'_> {
fn new<'vtab>(reader: csv::Reader<File>) -> CsvTabCursor<'vtab> {
CsvTabCursor {
base: ffi::sqlite3_vtab_cursor::default(),
reader,
row_number: 0,
@ -294,12 +294,12 @@ impl CSVTabCursor<'_> {
}
/// Accessor to the associated virtual table.
fn vtab(&self) -> &CSVTab {
unsafe { &*(self.base.pVtab as *const CSVTab) }
// Accessor to the virtual table that owns this cursor.
fn vtab(&self) -> &CsvTab {
    // SAFETY(review): relies on SQLite having set `base.pVtab` to the vtab
    // instance before invoking any cursor method, and on that vtab being a
    // `CsvTab` (this cursor type is only created by `CsvTab::open`) — confirm.
    unsafe { &*(self.base.pVtab as *const CsvTab) }
}
}
unsafe impl VTabCursor for CSVTabCursor<'_> {
unsafe impl VTabCursor for CsvTabCursor<'_> {
// Only a full table scan is supported. So `filter` simply rewinds to
// the beginning.
fn filter(

View File

@ -2,8 +2,8 @@
//!
//! Follow these steps to create your own virtual table:
//! 1. Write implemenation of [`VTab`] and [`VTabCursor`] traits.
//! 2. Create an instance of the [`Module`] structure specialized for [`VTab`] impl.
//! from step 1.
//! 2. Create an instance of the [`Module`] structure specialized for [`VTab`]
//! impl. from step 1.
//! 3. Register your [`Module`] structure using [`Connection::create_module`].
//! 4. Run a `CREATE VIRTUAL TABLE` command that specifies the new module in the
//! `USING` clause.
@ -261,6 +261,7 @@ pub trait CreateVTab<'vtab>: VTab<'vtab> {
/// See [Virtual Table Constraint Operator Codes](https://sqlite.org/c3ref/c_index_constraint_eq.html) for details.
#[derive(Debug, PartialEq)]
#[allow(non_snake_case, non_camel_case_types, missing_docs)]
#[allow(clippy::upper_case_acronyms)]
pub enum IndexConstraintOp {
SQLITE_INDEX_CONSTRAINT_EQ,
SQLITE_INDEX_CONSTRAINT_GT,
@ -429,7 +430,8 @@ impl IndexConstraint<'_> {
pub struct IndexConstraintUsage<'a>(&'a mut ffi::sqlite3_index_constraint_usage);
impl IndexConstraintUsage<'_> {
/// if `argv_index` > 0, constraint is part of argv to [`VTabCursor::filter`]
/// if `argv_index` > 0, constraint is part of argv to
/// [`VTabCursor::filter`]
#[inline]
pub fn set_argv_index(&mut self, argv_index: c_int) {
self.0.argvIndex = argv_index;
@ -495,8 +497,8 @@ pub unsafe trait VTabCursor: Sized {
/// Begin a search of a virtual table.
/// (See [SQLite doc](https://sqlite.org/vtab.html#the_xfilter_method))
fn filter(&mut self, idx_num: c_int, idx_str: Option<&str>, args: &Values<'_>) -> Result<()>;
/// Advance cursor to the next row of a result set initiated by [`filter`](VTabCursor::filter).
/// (See [SQLite doc](https://sqlite.org/vtab.html#the_xnext_method))
/// Advance cursor to the next row of a result set initiated by
/// [`filter`](VTabCursor::filter). (See [SQLite doc](https://sqlite.org/vtab.html#the_xnext_method))
fn next(&mut self) -> Result<()>;
/// Must return `false` if the cursor currently points to a valid row of
/// data, or `true` otherwise.

View File

@ -13,7 +13,7 @@ use crate::vtab::{
eponymous_only_module, Context, IndexConstraintOp, IndexInfo, VTab, VTabConnection, VTabCursor,
Values,
};
use crate::{Connection, Result};
use crate::{Connection, Error, Result};
/// `feature = "series"` Register the "generate_series" module.
pub fn load_module(conn: &Connection) -> Result<()> {
@ -38,6 +38,8 @@ bitflags::bitflags! {
const STEP = 4;
// output in descending order
const DESC = 8;
// output in ascending order
const ASC = 16;
// Both start and stop
const BOTH = QueryPlanFlags::START.bits | QueryPlanFlags::STOP.bits;
}
@ -71,54 +73,42 @@ unsafe impl<'vtab> VTab<'vtab> for SeriesTab {
fn best_index(&self, info: &mut IndexInfo) -> Result<()> {
// The query plan bitmask
let mut idx_num: QueryPlanFlags = QueryPlanFlags::empty();
// Index of the start= constraint
let mut start_idx = None;
// Index of the stop= constraint
let mut stop_idx = None;
// Index of the step= constraint
let mut step_idx = None;
// Mask of unusable constraints
let mut unusable_mask: QueryPlanFlags = QueryPlanFlags::empty();
// Constraints on start, stop, and step
let mut a_idx: [Option<usize>; 3] = [None, None, None];
for (i, constraint) in info.constraints().enumerate() {
if !constraint.is_usable() {
if constraint.column() < SERIES_COLUMN_START {
continue;
}
if constraint.operator() != IndexConstraintOp::SQLITE_INDEX_CONSTRAINT_EQ {
continue;
}
match constraint.column() {
SERIES_COLUMN_START => {
start_idx = Some(i);
idx_num |= QueryPlanFlags::START;
let (i_col, i_mask) = match constraint.column() {
SERIES_COLUMN_START => (0, QueryPlanFlags::START),
SERIES_COLUMN_STOP => (1, QueryPlanFlags::STOP),
SERIES_COLUMN_STEP => (2, QueryPlanFlags::STEP),
_ => {
unreachable!()
}
SERIES_COLUMN_STOP => {
stop_idx = Some(i);
idx_num |= QueryPlanFlags::STOP;
}
SERIES_COLUMN_STEP => {
step_idx = Some(i);
idx_num |= QueryPlanFlags::STEP;
}
_ => {}
};
if !constraint.is_usable() {
unusable_mask |= i_mask;
} else if constraint.operator() == IndexConstraintOp::SQLITE_INDEX_CONSTRAINT_EQ {
idx_num |= i_mask;
a_idx[i_col] = Some(i);
}
}
let mut num_of_arg = 0;
if let Some(start_idx) = start_idx {
num_of_arg += 1;
let mut constraint_usage = info.constraint_usage(start_idx);
constraint_usage.set_argv_index(num_of_arg);
// Number of arguments that SeriesTabCursor::filter expects
let mut n_arg = 0;
for j in a_idx.iter().flatten() {
n_arg += 1;
let mut constraint_usage = info.constraint_usage(*j);
constraint_usage.set_argv_index(n_arg);
constraint_usage.set_omit(true);
}
if let Some(stop_idx) = stop_idx {
num_of_arg += 1;
let mut constraint_usage = info.constraint_usage(stop_idx);
constraint_usage.set_argv_index(num_of_arg);
constraint_usage.set_omit(true);
}
if let Some(step_idx) = step_idx {
num_of_arg += 1;
let mut constraint_usage = info.constraint_usage(step_idx);
constraint_usage.set_argv_index(num_of_arg);
constraint_usage.set_omit(true);
if !(unusable_mask & !idx_num).is_empty() {
return Err(Error::SqliteFailure(
ffi::Error::new(ffi::SQLITE_CONSTRAINT),
None,
));
}
if idx_num.contains(QueryPlanFlags::BOTH) {
// Both start= and stop= boundaries are available.
@ -135,6 +125,8 @@ unsafe impl<'vtab> VTab<'vtab> for SeriesTab {
if let Some(order_by) = order_bys.next() {
if order_by.is_order_by_desc() {
idx_num |= QueryPlanFlags::DESC;
} else {
idx_num |= QueryPlanFlags::ASC;
}
true
} else {
@ -145,7 +137,9 @@ unsafe impl<'vtab> VTab<'vtab> for SeriesTab {
info.set_order_by_consumed(true);
}
} else {
info.set_estimated_cost(2_147_483_647f64);
// If either boundary is missing, we have to generate a huge span
// of numbers. Make this case very expensive so that the query
// planner will work hard to avoid it.
info.set_estimated_rows(2_147_483_647);
}
info.set_idx_num(idx_num.bits());
@ -193,7 +187,7 @@ impl SeriesTabCursor<'_> {
}
unsafe impl VTabCursor for SeriesTabCursor<'_> {
fn filter(&mut self, idx_num: c_int, _idx_str: Option<&str>, args: &Values<'_>) -> Result<()> {
let idx_num = QueryPlanFlags::from_bits_truncate(idx_num);
let mut idx_num = QueryPlanFlags::from_bits_truncate(idx_num);
let mut i = 0;
if idx_num.contains(QueryPlanFlags::START) {
self.min_value = args.get(i)?;
@ -209,8 +203,14 @@ unsafe impl VTabCursor for SeriesTabCursor<'_> {
}
if idx_num.contains(QueryPlanFlags::STEP) {
self.step = args.get(i)?;
if self.step < 1 {
#[allow(clippy::comparison_chain)]
if self.step == 0 {
self.step = 1;
} else if self.step < 0 {
self.step = -self.step;
if !idx_num.contains(QueryPlanFlags::ASC) {
idx_num |= QueryPlanFlags::DESC;
}
}
} else {
self.step = 1;
@ -274,6 +274,7 @@ mod test {
use crate::ffi;
use crate::vtab::series;
use crate::{Connection, Result};
use fallible_iterator::FallibleIterator;
#[test]
fn test_series_module() -> Result<()> {
@ -294,6 +295,18 @@ mod test {
assert_eq!(expected, value?);
expected += 5;
}
let mut s =
db.prepare("SELECT * FROM generate_series WHERE start=1 AND stop=9 AND step=2")?;
let series: Vec<i32> = s.query([])?.map(|r| r.get(0)).collect()?;
assert_eq!(vec![1, 3, 5, 7, 9], series);
let mut s = db.prepare("SELECT * FROM generate_series LIMIT 5")?;
let series: Vec<i32> = s.query([])?.map(|r| r.get(0)).collect()?;
assert_eq!(vec![0, 1, 2, 3, 4], series);
let mut s = db.prepare("SELECT * FROM generate_series(0,32,5) ORDER BY value DESC")?;
let series: Vec<i32> = s.query([])?.map(|r| r.get(0)).collect()?;
assert_eq!(vec![30, 25, 20, 15, 10, 5, 0], series);
Ok(())
}
}