Bug 1597898 - Part 2: Vendor newly added rust dependencies, r=nanj

Differential Revision: https://phabricator.services.mozilla.com/D54281
This commit is contained in:
Victor Porof 2020-07-29 17:40:41 +00:00
parent 75bdf2b80e
commit 477c1858e0
50 changed files with 2661 additions and 996 deletions

View File

@ -7,11 +7,6 @@ branch = "r0.13.1"
git = "https://github.com/shravanrn/nix/"
replace-with = "vendored-sources"
[source."https://github.com/mozilla/rkv"]
git = "https://github.com/mozilla/rkv"
replace-with = "vendored-sources"
rev = "e3c3388e6632cf55e08d773b32e58b1cab9b2731"
[source."https://github.com/mozilla/neqo"]
git = "https://github.com/mozilla/neqo"
replace-with = "vendored-sources"

10
Cargo.lock generated
View File

@ -515,7 +515,7 @@ dependencies = [
"nserror",
"nsstring",
"rental",
"rkv 0.11.1",
"rkv 0.15.0",
"rust_cascade",
"sha2",
"storage_variant",
@ -2530,7 +2530,7 @@ dependencies = [
"moz_task",
"nserror",
"nsstring",
"rkv 0.10.4",
"rkv 0.15.0",
"storage_variant",
"tempfile",
"thin-vec",
@ -4100,8 +4100,9 @@ dependencies = [
[[package]]
name = "rkv"
version = "0.11.1"
source = "git+https://github.com/mozilla/rkv?rev=e3c3388e6632cf55e08d773b32e58b1cab9b2731#e3c3388e6632cf55e08d773b32e58b1cab9b2731"
version = "0.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e97d1b6321740ce36d77d67d22ff84ac8a996cf69dbd0727b8bcae52f1c98aaa"
dependencies = [
"arrayref",
"bincode",
@ -4113,6 +4114,7 @@ dependencies = [
"lmdb-rkv",
"log",
"ordered-float",
"paste",
"serde",
"serde_derive",
"url",

View File

@ -1,38 +0,0 @@
environment:
matrix:
- TARGET: x86_64-pc-windows-msvc
TOOLCHAIN: stable
- TARGET: i686-pc-windows-msvc
TOOLCHAIN: stable
- TARGET: x86_64-pc-windows-msvc
TOOLCHAIN: beta
- TARGET: i686-pc-windows-msvc
TOOLCHAIN: beta
- TARGET: x86_64-pc-windows-msvc
TOOLCHAIN: nightly
- TARGET: i686-pc-windows-msvc
TOOLCHAIN: nightly
install:
- curl -sSf -o rustup-init.exe https://win.rustup.rs/
- rustup-init.exe -y --default-host %TARGET% --default-toolchain %TOOLCHAIN%
- set PATH=%PATH%;C:\Users\appveyor\.cargo\bin
- choco install make -y
- choco install mingw -y
- refreshenv
- rustc -Vv
- cargo -Vv
- make -v
- gcc -v
# Disable AppVeyor's build phase, let 'cargo test' take care of the build
build: false
test_script:
- SET RUST_BACKTRACE=1
- cargo test --all --target %TARGET% --verbose
- cargo test --all --release --target %TARGET% --verbose
cache:
- C:\Users\appveyor\.cargo\registry
- target

File diff suppressed because one or more lines are too long

View File

@ -1,4 +0,0 @@
imports_layout = "Vertical"
max_width = 120
match_block_trailing_comma = true
use_small_heuristics = "Off"

View File

@ -1,48 +0,0 @@
language: rust
sudo: false
cache: cargo
rust:
- 1.37.0
- stable
- beta
- nightly
os:
- linux
- osx
matrix:
allow_failures:
- rust: nightly
fast_finish: true
before_script:
# We install a known-to-have-rustfmt version of the nightly toolchain
# in order to run the nightly version of rustfmt, which supports rules
# that we depend upon. When updating, pick a suitable nightly version
# from https://rust-lang.github.io/rustup-components-history/
- rustup toolchain install nightly-2019-09-11
- rustup component add rustfmt --toolchain nightly-2019-09-11
- rustup component add clippy --toolchain nightly-2019-09-11
# Use official clang in order to test out building on osx.
- if [[ "$TRAVIS_OS_NAME" = "osx" ]]; then
brew update;
brew install llvm;
export PATH="/usr/local/opt/llvm/bin:$PATH";
export LDFLAGS="-L/usr/local/opt/llvm/lib";
export CPPFLAGS="-I/usr/local/opt/llvm/include";
fi
script:
- cargo +nightly-2019-09-11 fmt --all -- --check
- CC="clang" cargo +nightly-2019-09-11 clippy --all-features -- -D warnings
- cargo build --verbose
- export RUST_BACKTRACE=1
- cargo test --all --verbose
- cargo test --lib --no-default-features --verbose
- cargo test --lib --no-default-features --features "db-dup-sort" --verbose
- cargo test --lib --no-default-features --features "db-int-key" --verbose
- cargo test --release --all --verbose
- ./run-all-examples.sh

492
third_party/rust/rkv/Cargo.lock generated vendored Normal file
View File

@ -0,0 +1,492 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
[[package]]
name = "addr2line"
version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b6a2d3371669ab3ca9797670853d61402b03d0b4b9ebf33d677dfa720203072"
dependencies = [
"gimli",
]
[[package]]
name = "adler"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e"
[[package]]
name = "arrayref"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544"
[[package]]
name = "autocfg"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d"
[[package]]
name = "backtrace"
version = "0.3.50"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "46254cf2fdcdf1badb5934448c1bcbe046a56537b3987d96c51a7afc5d03f293"
dependencies = [
"addr2line",
"cfg-if",
"libc",
"miniz_oxide",
"object",
"rustc-demangle",
]
[[package]]
name = "bincode"
version = "1.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f30d3a39baa26f9651f17b375061f3233dde33424a8b72b0dbe93a68a0bc896d"
dependencies = [
"byteorder",
"serde",
]
[[package]]
name = "bitflags"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
[[package]]
name = "byteorder"
version = "1.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de"
[[package]]
name = "cc"
version = "1.0.58"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f9a06fb2e53271d7c279ec1efea6ab691c35a2ae67ec0d91d7acec0caf13b518"
[[package]]
name = "cfg-if"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
[[package]]
name = "failure"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d32e9bd16cc02eae7db7ef620b392808b89f6a5e16bb3497d159c6b92a0f4f86"
dependencies = [
"backtrace",
"failure_derive",
]
[[package]]
name = "failure_derive"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4"
dependencies = [
"proc-macro2",
"quote",
"syn",
"synstructure",
]
[[package]]
name = "getrandom"
version = "0.1.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb"
dependencies = [
"cfg-if",
"libc",
"wasi",
]
[[package]]
name = "gimli"
version = "0.22.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aaf91faf136cb47367fa430cd46e37a788775e7fa104f8b4bcb3861dc389b724"
[[package]]
name = "id-arena"
version = "2.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "25a2bc672d1148e28034f176e01fffebb08b35768468cc954630da77a1449005"
[[package]]
name = "idna"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9"
dependencies = [
"matches",
"unicode-bidi",
"unicode-normalization",
]
[[package]]
name = "lazy_static"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
version = "0.2.72"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a9f8082297d534141b30c8d39e9b1773713ab50fdbe4ff30f750d063b3bfd701"
[[package]]
name = "lmdb-rkv"
version = "0.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "447a296f7aca299cfbb50f4e4f3d49451549af655fb7215d7f8c0c3d64bad42b"
dependencies = [
"bitflags",
"byteorder",
"libc",
"lmdb-rkv-sys",
]
[[package]]
name = "lmdb-rkv-sys"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b27470ac25167b3afdfb6af8fcd3bc1be67de50ffbdaf4073378cfded6ae24a5"
dependencies = [
"cc",
"libc",
"pkg-config",
]
[[package]]
name = "log"
version = "0.4.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b"
dependencies = [
"cfg-if",
]
[[package]]
name = "matches"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08"
[[package]]
name = "miniz_oxide"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be0f75932c1f6cfae3c04000e40114adf955636e19040f9c0a2c380702aa1c7f"
dependencies = [
"adler",
]
[[package]]
name = "num-traits"
version = "0.2.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac267bcc07f48ee5f8935ab0d24f316fb722d7a1292e2913f0cc196b29ffd611"
dependencies = [
"autocfg",
]
[[package]]
name = "object"
version = "0.20.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1ab52be62400ca80aa00285d25253d7f7c437b7375c4de678f5405d3afe82ca5"
[[package]]
name = "ordered-float"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3741934be594d77de1c8461ebcbbe866f585ea616a9753aa78f2bdc69f0e4579"
dependencies = [
"num-traits",
]
[[package]]
name = "paste"
version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880"
dependencies = [
"paste-impl",
"proc-macro-hack",
]
[[package]]
name = "paste-impl"
version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6"
dependencies = [
"proc-macro-hack",
]
[[package]]
name = "percent-encoding"
version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e"
[[package]]
name = "pkg-config"
version = "0.3.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d36492546b6af1463394d46f0c834346f31548646f6ba10849802c9c9a27ac33"
[[package]]
name = "ppv-lite86"
version = "0.2.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea"
[[package]]
name = "proc-macro-hack"
version = "0.5.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7e0456befd48169b9f13ef0f0ad46d492cf9d2dbb918bcf38e01eed4ce3ec5e4"
[[package]]
name = "proc-macro2"
version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "beae6331a816b1f65d04c45b078fd8e6c93e8071771f41b8163255bbd8d7c8fa"
dependencies = [
"unicode-xid",
]
[[package]]
name = "quote"
version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37"
dependencies = [
"proc-macro2",
]
[[package]]
name = "rand"
version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
dependencies = [
"getrandom",
"libc",
"rand_chacha",
"rand_core",
"rand_hc",
]
[[package]]
name = "rand_chacha"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402"
dependencies = [
"ppv-lite86",
"rand_core",
]
[[package]]
name = "rand_core"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
dependencies = [
"getrandom",
]
[[package]]
name = "rand_hc"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c"
dependencies = [
"rand_core",
]
[[package]]
name = "redox_syscall"
version = "0.1.57"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce"
[[package]]
name = "remove_dir_all"
version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
dependencies = [
"winapi",
]
[[package]]
name = "rkv"
version = "0.15.0"
dependencies = [
"arrayref",
"bincode",
"bitflags",
"byteorder",
"failure",
"id-arena",
"lazy_static",
"lmdb-rkv",
"log",
"ordered-float",
"paste",
"serde",
"serde_derive",
"tempfile",
"url",
"uuid",
]
[[package]]
name = "rustc-demangle"
version = "0.1.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783"
[[package]]
name = "serde"
version = "1.0.114"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5317f7588f0a5078ee60ef675ef96735a1442132dc645eb1d12c018620ed8cd3"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.114"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2a0be94b04690fbaed37cddffc5c134bf537c8e3329d53e982fe04c374978f8e"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "syn"
version = "1.0.34"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "936cae2873c940d92e697597c5eee105fb570cd5689c695806f672883653349b"
dependencies = [
"proc-macro2",
"quote",
"unicode-xid",
]
[[package]]
name = "synstructure"
version = "0.12.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701"
dependencies = [
"proc-macro2",
"quote",
"syn",
"unicode-xid",
]
[[package]]
name = "tempfile"
version = "3.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9"
dependencies = [
"cfg-if",
"libc",
"rand",
"redox_syscall",
"remove_dir_all",
"winapi",
]
[[package]]
name = "tinyvec"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "53953d2d3a5ad81d9f844a32f14ebb121f50b650cd59d0ee2a07cf13c617efed"
[[package]]
name = "unicode-bidi"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5"
dependencies = [
"matches",
]
[[package]]
name = "unicode-normalization"
version = "0.1.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6fb19cf769fa8c6a80a162df694621ebeb4dafb606470b2b2fce0be40a98a977"
dependencies = [
"tinyvec",
]
[[package]]
name = "unicode-xid"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564"
[[package]]
name = "url"
version = "2.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "829d4a8476c35c9bf0bbce5a3b23f4106f79728039b726d292bb93bc106787cb"
dependencies = [
"idna",
"matches",
"percent-encoding",
]
[[package]]
name = "uuid"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9fde2f6a4bea1d6e007c4ad38c6839fa71cbb63b6dbf5b595aa38dc9b1093c11"
[[package]]
name = "wasi"
version = "0.9.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
"winapi-i686-pc-windows-gnu",
"winapi-x86_64-pc-windows-gnu",
]
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"

View File

@ -1,49 +1,88 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
name = "rkv"
version = "0.11.1"
authors = ["Richard Newman <rnewman@twinql.com>", "Nan Jiang <najiang@mozilla.com>", "Myk Melez <myk@mykzilla.org>", "Victor Porof <vporof@mozilla.com>"]
edition = "2018"
license = "Apache-2.0"
description = "a simple, humane, typed Rust interface to LMDB"
documentation = "https://docs.rs/rkv"
name = "rkv"
version = "0.15.0"
authors = ["Richard Newman <rnewman@twinql.com>", "Nan Jiang <najiang@mozilla.com>", "Myk Melez <myk@mykzilla.org>", "Victor Porof <vporof@mozilla.com>"]
exclude = ["/tests/envs/*"]
description = "A simple, humane, typed key-value storage solution"
homepage = "https://github.com/mozilla/rkv"
repository = "https://github.com/mozilla/rkv"
documentation = "https://docs.rs/rkv"
readme = "README.md"
keywords = ["lmdb", "database", "storage"]
categories = ["database"]
exclude = ["/tests/envs/*"]
license = "Apache-2.0"
repository = "https://github.com/mozilla/rkv"
[dependencies.arrayref]
version = "0.3"
[dependencies.bincode]
version = "1.0"
[dependencies.bitflags]
version = "1"
[dependencies.byteorder]
version = "1"
[dependencies.failure]
version = "0.1"
features = ["derive"]
default_features = false
[dependencies.id-arena]
version = "2.2"
[dependencies.lazy_static]
version = "1.0"
[dependencies.lmdb-rkv]
version = "0.14"
[dependencies.log]
version = "0.4"
[dependencies.ordered-float]
version = "1.0"
[dependencies.paste]
version = "0.1"
[dependencies.serde]
version = "1.0"
features = ["derive", "rc"]
[dependencies.serde_derive]
version = "1.0"
[dependencies.url]
version = "2.0"
[dependencies.uuid]
version = "0.8"
[dev-dependencies.byteorder]
version = "1"
[dev-dependencies.tempfile]
version = "3"
[features]
default = ["db-dup-sort", "db-int-key"]
backtrace = ["failure/backtrace", "failure/std"]
db-dup-sort = []
db-int-key = []
default = ["db-dup-sort", "db-int-key"]
no-canonicalize-path = []
with-asan = ["lmdb-rkv/with-asan"]
with-fuzzer = ["lmdb-rkv/with-fuzzer"]
with-fuzzer-no-link = ["lmdb-rkv/with-fuzzer-no-link"]
[dependencies]
arrayref = "0.3"
bincode = "1.0"
bitflags = "1"
byteorder = "1"
id-arena = "2.2"
lazy_static = "1.0"
lmdb-rkv = "0.14"
log = "0.4"
ordered-float = "1.0"
serde = { version = "1.0", features = ["derive", "rc"] }
serde_derive = "1.0"
url = "2.0"
uuid = "0.8"
# Get rid of failure's dependency on backtrace. Eventually
# backtrace will move into Rust core, but we don't need it here.
[dependencies.failure]
version = "0.1"
default_features = false
features = ["derive"]
[dev-dependencies]
byteorder = "1"
tempfile = "3"

View File

@ -9,8 +9,6 @@ The [rkv Rust crate](https://crates.io/crates/rkv) is a simple, humane, typed ke
## ⚠️ Warning ⚠️
The LMDB backend is currently unstable and crash-prone. We're attempting to fix these crashes in bugs [1538539](https://bugzilla.mozilla.org/show_bug.cgi?id=1538539), [1538541](https://bugzilla.mozilla.org/show_bug.cgi?id=1538541) and [1550174](https://bugzilla.mozilla.org/show_bug.cgi?id=1550174).
To use rkv in production/release environments at Mozilla, you may do so with the "SafeMode" backend, for example:
```rust
@ -23,9 +21,9 @@ let shared_rkv = manager.get_or_create(path, Rkv::new::<SafeMode>).unwrap();
...
```
The "SafeMode` backend performs well, with two caveats: the entire database is stored in memory, and write transactions are synchronously written to disk on commit.
The "SafeMode" backend performs well, with two caveats: the entire database is stored in memory, and write transactions are synchronously written to disk (only on commit).
In the future, it will be advisable to switch to a different backend with better performance guarantees. We're working on either fixing the LMDB crashes, or offering more choices of backend engines (e.g. SQLite).
In the future, it will be advisable to switch to a different backend with better performance guarantees. We're working on either fixing some LMDB crashes, or offering more choices of backend engines (e.g. SQLite).
## Use
@ -49,8 +47,7 @@ There are several features that you can opt-in and out of when using rkv:
By default, `db-dup-sort` and `db-int-key` features offer high level database APIs which allow multiple values per key, and optimizations around integer-based keys respectively. Opt out of these default features when specifying the rkv dependency in your Cargo.toml file to disable them; doing so avoids a certain amount of overhead required to support them.
If you specify the `backtrace` feature, backtraces will be enabled in "failure"
errors. This feature is disabled by default.
If you specify the `backtrace` feature, backtraces will be enabled in "failure" errors. This feature is disabled by default.
To aid fuzzing efforts, `with-asan`, `with-fuzzer`, and `with-fuzzer-no-link` configure the build scripts responsible with compiling the underlying backing engines (e.g. LMDB) to build with these LLVM features enabled. Please refer to the official LLVM/Clang documentation on them for more information. These features are also disabled by default.

View File

@ -7,17 +7,19 @@
//!
//! cargo run --example iterator
use std::fs;
use std::str;
use std::{
fs,
str,
};
use tempfile::Builder;
use rkv::backend::{
Lmdb,
LmdbDatabase,
LmdbEnvironment,
};
use rkv::{
backend::{
Lmdb,
LmdbDatabase,
LmdbEnvironment,
},
Manager,
Rkv,
SingleStore,

View File

@ -11,14 +11,14 @@ use std::fs;
use tempfile::Builder;
use rkv::backend::{
BackendStat,
Lmdb,
LmdbDatabase,
LmdbEnvironment,
LmdbRwTransaction,
};
use rkv::{
backend::{
BackendStat,
Lmdb,
LmdbDatabase,
LmdbEnvironment,
LmdbRwTransaction,
},
Manager,
Rkv,
StoreOptions,
@ -35,7 +35,7 @@ fn getput<'w, 's>(store: MultiStore, writer: &'w mut Writer, ids: &'s mut Vec<St
// this is a multi-valued database, so get returns an iterator
let mut iter = store.get(writer, k).unwrap();
while let Some(Ok((_key, val))) = iter.next() {
if let Value::Str(s) = val.unwrap() {
if let Value::Str(s) = val {
ids.push(s.to_owned());
} else {
panic!("didn't get a string back!");

View File

@ -16,48 +16,39 @@ mod traits;
pub use common::*;
pub use traits::*;
pub use impl_lmdb::DatabaseImpl as LmdbDatabase;
pub use impl_lmdb::EnvironmentBuilderImpl as Lmdb;
pub use impl_lmdb::EnvironmentImpl as LmdbEnvironment;
pub use impl_lmdb::ErrorImpl as LmdbError;
pub use impl_lmdb::IterImpl as LmdbIter;
pub use impl_lmdb::{
ArchMigrateError as LmdbArchMigrateError,
ArchMigrateResult as LmdbArchMigrateResult,
ArchMigrator as LmdbArchMigrator,
DatabaseFlagsImpl as LmdbDatabaseFlags,
DatabaseImpl as LmdbDatabase,
EnvironmentBuilderImpl as Lmdb,
EnvironmentFlagsImpl as LmdbEnvironmentFlags,
EnvironmentImpl as LmdbEnvironment,
ErrorImpl as LmdbError,
InfoImpl as LmdbInfo,
IterImpl as LmdbIter,
RoCursorImpl as LmdbRoCursor,
RoTransactionImpl as LmdbRoTransaction,
RwCursorImpl as LmdbRwCursor,
RwTransactionImpl as LmdbRwTransaction,
StatImpl as LmdbStat,
WriteFlagsImpl as LmdbWriteFlags,
};
pub use impl_lmdb::{
InfoImpl as LmdbInfo,
StatImpl as LmdbStat,
};
pub use impl_lmdb::{
RoCursorImpl as LmdbRoCursor,
RwCursorImpl as LmdbRwCursor,
};
pub use impl_lmdb::{
RoTransactionImpl as LmdbRoTransaction,
RwTransactionImpl as LmdbRwTransaction,
};
pub use impl_safe::DatabaseImpl as SafeModeDatabase;
pub use impl_safe::EnvironmentBuilderImpl as SafeMode;
pub use impl_safe::EnvironmentImpl as SafeModeEnvironment;
pub use impl_safe::ErrorImpl as SafeModeError;
pub use impl_safe::IterImpl as SafeModeIter;
pub use impl_safe::{
DatabaseFlagsImpl as SafeModeDatabaseFlags,
DatabaseImpl as SafeModeDatabase,
EnvironmentBuilderImpl as SafeMode,
EnvironmentFlagsImpl as SafeModeEnvironmentFlags,
EnvironmentImpl as SafeModeEnvironment,
ErrorImpl as SafeModeError,
InfoImpl as SafeModeInfo,
IterImpl as SafeModeIter,
RoCursorImpl as SafeModeRoCursor,
RoTransactionImpl as SafeModeRoTransaction,
RwCursorImpl as SafeModeRwCursor,
RwTransactionImpl as SafeModeRwTransaction,
StatImpl as SafeModeStat,
WriteFlagsImpl as SafeModeWriteFlags,
};
pub use impl_safe::{
InfoImpl as SafeModeInfo,
StatImpl as SafeModeStat,
};
pub use impl_safe::{
RoCursorImpl as SafeModeRoCursor,
RwCursorImpl as SafeModeRwCursor,
};
pub use impl_safe::{
RoTransactionImpl as SafeModeRoTransaction,
RwTransactionImpl as SafeModeRwTransaction,
};

View File

@ -27,9 +27,10 @@ pub enum DatabaseFlags {
REVERSE_KEY,
#[cfg(feature = "db-dup-sort")]
DUP_SORT,
#[cfg(feature = "db-dup-sort")]
DUP_FIXED,
#[cfg(feature = "db-int-key")]
INTEGER_KEY,
DUP_FIXED,
INTEGER_DUP,
REVERSE_DUP,
}

View File

@ -8,6 +8,8 @@
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
mod arch_migrator;
mod arch_migrator_error;
mod cursor;
mod database;
mod environment;
@ -18,6 +20,11 @@ mod iter;
mod stat;
mod transaction;
pub use arch_migrator::{
MigrateError as ArchMigrateError,
MigrateResult as ArchMigrateResult,
Migrator as ArchMigrator,
};
pub use cursor::{
RoCursorImpl,
RwCursorImpl,

View File

@ -8,68 +8,53 @@
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
//! A utility for migrating data from one LMDB environment to another.
//! Notably, this tool can migrate data from an enviroment created with
//! a different bit-depth than the current rkv consumer, which enables
//! the consumer to retrieve data from an environment that can't be read
//! directly using the rkv APIs.
//! A utility for migrating data from one LMDB environment to another. Notably, this tool
//! can migrate data from an environment created with a different bit-depth than the
//! current rkv consumer, which enables the consumer to retrieve data from an environment
//! that can't be read directly using the rkv APIs.
//!
//! The utility supports both 32-bit and 64-bit LMDB source environments,
//! and it automatically migrates data in both the default database
//! and any named (sub) databases. It also migrates the source environment's
//! "map size" and "max DBs" configuration options to the destination
//! environment.
//! The utility supports both 32-bit and 64-bit LMDB source environments, and it
//! automatically migrates data in both the default database and any named (sub)
//! databases. It also migrates the source environment's "map size" and "max DBs"
//! configuration options to the destination environment.
//!
//! The destination environment must be at the rkv consumer's bit depth
//! and should be empty of data. It can be an empty directory, in which case
//! the utility will create a new LMDB environment within the directory.
//! The destination environment must be at the rkv consumer's bit depth and should be
//! empty of data. It can be an empty directory, in which case the utility will create a
//! new LMDB environment within the directory.
//!
//! The tool currently has these limitations:
//!
//! 1. It doesn't support migration from environments created with
//! `EnvironmentFlags::NO_SUB_DIR`. To migrate such an environment,
//! create a temporary directory, copy the environment's data file
//! to a file called data.mdb in the temporary directory, then migrate
//! the temporary directory as the source environment.
//! 2. It doesn't support migration from databases created with
//! `DatabaseFlags::DUP_SORT` (with or without `DatabaseFlags::DUP_FIXED`).
//! 3. It doesn't account for existing data in the destination environment,
//! which means that it can overwrite data (causing data loss) or fail
//! to migrate data if the destination environment contains existing data.
//! `EnvironmentFlags::NO_SUB_DIR`. To migrate such an environment, create a
//! temporary directory, copy the environment's data file to a file called data.mdb in
//! the temporary directory, then migrate the temporary directory as the source
//! environment.
//! 2. It doesn't support migration from databases created with `DatabaseFlags::DUP_SORT`
//! (with or without `DatabaseFlags::DUP_FIXED`).
//! 3. It doesn't account for existing data in the destination environment, which means
//! that it can overwrite data (causing data loss) or fail to migrate data if the
//! destination environment contains existing data.
//!
//! ## Basic Usage
//!
//! Call `Migrator::new()` with the path to the source environment to create
//! a `Migrator` instance; then call the instance's `migrate()` method
//! with the path to the destination environment to migrate data from the source
//! to the destination environment. For example, this snippet migrates data
//! from the tests/envs/ref_env_32 environment to a new environment
//! in a temporary directory:
//! Call `Migrator::new()` with the path to the source environment to create a `Migrator`
//! instance; then call the instance's `migrate()` method with the path to the destination
//! environment to migrate data from the source to the destination environment. For
//! example, this snippet migrates data from the tests/envs/ref_env_32 environment to a
//! new environment in a temporary directory:
//!
//! ```
//! use rkv::migrate::Migrator;
//! use rkv::migrator::LmdbArchMigrator as Migrator;
//! use std::path::Path;
//! use tempfile::tempdir;
//! let mut migrator = Migrator::new(Path::new("tests/envs/ref_env_32")).unwrap();
//! migrator.migrate(&tempdir().unwrap().path()).unwrap();
//! ```
//!
//! Both `Migrator::new()` and `migrate()` return a `MigrateResult` that is
//! either an `Ok()` result or an `Err<MigrateError>`, where `MigrateError`
//! is an enum whose variants identify specific kinds of migration failures.
//! Both `Migrator::new()` and `migrate()` return a `MigrateResult` that is either an
//! `Ok()` result or an `Err<MigrateError>`, where `MigrateError` is an enum whose
//! variants identify specific kinds of migration failures.
pub use crate::error::MigrateError;
use bitflags::bitflags;
use byteorder::{
LittleEndian,
ReadBytesExt,
};
use lmdb::{
DatabaseFlags,
Environment,
Transaction,
WriteFlags,
};
use std::{
collections::{
BTreeMap,
@ -92,12 +77,25 @@ use std::{
str,
};
use bitflags::bitflags;
use byteorder::{
LittleEndian,
ReadBytesExt,
};
use lmdb::{
DatabaseFlags,
Environment,
Transaction,
WriteFlags,
};
pub use super::arch_migrator_error::MigrateError;
const PAGESIZE: u16 = 4096;
// The magic number is 0xBEEFC0DE, which is 0xDEC0EFBE in little-endian.
// It appears at offset 12 on 32-bit systems and 16 on 64-bit systems.
// We don't support big-endian migration, but presumably we could do so
// by detecting the order of the bytes.
// The magic number is 0xBEEFC0DE, which is 0xDEC0EFBE in little-endian. It appears at
// offset 12 on 32-bit systems and 16 on 64-bit systems. We don't support big-endian
// migration, but presumably we could do so by detecting the order of the bytes.
const MAGIC: [u8; 4] = [0xDE, 0xC0, 0xEF, 0xBE];
pub type MigrateResult<T> = Result<T, MigrateError>;
@ -126,9 +124,8 @@ bitflags! {
}
}
// The bit depth of the executable that created an LMDB environment.
// The Migrator determines this automatically based on the location of
// the magic number in the data.mdb file.
// The bit depth of the executable that created an LMDB environment. The Migrator
// determines this automatically based on the location of the magic number in data.mdb.
#[derive(Clone, Copy, PartialEq)]
enum Bits {
U32,
@ -369,8 +366,8 @@ impl Page {
}
fn parse_leaf_node(cursor: &mut Cursor<&[u8]>, bits: Bits) -> MigrateResult<LeafNode> {
// The order of the mn_lo and mn_hi fields is endian-dependent and would
// be reversed in an LMDB environment created on a big-endian system.
// The order of the mn_lo and mn_hi fields is endian-dependent and would be
// reversed in an LMDB environment created on a big-endian system.
let mn_lo = cursor.read_u16::<LittleEndian>()?;
let mn_hi = cursor.read_u16::<LittleEndian>()?;
@ -385,7 +382,6 @@ impl Page {
let mv_size = Self::leaf_node_size(mn_lo, mn_hi);
if mn_flags.contains(NodeFlags::BIGDATA) {
let overflow_pgno = cursor.read_uint::<LittleEndian>(bits.size())?;
Ok(LeafNode::BigData {
mn_lo,
mn_hi,
@ -402,7 +398,6 @@ impl Page {
let mut cursor = std::io::Cursor::new(&value[..]);
let db = Database::new(&mut cursor, bits)?;
validate_page_num(db.md_root, bits)?;
Ok(LeafNode::SubData {
mn_lo,
mn_hi,
@ -417,7 +412,6 @@ impl Page {
let start = usize::try_from(cursor.position())?;
let end = usize::try_from(cursor.position() + u64::from(mv_size))?;
let value = cursor.get_ref()[start..end].to_vec();
Ok(LeafNode::Regular {
mn_lo,
mn_hi,
@ -449,15 +443,15 @@ impl Page {
}
fn parse_branch_node(cursor: &mut Cursor<&[u8]>, bits: Bits) -> MigrateResult<BranchNode> {
// The order of the mn_lo and mn_hi fields is endian-dependent and would
// be reversed in an LMDB environment created on a big-endian system.
// The order of the mn_lo and mn_hi fields is endian-dependent and would be
// reversed in an LMDB environment created on a big-endian system.
let mn_lo = cursor.read_u16::<LittleEndian>()?;
let mn_hi = cursor.read_u16::<LittleEndian>()?;
let mn_flags = cursor.read_u16::<LittleEndian>()?;
// Branch nodes overload the mn_lo, mn_hi, and mn_flags fields
// to store the page number, so we derive the number from those fields.
// Branch nodes overload the mn_lo, mn_hi, and mn_flags fields to store the page
// number, so we derive the number from those fields.
let mp_pgno = Self::branch_node_page_num(mn_lo, mn_hi, mn_flags, bits);
let mn_ksize = cursor.read_u16::<LittleEndian>()?;
@ -502,10 +496,10 @@ pub struct Migrator {
}
impl Migrator {
/// Create a new Migrator for the LMDB environment at the given path.
/// This tries to open the data.mdb file in the environment and determine
/// the bit depth of the executable that created it, so it can fail
/// and return an Err if the file can't be opened or the depth determined.
/// Create a new Migrator for the LMDB environment at the given path. This tries to
/// open the data.mdb file in the environment and determine the bit depth of the
/// executable that created it, so it can fail and return an Err if the file can't be
/// opened or the depth determined.
pub fn new(path: &Path) -> MigrateResult<Migrator> {
let mut path = PathBuf::from(path);
path.push("data.mdb");
@ -533,20 +527,18 @@ impl Migrator {
})
}
/// Dump the data in one of the databases in the LMDB environment.
/// If the `database` parameter is None, then we dump the data in the main
/// database. If it's the name of a subdatabase, then we dump the data
/// in that subdatabase.
/// Dump the data in one of the databases in the LMDB environment. If the `database`
/// parameter is None, then we dump the data in the main database. If it's the name
/// of a subdatabase, then we dump the data in that subdatabase.
///
/// Note that the output isn't identical to that of the mdb_dump utility,
/// since mdb_dump includes subdatabase key/value pairs when dumping
/// the main database, and those values are architecture-dependent, since
/// they contain pointer-sized data.
///
/// If we wanted to support identical output, we could parameterize
/// inclusion of subdatabase pairs in get_pairs() and include them
/// when dumping data, while continuing to exclude them when migrating
/// Note that the output isn't identical to that of the `mdb_dump` utility, since
/// `mdb_dump` includes subdatabase key/value pairs when dumping the main database,
/// and those values are architecture-dependent, since they contain pointer-sized
/// data.
///
/// If we wanted to support identical output, we could parameterize inclusion of
/// subdatabase pairs in get_pairs() and include them when dumping data, while
/// continuing to exclude them when migrating data.
pub fn dump<T: Write>(&mut self, database: Option<&str>, mut out: T) -> MigrateResult<()> {
let meta_data = self.get_meta_data()?;
let root_page_num = meta_data.mm_dbs.main.md_root;
@ -593,20 +585,18 @@ impl Migrator {
Ok(())
}
/// Migrate all data in all of the databases in the existing LMDB environment
/// to a new environment. This includes all key/value pairs in the main
/// database that aren't metadata about subdatabases and all key/value pairs
/// in all subdatabases.
/// Migrate all data in all of the databases in the existing LMDB environment to a new
/// environment. This includes all key/value pairs in the main database that aren't
/// metadata about subdatabases and all key/value pairs in all subdatabases.
///
/// We also set the map size and maximum databases of the new environment
/// to their values for the existing environment. But we don't set
/// other metadata, and we don't check that the new environment is empty
/// before migrating data.
/// We also set the map size and maximum databases of the new environment to their
/// values for the existing environment. But we don't set other metadata, and we
/// don't check that the new environment is empty before migrating data.
///
/// Thus it's possible for this to overwrite existing data or fail
/// to migrate data if the new environment isn't empty. It's the consumer's
/// responsibility to ensure that data can be safely migrated to the new
/// environment. In general, this means that environment should be empty.
/// Thus it's possible for this to overwrite existing data or fail to migrate data if
/// the new environment isn't empty. It's the consumer's responsibility to ensure
/// that data can be safely migrated to the new environment. In general, this means
/// that environment should be empty.
pub fn migrate(&mut self, dest: &Path) -> MigrateResult<()> {
let meta_data = self.get_meta_data()?;
let root_page_num = meta_data.mm_dbs.main.md_root;
@ -619,24 +609,23 @@ impl Migrator {
.set_max_dbs(subdbs.len() as u32)
.open(dest)?;
// Create the databases before we open a read-write transaction,
// since database creation requires its own read-write transaction,
// which would hang while awaiting completion of an existing one.
// Create the databases before we open a read-write transaction, since database
// creation requires its own read-write transaction, which would hang while
// awaiting completion of an existing one.
env.create_db(None, meta_data.mm_dbs.main.md_flags)?;
for (subdb_name, subdb_info) in &subdbs {
env.create_db(Some(str::from_utf8(&subdb_name)?), subdb_info.md_flags)?;
}
// Now open the read-write transaction that we'll use to migrate
// all the data.
// Now open the read-write transaction that we'll use to migrate all the data.
let mut txn = env.begin_rw_txn()?;
// Migrate the main database.
let pairs = self.get_pairs(root_page)?;
let db = env.open_db(None)?;
for (key, value) in pairs {
// If we knew that the target database was empty, we could
// specify WriteFlags::APPEND to speed up the migration.
// If we knew that the target database was empty, we could specify
// WriteFlags::APPEND to speed up the migration.
txn.put(db, &key, &value, WriteFlags::empty())?;
}
@ -646,8 +635,8 @@ impl Migrator {
let pairs = self.get_pairs(root_page)?;
let db = env.open_db(Some(str::from_utf8(&subdb_name)?))?;
for (key, value) in pairs {
// If we knew that the target database was empty, we could
// specify WriteFlags::APPEND to speed up the migration.
// If we knew that the target database was empty, we could specify
// WriteFlags::APPEND to speed up the migration.
txn.put(db, &key, &value, WriteFlags::empty())?;
}
}
@ -716,9 +705,9 @@ impl Migrator {
overflow_pgno,
..
} => {
// XXX perhaps we could reduce memory consumption
// during a migration by waiting to read big data
// until it's time to write it to the new database.
// Perhaps we could reduce memory consumption during a
// migration by waiting to read big data until it's time
// to write it to the new database.
let value = self.read_data(
*overflow_pgno * u64::from(PAGESIZE) + page_header_size(self.bits),
*mv_size as usize,
@ -728,16 +717,15 @@ impl Migrator {
LeafNode::SubData {
..
} => {
// We don't include subdatabase leaves in pairs,
// since there's no architecture-neutral
// representation of them, and in any case they're
// meta-data that should get recreated when we
// migrate the subdatabases themselves.
// We don't include subdatabase leaves in pairs, since
// there's no architecture-neutral representation of them,
// and in any case they're meta-data that should get
// recreated when we migrate the subdatabases themselves.
//
// If we wanted to create identical dumps to those
// produced by mdb_dump, however, we could allow
// consumers to specify that they'd like to include
// these records.
// produced by `mdb_dump`, however, we could allow
// consumers to specify that they'd like to include these
// records.
},
};
}
@ -787,27 +775,18 @@ impl Migrator {
#[cfg(test)]
mod tests {
use super::MigrateResult;
use super::Migrator;
use crate::error::MigrateError;
use super::*;
use std::{
env,
fs,
mem::size_of,
};
use lmdb::{
Environment,
Error as LmdbError,
};
use std::{
env,
fs::{
self,
File,
},
io::{
Read,
Seek,
SeekFrom,
},
mem::size_of,
path::PathBuf,
};
use tempfile::{
tempdir,
tempfile,
@ -823,15 +802,17 @@ mod tests {
loop {
match ref_file.read(ref_buf) {
Err(err) => panic!(err),
Ok(ref_len) => match new_file.read(new_buf) {
Err(err) => panic!(err),
Ok(new_len) => {
assert_eq!(ref_len, new_len);
if ref_len == 0 {
break;
};
assert_eq!(ref_buf[0..ref_len], new_buf[0..new_len]);
},
Ok(ref_len) => {
match new_file.read(new_buf) {
Err(err) => panic!(err),
Ok(new_len) => {
assert_eq!(ref_len, new_len);
if ref_len == 0 {
break;
};
assert_eq!(ref_buf[0..ref_len], new_buf[0..new_len]);
},
}
},
}
}
@ -1017,8 +998,8 @@ mod tests {
// Compare the new dump file to the reference dump file.
compare_files(&mut ref_dump_file, &mut new_dump_file)?;
// Overwrite the old env's files with the new env's files and confirm
// that it's now possible to open the old env with LMDB.
// Overwrite the old env's files with the new env's files and confirm that it's now
// possible to open the old env with LMDB.
fs::copy(new_env.path().join("data.mdb"), old_env.path().join("data.mdb"))?;
fs::copy(new_env.path().join("lock.mdb"), old_env.path().join("lock.mdb"))?;
assert!(Environment::new().open(&old_env.path()).is_ok());

View File

@ -0,0 +1,107 @@
// Copyright 2018-2019 Mozilla
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
use std::{
io,
num,
str,
};
use failure::Fail;
/// The error type for LMDB environment migration operations.
///
/// Each variant identifies a specific kind of migration failure. The
/// `#[fail(display = …)]` attributes supply the human-readable message
/// surfaced through the derived `failure::Fail` implementation.
#[derive(Debug, Fail)]
pub enum MigrateError {
    /// The named subdatabase requested by the caller doesn't exist.
    #[fail(display = "database not found: {:?}", _0)]
    DatabaseNotFound(String),
    /// A free-form error message, converted from `&str` or `String`.
    #[fail(display = "{}", _0)]
    FromString(String),
    /// The bit depth of the executable that created the environment
    /// couldn't be determined from the data file.
    #[fail(display = "couldn't determine bit depth")]
    IndeterminateBitDepth,
    /// Wrapper around a `std::io::Error`.
    #[fail(display = "I/O error: {:?}", _0)]
    IoError(io::Error),
    /// The stored database flags contained unrecognized bits.
    #[fail(display = "invalid DatabaseFlags bits")]
    InvalidDatabaseBits,
    #[fail(display = "invalid data version")]
    InvalidDataVersion,
    /// The data file didn't contain the expected LMDB magic number.
    #[fail(display = "invalid magic number")]
    InvalidMagicNum,
    /// The stored node flags contained unrecognized bits.
    #[fail(display = "invalid NodeFlags bits")]
    InvalidNodeBits,
    /// The stored page flags contained unrecognized bits.
    #[fail(display = "invalid PageFlags bits")]
    InvalidPageBits,
    #[fail(display = "invalid page number")]
    InvalidPageNum,
    /// Wrapper around an error reported by the LMDB backend.
    #[fail(display = "lmdb backend error: {}", _0)]
    LmdbError(lmdb::Error),
    #[fail(display = "string conversion error")]
    StringConversionError,
    /// Wrapper around a failed integer narrowing conversion.
    #[fail(display = "TryFromInt error: {:?}", _0)]
    TryFromIntError(num::TryFromIntError),
    #[fail(display = "unexpected Page variant")]
    UnexpectedPageVariant,
    #[fail(display = "unexpected PageHeader variant")]
    UnexpectedPageHeaderVariant,
    #[fail(display = "unsupported PageHeader variant")]
    UnsupportedPageHeaderVariant,
    /// Wrapper around a UTF-8 decoding failure.
    #[fail(display = "UTF8 error: {:?}", _0)]
    Utf8Error(str::Utf8Error),
}
impl From<io::Error> for MigrateError {
fn from(e: io::Error) -> MigrateError {
MigrateError::IoError(e)
}
}
impl From<str::Utf8Error> for MigrateError {
fn from(e: str::Utf8Error) -> MigrateError {
MigrateError::Utf8Error(e)
}
}
impl From<num::TryFromIntError> for MigrateError {
fn from(e: num::TryFromIntError) -> MigrateError {
MigrateError::TryFromIntError(e)
}
}
impl From<&str> for MigrateError {
fn from(e: &str) -> MigrateError {
MigrateError::FromString(e.to_string())
}
}
impl From<String> for MigrateError {
fn from(e: String) -> MigrateError {
MigrateError::FromString(e)
}
}
impl From<lmdb::Error> for MigrateError {
fn from(e: lmdb::Error) -> MigrateError {
MigrateError::LmdbError(e)
}
}

View File

@ -8,7 +8,15 @@
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
use std::path::Path;
use std::{
fs,
path::{
Path,
PathBuf,
},
};
use lmdb::Error as LmdbError;
use super::{
DatabaseFlagsImpl,
@ -23,93 +31,239 @@ use super::{
use crate::backend::traits::{
BackendEnvironment,
BackendEnvironmentBuilder,
BackendInfo,
BackendIter,
BackendRoCursor,
BackendRoCursorTransaction,
BackendStat,
};
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct EnvironmentBuilderImpl(lmdb::EnvironmentBuilder);
pub struct EnvironmentBuilderImpl {
builder: lmdb::EnvironmentBuilder,
env_path_type: EnvironmentPathType,
env_lock_type: EnvironmentLockType,
env_db_type: EnvironmentDefaultDbType,
make_dir: bool,
}
impl<'b> BackendEnvironmentBuilder<'b> for EnvironmentBuilderImpl {
type Error = ErrorImpl;
type Environment = EnvironmentImpl;
type Error = ErrorImpl;
type Flags = EnvironmentFlagsImpl;
fn new() -> EnvironmentBuilderImpl {
EnvironmentBuilderImpl(lmdb::Environment::new())
EnvironmentBuilderImpl {
builder: lmdb::Environment::new(),
env_path_type: EnvironmentPathType::SubDir,
env_lock_type: EnvironmentLockType::Lockfile,
env_db_type: EnvironmentDefaultDbType::SingleDatabase,
make_dir: false,
}
}
fn set_flags<T>(&mut self, flags: T) -> &mut Self
where
T: Into<Self::Flags>,
{
self.0.set_flags(flags.into().0);
let flags = flags.into();
if flags.0 == lmdb::EnvironmentFlags::NO_SUB_DIR {
self.env_path_type = EnvironmentPathType::NoSubDir;
}
if flags.0 == lmdb::EnvironmentFlags::NO_LOCK {
self.env_lock_type = EnvironmentLockType::NoLockfile;
}
self.builder.set_flags(flags.0);
self
}
fn set_max_readers(&mut self, max_readers: u32) -> &mut Self {
self.0.set_max_readers(max_readers);
self.builder.set_max_readers(max_readers);
self
}
fn set_max_dbs(&mut self, max_dbs: u32) -> &mut Self {
self.0.set_max_dbs(max_dbs);
if max_dbs > 0 {
self.env_db_type = EnvironmentDefaultDbType::MultipleNamedDatabases
}
self.builder.set_max_dbs(max_dbs);
self
}
fn set_map_size(&mut self, size: usize) -> &mut Self {
self.0.set_map_size(size);
self.builder.set_map_size(size);
self
}
fn set_make_dir_if_needed(&mut self, make_dir: bool) -> &mut Self {
self.make_dir = make_dir;
self
}
fn open(&self, path: &Path) -> Result<Self::Environment, Self::Error> {
self.0.open(path).map(EnvironmentImpl).map_err(ErrorImpl)
match self.env_path_type {
EnvironmentPathType::NoSubDir => {
if !path.is_file() {
return Err(ErrorImpl::UnsuitableEnvironmentPath(path.into()));
}
},
EnvironmentPathType::SubDir => {
if !path.is_dir() {
if !self.make_dir {
return Err(ErrorImpl::UnsuitableEnvironmentPath(path.into()));
}
fs::create_dir_all(path)?;
}
},
}
self.builder.open(path).map_err(ErrorImpl::LmdbError).and_then(|lmdbenv| {
EnvironmentImpl::new(path, self.env_path_type, self.env_lock_type, self.env_db_type, lmdbenv)
})
}
}
/// Whether the LMDB environment lives in its own directory (the default)
/// or at a single file path. Set to `NoSubDir` when the builder receives
/// the `NO_SUB_DIR` environment flag; used to validate the path on open
/// and when enumerating the environment's files on disk.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum EnvironmentPathType {
    SubDir,
    NoSubDir,
}

/// Whether the environment maintains a lockfile alongside the data file.
/// Set to `NoLockfile` when the builder receives the `NO_LOCK` flag.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum EnvironmentLockType {
    Lockfile,
    NoLockfile,
}

/// Whether the environment was configured for named subdatabases.
/// Set to `MultipleNamedDatabases` when the builder's `set_max_dbs` is
/// called with a value greater than zero.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum EnvironmentDefaultDbType {
    SingleDatabase,
    MultipleNamedDatabases,
}
#[derive(Debug)]
pub struct EnvironmentImpl(lmdb::Environment);
pub struct EnvironmentImpl {
path: PathBuf,
env_path_type: EnvironmentPathType,
env_lock_type: EnvironmentLockType,
env_db_type: EnvironmentDefaultDbType,
lmdbenv: lmdb::Environment,
}
impl EnvironmentImpl {
pub(crate) fn new(
path: &Path,
env_path_type: EnvironmentPathType,
env_lock_type: EnvironmentLockType,
env_db_type: EnvironmentDefaultDbType,
lmdbenv: lmdb::Environment,
) -> Result<EnvironmentImpl, ErrorImpl> {
Ok(EnvironmentImpl {
path: path.to_path_buf(),
env_path_type,
env_lock_type,
env_db_type,
lmdbenv,
})
}
}
impl<'e> BackendEnvironment<'e> for EnvironmentImpl {
type Error = ErrorImpl;
type Database = DatabaseImpl;
type Error = ErrorImpl;
type Flags = DatabaseFlagsImpl;
type Stat = StatImpl;
type Info = InfoImpl;
type RoTransaction = RoTransactionImpl<'e>;
type RwTransaction = RwTransactionImpl<'e>;
type Stat = StatImpl;
fn get_dbs(&self) -> Result<Vec<Option<String>>, Self::Error> {
if self.env_db_type == EnvironmentDefaultDbType::SingleDatabase {
return Ok(vec![None]);
}
let db = self.lmdbenv.open_db(None).map(DatabaseImpl).map_err(ErrorImpl::LmdbError)?;
let reader = self.begin_ro_txn()?;
let cursor = reader.open_ro_cursor(&db)?;
let mut iter = cursor.into_iter();
let mut store = vec![];
while let Some(result) = iter.next() {
let (key, _) = result?;
let name = String::from_utf8(key.to_owned()).map_err(|_| ErrorImpl::LmdbError(lmdb::Error::Corrupted))?;
store.push(Some(name));
}
Ok(store)
}
fn open_db(&self, name: Option<&str>) -> Result<Self::Database, Self::Error> {
self.0.open_db(name).map(DatabaseImpl).map_err(ErrorImpl)
self.lmdbenv.open_db(name).map(DatabaseImpl).map_err(ErrorImpl::LmdbError)
}
fn create_db(&self, name: Option<&str>, flags: Self::Flags) -> Result<Self::Database, Self::Error> {
self.0.create_db(name, flags.0).map(DatabaseImpl).map_err(ErrorImpl)
self.lmdbenv.create_db(name, flags.0).map(DatabaseImpl).map_err(ErrorImpl::LmdbError)
}
fn begin_ro_txn(&'e self) -> Result<Self::RoTransaction, Self::Error> {
self.0.begin_ro_txn().map(RoTransactionImpl).map_err(ErrorImpl)
self.lmdbenv.begin_ro_txn().map(RoTransactionImpl).map_err(ErrorImpl::LmdbError)
}
fn begin_rw_txn(&'e self) -> Result<Self::RwTransaction, Self::Error> {
self.0.begin_rw_txn().map(RwTransactionImpl).map_err(ErrorImpl)
self.lmdbenv.begin_rw_txn().map(RwTransactionImpl).map_err(ErrorImpl::LmdbError)
}
fn sync(&self, force: bool) -> Result<(), Self::Error> {
self.0.sync(force).map_err(ErrorImpl)
self.lmdbenv.sync(force).map_err(ErrorImpl::LmdbError)
}
fn stat(&self) -> Result<Self::Stat, Self::Error> {
self.0.stat().map(StatImpl).map_err(ErrorImpl)
self.lmdbenv.stat().map(StatImpl).map_err(ErrorImpl::LmdbError)
}
fn info(&self) -> Result<Self::Info, Self::Error> {
self.0.info().map(InfoImpl).map_err(ErrorImpl)
self.lmdbenv.info().map(InfoImpl).map_err(ErrorImpl::LmdbError)
}
fn freelist(&self) -> Result<usize, Self::Error> {
self.0.freelist().map_err(ErrorImpl)
self.lmdbenv.freelist().map_err(ErrorImpl::LmdbError)
}
fn load_ratio(&self) -> Result<Option<f32>, Self::Error> {
let stat = self.stat()?;
let info = self.info()?;
let freelist = self.freelist()?;
let last_pgno = info.last_pgno() + 1; // pgno is 0 based.
let total_pgs = info.map_size() / stat.page_size();
if freelist > last_pgno {
return Err(ErrorImpl::LmdbError(LmdbError::Corrupted));
}
let used_pgs = last_pgno - freelist;
Ok(Some(used_pgs as f32 / total_pgs as f32))
}
fn set_map_size(&self, size: usize) -> Result<(), Self::Error> {
self.0.set_map_size(size).map_err(ErrorImpl)
self.lmdbenv.set_map_size(size).map_err(ErrorImpl::LmdbError)
}
fn get_files_on_disk(&self) -> Vec<PathBuf> {
let mut store = vec![];
if self.env_path_type == EnvironmentPathType::NoSubDir {
// The option NO_SUB_DIR could change the default directory layout; therefore this should
// probably return the path used to create environment, along with the custom lockfile
// when available.
unimplemented!();
}
let mut db_filename = self.path.clone();
db_filename.push("data.mdb");
store.push(db_filename);
if self.env_lock_type == EnvironmentLockType::Lockfile {
let mut lock_filename = self.path.clone();
lock_filename.push("lock.mdb");
store.push(lock_filename);
}
store
}
}

View File

@ -8,32 +8,55 @@
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
use std::fmt;
use std::{
fmt,
io,
path::PathBuf,
};
use crate::backend::traits::BackendError;
use crate::error::StoreError;
use crate::{
backend::traits::BackendError,
error::StoreError,
};
#[derive(Debug)]
pub struct ErrorImpl(pub(crate) lmdb::Error);
pub enum ErrorImpl {
LmdbError(lmdb::Error),
UnsuitableEnvironmentPath(PathBuf),
IoError(io::Error),
}
impl BackendError for ErrorImpl {}
impl fmt::Display for ErrorImpl {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(fmt)
match self {
ErrorImpl::LmdbError(e) => e.fmt(fmt),
ErrorImpl::UnsuitableEnvironmentPath(_) => write!(fmt, "UnsuitableEnvironmentPath"),
ErrorImpl::IoError(e) => e.fmt(fmt),
}
}
}
impl Into<StoreError> for ErrorImpl {
fn into(self) -> StoreError {
match self.0 {
lmdb::Error::NotFound => StoreError::KeyValuePairNotFound,
lmdb::Error::BadValSize => StoreError::KeyValuePairBadSize,
lmdb::Error::Invalid => StoreError::FileInvalid,
lmdb::Error::MapFull => StoreError::MapFull,
lmdb::Error::DbsFull => StoreError::DbsFull,
lmdb::Error::ReadersFull => StoreError::ReadersFull,
_ => StoreError::LmdbError(self.0),
match self {
ErrorImpl::LmdbError(lmdb::Error::Corrupted) => StoreError::DatabaseCorrupted,
ErrorImpl::LmdbError(lmdb::Error::NotFound) => StoreError::KeyValuePairNotFound,
ErrorImpl::LmdbError(lmdb::Error::BadValSize) => StoreError::KeyValuePairBadSize,
ErrorImpl::LmdbError(lmdb::Error::Invalid) => StoreError::FileInvalid,
ErrorImpl::LmdbError(lmdb::Error::MapFull) => StoreError::MapFull,
ErrorImpl::LmdbError(lmdb::Error::DbsFull) => StoreError::DbsFull,
ErrorImpl::LmdbError(lmdb::Error::ReadersFull) => StoreError::ReadersFull,
ErrorImpl::LmdbError(error) => StoreError::LmdbError(error),
ErrorImpl::UnsuitableEnvironmentPath(path) => StoreError::UnsuitableEnvironmentPath(path),
ErrorImpl::IoError(error) => StoreError::IoError(error),
}
}
}
impl From<io::Error> for ErrorImpl {
fn from(e: io::Error) -> ErrorImpl {
ErrorImpl::IoError(e)
}
}

View File

@ -8,16 +8,18 @@
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
use crate::backend::common::{
DatabaseFlags,
EnvironmentFlags,
WriteFlags,
};
use crate::backend::traits::{
BackendDatabaseFlags,
BackendEnvironmentFlags,
BackendFlags,
BackendWriteFlags,
use crate::backend::{
common::{
DatabaseFlags,
EnvironmentFlags,
WriteFlags,
},
traits::{
BackendDatabaseFlags,
BackendEnvironmentFlags,
BackendFlags,
BackendWriteFlags,
},
};
#[derive(Debug, Eq, PartialEq, Copy, Clone, Default)]
@ -86,9 +88,10 @@ impl Into<lmdb::DatabaseFlags> for DatabaseFlags {
DatabaseFlags::REVERSE_KEY => lmdb::DatabaseFlags::REVERSE_KEY,
#[cfg(feature = "db-dup-sort")]
DatabaseFlags::DUP_SORT => lmdb::DatabaseFlags::DUP_SORT,
#[cfg(feature = "db-dup-sort")]
DatabaseFlags::DUP_FIXED => lmdb::DatabaseFlags::DUP_FIXED,
#[cfg(feature = "db-int-key")]
DatabaseFlags::INTEGER_KEY => lmdb::DatabaseFlags::INTEGER_KEY,
DatabaseFlags::DUP_FIXED => lmdb::DatabaseFlags::DUP_FIXED,
DatabaseFlags::INTEGER_DUP => lmdb::DatabaseFlags::INTEGER_DUP,
DatabaseFlags::REVERSE_DUP => lmdb::DatabaseFlags::REVERSE_DUP,
}

View File

@ -36,6 +36,6 @@ impl<'i, C> BackendIter<'i> for IterImpl<'i, C> {
#[allow(clippy::type_complexity)]
fn next(&mut self) -> Option<Result<(&'i [u8], &'i [u8]), Self::Error>> {
self.iter.next().map(|e| e.map_err(ErrorImpl))
self.iter.next().map(|e| e.map_err(ErrorImpl::LmdbError))
}
}

View File

@ -27,11 +27,11 @@ use crate::backend::traits::{
pub struct RoTransactionImpl<'t>(pub(crate) lmdb::RoTransaction<'t>);
impl<'t> BackendRoTransaction for RoTransactionImpl<'t> {
type Error = ErrorImpl;
type Database = DatabaseImpl;
type Error = ErrorImpl;
fn get(&self, db: &Self::Database, key: &[u8]) -> Result<&[u8], Self::Error> {
self.0.get(db.0, &key).map_err(ErrorImpl)
self.0.get(db.0, &key).map_err(ErrorImpl::LmdbError)
}
fn abort(self) {
@ -43,7 +43,7 @@ impl<'t> BackendRoCursorTransaction<'t> for RoTransactionImpl<'t> {
type RoCursor = RoCursorImpl<'t>;
fn open_ro_cursor(&'t self, db: &Self::Database) -> Result<Self::RoCursor, Self::Error> {
self.0.open_ro_cursor(db.0).map(RoCursorImpl).map_err(ErrorImpl)
self.0.open_ro_cursor(db.0).map(RoCursorImpl).map_err(ErrorImpl::LmdbError)
}
}
@ -51,34 +51,34 @@ impl<'t> BackendRoCursorTransaction<'t> for RoTransactionImpl<'t> {
pub struct RwTransactionImpl<'t>(pub(crate) lmdb::RwTransaction<'t>);
impl<'t> BackendRwTransaction for RwTransactionImpl<'t> {
type Error = ErrorImpl;
type Database = DatabaseImpl;
type Error = ErrorImpl;
type Flags = WriteFlagsImpl;
fn get(&self, db: &Self::Database, key: &[u8]) -> Result<&[u8], Self::Error> {
self.0.get(db.0, &key).map_err(ErrorImpl)
self.0.get(db.0, &key).map_err(ErrorImpl::LmdbError)
}
fn put(&mut self, db: &Self::Database, key: &[u8], value: &[u8], flags: Self::Flags) -> Result<(), Self::Error> {
self.0.put(db.0, &key, &value, flags.0).map_err(ErrorImpl)
self.0.put(db.0, &key, &value, flags.0).map_err(ErrorImpl::LmdbError)
}
#[cfg(not(feature = "db-dup-sort"))]
fn del(&mut self, db: &Self::Database, key: &[u8]) -> Result<(), Self::Error> {
self.0.del(db.0, &key, None).map_err(ErrorImpl)
self.0.del(db.0, &key, None).map_err(ErrorImpl::LmdbError)
}
#[cfg(feature = "db-dup-sort")]
fn del(&mut self, db: &Self::Database, key: &[u8], value: Option<&[u8]>) -> Result<(), Self::Error> {
self.0.del(db.0, &key, value).map_err(ErrorImpl)
self.0.del(db.0, &key, value).map_err(ErrorImpl::LmdbError)
}
fn clear_db(&mut self, db: &Self::Database) -> Result<(), Self::Error> {
self.0.clear_db(db.0).map_err(ErrorImpl)
self.0.clear_db(db.0).map_err(ErrorImpl::LmdbError)
}
fn commit(self) -> Result<(), Self::Error> {
self.0.commit().map_err(ErrorImpl)
self.0.commit().map_err(ErrorImpl::LmdbError)
}
fn abort(self) {
@ -90,6 +90,6 @@ impl<'t> BackendRwCursorTransaction<'t> for RwTransactionImpl<'t> {
type RoCursor = RoCursorImpl<'t>;
fn open_ro_cursor(&'t self, db: &Self::Database) -> Result<Self::RoCursor, Self::Error> {
self.0.open_ro_cursor(db.0).map(RoCursorImpl).map_err(ErrorImpl)
self.0.open_ro_cursor(db.0).map(RoCursorImpl).map_err(ErrorImpl::LmdbError)
}
}

View File

@ -14,8 +14,10 @@ use serde_derive::{
Serialize,
};
use super::snapshot::Snapshot;
use super::DatabaseFlagsImpl;
use super::{
snapshot::Snapshot,
DatabaseFlagsImpl,
};
use crate::backend::traits::BackendDatabase;
#[derive(Debug, Eq, PartialEq, Copy, Clone, Hash)]

View File

@ -8,18 +8,20 @@
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
use std::borrow::Cow;
use std::collections::HashMap;
use std::fs;
use std::path::{
Path,
PathBuf,
};
use std::sync::Arc;
use std::sync::{
RwLock,
RwLockReadGuard,
RwLockWriteGuard,
use std::{
borrow::Cow,
collections::HashMap,
fs,
path::{
Path,
PathBuf,
},
sync::{
Arc,
RwLock,
RwLockReadGuard,
RwLockWriteGuard,
},
};
use id_arena::Arena;
@ -52,11 +54,12 @@ pub struct EnvironmentBuilderImpl {
max_readers: Option<usize>,
max_dbs: Option<usize>,
map_size: Option<usize>,
make_dir: bool,
}
impl<'b> BackendEnvironmentBuilder<'b> for EnvironmentBuilderImpl {
type Error = ErrorImpl;
type Environment = EnvironmentImpl;
type Error = ErrorImpl;
type Flags = EnvironmentFlagsImpl;
fn new() -> EnvironmentBuilderImpl {
@ -65,6 +68,7 @@ impl<'b> BackendEnvironmentBuilder<'b> for EnvironmentBuilderImpl {
max_readers: None,
max_dbs: None,
map_size: None,
make_dir: false,
}
}
@ -91,7 +95,20 @@ impl<'b> BackendEnvironmentBuilder<'b> for EnvironmentBuilderImpl {
self
}
fn set_make_dir_if_needed(&mut self, make_dir: bool) -> &mut Self {
self.make_dir = make_dir;
self
}
fn open(&self, path: &Path) -> Result<Self::Environment, Self::Error> {
// Technically NO_SUB_DIR should change these checks here, but they're both currently
// unimplemented with this storage backend.
if !path.is_dir() {
if !self.make_dir {
return Err(ErrorImpl::UnsuitableEnvironmentPath(path.into()));
}
fs::create_dir_all(path)?;
}
let mut env = EnvironmentImpl::new(path, self.flags, self.max_readers, self.max_dbs, self.map_size)?;
env.read_from_disk()?;
Ok(env)
@ -188,13 +205,18 @@ impl EnvironmentImpl {
}
impl<'e> BackendEnvironment<'e> for EnvironmentImpl {
type Error = ErrorImpl;
type Database = DatabaseImpl;
type Error = ErrorImpl;
type Flags = DatabaseFlagsImpl;
type Stat = StatImpl;
type Info = InfoImpl;
type RoTransaction = RoTransactionImpl<'e>;
type RwTransaction = RwTransactionImpl<'e>;
type Stat = StatImpl;
fn get_dbs(&self) -> Result<Vec<Option<String>>, Self::Error> {
let dbs = self.dbs.read().map_err(|_| ErrorImpl::EnvPoisonError)?;
Ok(dbs.keys().map(|key| key.to_owned()).collect())
}
fn open_db(&self, name: Option<&str>) -> Result<Self::Database, Self::Error> {
if Arc::strong_count(&self.ro_txns) > 1 {
@ -215,7 +237,7 @@ impl<'e> BackendEnvironment<'e> for EnvironmentImpl {
let key = name.map(String::from);
let mut dbs = self.dbs.write().map_err(|_| ErrorImpl::EnvPoisonError)?;
let mut arena = self.arena.write().map_err(|_| ErrorImpl::EnvPoisonError)?;
if dbs.keys().filter_map(|k| k.as_ref()).count() >= self.max_dbs {
if dbs.keys().filter_map(|k| k.as_ref()).count() >= self.max_dbs && name != None {
return Err(ErrorImpl::DbsFull);
}
let id = dbs.entry(key).or_insert_with(|| DatabaseImpl(arena.alloc(Database::new(Some(flags), None))));
@ -247,8 +269,21 @@ impl<'e> BackendEnvironment<'e> for EnvironmentImpl {
unimplemented!()
}
fn load_ratio(&self) -> Result<Option<f32>, Self::Error> {
warn!("`load_ratio()` is irrelevant for this storage backend.");
Ok(None)
}
fn set_map_size(&self, size: usize) -> Result<(), Self::Error> {
warn!("Ignoring `set_map_size({})`", size);
warn!("`set_map_size({})` is ignored by this storage backend.", size);
Ok(())
}
fn get_files_on_disk(&self) -> Vec<PathBuf> {
// Technically NO_SUB_DIR and NO_LOCK should change this output, but
// they're both currently unimplemented with this storage backend.
let mut db_filename = self.path.clone();
db_filename.push(DEFAULT_DB_FILENAME);
return vec![db_filename];
}
}

View File

@ -8,13 +8,18 @@
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
use std::fmt;
use std::io;
use std::{
fmt,
io,
path::PathBuf,
};
use bincode::Error as BincodeError;
use crate::backend::traits::BackendError;
use crate::error::StoreError;
use crate::{
backend::traits::BackendError,
error::StoreError,
};
#[derive(Debug)]
pub enum ErrorImpl {
@ -24,6 +29,7 @@ pub enum ErrorImpl {
DbsIllegalOpen,
DbNotFoundError,
DbIsForeignError,
UnsuitableEnvironmentPath(PathBuf),
IoError(io::Error),
BincodeError(BincodeError),
}
@ -39,6 +45,7 @@ impl fmt::Display for ErrorImpl {
ErrorImpl::DbsIllegalOpen => write!(fmt, "DbIllegalOpen (safe mode)"),
ErrorImpl::DbNotFoundError => write!(fmt, "DbNotFoundError (safe mode)"),
ErrorImpl::DbIsForeignError => write!(fmt, "DbIsForeignError (safe mode)"),
ErrorImpl::UnsuitableEnvironmentPath(_) => write!(fmt, "UnsuitableEnvironmentPath (safe mode)"),
ErrorImpl::IoError(e) => e.fmt(fmt),
ErrorImpl::BincodeError(e) => e.fmt(fmt),
}
@ -55,6 +62,8 @@ impl Into<StoreError> for ErrorImpl {
ErrorImpl::KeyValuePairNotFound => StoreError::KeyValuePairNotFound,
ErrorImpl::BincodeError(_) => StoreError::FileInvalid,
ErrorImpl::DbsFull => StoreError::DbsFull,
ErrorImpl::UnsuitableEnvironmentPath(path) => StoreError::UnsuitableEnvironmentPath(path),
ErrorImpl::IoError(error) => StoreError::IoError(error),
_ => StoreError::SafeModeError(self),
}
}

View File

@ -14,16 +14,18 @@ use serde_derive::{
Serialize,
};
use crate::backend::common::{
DatabaseFlags,
EnvironmentFlags,
WriteFlags,
};
use crate::backend::traits::{
BackendDatabaseFlags,
BackendEnvironmentFlags,
BackendFlags,
BackendWriteFlags,
use crate::backend::{
common::{
DatabaseFlags,
EnvironmentFlags,
WriteFlags,
},
traits::{
BackendDatabaseFlags,
BackendEnvironmentFlags,
BackendFlags,
BackendWriteFlags,
},
};
bitflags! {
@ -92,9 +94,10 @@ impl Into<DatabaseFlagsImpl> for DatabaseFlags {
DatabaseFlags::REVERSE_KEY => unimplemented!(),
#[cfg(feature = "db-dup-sort")]
DatabaseFlags::DUP_SORT => DatabaseFlagsImpl::DUP_SORT,
#[cfg(feature = "db-dup-sort")]
DatabaseFlags::DUP_FIXED => unimplemented!(),
#[cfg(feature = "db-int-key")]
DatabaseFlags::INTEGER_KEY => DatabaseFlagsImpl::INTEGER_KEY,
DatabaseFlags::DUP_FIXED => unimplemented!(),
DatabaseFlags::INTEGER_DUP => unimplemented!(),
DatabaseFlags::REVERSE_DUP => unimplemented!(),
}

View File

@ -8,11 +8,13 @@
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
use std::collections::{
BTreeMap,
BTreeSet,
use std::{
collections::{
BTreeMap,
BTreeSet,
},
sync::Arc,
};
use std::sync::Arc;
use serde_derive::{
Deserialize,

View File

@ -8,8 +8,10 @@
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
use std::collections::HashMap;
use std::sync::Arc;
use std::{
collections::HashMap,
sync::Arc,
};
use super::{
snapshot::Snapshot,
@ -45,8 +47,8 @@ impl<'t> RoTransactionImpl<'t> {
}
impl<'t> BackendRoTransaction for RoTransactionImpl<'t> {
type Error = ErrorImpl;
type Database = DatabaseImpl;
type Error = ErrorImpl;
fn get(&self, db: &Self::Database, key: &[u8]) -> Result<&[u8], Self::Error> {
let snapshot = self.snapshots.get(db).ok_or_else(|| ErrorImpl::DbIsForeignError)?;
@ -86,8 +88,8 @@ impl<'t> RwTransactionImpl<'t> {
}
impl<'t> BackendRwTransaction for RwTransactionImpl<'t> {
type Error = ErrorImpl;
type Database = DatabaseImpl;
type Error = ErrorImpl;
type Flags = WriteFlagsImpl;
fn get(&self, db: &Self::Database, key: &[u8]) -> Result<&[u8], Self::Error> {

View File

@ -8,18 +8,25 @@
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
use std::fmt::{
Debug,
Display,
use std::{
fmt::{
Debug,
Display,
},
path::{
Path,
PathBuf,
},
};
use std::path::Path;
use crate::backend::common::{
DatabaseFlags,
EnvironmentFlags,
WriteFlags,
use crate::{
backend::common::{
DatabaseFlags,
EnvironmentFlags,
WriteFlags,
},
error::StoreError,
};
use crate::error::StoreError;
pub trait BackendError: Debug + Display + Into<StoreError> {}
@ -84,6 +91,8 @@ pub trait BackendEnvironmentBuilder<'b>: Debug + Eq + PartialEq + Copy + Clone {
fn set_map_size(&mut self, size: usize) -> &mut Self;
fn set_make_dir_if_needed(&mut self, make_dir: bool) -> &mut Self;
fn open(&self, path: &Path) -> Result<Self::Environment, Self::Error>;
}
@ -96,6 +105,8 @@ pub trait BackendEnvironment<'e>: Debug {
type RoTransaction: BackendRoCursorTransaction<'e, Database = Self::Database>;
type RwTransaction: BackendRwCursorTransaction<'e, Database = Self::Database>;
fn get_dbs(&self) -> Result<Vec<Option<String>>, Self::Error>;
fn open_db(&self, name: Option<&str>) -> Result<Self::Database, Self::Error>;
fn create_db(&self, name: Option<&str>, flags: Self::Flags) -> Result<Self::Database, Self::Error>;
@ -112,7 +123,11 @@ pub trait BackendEnvironment<'e>: Debug {
fn freelist(&self) -> Result<usize, Self::Error>;
fn load_ratio(&self) -> Result<Option<f32>, Self::Error>;
fn set_map_size(&self, size: usize) -> Result<(), Self::Error>;
fn get_files_on_disk(&self) -> Vec<PathBuf>;
}
pub trait BackendRoTransaction: Debug {

View File

@ -8,14 +8,18 @@
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
use std::env::args;
use std::io;
use std::path::Path;
use std::{
env::args,
io,
path::Path,
};
use rkv::migrate::Migrator;
use rkv::MigrateError;
use rkv::migrator::{
LmdbArchMigrateError,
LmdbArchMigrator,
};
fn main() -> Result<(), MigrateError> {
fn main() -> Result<(), LmdbArchMigrateError> {
let mut cli_args = args();
let mut db_name = None;
let mut env_path = None;
@ -43,8 +47,8 @@ fn main() -> Result<(), MigrateError> {
}
let env_path = env_path.ok_or("must provide a path to the LMDB environment")?;
let mut migrator: Migrator = Migrator::new(Path::new(&env_path))?;
migrator.dump(db_name.as_ref().map(String::as_str), io::stdout()).unwrap();
let mut migrator = LmdbArchMigrator::new(Path::new(&env_path))?;
migrator.dump(db_name.as_deref(), io::stdout()).unwrap();
Ok(())
}

View File

@ -14,17 +14,19 @@
//! the number of key/value pairs to create via the `-n <number>` flag
//! (for which the default value is 50).
use std::env::args;
use std::fs;
use std::fs::File;
use std::io::Read;
use std::path::Path;
use rkv::backend::{
BackendEnvironmentBuilder,
Lmdb,
use std::{
env::args,
fs,
fs::File,
io::Read,
path::Path,
};
use rkv::{
backend::{
BackendEnvironmentBuilder,
Lmdb,
},
Rkv,
StoreOptions,
Value,
@ -78,7 +80,7 @@ fn main() {
// of the pairs (assuming maximum key and value sizes).
builder.set_map_size((511 + 65535) * num_pairs * 2);
let rkv = Rkv::from_builder(Path::new(&path), builder).expect("Rkv");
let store = rkv.open_single(database.as_ref().map(|x| x.as_str()), StoreOptions::create()).expect("opened");
let store = rkv.open_single(database.as_deref(), StoreOptions::create()).expect("opened");
let mut writer = rkv.write().expect("writer");
// Generate random values for the number of keys and key/value lengths.

View File

@ -8,10 +8,13 @@
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
use std::os::raw::c_uint;
use std::path::{
Path,
PathBuf,
use std::{
fs,
os::raw::c_uint,
path::{
Path,
PathBuf,
},
};
#[cfg(any(feature = "db-dup-sort", feature = "db-int-key"))]
@ -19,22 +22,24 @@ use crate::backend::{
BackendDatabaseFlags,
DatabaseFlags,
};
use crate::backend::{
BackendEnvironment,
BackendEnvironmentBuilder,
BackendInfo,
BackendRoCursorTransaction,
BackendRwCursorTransaction,
BackendStat,
SafeModeError,
use crate::{
backend::{
BackendEnvironment,
BackendEnvironmentBuilder,
BackendRoCursorTransaction,
BackendRwCursorTransaction,
SafeModeError,
},
error::StoreError,
readwrite::{
Reader,
Writer,
},
store::{
single::SingleStore,
Options as StoreOptions,
},
};
use crate::error::StoreError;
use crate::readwrite::{
Reader,
Writer,
};
use crate::store::single::SingleStore;
use crate::store::Options as StoreOptions;
#[cfg(feature = "db-dup-sort")]
use crate::store::multi::MultiStore;
@ -49,7 +54,7 @@ use crate::store::integermulti::MultiIntegerStore;
pub static DEFAULT_MAX_DBS: c_uint = 5;
/// Wrapper around an `Environment` (e.g. an LMDB environment).
/// Wrapper around an `Environment` (e.g. such as an `LMDB` or `SafeMode` environment).
#[derive(Debug)]
pub struct Rkv<E> {
path: PathBuf,
@ -82,10 +87,6 @@ where
where
B: BackendEnvironmentBuilder<'e, Environment = E>,
{
if !path.is_dir() {
return Err(StoreError::DirectoryDoesNotExistError(path.into()));
}
let mut builder = B::new();
builder.set_max_dbs(max_dbs);
@ -98,16 +99,9 @@ where
where
B: BackendEnvironmentBuilder<'e, Environment = E>,
{
if !path.is_dir() {
return Err(StoreError::DirectoryDoesNotExistError(path.into()));
}
Ok(Rkv {
path: path.into(),
env: builder.open(path).map_err(|e| match e.into() {
StoreError::OtherError(2) => StoreError::DirectoryDoesNotExistError(path.into()),
e => e,
})?,
env: builder.open(path).map_err(|e| e.into())?,
})
}
}
@ -117,9 +111,14 @@ impl<'e, E> Rkv<E>
where
E: BackendEnvironment<'e>,
{
/// Return all created databases.
pub fn get_dbs(&self) -> Result<Vec<Option<String>>, StoreError> {
self.env.get_dbs().map_err(|e| e.into())
}
/// Create or Open an existing database in (&[u8] -> Single Value) mode.
/// Note: that create=true cannot be called concurrently with other operations
/// so if you are sure that the database exists, call this with create=false.
/// Note: that create=true cannot be called concurrently with other operations so if
/// you are sure that the database exists, call this with create=false.
pub fn open_single<'s, T>(
&self,
name: T,
@ -132,8 +131,8 @@ where
}
/// Create or Open an existing database in (Integer -> Single Value) mode.
/// Note: that create=true cannot be called concurrently with other operations
/// so if you are sure that the database exists, call this with create=false.
/// Note: that create=true cannot be called concurrently with other operations so if
/// you are sure that the database exists, call this with create=false.
#[cfg(feature = "db-int-key")]
pub fn open_integer<'s, T, K>(
&self,
@ -149,8 +148,8 @@ where
}
/// Create or Open an existing database in (&[u8] -> Multiple Values) mode.
/// Note: that create=true cannot be called concurrently with other operations
/// so if you are sure that the database exists, call this with create=false.
/// Note: that create=true cannot be called concurrently with other operations so if
/// you are sure that the database exists, call this with create=false.
#[cfg(feature = "db-dup-sort")]
pub fn open_multi<'s, T>(
&self,
@ -165,8 +164,8 @@ where
}
/// Create or Open an existing database in (Integer -> Multiple Values) mode.
/// Note: that create=true cannot be called concurrently with other operations
/// so if you are sure that the database exists, call this with create=false.
/// Note: that create=true cannot be called concurrently with other operations so if
/// you are sure that the database exists, call this with create=false.
#[cfg(all(feature = "db-dup-sort", feature = "db-int-key"))]
pub fn open_multi_integer<'s, T, K>(
&self,
@ -187,16 +186,20 @@ where
T: Into<Option<&'s str>>,
{
if opts.create {
self.env.create_db(name.into(), opts.flags).map_err(|e| match e.into() {
StoreError::LmdbError(lmdb::Error::BadRslot) => StoreError::open_during_transaction(),
StoreError::SafeModeError(SafeModeError::DbsIllegalOpen) => StoreError::open_during_transaction(),
e => e,
self.env.create_db(name.into(), opts.flags).map_err(|e| {
match e.into() {
StoreError::LmdbError(lmdb::Error::BadRslot) => StoreError::open_during_transaction(),
StoreError::SafeModeError(SafeModeError::DbsIllegalOpen) => StoreError::open_during_transaction(),
e => e,
}
})
} else {
self.env.open_db(name.into()).map_err(|e| match e.into() {
StoreError::LmdbError(lmdb::Error::BadRslot) => StoreError::open_during_transaction(),
StoreError::SafeModeError(SafeModeError::DbsIllegalOpen) => StoreError::open_during_transaction(),
e => e,
self.env.open_db(name.into()).map_err(|e| {
match e.into() {
StoreError::LmdbError(lmdb::Error::BadRslot) => StoreError::open_during_transaction(),
StoreError::SafeModeError(SafeModeError::DbsIllegalOpen) => StoreError::open_during_transaction(),
e => e,
}
})
}
}
@ -207,9 +210,9 @@ impl<'e, E> Rkv<E>
where
E: BackendEnvironment<'e>,
{
/// Create a read transaction. There can be multiple concurrent readers
/// for an environment, up to the maximum specified by LMDB (default 126),
/// and you can open readers while a write transaction is active.
/// Create a read transaction. There can be multiple concurrent readers for an
/// environment, up to the maximum specified by LMDB (default 126), and you can open
/// readers while a write transaction is active.
pub fn read<T>(&'e self) -> Result<Reader<T>, StoreError>
where
E: BackendEnvironment<'e, RoTransaction = T>,
@ -218,9 +221,9 @@ where
Ok(Reader::new(self.env.begin_ro_txn().map_err(|e| e.into())?))
}
/// Create a write transaction. There can be only one write transaction
/// active at any given time, so trying to create a second one will block
/// until the first is committed or aborted.
/// Create a write transaction. There can be only one write transaction active at any
/// given time, so trying to create a second one will block until the first is
/// committed or aborted.
pub fn write<T>(&'e self) -> Result<Writer<T>, StoreError>
where
E: BackendEnvironment<'e, RwTransaction = T>,
@ -235,18 +238,18 @@ impl<'e, E> Rkv<E>
where
E: BackendEnvironment<'e>,
{
/// Flush the data buffers to disk. This call is only useful, when the environment
/// was open with either `NO_SYNC`, `NO_META_SYNC` or `MAP_ASYNC` (see below).
/// The call is not valid if the environment was opened with `READ_ONLY`.
/// Flush the data buffers to disk. This call is only useful, when the environment was
/// open with either `NO_SYNC`, `NO_META_SYNC` or `MAP_ASYNC` (see below). The call is
/// not valid if the environment was opened with `READ_ONLY`.
///
/// Data is always written to disk when `transaction.commit()` is called,
/// but the operating system may keep it buffered.
/// LMDB always flushes the OS buffers upon commit as well,
/// unless the environment was opened with `NO_SYNC` or in part `NO_META_SYNC`.
/// Data is always written to disk when `transaction.commit()` is called, but the
/// operating system may keep it buffered. LMDB always flushes the OS buffers upon
/// commit as well, unless the environment was opened with `NO_SYNC` or in part
/// `NO_META_SYNC`.
///
/// `force`: if true, force a synchronous flush.
/// Otherwise if the environment has the `NO_SYNC` flag set the flushes will be omitted,
/// and with `MAP_ASYNC` they will be asynchronous.
/// `force`: if true, force a synchronous flush. Otherwise if the environment has the
/// `NO_SYNC` flag set the flushes will be omitted, and with `MAP_ASYNC` they will
/// be asynchronous.
pub fn sync(&self, force: bool) -> Result<(), StoreError> {
self.env.sync(force).map_err(|e| e.into())
}
@ -278,41 +281,46 @@ where
/// Retrieve the load ratio (# of used pages / total pages) about this environment.
///
/// With the formular: (last_page_no - freelist_pages) / total_pages
pub fn load_ratio(&self) -> Result<f32, StoreError> {
let stat = self.stat()?;
let info = self.info()?;
let freelist = self.env.freelist().map_err(|e| e.into())?;
let last_pgno = info.last_pgno() + 1; // pgno is 0 based.
let total_pgs = info.map_size() / stat.page_size();
if freelist > last_pgno {
return Err(StoreError::DatabaseCorrupted);
}
let used_pgs = last_pgno - freelist;
Ok(used_pgs as f32 / total_pgs as f32)
/// With the formular: (last_page_no - freelist_pages) / total_pages.
/// A value of `None` means that the backend doesn't ever need to be resized.
pub fn load_ratio(&self) -> Result<Option<f32>, StoreError> {
self.env.load_ratio().map_err(|e| e.into())
}
/// Sets the size of the memory map to use for the environment.
///
/// This can be used to resize the map when the environment is already open.
/// You can also use `Rkv::environment_builder()` to set the map size during
/// the `Rkv` initialization.
/// This can be used to resize the map when the environment is already open. You can
/// also use `Rkv::environment_builder()` to set the map size during the `Rkv`
/// initialization.
///
/// Note:
///
/// * No active transactions allowed when performing resizing in this process.
/// It's up to the consumer to enforce that.
/// * No active transactions allowed when performing resizing in this process. It's up
/// to the consumer to enforce that.
///
/// * The size should be a multiple of the OS page size. Any attempt to set
/// a size smaller than the space already consumed by the environment will
/// be silently changed to the current size of the used space.
/// * The size should be a multiple of the OS page size. Any attempt to set a size
/// smaller than the space already consumed by the environment will be silently
/// changed to the current size of the used space.
///
/// * In the multi-process case, once a process resizes the map, other
/// processes need to either re-open the environment, or call set_map_size
/// with size 0 to update the environment. Otherwise, new transaction creation
/// will fail with `LmdbError::MapResized`.
/// * In the multi-process case, once a process resizes the map, other processes need
/// to either re-open the environment, or call set_map_size with size 0 to update
/// the environment. Otherwise, new transaction creation will fail with
/// `LmdbError::MapResized`.
pub fn set_map_size(&self, size: usize) -> Result<(), StoreError> {
self.env.set_map_size(size).map_err(Into::into)
}
/// Closes this environment and deletes all its files from disk. Doesn't delete the
/// folder used when opening the environment.
pub fn close_and_delete(self) -> Result<(), StoreError> {
let files = self.env.get_files_on_disk();
self.sync(true)?;
drop(self);
for file in files {
fs::remove_file(file)?;
}
Ok(())
}
}

View File

@ -8,12 +8,14 @@
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
use std::io;
use std::num;
use std::path::PathBuf;
use std::str;
use std::thread;
use std::thread::ThreadId;
use std::{
io,
path::PathBuf,
str,
sync,
thread,
thread::ThreadId,
};
use failure::Fail;
@ -55,6 +57,9 @@ impl From<Box<bincode::ErrorKind>> for DataError {
#[derive(Debug, Fail)]
pub enum StoreError {
#[fail(display = "manager poisoned")]
ManagerPoisonError,
#[fail(display = "database corrupted")]
DatabaseCorrupted,
@ -79,8 +84,8 @@ pub enum StoreError {
#[fail(display = "I/O error: {:?}", _0)]
IoError(io::Error),
#[fail(display = "directory does not exist or not a directory: {:?}", _0)]
DirectoryDoesNotExistError(PathBuf),
#[fail(display = "environment path does not exist or not the right type: {:?}", _0)]
UnsuitableEnvironmentPath(PathBuf),
#[fail(display = "data error: {:?}", _0)]
DataError(DataError),
@ -96,9 +101,6 @@ pub enum StoreError {
#[fail(display = "attempted to open DB during transaction in thread {:?}", _0)]
OpenAttemptedDuringTransaction(ThreadId),
#[fail(display = "other backing store error: {}", _0)]
OtherError(i32),
}
impl StoreError {
@ -123,101 +125,35 @@ impl From<io::Error> for StoreError {
}
}
impl<T> From<sync::PoisonError<T>> for StoreError {
fn from(_: sync::PoisonError<T>) -> StoreError {
StoreError::ManagerPoisonError
}
}
#[derive(Debug, Fail)]
pub enum MigrateError {
#[fail(display = "database not found: {:?}", _0)]
DatabaseNotFound(String),
#[fail(display = "store error: {}", _0)]
StoreError(StoreError),
#[fail(display = "{}", _0)]
FromString(String),
#[fail(display = "manager poisoned")]
ManagerPoisonError,
#[fail(display = "couldn't determine bit depth")]
IndeterminateBitDepth,
#[fail(display = "source is empty")]
SourceEmpty,
#[fail(display = "I/O error: {:?}", _0)]
IoError(io::Error),
#[fail(display = "invalid DatabaseFlags bits")]
InvalidDatabaseBits,
#[fail(display = "invalid data version")]
InvalidDataVersion,
#[fail(display = "invalid magic number")]
InvalidMagicNum,
#[fail(display = "invalid NodeFlags bits")]
InvalidNodeBits,
#[fail(display = "invalid PageFlags bits")]
InvalidPageBits,
#[fail(display = "invalid page number")]
InvalidPageNum,
#[fail(display = "lmdb backend error: {}", _0)]
LmdbError(lmdb::Error),
#[fail(display = "safe mode backend error: {}", _0)]
SafeModeError(SafeModeError),
#[fail(display = "string conversion error")]
StringConversionError,
#[fail(display = "TryFromInt error: {:?}", _0)]
TryFromIntError(num::TryFromIntError),
#[fail(display = "unexpected Page variant")]
UnexpectedPageVariant,
#[fail(display = "unexpected PageHeader variant")]
UnexpectedPageHeaderVariant,
#[fail(display = "unsupported PageHeader variant")]
UnsupportedPageHeaderVariant,
#[fail(display = "UTF8 error: {:?}", _0)]
Utf8Error(str::Utf8Error),
#[fail(display = "destination is not empty")]
DestinationNotEmpty,
}
impl From<io::Error> for MigrateError {
fn from(e: io::Error) -> MigrateError {
MigrateError::IoError(e)
impl From<StoreError> for MigrateError {
fn from(e: StoreError) -> MigrateError {
MigrateError::StoreError(e)
}
}
impl From<str::Utf8Error> for MigrateError {
fn from(e: str::Utf8Error) -> MigrateError {
MigrateError::Utf8Error(e)
}
}
impl From<num::TryFromIntError> for MigrateError {
fn from(e: num::TryFromIntError) -> MigrateError {
MigrateError::TryFromIntError(e)
}
}
impl From<&str> for MigrateError {
fn from(e: &str) -> MigrateError {
MigrateError::FromString(e.to_string())
}
}
impl From<String> for MigrateError {
fn from(e: String) -> MigrateError {
MigrateError::FromString(e)
}
}
impl From<lmdb::Error> for MigrateError {
fn from(e: lmdb::Error) -> MigrateError {
MigrateError::LmdbError(e)
}
}
impl From<SafeModeError> for MigrateError {
fn from(e: SafeModeError) -> MigrateError {
MigrateError::SafeModeError(e)
impl<T> From<sync::PoisonError<T>> for MigrateError {
fn from(_: sync::PoisonError<T>) -> MigrateError {
MigrateError::ManagerPoisonError
}
}

View File

@ -8,21 +8,24 @@
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
use std::io;
use std::path::{
Path,
PathBuf,
use std::{
io,
path::{
Path,
PathBuf,
},
};
use url::Url;
use crate::error::StoreError;
use crate::value::Value;
use crate::{
error::StoreError,
value::Value,
};
pub(crate) fn read_transform(value: Result<&[u8], StoreError>) -> Result<Option<Value>, StoreError> {
pub(crate) fn read_transform(value: Result<&[u8], StoreError>) -> Result<Value, StoreError> {
match value {
Ok(bytes) => Value::from_tagged_slice(bytes).map(Some).map_err(StoreError::DataError),
Err(StoreError::KeyValuePairNotFound) => Ok(None),
Ok(bytes) => Value::from_tagged_slice(bytes).map_err(StoreError::DataError),
Err(e) => Err(e),
}
}
@ -36,8 +39,8 @@ where
let canonical = path.into().canonicalize()?;
Ok(if cfg!(target_os = "windows") {
let url = Url::from_file_path(&canonical).map_err(|_| io::Error::new(io::ErrorKind::Other, "passing error"))?;
url.to_file_path().map_err(|_| io::Error::new(io::ErrorKind::Other, "path canonicalization error"))?
let map_err = |_| io::Error::new(io::ErrorKind::Other, "path canonicalization error");
Url::from_file_path(&canonical).and_then(|url| url.to_file_path()).map_err(map_err)?
} else {
canonical
})

View File

@ -8,27 +8,32 @@
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
//! a simple, humane, typed Rust interface to [LMDB](http://www.lmdb.tech/doc/)
//! A simple, humane, typed key-value storage solution. It supports multiple backend
//! engines with varying guarantees, such as [LMDB](http://www.lmdb.tech/doc/) for
//! performance, or "SafeMode" for reliability.
//!
//! It aims to achieve the following:
//!
//! - Avoid LMDB's sharp edges (e.g., obscure error codes for common situations).
//! - Avoid sharp edges (e.g., obscure error codes for common situations).
//! - Report errors via [failure](https://docs.rs/failure/).
//! - Correctly restrict access to one handle per process via a [Manager](struct.Manager.html).
//! - Use Rust's type system to make single-typed key stores (including LMDB's own integer-keyed stores)
//! safe and ergonomic.
//! - Correctly restrict access to one handle per process via a
//! [Manager](struct.Manager.html).
//! - Use Rust's type system to make single-typed key stores safe and ergonomic.
//! - Encode and decode values via [bincode](https://docs.rs/bincode/)/[serde](https://docs.rs/serde/)
//! and type tags, achieving platform-independent storage and input/output flexibility.
//!
//! It exposes these primary abstractions:
//!
//! - [Manager](struct.Manager.html): a singleton that controls access to LMDB environments
//! - [Rkv](struct.Rkv.html): an LMDB environment that contains a set of key/value databases
//! - [SingleStore](store/single/struct.SingleStore.html): an LMDB database that contains a set of key/value pairs
//! - [Manager](struct.Manager.html): a singleton that controls access to environments
//! - [Rkv](struct.Rkv.html): an environment contains a set of key/value databases
//! - [SingleStore](store/single/struct.SingleStore.html): a database contains a set of
//! key/value pairs
//!
//! Keys can be anything that implements `AsRef<[u8]>` or integers
//! (when accessing an [IntegerStore](store/integer/struct.IntegerStore.html)).
//! Values can be any of the types defined by the [Value](value/enum.Value.html) enum, including:
//!
//! Values can be any of the types defined by the [Value](value/enum.Value.html) enum,
//! including:
//!
//! - booleans (`Value::Bool`)
//! - integers (`Value::I64`, `Value::U64`)
@ -45,8 +50,8 @@
//! use std::fs;
//! use tempfile::Builder;
//!
//! // First determine the path to the environment, which is represented
//! // on disk as a directory containing two files:
//! // First determine the path to the environment, which is represented on disk as a
//! // directory containing two files:
//! //
//! // * a data file containing the key/value stores
//! // * a lock file containing metadata about current transactions
@ -57,10 +62,9 @@
//! fs::create_dir_all(root.path()).unwrap();
//! let path = root.path();
//!
//! // The Manager enforces that each process opens the same environment
//! // at most once by caching a handle to each environment that it opens.
//! // Use it to retrieve the handle to an opened environment—or create one
//! // if it hasn't already been opened:
//! // The `Manager` enforces that each process opens the same environment at most once by
//! // caching a handle to each environment that it opens. Use it to retrieve the handle
//! // to an opened environment—or create one if it hasn't already been opened:
//! let mut manager = Manager::<LmdbEnvironment>::singleton().write().unwrap();
//! let created_arc = manager.get_or_create(path, Rkv::new::<Lmdb>).unwrap();
//! let env = created_arc.read().unwrap();
@ -69,15 +73,15 @@
//! let store = env.open_single("mydb", StoreOptions::create()).unwrap();
//!
//! {
//! // Use a write transaction to mutate the store via a `Writer`.
//! // There can be only one writer for a given environment, so opening
//! // a second one will block until the first completes.
//! // Use a write transaction to mutate the store via a `Writer`. There can be only
//! // one writer for a given environment, so opening a second one will block until
//! // the first completes.
//! let mut writer = env.write().unwrap();
//!
//! // Keys are `AsRef<[u8]>`, while values are `Value` enum instances.
//! // Use the `Blob` variant to store arbitrary collections of bytes.
//! // Putting data returns a `Result<(), StoreError>`, where StoreError
//! // is an enum identifying the reason for a failure.
//! // Keys are `AsRef<[u8]>`, while values are `Value` enum instances. Use the `Blob`
//! // variant to store arbitrary collections of bytes. Putting data returns a
//! // `Result<(), StoreError>`, where StoreError is an enum identifying the reason
//! // for a failure.
//! store.put(&mut writer, "int", &Value::I64(1234)).unwrap();
//! store.put(&mut writer, "uint", &Value::U64(1234_u64)).unwrap();
//! store.put(&mut writer, "float", &Value::F64(1234.0.into())).unwrap();
@ -87,15 +91,15 @@
//! store.put(&mut writer, "json", &Value::Json(r#"{"foo":"bar", "number": 1}"#)).unwrap();
//! store.put(&mut writer, "blob", &Value::Blob(b"blob")).unwrap();
//!
//! // You must commit a write transaction before the writer goes out
//! // of scope, or the transaction will abort and the data won't persist.
//! // You must commit a write transaction before the writer goes out of scope, or the
//! // transaction will abort and the data won't persist.
//! writer.commit().unwrap();
//! }
//!
//! {
//! // Use a read transaction to query the store via a `Reader`.
//! // There can be multiple concurrent readers for a store, and readers
//! // never block on a writer nor other readers.
//! // Use a read transaction to query the store via a `Reader`. There can be multiple
//! // concurrent readers for a store, and readers never block on a writer nor other
//! // readers.
//! let reader = env.read().expect("reader");
//!
//! // Keys are `AsRef<u8>`, and the return value is `Result<Option<Value>, StoreError>`.
@ -111,9 +115,9 @@
//! // Retrieving a non-existent value returns `Ok(None)`.
//! println!("Get non-existent value {:?}", store.get(&reader, "non-existent").unwrap());
//!
//! // A read transaction will automatically close once the reader
//! // goes out of scope, so isn't necessary to close it explicitly,
//! // although you can do so by calling `Reader.abort()`.
//! // A read transaction will automatically close once the reader goes out of scope,
//! // so isn't necessary to close it explicitly, although you can do so by calling
//! // `Reader.abort()`.
//! }
//!
//! {
@ -126,9 +130,9 @@
//! }
//!
//! {
//! // Explicitly aborting a transaction is not required unless an early
//! // abort is desired, since both read and write transactions will
//! // implicitly be aborted once they go out of scope.
//! // Explicitly aborting a transaction is not required unless an early abort is
//! // desired, since both read and write transactions will implicitly be aborted once
//! // they go out of scope.
//! {
//! let mut writer = env.write().unwrap();
//! store.put(&mut writer, "foo", &Value::Str("bar")).unwrap();
@ -144,18 +148,17 @@
//! store.put(&mut writer, "bar", &Value::Str("baz")).unwrap();
//! store.delete(&mut writer, "foo").unwrap();
//!
//! // A write transaction also supports reading, and the version of the
//! // store that it reads includes the changes it has made regardless of
//! // the commit state of that transaction.
//! // A write transaction also supports reading, and the version of the store that it
//! // reads includes the changes it has made regardless of the commit state of that
//! // transaction.
//! // In the code above, "foo" and "bar" were put into the store,
//! // then "foo" was deleted so only "bar" will return a result when the
//! // database is queried via the writer.
//! // In the code above, "foo" and "bar" were put into the store, then "foo" was
//! // deleted so only "bar" will return a result when the database is queried via the
//! // writer.
//! println!("It should be None! ({:?})", store.get(&writer, "foo").unwrap());
//! println!("Get bar ({:?})", store.get(&writer, "bar").unwrap());
//!
//! // But a reader won't see that change until the write transaction
//! // is committed.
//! // But a reader won't see that change until the write transaction is committed.
//! {
//! let reader = env.read().expect("reader");
//! println!("Get foo {:?}", store.get(&reader, "foo").unwrap());
@ -168,9 +171,9 @@
//! println!("Get bar {:?}", store.get(&reader, "bar").unwrap());
//! }
//!
//! // Committing a transaction consumes the writer, preventing you
//! // from reusing it by failing at compile time with an error.
//! // This line would report error[E0382]: borrow of moved value: `writer`.
//! // Committing a transaction consumes the writer, preventing you from reusing it by
//! // failing at compile time with an error. This line would report "error[E0382]:
//! // borrow of moved value: `writer`".
//! // store.put(&mut writer, "baz", &Value::Str("buz")).unwrap();
//! }
//!
@ -206,7 +209,7 @@ mod manager;
mod readwrite;
pub mod backend;
pub mod migrate;
pub mod migrator;
pub mod store;
pub mod value;
@ -222,14 +225,17 @@ pub use error::{
StoreError,
};
pub use manager::Manager;
pub use migrator::Migrator;
pub use readwrite::{
Readable,
Reader,
Writer,
};
pub use store::keys::EncodableKey;
pub use store::single::SingleStore;
pub use store::Options as StoreOptions;
pub use store::{
keys::EncodableKey,
single::SingleStore,
Options as StoreOptions,
};
pub use value::{
OwnedValue,
Value,

View File

@ -8,46 +8,64 @@
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
use std::collections::btree_map::Entry;
use std::collections::BTreeMap;
use std::os::raw::c_uint;
use std::path::{
Path,
PathBuf,
};
use std::result;
use std::sync::{
Arc,
RwLock,
use std::{
collections::{
btree_map::Entry,
BTreeMap,
},
os::raw::c_uint,
path::{
Path,
PathBuf,
},
result,
sync::{
Arc,
RwLock,
},
};
use lazy_static::lazy_static;
use crate::backend::{
LmdbEnvironment,
SafeModeEnvironment,
use crate::{
backend::{
BackendEnvironment,
BackendEnvironmentBuilder,
LmdbEnvironment,
SafeModeEnvironment,
},
error::StoreError,
helpers::canonicalize_path,
Rkv,
};
use crate::error::StoreError;
use crate::helpers::canonicalize_path;
use crate::Rkv;
type Result<T> = result::Result<T, StoreError>;
type SharedRkv<E> = Arc<RwLock<Rkv<E>>>;
lazy_static! {
/// A process is only permitted to have one open handle to each Rkv environment.
/// This manager exists to enforce that constraint: don't open environments directly.
static ref MANAGER_LMDB: RwLock<Manager<LmdbEnvironment>> = RwLock::new(Manager::new());
static ref MANAGER_SAFE_MODE: RwLock<Manager<SafeModeEnvironment>> = RwLock::new(Manager::new());
}
/// A process is only permitted to have one open handle to each Rkv environment.
/// This manager exists to enforce that constraint: don't open environments directly.
/// A process is only permitted to have one open handle to each Rkv environment. This
/// manager exists to enforce that constraint: don't open environments directly.
///
/// By default, path canonicalization is enabled for identifying RKV instances.
/// Canonicalization helps enforce the constraints guaranteed by this manager.
/// However, it might crash in some fringe circumstances, so the
/// `no-canonicalize-path` feature offers the possibility of disabling it.
/// See: https://bugzilla.mozilla.org/show_bug.cgi?id=1531887
///
/// When path canonicalization is disabled, you *must* ensure an RKV environment is
/// always created or retrieved with the same path.
pub struct Manager<E> {
environments: BTreeMap<PathBuf, SharedRkv<E>>,
}
impl<E> Manager<E> {
impl<'e, E> Manager<E>
where
E: BackendEnvironment<'e>,
{
fn new() -> Manager<E> {
Manager {
environments: Default::default(),
@ -59,7 +77,11 @@ impl<E> Manager<E> {
where
P: Into<&'p Path>,
{
let canonical = canonicalize_path(path)?;
let canonical = if cfg!(feature = "no-canonicalize-path") {
path.into().to_path_buf()
} else {
canonicalize_path(path)?
};
Ok(self.environments.get(&canonical).cloned())
}
@ -69,7 +91,11 @@ impl<E> Manager<E> {
F: FnOnce(&Path) -> Result<Rkv<E>>,
P: Into<&'p Path>,
{
let canonical = canonicalize_path(path)?;
let canonical = if cfg!(feature = "no-canonicalize-path") {
path.into().to_path_buf()
} else {
canonicalize_path(path)?
};
Ok(match self.environments.entry(canonical) {
Entry::Occupied(e) => e.get().clone(),
Entry::Vacant(e) => {
@ -79,14 +105,17 @@ impl<E> Manager<E> {
})
}
/// Return the open env at `path` with capacity `capacity`,
/// or create it by calling `f`.
/// Return the open env at `path` with `capacity`, or create it by calling `f`.
pub fn get_or_create_with_capacity<'p, F, P>(&mut self, path: P, capacity: c_uint, f: F) -> Result<SharedRkv<E>>
where
F: FnOnce(&Path, c_uint) -> Result<Rkv<E>>,
P: Into<&'p Path>,
{
let canonical = canonicalize_path(path)?;
let canonical = if cfg!(feature = "no-canonicalize-path") {
path.into().to_path_buf()
} else {
canonicalize_path(path)?
};
Ok(match self.environments.entry(canonical) {
Entry::Occupied(e) => e.get().clone(),
Entry::Vacant(e) => {
@ -95,6 +124,52 @@ impl<E> Manager<E> {
},
})
}
/// Return a new Rkv environment from the builder, or create it by calling `f`.
pub fn get_or_create_from_builder<'p, F, P, B>(&mut self, path: P, builder: B, f: F) -> Result<SharedRkv<E>>
where
F: FnOnce(&Path, B) -> Result<Rkv<E>>,
P: Into<&'p Path>,
B: BackendEnvironmentBuilder<'e, Environment = E>,
{
let canonical = if cfg!(feature = "no-canonicalize-path") {
path.into().to_path_buf()
} else {
canonicalize_path(path)?
};
Ok(match self.environments.entry(canonical) {
Entry::Occupied(e) => e.get().clone(),
Entry::Vacant(e) => {
let k = Arc::new(RwLock::new(f(e.key().as_path(), builder)?));
e.insert(k).clone()
},
})
}
/// Tries to close the specified environment and delete all its files from disk.
/// Doesn't delete the folder used when opening the environment.
/// This will only work if there's no other users of this environment.
pub fn try_close_and_delete<'p, P>(&mut self, path: P) -> Result<()>
where
P: Into<&'p Path>,
{
let canonical = if cfg!(feature = "no-canonicalize-path") {
path.into().to_path_buf()
} else {
canonicalize_path(path)?
};
match self.environments.entry(canonical) {
Entry::Vacant(_) => {}, // noop
Entry::Occupied(e) => {
if Arc::strong_count(e.get()) == 1 {
if let Ok(env) = Arc::try_unwrap(e.remove()) {
env.into_inner()?.close_and_delete()?;
}
}
},
}
Ok(())
}
}
impl Manager<LmdbEnvironment> {
@ -111,12 +186,13 @@ impl Manager<SafeModeEnvironment> {
#[cfg(test)]
mod tests {
use std::fs;
use tempfile::Builder;
use super::*;
use crate::*;
use std::fs;
use tempfile::Builder;
use backend::Lmdb;
/// Test that one can mutate managed Rkv instances in surprising ways.
@ -129,8 +205,8 @@ mod tests {
let path1 = root1.path();
let arc = manager.get_or_create(path1, Rkv::new::<Lmdb>).expect("created");
// Arc<RwLock<>> has interior mutability, so we can replace arc's Rkv
// instance with a new instance that has a different path.
// Arc<RwLock<>> has interior mutability, so we can replace arc's Rkv instance with a new
// instance that has a different path.
let root2 = Builder::new().prefix("test_mutate_managed_rkv_2").tempdir().expect("tempdir");
fs::create_dir_all(root2.path()).expect("dir created");
let path2 = root2.path();
@ -140,14 +216,13 @@ mod tests {
*rkv = rkv2;
}
// arc now has a different internal Rkv with path2, but it's still
// mapped to path1 in manager, so its pointer is equal to a new Arc
// for path1.
// Arc now has a different internal Rkv with path2, but it's still mapped to path1 in
// manager, so its pointer is equal to a new Arc for path1.
let path1_arc = manager.get(path1).expect("success").expect("existed");
assert!(Arc::ptr_eq(&path1_arc, &arc));
// Meanwhile, a new Arc for path2 has a different pointer, even though
// its Rkv's path is the same as arc's current path.
// Meanwhile, a new Arc for path2 has a different pointer, even though its Rkv's path is
// the same as arc's current path.
let path2_arc = manager.get_or_create(path2, Rkv::new::<Lmdb>).expect("success");
assert!(!Arc::ptr_eq(&path2_arc, &arc));
}

168
third_party/rust/rkv/src/migrator.rs vendored Normal file
View File

@ -0,0 +1,168 @@
// Copyright 2018-2019 Mozilla
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
//! A simple utility for migrating data from one RKV environment to another. Notably, this
//! tool can migrate data from an environment created with a different backend than the
//! current RKV consumer (e.g from Lmdb to SafeMode).
//!
//! The utility doesn't support migrating between 32-bit and 64-bit LMDB environments yet,
//! see `arch_migrator` if this is needed. However, this utility is ultimately intended to
//! handle all possible migrations.
//!
//! The destination environment should be empty of data, otherwise an error is returned.
//!
//! There are 3 versions of the migration methods:
//! * `migrate_<src>_to_<dst>`, where `<src>` and `<dst>` are the source and destination
//! environment types. You're responsible for opening both these environments, handling
//! all errors, and performing any cleanup if necessary.
//! * `open_and_migrate_<src>_to_<dst>`, which is similar to the above, but automatically
//! attempts to open the source environment and delete all of its supporting files if
//! there's no other environment open at that path. You're still responsible for
//! handling all errors.
//! * `easy_migrate_<src>_to_<dst>` which is similar to the above, but ignores the
//! migration and doesn't delete any files if the source environment is invalid
//! (corrupted), unavailable (path not found or incompatible with configuration), or
//! empty (database has no records).
//!
//! The tool currently has these limitations:
//!
//! 1. It doesn't support migration from environments created with
//! `EnvironmentFlags::NO_SUB_DIR`. To migrate such an environment, create a temporary
//! directory, copy the environment's data files in the temporary directory, then
//! migrate the temporary directory as the source environment.
//! 2. It doesn't support migration from databases created with `DatabaseFlags::DUP_SORT`
//! (with or without `DatabaseFlags::DUP_FIXED`) nor with `DatabaseFlags::INTEGER_KEY`.
//! This effectively means that migration is limited to `SingleStore`s.
//! 3. It doesn't allow for existing data in the destination environment, which means that
//! it cannot overwrite nor append data.
use crate::{
backend::{
LmdbEnvironment,
SafeModeEnvironment,
},
error::MigrateError,
Rkv,
StoreOptions,
};
pub use crate::backend::{
LmdbArchMigrateError,
LmdbArchMigrateResult,
LmdbArchMigrator,
};
// FIXME: should parametrize this instead.
macro_rules! fn_migrator {
($name:tt, $src_env:ty, $dst_env:ty) => {
/// Migrate all data in all of databases from the source environment to the destination
/// environment. This includes all key/value pairs in the main database that aren't
/// metadata about subdatabases and all key/value pairs in all subdatabases.
///
/// Other backend-specific metadata, such as map size or maximum databases, is left intact
/// on the given environments.
///
/// The destination environment should be empty of data, otherwise an error is returned.
pub fn $name<S, D>(src_env: S, dst_env: D) -> Result<(), MigrateError>
where
S: std::ops::Deref<Target = Rkv<$src_env>>,
D: std::ops::Deref<Target = Rkv<$dst_env>>,
{
let src_dbs = src_env.get_dbs().unwrap();
if src_dbs.is_empty() {
return Err(MigrateError::SourceEmpty);
}
let dst_dbs = dst_env.get_dbs().unwrap();
if !dst_dbs.is_empty() {
return Err(MigrateError::DestinationNotEmpty);
}
for name in src_dbs {
let src_store = src_env.open_single(name.as_deref(), StoreOptions::default())?;
let dst_store = dst_env.open_single(name.as_deref(), StoreOptions::create())?;
let reader = src_env.read()?;
let mut writer = dst_env.write()?;
let mut iter = src_store.iter_start(&reader)?;
while let Some(Ok((key, value))) = iter.next() {
dst_store.put(&mut writer, key, &value).expect("wrote");
}
writer.commit()?;
}
Ok(())
}
};
(open $migrate:tt, $name:tt, $builder:tt, $src_env:ty, $dst_env:ty) => {
/// Same as the non-`open_*` migration method, but automatically attempts to open the
/// source environment. Finally, deletes all of its supporting files if there's no other
/// environment open at that path.
pub fn $name<F, D>(path: &std::path::Path, build: F, dst_env: D) -> Result<(), MigrateError>
where
F: FnOnce(crate::backend::$builder) -> crate::backend::$builder,
D: std::ops::Deref<Target = Rkv<$dst_env>>,
{
use crate::backend::*;
let mut manager = crate::Manager::<$src_env>::singleton().write()?;
let mut builder = Rkv::<$src_env>::environment_builder::<$builder>();
builder.set_max_dbs(crate::env::DEFAULT_MAX_DBS);
builder = build(builder);
let src_env = manager.get_or_create_from_builder(path, builder, Rkv::from_builder::<$builder>)?;
Migrator::$migrate(src_env.read()?, dst_env)?;
drop(src_env);
manager.try_close_and_delete(path)?;
Ok(())
}
};
(easy $migrate:tt, $name:tt, $src_env:ty, $dst_env:ty) => {
/// Same as the `open_*` migration method, but ignores the migration and doesn't delete
/// any files if the source environment is invalid (corrupted), unavailable, or empty.
pub fn $name<D>(path: &std::path::Path, dst_env: D) -> Result<(), MigrateError>
where
D: std::ops::Deref<Target = Rkv<$dst_env>>,
{
match Migrator::$migrate(path, |builder| builder, dst_env) {
Err(crate::MigrateError::StoreError(crate::StoreError::FileInvalid)) => Ok(()),
Err(crate::MigrateError::StoreError(crate::StoreError::IoError(_))) => Ok(()),
Err(crate::MigrateError::StoreError(crate::StoreError::UnsuitableEnvironmentPath(_))) => Ok(()),
Err(crate::MigrateError::SourceEmpty) => Ok(()),
result => result,
}?;
Ok(())
}
};
}
macro_rules! fns_migrator {
($src:tt, $dst:tt) => {
paste::item! {
fns_migrator!([<migrate_ $src _to_ $dst>], $src, $dst);
fns_migrator!([<migrate_ $dst _to_ $src>], $dst, $src);
}
};
($name:tt, $src:tt, $dst:tt) => {
paste::item! {
fn_migrator!($name, [<$src:camel Environment>], [<$dst:camel Environment>]);
fn_migrator!(open $name, [<open_and_ $name>], [<$src:camel>], [<$src:camel Environment>], [<$dst:camel Environment>]);
fn_migrator!(easy [<open_and_ $name>], [<easy_ $name>], [<$src:camel Environment>], [<$dst:camel Environment>]);
}
};
}
pub struct Migrator;
impl Migrator {
fns_migrator!(lmdb, safe_mode);
}

View File

@ -8,17 +8,19 @@
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
use crate::backend::{
BackendDatabase,
BackendRoCursor,
BackendRoCursorTransaction,
BackendRoTransaction,
BackendRwCursorTransaction,
BackendRwTransaction,
use crate::{
backend::{
BackendDatabase,
BackendRoCursor,
BackendRoCursorTransaction,
BackendRoTransaction,
BackendRwCursorTransaction,
BackendRwTransaction,
},
error::StoreError,
helpers::read_transform,
value::Value,
};
use crate::error::StoreError;
use crate::helpers::read_transform;
use crate::value::Value;
pub struct Reader<T>(T);
pub struct Writer<T>(T);
@ -46,7 +48,10 @@ where
K: AsRef<[u8]>,
{
let bytes = self.0.get(db, k.as_ref()).map_err(|e| e.into());
read_transform(bytes)
match read_transform(bytes).map(Some) {
Err(StoreError::KeyValuePairNotFound) => Ok(None),
result => result,
}
}
fn open_ro_cursor(&'r self, db: &T::Database) -> Result<T::RoCursor, StoreError> {
@ -81,7 +86,10 @@ where
K: AsRef<[u8]>,
{
let bytes = self.0.get(db, k.as_ref()).map_err(|e| e.into());
read_transform(bytes)
match read_transform(bytes).map(Some) {
Err(StoreError::KeyValuePairNotFound) => Ok(None),
result => result,
}
}
fn open_ro_cursor(&'r self, db: &T::Database) -> Result<T::RoCursor, StoreError> {

View File

@ -10,21 +10,25 @@
use std::marker::PhantomData;
use crate::backend::{
BackendDatabase,
BackendRwTransaction,
use crate::{
backend::{
BackendDatabase,
BackendRwTransaction,
},
error::StoreError,
readwrite::{
Readable,
Writer,
},
store::{
keys::{
Key,
PrimitiveInt,
},
single::SingleStore,
},
value::Value,
};
use crate::error::StoreError;
use crate::readwrite::{
Readable,
Writer,
};
use crate::store::keys::{
Key,
PrimitiveInt,
};
use crate::store::single::SingleStore;
use crate::value::Value;
type EmptyResult = Result<(), StoreError>;
@ -77,12 +81,13 @@ where
#[cfg(test)]
mod tests {
use std::fs;
use tempfile::Builder;
use super::*;
use crate::*;
use std::fs;
use tempfile::Builder;
#[test]
fn test_integer_keys() {
let root = Builder::new().prefix("test_integer_keys").tempdir().expect("tempdir");
@ -310,12 +315,13 @@ mod tests {
#[cfg(test)]
mod tests_safe {
use std::fs;
use tempfile::Builder;
use super::*;
use crate::*;
use std::fs;
use tempfile::Builder;
#[test]
fn test_integer_keys() {
let root = Builder::new().prefix("test_integer_keys").tempdir().expect("tempdir");

View File

@ -10,26 +10,30 @@
use std::marker::PhantomData;
use crate::backend::{
BackendDatabase,
BackendIter,
BackendRoCursor,
BackendRwTransaction,
use crate::{
backend::{
BackendDatabase,
BackendIter,
BackendRoCursor,
BackendRwTransaction,
},
error::StoreError,
readwrite::{
Readable,
Writer,
},
store::{
keys::{
Key,
PrimitiveInt,
},
multi::{
Iter,
MultiStore,
},
},
value::Value,
};
use crate::error::StoreError;
use crate::readwrite::{
Readable,
Writer,
};
use crate::store::keys::{
Key,
PrimitiveInt,
};
use crate::store::multi::{
Iter,
MultiStore,
};
use crate::value::Value;
type EmptyResult = Result<(), StoreError>;
@ -106,12 +110,13 @@ where
#[cfg(test)]
mod tests {
use std::fs;
use tempfile::Builder;
use super::*;
use crate::*;
use std::fs;
use tempfile::Builder;
#[test]
fn test_integer_keys() {
let root = Builder::new().prefix("test_integer_keys").tempdir().expect("tempdir");
@ -214,8 +219,8 @@ mod tests {
s.put(&mut writer, 1, &Value::Str("hello1!")).expect("write");
let mut iter = s.get(&writer, 1).expect("read");
assert_eq!(iter.next().expect("first").expect("ok").1, Some(Value::Str("hello!")));
assert_eq!(iter.next().expect("second").expect("ok").1, Some(Value::Str("hello1!")));
assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello!"));
assert_eq!(iter.next().expect("second").expect("ok").1, Value::Str("hello1!"));
assert!(iter.next().is_none());
}
}
@ -235,8 +240,8 @@ mod tests {
s.put(&mut writer, 1, &Value::Str("hello1!")).expect("write");
{
let mut iter = s.get(&writer, 1).expect("read");
assert_eq!(iter.next().expect("first").expect("ok").1, Some(Value::Str("hello!")));
assert_eq!(iter.next().expect("second").expect("ok").1, Some(Value::Str("hello1!")));
assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello!"));
assert_eq!(iter.next().expect("second").expect("ok").1, Value::Str("hello1!"));
assert!(iter.next().is_none());
}
writer.commit().expect("committed");
@ -249,7 +254,7 @@ mod tests {
let reader = k.read().expect("reader");
let mut iter = s.get(&reader, 1).expect("read");
assert_eq!(iter.next().expect("first").expect("ok").1, Some(Value::Str("hello1!")));
assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello1!"));
assert!(iter.next().is_none());
}
@ -260,7 +265,7 @@ mod tests {
let reader = k.read().expect("reader");
let mut iter = s.get(&reader, 1).expect("read");
assert_eq!(iter.next().expect("first").expect("ok").1, Some(Value::Str("hello1!")));
assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello1!"));
assert!(iter.next().is_none());
}
@ -300,8 +305,8 @@ mod tests {
s.put(&mut writer, 2, &Value::Str("hello!")).expect("write");
{
let mut iter = s.get(&writer, 1).expect("read");
assert_eq!(iter.next().expect("first").expect("ok").1, Some(Value::Str("hello!")));
assert_eq!(iter.next().expect("second").expect("ok").1, Some(Value::Str("hello1!")));
assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello!"));
assert_eq!(iter.next().expect("second").expect("ok").1, Value::Str("hello1!"));
assert!(iter.next().is_none());
}
writer.commit().expect("committed");
@ -313,8 +318,8 @@ mod tests {
let reader = k.read().expect("reader");
let mut iter = s.get(&reader, 1).expect("read");
assert_eq!(iter.next().expect("first").expect("ok").1, Some(Value::Str("hello!")));
assert_eq!(iter.next().expect("second").expect("ok").1, Some(Value::Str("hello1!")));
assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello!"));
assert_eq!(iter.next().expect("second").expect("ok").1, Value::Str("hello1!"));
assert!(iter.next().is_none());
}
}
@ -322,12 +327,13 @@ mod tests {
#[cfg(test)]
mod tests_safe {
use std::fs;
use tempfile::Builder;
use super::*;
use crate::*;
use std::fs;
use tempfile::Builder;
#[test]
fn test_integer_keys() {
let root = Builder::new().prefix("test_integer_keys").tempdir().expect("tempdir");
@ -430,8 +436,8 @@ mod tests_safe {
s.put(&mut writer, 1, &Value::Str("hello1!")).expect("write");
let mut iter = s.get(&writer, 1).expect("read");
assert_eq!(iter.next().expect("first").expect("ok").1, Some(Value::Str("hello!")));
assert_eq!(iter.next().expect("second").expect("ok").1, Some(Value::Str("hello1!")));
assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello!"));
assert_eq!(iter.next().expect("second").expect("ok").1, Value::Str("hello1!"));
assert!(iter.next().is_none());
}
}
@ -451,8 +457,8 @@ mod tests_safe {
s.put(&mut writer, 1, &Value::Str("hello1!")).expect("write");
{
let mut iter = s.get(&writer, 1).expect("read");
assert_eq!(iter.next().expect("first").expect("ok").1, Some(Value::Str("hello!")));
assert_eq!(iter.next().expect("second").expect("ok").1, Some(Value::Str("hello1!")));
assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello!"));
assert_eq!(iter.next().expect("second").expect("ok").1, Value::Str("hello1!"));
assert!(iter.next().is_none());
}
writer.commit().expect("committed");
@ -465,7 +471,7 @@ mod tests_safe {
let reader = k.read().expect("reader");
let mut iter = s.get(&reader, 1).expect("read");
assert_eq!(iter.next().expect("first").expect("ok").1, Some(Value::Str("hello1!")));
assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello1!"));
assert!(iter.next().is_none());
}
@ -476,7 +482,7 @@ mod tests_safe {
let reader = k.read().expect("reader");
let mut iter = s.get(&reader, 1).expect("read");
assert_eq!(iter.next().expect("first").expect("ok").1, Some(Value::Str("hello1!")));
assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello1!"));
assert!(iter.next().is_none());
}
@ -516,8 +522,8 @@ mod tests_safe {
s.put(&mut writer, 2, &Value::Str("hello!")).expect("write");
{
let mut iter = s.get(&writer, 1).expect("read");
assert_eq!(iter.next().expect("first").expect("ok").1, Some(Value::Str("hello!")));
assert_eq!(iter.next().expect("second").expect("ok").1, Some(Value::Str("hello1!")));
assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello!"));
assert_eq!(iter.next().expect("second").expect("ok").1, Value::Str("hello1!"));
assert!(iter.next().is_none());
}
writer.commit().expect("committed");
@ -529,8 +535,8 @@ mod tests_safe {
let reader = k.read().expect("reader");
let mut iter = s.get(&reader, 1).expect("read");
assert_eq!(iter.next().expect("first").expect("ok").1, Some(Value::Str("hello!")));
assert_eq!(iter.next().expect("second").expect("ok").1, Some(Value::Str("hello1!")));
assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello!"));
assert_eq!(iter.next().expect("second").expect("ok").1, Value::Str("hello1!"));
assert!(iter.next().is_none());
}
}

View File

@ -10,20 +10,22 @@
use std::marker::PhantomData;
use crate::backend::{
BackendDatabase,
BackendFlags,
BackendIter,
BackendRoCursor,
BackendRwTransaction,
use crate::{
backend::{
BackendDatabase,
BackendFlags,
BackendIter,
BackendRoCursor,
BackendRwTransaction,
},
error::StoreError,
helpers::read_transform,
readwrite::{
Readable,
Writer,
},
value::Value,
};
use crate::error::StoreError;
use crate::helpers::read_transform;
use crate::readwrite::{
Readable,
Writer,
};
use crate::value::Value;
type EmptyResult = Result<(), StoreError>;
@ -47,7 +49,8 @@ where
}
}
/// Provides a cursor to all of the values for the duplicate entries that match this key
/// Provides a cursor to all of the values for the duplicate entries that match this
/// key
pub fn get<'r, R, I, C, K>(&self, reader: &'r R, k: K) -> Result<Iter<'r, I>, StoreError>
where
R: Readable<'r, Database = D, RoCursor = C>,
@ -120,14 +123,16 @@ impl<'i, I> Iterator for Iter<'i, I>
where
I: BackendIter<'i>,
{
type Item = Result<(&'i [u8], Option<Value<'i>>), StoreError>;
type Item = Result<(&'i [u8], Value<'i>), StoreError>;
fn next(&mut self) -> Option<Self::Item> {
match self.iter.next() {
None => None,
Some(Ok((key, bytes))) => match read_transform(Ok(bytes)) {
Ok(val) => Some(Ok((key, val))),
Err(err) => Some(Err(err)),
Some(Ok((key, bytes))) => {
match read_transform(Ok(bytes)) {
Ok(val) => Some(Ok((key, val))),
Err(err) => Some(Err(err)),
}
},
Some(Err(err)) => Some(Err(err.into())),
}

View File

@ -10,20 +10,22 @@
use std::marker::PhantomData;
use crate::backend::{
BackendDatabase,
BackendFlags,
BackendIter,
BackendRoCursor,
BackendRwTransaction,
use crate::{
backend::{
BackendDatabase,
BackendFlags,
BackendIter,
BackendRoCursor,
BackendRwTransaction,
},
error::StoreError,
helpers::read_transform,
readwrite::{
Readable,
Writer,
},
value::Value,
};
use crate::error::StoreError;
use crate::helpers::read_transform;
use crate::readwrite::{
Readable,
Writer,
};
use crate::value::Value;
type EmptyResult = Result<(), StoreError>;
@ -126,14 +128,16 @@ impl<'i, I> Iterator for Iter<'i, I>
where
I: BackendIter<'i>,
{
type Item = Result<(&'i [u8], Option<Value<'i>>), StoreError>;
type Item = Result<(&'i [u8], Value<'i>), StoreError>;
fn next(&mut self) -> Option<Self::Item> {
match self.iter.next() {
None => None,
Some(Ok((key, bytes))) => match read_transform(Ok(bytes)) {
Ok(val) => Some(Ok((key, val))),
Err(err) => Some(Err(err)),
Some(Ok((key, bytes))) => {
match read_transform(Ok(bytes)) {
Ok(val) => Some(Ok((key, val))),
Err(err) => Some(Err(err)),
}
},
Some(Err(err)) => Some(Err(err.into())),
}

View File

@ -24,10 +24,9 @@ use uuid::{
use crate::error::DataError;
/// We define a set of types, associated with simple integers, to annotate values
/// stored in LMDB. This is to avoid an accidental 'cast' from a value of one type
/// to another. For this reason we don't simply use `deserialize` from the `bincode`
/// crate.
/// We define a set of types, associated with simple integers, to annotate values stored
/// in LMDB. This is to avoid an accidental 'cast' from a value of one type to another.
/// For this reason we don't simply use `deserialize` from the `bincode` crate.
#[repr(u8)]
#[derive(Debug, PartialEq, Eq)]
pub enum Type {
@ -129,9 +128,11 @@ impl<'v> Value<'v> {
fn from_type_and_data(t: Type, data: &'v [u8]) -> Result<Value<'v>, DataError> {
if t == Type::Uuid {
return deserialize(data)
.map_err(|e| DataError::DecodingError {
value_type: t,
err: e,
.map_err(|e| {
DataError::DecodingError {
value_type: t,
err: e,
}
})
.map(uuid)?;
}
@ -150,9 +151,11 @@ impl<'v> Value<'v> {
unreachable!()
},
}
.map_err(|e| DataError::DecodingError {
value_type: t,
err: e,
.map_err(|e| {
DataError::DecodingError {
value_type: t,
err: e,
}
})
}
@ -221,8 +224,6 @@ impl<'v> From<&'v OwnedValue> for Value<'v> {
#[cfg(test)]
mod tests {
use ordered_float::OrderedFloat;
use super::*;
#[test]

View File

@ -12,11 +12,11 @@ use std::fs;
use tempfile::Builder;
use rkv::backend::{
Lmdb,
SafeMode,
};
use rkv::{
backend::{
Lmdb,
SafeMode,
},
Rkv,
StoreOptions,
Value,
@ -67,7 +67,7 @@ fn test_open_safe_same_dir_as_lmdb() {
assert_eq!(sk.get(&reader, "bar").expect("read"), Some(Value::Bool(true)));
assert_eq!(sk.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
}
// Create database of type B and save to disk (database of type A exists at the same path).
// Create database of type B and save to disk (type A exists at the same path).
{
let k = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
let sk = k.open_single("sk", StoreOptions::create()).expect("opened");
@ -149,7 +149,7 @@ fn test_open_lmdb_same_dir_as_safe() {
assert_eq!(sk.get(&reader, "bar").expect("read"), Some(Value::Bool(true)));
assert_eq!(sk.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
}
// Create database of type B and save to disk (database of type A exists at the same path).
// Create database of type B and save to disk (type A exists at the same path).
{
let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
let sk = k.open_single("sk", StoreOptions::create()).expect("opened");

View File

@ -12,13 +12,16 @@
// deprecates `clippy::cyclomatic_complexity`.
#![allow(clippy::complexity)]
use std::fs;
use std::str;
use std::sync::{
Arc,
RwLock,
use std::{
fs,
path::Path,
str,
sync::{
Arc,
RwLock,
},
thread,
};
use std::thread;
use byteorder::{
ByteOrder,
@ -26,16 +29,16 @@ use byteorder::{
};
use tempfile::Builder;
use rkv::backend::{
BackendEnvironmentBuilder,
BackendInfo,
BackendStat,
Lmdb,
LmdbDatabase,
LmdbEnvironment,
LmdbRwTransaction,
};
use rkv::{
backend::{
BackendEnvironmentBuilder,
BackendInfo,
BackendStat,
Lmdb,
LmdbDatabase,
LmdbEnvironment,
LmdbRwTransaction,
},
EnvironmentFlags,
Rkv,
SingleStore,
@ -69,7 +72,7 @@ fn test_open_fails() {
let pb = nope.to_path_buf();
match Rkv::new::<Lmdb>(nope.as_path()).err() {
Some(StoreError::DirectoryDoesNotExistError(p)) => {
Some(StoreError::UnsuitableEnvironmentPath(p)) => {
assert_eq!(pb, p);
},
_ => panic!("expected error"),
@ -101,9 +104,124 @@ fn test_open_from_builder() {
check_rkv(&k);
}
#[test]
fn test_open_from_builder_with_no_subdir_1() {
let root = Builder::new().prefix("test_open_from_builder").tempdir().expect("tempdir");
println!("Root path: {:?}", root.path());
fs::create_dir_all(root.path()).expect("dir created");
assert!(root.path().is_dir());
{
let mut builder = Rkv::environment_builder::<Lmdb>();
builder.set_max_dbs(2);
let k = Rkv::from_builder(root.path(), builder).expect("rkv");
check_rkv(&k);
}
{
let mut builder = Rkv::environment_builder::<Lmdb>();
builder.set_flags(EnvironmentFlags::NO_SUB_DIR);
builder.set_max_dbs(2);
let mut datamdb = root.path().to_path_buf();
datamdb.push("data.mdb");
let k = Rkv::from_builder(&datamdb, builder).expect("rkv");
check_rkv(&k);
}
}
#[test]
#[should_panic(expected = "rkv: UnsuitableEnvironmentPath")]
fn test_open_from_builder_with_no_subdir_2() {
let root = Builder::new().prefix("test_open_from_builder").tempdir().expect("tempdir");
println!("Root path: {:?}", root.path());
fs::create_dir_all(root.path()).expect("dir created");
assert!(root.path().is_dir());
{
let mut builder = Rkv::environment_builder::<Lmdb>();
builder.set_max_dbs(2);
let k = Rkv::from_builder(root.path(), builder).expect("rkv");
check_rkv(&k);
}
{
let mut builder = Rkv::environment_builder::<Lmdb>();
builder.set_flags(EnvironmentFlags::NO_SUB_DIR);
builder.set_max_dbs(2);
let mut datamdb = root.path().to_path_buf();
datamdb.push("bogus.mdb");
let k = Rkv::from_builder(&datamdb, builder).expect("rkv");
check_rkv(&k);
}
}
#[test]
fn test_open_from_builder_with_dir_1() {
let root = Builder::new().prefix("test_open_from_builder").tempdir().expect("tempdir");
println!("Root path: {:?}", root.path());
let mut builder = Rkv::environment_builder::<Lmdb>();
builder.set_max_dbs(2);
builder.set_make_dir_if_needed(true);
let k = Rkv::from_builder(root.path(), builder).expect("rkv");
check_rkv(&k);
}
#[test]
#[should_panic(expected = "rkv: UnsuitableEnvironmentPath(\"bogus\")")]
fn test_open_from_builder_with_dir_2() {
let root = Path::new("bogus");
println!("Root path: {:?}", root);
assert!(!root.is_dir());
let mut builder = Rkv::environment_builder::<Lmdb>();
builder.set_max_dbs(2);
let k = Rkv::from_builder(root, builder).expect("rkv");
check_rkv(&k);
}
#[test]
#[should_panic(expected = "opened: DbsFull")]
fn test_open_with_capacity() {
fn test_create_with_capacity_1() {
let root = Builder::new().prefix("test_create_with_capacity").tempdir().expect("tempdir");
println!("Root path: {:?}", root.path());
fs::create_dir_all(root.path()).expect("dir created");
assert!(root.path().is_dir());
let k = Rkv::with_capacity::<Lmdb>(root.path(), 1).expect("rkv");
check_rkv(&k);
// This errors with "opened: DbsFull" because we specified a capacity of one (database),
// and check_rkv already opened one (plus the default database, which doesn't count
// against the limit).
let _zzz = k.open_single("zzz", StoreOptions::create()).expect("opened");
}
#[test]
fn test_create_with_capacity_2() {
    let root = Builder::new().prefix("test_create_with_capacity").tempdir().expect("tempdir");
    println!("Root path: {:?}", root.path());
    fs::create_dir_all(root.path()).expect("dir created");
    assert!(root.path().is_dir());
    let k = Rkv::with_capacity::<Lmdb>(root.path(), 1).expect("rkv");
    check_rkv(&k);
    // This doesn't error with "opened: DbsFull" because even though we specified a
    // capacity of one (database), and check_rkv already opened one, the default database
    // doesn't count against the limit.
    let _zzz = k.open_single(None, StoreOptions::create()).expect("opened");
}
#[test]
#[should_panic(expected = "opened: DbsFull")]
fn test_open_with_capacity_1() {
let root = Builder::new().prefix("test_open_with_capacity").tempdir().expect("tempdir");
println!("Root path: {:?}", root.path());
fs::create_dir_all(root.path()).expect("dir created");
@ -112,12 +230,65 @@ fn test_open_with_capacity() {
let k = Rkv::with_capacity::<Lmdb>(root.path(), 1).expect("rkv");
check_rkv(&k);
// This panics with "opened: LmdbError(DbsFull)" because we specified
// a capacity of one (database), and check_rkv already opened one
// (plus the default database, which doesn't count against the limit).
// This should really return an error rather than panicking, per
// <https://github.com/mozilla/lmdb-rs/issues/6>.
let _zzz = k.open_single("zzz", StoreOptions::create()).expect("opened");
let _zzz = k.open_single("zzz", StoreOptions::default()).expect("opened");
}
#[test]
fn test_open_with_capacity_2() {
    // Opening the (unnamed) default database doesn't consume capacity, so this
    // succeeds even though check_rkv already used the single configured slot.
    let root = Builder::new().prefix("test_open_with_capacity").tempdir().expect("tempdir");
    println!("Root path: {:?}", root.path());
    fs::create_dir_all(root.path()).expect("dir created");
    assert!(root.path().is_dir());
    let env = Rkv::with_capacity::<Lmdb>(root.path(), 1).expect("rkv");
    check_rkv(&env);
    let _default_db = env.open_single(None, StoreOptions::default()).expect("opened");
}
#[test]
fn test_list_dbs_1() {
    // get_dbs() lists the named databases in the environment; the only one
    // opened so far is the "s" store from check_rkv.
    let root = Builder::new().prefix("test_list_dbs").tempdir().expect("tempdir");
    println!("Root path: {:?}", root.path());
    fs::create_dir_all(root.path()).expect("dir created");
    assert!(root.path().is_dir());
    let k = Rkv::with_capacity::<Lmdb>(root.path(), 1).expect("rkv");
    check_rkv(&k);
    let dbs = k.get_dbs().unwrap();
    assert_eq!(dbs, vec![Some("s".to_owned())]);
}
#[test]
fn test_list_dbs_2() {
    // With capacity for two named databases, opening a second one makes
    // get_dbs() report both names.
    let root = Builder::new().prefix("test_list_dbs").tempdir().expect("tempdir");
    println!("Root path: {:?}", root.path());
    fs::create_dir_all(root.path()).expect("dir created");
    assert!(root.path().is_dir());
    let env = Rkv::with_capacity::<Lmdb>(root.path(), 2).expect("rkv");
    check_rkv(&env);
    let _ = env.open_single("zzz", StoreOptions::create()).expect("opened");
    let names = env.get_dbs().unwrap();
    assert_eq!(names, vec![Some(String::from("s")), Some(String::from("zzz"))]);
}
#[test]
fn test_list_dbs_3() {
    // With zero capacity for named databases, only the default (unnamed)
    // database is opened; get_dbs() reports it as None.
    let root = Builder::new().prefix("test_list_dbs").tempdir().expect("tempdir");
    println!("Root path: {:?}", root.path());
    fs::create_dir_all(root.path()).expect("dir created");
    assert!(root.path().is_dir());
    let k = Rkv::with_capacity::<Lmdb>(root.path(), 0).expect("rkv");
    let _ = k.open_single(None, StoreOptions::create()).expect("opened");
    let dbs = k.get_dbs().unwrap();
    assert_eq!(dbs, vec![None]);
}
fn get_larger_than_default_map_size_value() -> usize {
@ -358,9 +529,9 @@ fn test_multi_put_get_del() {
{
let mut iter = multistore.get(&writer, "str1").unwrap();
let (id, val) = iter.next().unwrap().unwrap();
assert_eq!((id, val), (&b"str1"[..], Some(Value::Str("str1 bar"))));
assert_eq!((id, val), (&b"str1"[..], Value::Str("str1 bar")));
let (id, val) = iter.next().unwrap().unwrap();
assert_eq!((id, val), (&b"str1"[..], Some(Value::Str("str1 foo"))));
assert_eq!((id, val), (&b"str1"[..], Value::Str("str1 foo")));
}
writer.commit().unwrap();
@ -723,14 +894,14 @@ fn test_load_ratio() {
let mut writer = k.write().expect("writer");
sk.put(&mut writer, "foo", &Value::Str("bar")).expect("wrote");
writer.commit().expect("commited");
let ratio = k.load_ratio().expect("ratio");
let ratio = k.load_ratio().expect("ratio").unwrap();
assert!(ratio > 0.0_f32 && ratio < 1.0_f32);
// Put data to database should increase the load ratio.
let mut writer = k.write().expect("writer");
sk.put(&mut writer, "bar", &Value::Str(&"more-than-4KB".repeat(1000))).expect("wrote");
writer.commit().expect("commited");
let new_ratio = k.load_ratio().expect("ratio");
let new_ratio = k.load_ratio().expect("ratio").unwrap();
assert!(new_ratio > ratio);
// Clear the database so that all the used pages should go to freelist, hence the ratio
@ -738,7 +909,7 @@ fn test_load_ratio() {
let mut writer = k.write().expect("writer");
sk.clear(&mut writer).expect("clear");
writer.commit().expect("commited");
let after_clear_ratio = k.load_ratio().expect("ratio");
let after_clear_ratio = k.load_ratio().expect("ratio").unwrap();
assert!(after_clear_ratio < new_ratio);
}
@ -792,22 +963,22 @@ fn test_iter() {
let mut iter = sk.iter_start(&reader).unwrap();
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "bar");
assert_eq!(val, Some(Value::Bool(true)));
assert_eq!(val, Value::Bool(true));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "baz");
assert_eq!(val, Some(Value::Str("héllo, yöu")));
assert_eq!(val, Value::Str("héllo, yöu"));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "foo");
assert_eq!(val, Some(Value::I64(1234)));
assert_eq!(val, Value::I64(1234));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "héllò, töűrîst");
assert_eq!(val, Some(Value::Str("Emil.RuleZ!")));
assert_eq!(val, Value::Str("Emil.RuleZ!"));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "noo");
assert_eq!(val, Some(Value::F64(1234.0.into())));
assert_eq!(val, Value::F64(1234.0.into()));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客");
assert_eq!(val, Some(Value::Str("米克規則")));
assert_eq!(val, Value::Str("米克規則"));
assert!(iter.next().is_none());
// Iterators don't loop. Once one returns None, additional calls
@ -819,10 +990,10 @@ fn test_iter() {
let mut iter = sk.iter_from(&reader, "moo").unwrap();
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "noo");
assert_eq!(val, Some(Value::F64(1234.0.into())));
assert_eq!(val, Value::F64(1234.0.into()));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客");
assert_eq!(val, Some(Value::Str("米克規則")));
assert_eq!(val, Value::Str("米克規則"));
assert!(iter.next().is_none());
// Reader.iter_from() works as expected when the given key is a prefix
@ -830,10 +1001,10 @@ fn test_iter() {
let mut iter = sk.iter_from(&reader, "no").unwrap();
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "noo");
assert_eq!(val, Some(Value::F64(1234.0.into())));
assert_eq!(val, Value::F64(1234.0.into()));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客");
assert_eq!(val, Some(Value::Str("米克規則")));
assert_eq!(val, Value::Str("米克規則"));
assert!(iter.next().is_none());
}
@ -928,84 +1099,84 @@ fn test_multiple_store_iter() {
let mut iter = s1.iter_start(&reader).unwrap();
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "bar");
assert_eq!(val, Some(Value::Bool(true)));
assert_eq!(val, Value::Bool(true));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "baz");
assert_eq!(val, Some(Value::Str("héllo, yöu")));
assert_eq!(val, Value::Str("héllo, yöu"));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "foo");
assert_eq!(val, Some(Value::I64(1234)));
assert_eq!(val, Value::I64(1234));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "héllò, töűrîst");
assert_eq!(val, Some(Value::Str("Emil.RuleZ!")));
assert_eq!(val, Value::Str("Emil.RuleZ!"));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "noo");
assert_eq!(val, Some(Value::F64(1234.0.into())));
assert_eq!(val, Value::F64(1234.0.into()));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客");
assert_eq!(val, Some(Value::Str("米克規則")));
assert_eq!(val, Value::Str("米克規則"));
assert!(iter.next().is_none());
// Iterate through the whole store in "s2"
let mut iter = s2.iter_start(&reader).unwrap();
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "bar");
assert_eq!(val, Some(Value::Bool(true)));
assert_eq!(val, Value::Bool(true));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "baz");
assert_eq!(val, Some(Value::Str("héllo, yöu")));
assert_eq!(val, Value::Str("héllo, yöu"));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "foo");
assert_eq!(val, Some(Value::I64(1234)));
assert_eq!(val, Value::I64(1234));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "héllò, töűrîst");
assert_eq!(val, Some(Value::Str("Emil.RuleZ!")));
assert_eq!(val, Value::Str("Emil.RuleZ!"));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "noo");
assert_eq!(val, Some(Value::F64(1234.0.into())));
assert_eq!(val, Value::F64(1234.0.into()));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客");
assert_eq!(val, Some(Value::Str("米克規則")));
assert_eq!(val, Value::Str("米克規則"));
assert!(iter.next().is_none());
// Iterate from a given key in "s1"
let mut iter = s1.iter_from(&reader, "moo").unwrap();
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "noo");
assert_eq!(val, Some(Value::F64(1234.0.into())));
assert_eq!(val, Value::F64(1234.0.into()));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客");
assert_eq!(val, Some(Value::Str("米克規則")));
assert_eq!(val, Value::Str("米克規則"));
assert!(iter.next().is_none());
// Iterate from a given key in "s2"
let mut iter = s2.iter_from(&reader, "moo").unwrap();
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "noo");
assert_eq!(val, Some(Value::F64(1234.0.into())));
assert_eq!(val, Value::F64(1234.0.into()));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客");
assert_eq!(val, Some(Value::Str("米克規則")));
assert_eq!(val, Value::Str("米克規則"));
assert!(iter.next().is_none());
// Iterate from a given prefix in "s1"
let mut iter = s1.iter_from(&reader, "no").unwrap();
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "noo");
assert_eq!(val, Some(Value::F64(1234.0.into())));
assert_eq!(val, Value::F64(1234.0.into()));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客");
assert_eq!(val, Some(Value::Str("米克規則")));
assert_eq!(val, Value::Str("米克規則"));
assert!(iter.next().is_none());
// Iterate from a given prefix in "s2"
let mut iter = s2.iter_from(&reader, "no").unwrap();
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "noo");
assert_eq!(val, Some(Value::F64(1234.0.into())));
assert_eq!(val, Value::F64(1234.0.into()));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客");
assert_eq!(val, Some(Value::Str("米克規則")));
assert_eq!(val, Value::Str("米克規則"));
assert!(iter.next().is_none());
}

View File

@ -0,0 +1,356 @@
// Copyright 2018-2019 Mozilla
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
use std::{
fs,
path::Path,
};
use tempfile::Builder;
use rkv::{
backend::{
Lmdb,
SafeMode,
},
Migrator,
Rkv,
StoreOptions,
Value,
};
// Opens (creating if needed) a store named "store" in the given environment,
// seeds it with one I64, one Bool, and one Str value, and commits the write.
macro_rules! populate_store {
    ($env:expr) => {
        let store = $env.open_single("store", StoreOptions::create()).expect("opened");
        let mut writer = $env.write().expect("writer");
        store.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote");
        store.put(&mut writer, "bar", &Value::Bool(true)).expect("wrote");
        store.put(&mut writer, "baz", &Value::Str("héllo, yöu")).expect("wrote");
        writer.commit().expect("committed");
    };
}
// End-to-end check of the one-shot LMDB → safe-mode migration: populate an
// LMDB environment, migrate it, verify the data arrived in the safe-mode
// destination, and verify that the old LMDB files were removed afterwards.
#[test]
fn test_simple_migrator_lmdb_to_safe() {
    let root = Builder::new().prefix("test_simple_migrator_lmdb_to_safe").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");
    // Populate source environment and persist to disk.
    {
        let src_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
        populate_store!(&src_env);
        src_env.sync(true).expect("synced");
    }
    // Check if the files were written to disk.
    {
        let mut datamdb = root.path().to_path_buf();
        let mut lockmdb = root.path().to_path_buf();
        datamdb.push("data.mdb");
        lockmdb.push("lock.mdb");
        assert!(datamdb.exists());
        assert!(lockmdb.exists());
    }
    // Verify that database was written to disk.
    {
        let src_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
        let store = src_env.open_single("store", StoreOptions::default()).expect("opened");
        let reader = src_env.read().expect("reader");
        assert_eq!(store.get(&reader, "foo").expect("read"), Some(Value::I64(1234)));
        assert_eq!(store.get(&reader, "bar").expect("read"), Some(Value::Bool(true)));
        assert_eq!(store.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
    }
    // Open and migrate.
    {
        let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
        Migrator::open_and_migrate_lmdb_to_safe_mode(root.path(), |builder| builder, &dst_env).expect("migrated");
    }
    // Verify that the database was indeed migrated.
    {
        let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
        let store = dst_env.open_single("store", StoreOptions::default()).expect("opened");
        let reader = dst_env.read().expect("reader");
        assert_eq!(store.get(&reader, "foo").expect("read"), Some(Value::I64(1234)));
        assert_eq!(store.get(&reader, "bar").expect("read"), Some(Value::Bool(true)));
        assert_eq!(store.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
    }
    // Check if the old files were deleted from disk.
    {
        let mut datamdb = root.path().to_path_buf();
        let mut lockmdb = root.path().to_path_buf();
        datamdb.push("data.mdb");
        lockmdb.push("lock.mdb");
        assert!(!datamdb.exists());
        assert!(!lockmdb.exists());
    }
}
// End-to-end check of the one-shot safe-mode → LMDB migration: populate a
// safe-mode environment, migrate it, verify the data arrived in the LMDB
// destination, and verify that the old safe-mode file was removed afterwards.
#[test]
fn test_simple_migrator_safe_to_lmdb() {
    let root = Builder::new().prefix("test_simple_migrator_safe_to_lmdb").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");
    // Populate source environment and persist to disk.
    {
        let src_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
        populate_store!(&src_env);
        src_env.sync(true).expect("synced");
    }
    // Check if the files were written to disk.
    {
        let mut safebin = root.path().to_path_buf();
        safebin.push("data.safe.bin");
        assert!(safebin.exists());
    }
    // Verify that database was written to disk.
    {
        let src_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
        let store = src_env.open_single("store", StoreOptions::default()).expect("opened");
        let reader = src_env.read().expect("reader");
        assert_eq!(store.get(&reader, "foo").expect("read"), Some(Value::I64(1234)));
        assert_eq!(store.get(&reader, "bar").expect("read"), Some(Value::Bool(true)));
        assert_eq!(store.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
    }
    // Open and migrate.
    {
        let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
        Migrator::open_and_migrate_safe_mode_to_lmdb(root.path(), |builder| builder, &dst_env).expect("migrated");
    }
    // Verify that the database was indeed migrated.
    {
        let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
        let store = dst_env.open_single("store", StoreOptions::default()).expect("opened");
        let reader = dst_env.read().expect("reader");
        assert_eq!(store.get(&reader, "foo").expect("read"), Some(Value::I64(1234)));
        assert_eq!(store.get(&reader, "bar").expect("read"), Some(Value::Bool(true)));
        assert_eq!(store.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
    }
    // Check if the old files were deleted from disk.
    {
        let mut safebin = root.path().to_path_buf();
        safebin.push("data.safe.bin");
        assert!(!safebin.exists());
    }
}
// Migrates LMDB → safe-mode and then back again, checking that the data
// survives both hops and that only the LMDB files remain on disk at the end.
#[test]
fn test_migrator_round_trip() {
    let root = Builder::new().prefix("test_simple_migrator_lmdb_to_safe").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");
    // Populate source environment and persist to disk.
    {
        let src_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
        populate_store!(&src_env);
        src_env.sync(true).expect("synced");
    }
    // Open and migrate.
    {
        let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
        Migrator::open_and_migrate_lmdb_to_safe_mode(root.path(), |builder| builder, &dst_env).expect("migrated");
    }
    // Open and migrate back.
    {
        let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
        Migrator::open_and_migrate_safe_mode_to_lmdb(root.path(), |builder| builder, &dst_env).expect("migrated");
    }
    // Verify that the database was indeed migrated twice.
    {
        let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
        let store = dst_env.open_single("store", StoreOptions::default()).expect("opened");
        let reader = dst_env.read().expect("reader");
        assert_eq!(store.get(&reader, "foo").expect("read"), Some(Value::I64(1234)));
        assert_eq!(store.get(&reader, "bar").expect("read"), Some(Value::Bool(true)));
        assert_eq!(store.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
    }
    // Check if the right files are finally present on disk.
    {
        let mut datamdb = root.path().to_path_buf();
        let mut lockmdb = root.path().to_path_buf();
        let mut safebin = root.path().to_path_buf();
        datamdb.push("data.mdb");
        lockmdb.push("lock.mdb");
        safebin.push("data.safe.bin");
        assert!(datamdb.exists());
        assert!(lockmdb.exists());
        assert!(!safebin.exists());
    }
}
#[test]
fn test_migrator_no_dir_1() {
    // The "easy mode" migration silently ignores a nonexistent source path
    // instead of failing with IoError.
    let root = Builder::new().prefix("test_migrator_no_dir").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");
    let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
    Migrator::easy_migrate_lmdb_to_safe_mode(Path::new("bogus"), &dst_env).expect("migrated");
    // Nothing should have been written to the destination directory.
    assert!(!root.path().join("data.mdb").exists());
    assert!(!root.path().join("lock.mdb").exists());
    // Safe mode doesn't write an empty db to disk.
    assert!(!root.path().join("data.safe.bin").exists());
}
#[test]
fn test_migrator_no_dir_2() {
    // The "easy mode" migration silently ignores a nonexistent source path
    // instead of failing with IoError.
    let root = Builder::new().prefix("test_migrator_no_dir").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");
    let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
    Migrator::easy_migrate_safe_mode_to_lmdb(Path::new("bogus"), &dst_env).expect("migrated");
    // LMDB writes an empty db to disk when the environment is opened.
    assert!(root.path().join("data.mdb").exists());
    assert!(root.path().join("lock.mdb").exists());
    assert!(!root.path().join("data.safe.bin").exists());
}
// "Easy mode" migration tolerates a corrupt (non-LMDB) data.mdb in the source:
// the error is swallowed and the corrupted file is left in place.
#[test]
fn test_migrator_invalid_1() {
    let root = Builder::new().prefix("test_migrator_invalid").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");
    let dbfile = root.path().join("data.mdb");
    fs::write(dbfile, "bogus").expect("dbfile created");
    // This won't fail with FileInvalid even though the database is a bogus file, because this
    // is the "easy mode" migration which automatically handles (ignores) this error.
    let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
    Migrator::easy_migrate_lmdb_to_safe_mode(root.path(), &dst_env).expect("migrated");
    let mut datamdb = root.path().to_path_buf();
    let mut lockmdb = root.path().to_path_buf();
    let mut safebin = root.path().to_path_buf();
    datamdb.push("data.mdb");
    lockmdb.push("lock.mdb");
    safebin.push("data.safe.bin");
    assert!(datamdb.exists()); // corrupted db isn't deleted
    assert!(lockmdb.exists());
    assert!(!safebin.exists());
}
// "Easy mode" migration tolerates a corrupt (non-safe-mode) data.safe.bin in
// the source: the error is swallowed and the corrupted file is left in place.
#[test]
fn test_migrator_invalid_2() {
    let root = Builder::new().prefix("test_migrator_invalid").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");
    let dbfile = root.path().join("data.safe.bin");
    fs::write(dbfile, "bogus").expect("dbfile created");
    // This won't fail with FileInvalid even though the database is a bogus file, because this
    // is the "easy mode" migration which automatically handles (ignores) this error.
    let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
    Migrator::easy_migrate_safe_mode_to_lmdb(root.path(), &dst_env).expect("migrated");
    let mut datamdb = root.path().to_path_buf();
    let mut lockmdb = root.path().to_path_buf();
    let mut safebin = root.path().to_path_buf();
    datamdb.push("data.mdb");
    lockmdb.push("lock.mdb");
    safebin.push("data.safe.bin");
    assert!(datamdb.exists()); // lmdb writes an empty db to disk
    assert!(lockmdb.exists());
    assert!(safebin.exists()); // corrupted db isn't deleted
}
#[test]
#[should_panic(expected = "migrated: SourceEmpty")]
fn test_migrator_lmdb_to_safe_1() {
    // Migrating from an LMDB environment that holds no data must fail with
    // SourceEmpty.
    let root = Builder::new().prefix("test_migrate_lmdb_to_safe").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");
    let source = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
    let destination = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
    Migrator::migrate_lmdb_to_safe_mode(&source, &destination).expect("migrated");
}
#[test]
#[should_panic(expected = "migrated: DestinationNotEmpty")]
fn test_migrator_lmdb_to_safe_2() {
    // Migration refuses to clobber existing data: a non-empty destination
    // makes it fail with DestinationNotEmpty.
    let root = Builder::new().prefix("test_migrate_lmdb_to_safe").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");
    let source = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
    populate_store!(&source);
    let destination = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
    populate_store!(&destination);
    Migrator::migrate_lmdb_to_safe_mode(&source, &destination).expect("migrated");
}
// Happy path: a populated LMDB source migrates into an empty safe-mode
// destination, and every seeded key/value pair is readable afterwards.
#[test]
fn test_migrator_lmdb_to_safe_3() {
    let root = Builder::new().prefix("test_migrate_lmdb_to_safe").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");
    let src_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
    populate_store!(&src_env);
    let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
    Migrator::migrate_lmdb_to_safe_mode(&src_env, &dst_env).expect("migrated");
    let store = dst_env.open_single("store", StoreOptions::default()).expect("opened");
    let reader = dst_env.read().expect("reader");
    assert_eq!(store.get(&reader, "foo").expect("read"), Some(Value::I64(1234)));
    assert_eq!(store.get(&reader, "bar").expect("read"), Some(Value::Bool(true)));
    assert_eq!(store.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
}
#[test]
#[should_panic(expected = "migrated: SourceEmpty")]
fn test_migrator_safe_to_lmdb_1() {
    // Migrating from a safe-mode environment that holds no data must fail
    // with SourceEmpty.
    let root = Builder::new().prefix("test_migrate_safe_to_lmdb").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");
    let source = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
    let destination = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
    Migrator::migrate_safe_mode_to_lmdb(&source, &destination).expect("migrated");
}
// Migration refuses to clobber existing data: a non-empty destination makes
// it fail with DestinationNotEmpty.
#[test]
#[should_panic(expected = "migrated: DestinationNotEmpty")]
fn test_migrator_safe_to_lmdb_2() {
    let root = Builder::new().prefix("test_migrate_safe_to_lmdb").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");
    let src_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
    populate_store!(&src_env);
    let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
    populate_store!(&dst_env);
    Migrator::migrate_safe_mode_to_lmdb(&src_env, &dst_env).expect("migrated");
}
#[test]
fn test_migrator_safe_to_lmdb_3() {
    // Happy path: a populated safe-mode source migrates into an empty LMDB
    // destination, and every seeded key/value pair is readable afterwards.
    let root = Builder::new().prefix("test_migrate_safe_to_lmdb").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");
    let source = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
    populate_store!(&source);
    let destination = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
    Migrator::migrate_safe_mode_to_lmdb(&source, &destination).expect("migrated");
    let store = destination.open_single("store", StoreOptions::default()).expect("opened");
    let reader = destination.read().expect("reader");
    assert_eq!(store.get(&reader, "foo").expect("read"), Some(Value::I64(1234)));
    assert_eq!(store.get(&reader, "bar").expect("read"), Some(Value::Bool(true)));
    assert_eq!(store.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
}

View File

@ -12,13 +12,16 @@
// deprecates `clippy::cyclomatic_complexity`.
#![allow(clippy::complexity)]
use std::fs;
use std::str;
use std::sync::{
Arc,
RwLock,
use std::{
fs,
path::Path,
str,
sync::{
Arc,
RwLock,
},
thread,
};
use std::thread;
use byteorder::{
ByteOrder,
@ -26,14 +29,14 @@ use byteorder::{
};
use tempfile::Builder;
use rkv::backend::{
BackendEnvironmentBuilder,
SafeMode,
SafeModeDatabase,
SafeModeEnvironment,
SafeModeRwTransaction,
};
use rkv::{
backend::{
BackendEnvironmentBuilder,
SafeMode,
SafeModeDatabase,
SafeModeEnvironment,
SafeModeRwTransaction,
},
Rkv,
SingleStore,
StoreError,
@ -63,7 +66,7 @@ fn test_open_fails_safe() {
let pb = nope.to_path_buf();
match Rkv::new::<SafeMode>(nope.as_path()).err() {
Some(StoreError::DirectoryDoesNotExistError(p)) => {
Some(StoreError::UnsuitableEnvironmentPath(p)) => {
assert_eq!(pb, p);
},
_ => panic!("expected error"),
@ -95,10 +98,37 @@ fn test_open_from_builder_safe() {
check_rkv(&k);
}
#[test]
fn test_open_from_builder_with_dir_safe_1() {
    // set_make_dir_if_needed(true) permits the builder to create the
    // environment directory if it is missing.
    let root = Builder::new().prefix("test_open_from_builder_safe").tempdir().expect("tempdir");
    println!("Root path: {:?}", root.path());
    let mut builder = Rkv::environment_builder::<SafeMode>();
    builder.set_max_dbs(2);
    builder.set_make_dir_if_needed(true);
    let k = Rkv::from_builder(root.path(), builder).expect("rkv");
    check_rkv(&k);
}
#[test]
#[should_panic(expected = "rkv: UnsuitableEnvironmentPath(\"bogus\")")]
fn test_open_from_builder_with_dir_safe_2() {
    // Opening an environment at a directory that doesn't exist (and without
    // set_make_dir_if_needed) must fail with UnsuitableEnvironmentPath.
    let root = Path::new("bogus");
    println!("Root path: {:?}", root);
    assert!(!root.is_dir());
    let mut builder = Rkv::environment_builder::<SafeMode>();
    builder.set_max_dbs(2);
    let k = Rkv::from_builder(root, builder).expect("rkv");
    check_rkv(&k);
}
#[test]
#[should_panic(expected = "opened: DbsFull")]
fn test_open_with_capacity_safe() {
let root = Builder::new().prefix("test_open_with_capacity").tempdir().expect("tempdir");
fn test_create_with_capacity_safe_1() {
let root = Builder::new().prefix("test_create_with_capacity_safe").tempdir().expect("tempdir");
println!("Root path: {:?}", root.path());
fs::create_dir_all(root.path()).expect("dir created");
assert!(root.path().is_dir());
@ -106,9 +136,103 @@ fn test_open_with_capacity_safe() {
let k = Rkv::with_capacity::<SafeMode>(root.path(), 1).expect("rkv");
check_rkv(&k);
// This errors with "opened: DbsFull" because we specified a capacity of one (database),
// and check_rkv already opened one (plus the default database, which doesn't count
// against the limit).
let _zzz = k.open_single("zzz", StoreOptions::create()).expect("opened");
}
#[test]
fn test_create_with_capacity_safe_2() {
    let root = Builder::new().prefix("test_create_with_capacity_safe").tempdir().expect("tempdir");
    println!("Root path: {:?}", root.path());
    fs::create_dir_all(root.path()).expect("dir created");
    assert!(root.path().is_dir());
    let k = Rkv::with_capacity::<SafeMode>(root.path(), 1).expect("rkv");
    check_rkv(&k);
    // This doesn't error with "opened: DbsFull" because even though we specified a capacity
    // of one (database), and check_rkv already opened one, the default database doesn't
    // count against the limit.
    let _zzz = k.open_single(None, StoreOptions::create()).expect("opened");
}
#[test]
#[should_panic(expected = "opened: SafeModeError(DbNotFoundError)")]
fn test_open_with_capacity_safe_1() {
    // Opening (not creating) a database that was never created must fail with
    // DbNotFoundError under the safe-mode backend.
    let root = Builder::new().prefix("test_open_with_capacity_safe").tempdir().expect("tempdir");
    println!("Root path: {:?}", root.path());
    fs::create_dir_all(root.path()).expect("dir created");
    assert!(root.path().is_dir());
    let env = Rkv::with_capacity::<SafeMode>(root.path(), 1).expect("rkv");
    check_rkv(&env);
    let _missing = env.open_single("zzz", StoreOptions::default()).expect("opened");
}
#[test]
fn test_open_with_capacity_safe_2() {
    // Opening the default (unnamed) database with non-create options succeeds
    // even after check_rkv has used the single configured capacity slot.
    let root = Builder::new().prefix("test_open_with_capacity_safe").tempdir().expect("tempdir");
    println!("Root path: {:?}", root.path());
    fs::create_dir_all(root.path()).expect("dir created");
    assert!(root.path().is_dir());
    let k = Rkv::with_capacity::<SafeMode>(root.path(), 1).expect("rkv");
    check_rkv(&k);
    let _zzz = k.open_single(None, StoreOptions::default()).expect("opened");
}
#[test]
fn test_list_dbs_safe_1() {
    // The safe-mode backend lists the default database (None) alongside the
    // named "s" store that check_rkv opened; sort for a stable comparison.
    let root = Builder::new().prefix("test_list_dbs_safe").tempdir().expect("tempdir");
    println!("Root path: {:?}", root.path());
    fs::create_dir_all(root.path()).expect("dir created");
    assert!(root.path().is_dir());
    let env = Rkv::with_capacity::<SafeMode>(root.path(), 1).expect("rkv");
    check_rkv(&env);
    let mut names = env.get_dbs().unwrap();
    names.sort();
    assert_eq!(names, vec![None, Some(String::from("s"))]);
}
#[test]
fn test_list_dbs_safe_2() {
    // After opening a second named database, get_dbs() reports the default
    // database (None) plus both names; sort for a deterministic comparison.
    let root = Builder::new().prefix("test_list_dbs_safe").tempdir().expect("tempdir");
    println!("Root path: {:?}", root.path());
    fs::create_dir_all(root.path()).expect("dir created");
    assert!(root.path().is_dir());
    let k = Rkv::with_capacity::<SafeMode>(root.path(), 2).expect("rkv");
    check_rkv(&k);
    let _ = k.open_single("zzz", StoreOptions::create()).expect("opened");
    let mut dbs = k.get_dbs().unwrap();
    dbs.sort();
    assert_eq!(dbs, vec![None, Some("s".to_owned()), Some("zzz".to_owned())]);
}
#[test]
fn test_list_dbs_safe_3() {
    // With zero capacity for named databases, only the default (unnamed)
    // database exists; get_dbs() reports it as None.
    let root = Builder::new().prefix("test_list_dbs_safe").tempdir().expect("tempdir");
    println!("Root path: {:?}", root.path());
    fs::create_dir_all(root.path()).expect("dir created");
    assert!(root.path().is_dir());
    let k = Rkv::with_capacity::<SafeMode>(root.path(), 0).expect("rkv");
    let _ = k.open_single(None, StoreOptions::create()).expect("opened");
    let mut dbs = k.get_dbs().unwrap();
    dbs.sort();
    assert_eq!(dbs, vec![None]);
}
#[test]
fn test_round_trip_and_transactions_safe() {
let root = Builder::new().prefix("test_round_trip_and_transactions_safe").tempdir().expect("tempdir");
@ -276,9 +400,9 @@ fn test_multi_put_get_del_safe() {
{
let mut iter = multistore.get(&writer, "str1").unwrap();
let (id, val) = iter.next().unwrap().unwrap();
assert_eq!((id, val), (&b"str1"[..], Some(Value::Str("str1 bar"))));
assert_eq!((id, val), (&b"str1"[..], Value::Str("str1 bar")));
let (id, val) = iter.next().unwrap().unwrap();
assert_eq!((id, val), (&b"str1"[..], Some(Value::Str("str1 foo"))));
assert_eq!((id, val), (&b"str1"[..], Value::Str("str1 foo")));
}
writer.commit().unwrap();
@ -609,22 +733,22 @@ fn test_iter_safe() {
let mut iter = sk.iter_start(&reader).unwrap();
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "bar");
assert_eq!(val, Some(Value::Bool(true)));
assert_eq!(val, Value::Bool(true));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "baz");
assert_eq!(val, Some(Value::Str("héllo, yöu")));
assert_eq!(val, Value::Str("héllo, yöu"));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "foo");
assert_eq!(val, Some(Value::I64(1234)));
assert_eq!(val, Value::I64(1234));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "héllò, töűrîst");
assert_eq!(val, Some(Value::Str("Emil.RuleZ!")));
assert_eq!(val, Value::Str("Emil.RuleZ!"));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "noo");
assert_eq!(val, Some(Value::F64(1234.0.into())));
assert_eq!(val, Value::F64(1234.0.into()));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客");
assert_eq!(val, Some(Value::Str("米克規則")));
assert_eq!(val, Value::Str("米克規則"));
assert!(iter.next().is_none());
// Iterators don't loop. Once one returns None, additional calls
@ -636,10 +760,10 @@ fn test_iter_safe() {
let mut iter = sk.iter_from(&reader, "moo").unwrap();
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "noo");
assert_eq!(val, Some(Value::F64(1234.0.into())));
assert_eq!(val, Value::F64(1234.0.into()));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客");
assert_eq!(val, Some(Value::Str("米克規則")));
assert_eq!(val, Value::Str("米克規則"));
assert!(iter.next().is_none());
// Reader.iter_from() works as expected when the given key is a prefix
@ -647,10 +771,10 @@ fn test_iter_safe() {
let mut iter = sk.iter_from(&reader, "no").unwrap();
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "noo");
assert_eq!(val, Some(Value::F64(1234.0.into())));
assert_eq!(val, Value::F64(1234.0.into()));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客");
assert_eq!(val, Some(Value::Str("米克規則")));
assert_eq!(val, Value::Str("米克規則"));
assert!(iter.next().is_none());
}
@ -746,84 +870,84 @@ fn test_multiple_store_iter_safe() {
let mut iter = s1.iter_start(&reader).unwrap();
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "bar");
assert_eq!(val, Some(Value::Bool(true)));
assert_eq!(val, Value::Bool(true));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "baz");
assert_eq!(val, Some(Value::Str("héllo, yöu")));
assert_eq!(val, Value::Str("héllo, yöu"));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "foo");
assert_eq!(val, Some(Value::I64(1234)));
assert_eq!(val, Value::I64(1234));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "héllò, töűrîst");
assert_eq!(val, Some(Value::Str("Emil.RuleZ!")));
assert_eq!(val, Value::Str("Emil.RuleZ!"));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "noo");
assert_eq!(val, Some(Value::F64(1234.0.into())));
assert_eq!(val, Value::F64(1234.0.into()));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客");
assert_eq!(val, Some(Value::Str("米克規則")));
assert_eq!(val, Value::Str("米克規則"));
assert!(iter.next().is_none());
// Iterate through the whole store in "s2"
let mut iter = s2.iter_start(&reader).unwrap();
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "bar");
assert_eq!(val, Some(Value::Bool(true)));
assert_eq!(val, Value::Bool(true));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "baz");
assert_eq!(val, Some(Value::Str("héllo, yöu")));
assert_eq!(val, Value::Str("héllo, yöu"));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "foo");
assert_eq!(val, Some(Value::I64(1234)));
assert_eq!(val, Value::I64(1234));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "héllò, töűrîst");
assert_eq!(val, Some(Value::Str("Emil.RuleZ!")));
assert_eq!(val, Value::Str("Emil.RuleZ!"));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "noo");
assert_eq!(val, Some(Value::F64(1234.0.into())));
assert_eq!(val, Value::F64(1234.0.into()));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客");
assert_eq!(val, Some(Value::Str("米克規則")));
assert_eq!(val, Value::Str("米克規則"));
assert!(iter.next().is_none());
// Iterate from a given key in "s1"
let mut iter = s1.iter_from(&reader, "moo").unwrap();
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "noo");
assert_eq!(val, Some(Value::F64(1234.0.into())));
assert_eq!(val, Value::F64(1234.0.into()));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客");
assert_eq!(val, Some(Value::Str("米克規則")));
assert_eq!(val, Value::Str("米克規則"));
assert!(iter.next().is_none());
// Iterate from a given key in "s2"
let mut iter = s2.iter_from(&reader, "moo").unwrap();
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "noo");
assert_eq!(val, Some(Value::F64(1234.0.into())));
assert_eq!(val, Value::F64(1234.0.into()));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客");
assert_eq!(val, Some(Value::Str("米克規則")));
assert_eq!(val, Value::Str("米克規則"));
assert!(iter.next().is_none());
// Iterate from a given prefix in "s1"
let mut iter = s1.iter_from(&reader, "no").unwrap();
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "noo");
assert_eq!(val, Some(Value::F64(1234.0.into())));
assert_eq!(val, Value::F64(1234.0.into()));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客");
assert_eq!(val, Some(Value::Str("米克規則")));
assert_eq!(val, Value::Str("米克規則"));
assert!(iter.next().is_none());
// Iterate from a given prefix in "s2"
let mut iter = s2.iter_from(&reader, "no").unwrap();
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "noo");
assert_eq!(val, Some(Value::F64(1234.0.into())));
assert_eq!(val, Value::F64(1234.0.into()));
let (key, val) = iter.next().unwrap().unwrap();
assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客");
assert_eq!(val, Some(Value::Str("米克規則")));
assert_eq!(val, Value::Str("米克規則"));
assert!(iter.next().is_none());
}

View File

@ -8,14 +8,15 @@
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#![cfg(feature = "db-int-key")]
#![allow(clippy::many_single_char_names)]
use std::fs;
use serde_derive::Serialize;
use tempfile::Builder;
use rkv::backend::Lmdb;
use rkv::{
backend::Lmdb,
PrimitiveInt,
Rkv,
StoreOptions,

View File

@ -8,21 +8,26 @@
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
use std::fs;
use std::sync::Arc;
use std::{
fs,
sync::Arc,
};
use tempfile::Builder;
use rkv::backend::{
Lmdb,
LmdbEnvironment,
SafeMode,
SafeModeEnvironment,
use rkv::{
backend::{
Lmdb,
LmdbEnvironment,
SafeMode,
SafeModeEnvironment,
},
Rkv,
};
use rkv::Rkv;
/// Test that a manager can be created with simple type inference.
#[test]
#[allow(clippy::let_underscore_lock)]
fn test_simple() {
type Manager = rkv::Manager<LmdbEnvironment>;
@ -31,6 +36,7 @@ fn test_simple() {
/// Test that a manager can be created with simple type inference.
#[test]
#[allow(clippy::let_underscore_lock)]
fn test_simple_safe() {
type Manager = rkv::Manager<SafeModeEnvironment>;

View File

@ -8,14 +8,15 @@
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#![cfg(all(feature = "db-dup-sort", feature = "db-int-key"))]
#![allow(clippy::many_single_char_names)]
use std::fs;
use serde_derive::Serialize;
use tempfile::Builder;
use rkv::backend::Lmdb;
use rkv::{
backend::Lmdb,
PrimitiveInt,
Rkv,
StoreOptions,
@ -41,7 +42,7 @@ fn test_multi_integer_keys() {
.get(&writer, $key)
.expect("read")
.map(|result| result.expect("ok"))
.map(|(_, v)| v.expect("multi read"))
.map(|(_, v)| v)
.collect::<Vec<Value>>();
assert_eq!(vals, vec![Value::Str("hello1"), Value::Str("hello2"), Value::Str("hello3")]);
writer.commit().expect("committed");
@ -51,7 +52,7 @@ fn test_multi_integer_keys() {
.get(&reader, $key)
.expect("read")
.map(|result| result.expect("ok"))
.map(|(_, v)| v.expect("multi read"))
.map(|(_, v)| v)
.collect::<Vec<Value>>();
assert_eq!(vals, vec![Value::Str("hello1"), Value::Str("hello2"), Value::Str("hello3")]);
}};

View File

@ -13,13 +13,13 @@ use std::fs;
use tempfile::Builder;
use rkv::backend::{
Lmdb,
LmdbDatabase,
LmdbRoCursor,
LmdbRwTransaction,
};
use rkv::{
backend::{
Lmdb,
LmdbDatabase,
LmdbRoCursor,
LmdbRwTransaction,
},
Readable,
Rkv,
StoreOptions,
@ -33,10 +33,10 @@ use rkv::{
/// value: String,
/// date: String,
/// }
/// We would like to index all of the fields so that we can search for the struct not only by ID
/// but also by value and date. When we index the fields individually in their own tables, it
/// is important that we run all operations within a single transaction to ensure coherence of
/// the indices.
/// We would like to index all of the fields so that we can search for the struct not only
/// by ID but also by value and date. When we index the fields individually in their own
/// tables, it is important that we run all operations within a single transaction to
/// ensure coherence of the indices.
/// This test features helper functions for reading and writing the parts of the struct.
/// Note that the reader functions take `Readable` because they might run within a Read
/// Transaction or a Write Transaction. The test demonstrates fetching values via both.
@ -97,9 +97,11 @@ where
store
.get(txn, field)
.expect("get iterator")
.map(|id| match id.expect("field") {
(_, Some(Value::U64(id))) => id,
_ => panic!("getting value in iter"),
.map(|id| {
match id.expect("field") {
(_, Value::U64(id)) => id,
_ => panic!("getting value in iter"),
}
})
.collect::<Vec<u64>>()
}