diff --git a/.cargo/config.in b/.cargo/config.in index b09b2851121f..64145b122761 100644 --- a/.cargo/config.in +++ b/.cargo/config.in @@ -7,11 +7,6 @@ branch = "r0.13.1" git = "https://github.com/shravanrn/nix/" replace-with = "vendored-sources" -[source."https://github.com/mozilla/rkv"] -git = "https://github.com/mozilla/rkv" -replace-with = "vendored-sources" -rev = "e3c3388e6632cf55e08d773b32e58b1cab9b2731" - [source."https://github.com/mozilla/neqo"] git = "https://github.com/mozilla/neqo" replace-with = "vendored-sources" diff --git a/Cargo.lock b/Cargo.lock index 3bbf2dab1c22..308f8e8186ee 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -515,7 +515,7 @@ dependencies = [ "nserror", "nsstring", "rental", - "rkv 0.11.1", + "rkv 0.15.0", "rust_cascade", "sha2", "storage_variant", @@ -2530,7 +2530,7 @@ dependencies = [ "moz_task", "nserror", "nsstring", - "rkv 0.10.4", + "rkv 0.15.0", "storage_variant", "tempfile", "thin-vec", @@ -4100,8 +4100,9 @@ dependencies = [ [[package]] name = "rkv" -version = "0.11.1" -source = "git+https://github.com/mozilla/rkv?rev=e3c3388e6632cf55e08d773b32e58b1cab9b2731#e3c3388e6632cf55e08d773b32e58b1cab9b2731" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e97d1b6321740ce36d77d67d22ff84ac8a996cf69dbd0727b8bcae52f1c98aaa" dependencies = [ "arrayref", "bincode", @@ -4113,6 +4114,7 @@ dependencies = [ "lmdb-rkv", "log", "ordered-float", + "paste", "serde", "serde_derive", "url", diff --git a/third_party/rust/rkv/.appveyor.yml b/third_party/rust/rkv/.appveyor.yml deleted file mode 100644 index 7b96e5e9ce4b..000000000000 --- a/third_party/rust/rkv/.appveyor.yml +++ /dev/null @@ -1,38 +0,0 @@ -environment: - matrix: - - TARGET: x86_64-pc-windows-msvc - TOOLCHAIN: stable - - TARGET: i686-pc-windows-msvc - TOOLCHAIN: stable - - TARGET: x86_64-pc-windows-msvc - TOOLCHAIN: beta - - TARGET: i686-pc-windows-msvc - TOOLCHAIN: beta - - TARGET: x86_64-pc-windows-msvc - TOOLCHAIN: nightly - - TARGET: i686-pc-windows-msvc - TOOLCHAIN: nightly - -install: - - curl -sSf -o rustup-init.exe https://win.rustup.rs/ - - rustup-init.exe -y --default-host %TARGET% --default-toolchain %TOOLCHAIN% - - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin - - choco install make -y - - choco install mingw -y - - refreshenv - - rustc -Vv - - cargo -Vv - - make -v - - gcc -v - -# Disable AppVeyor's build phase, let 'cargo test' take care of the build -build: false - -test_script: - - SET RUST_BACKTRACE=1 - - cargo test --all --target %TARGET% --verbose - - cargo test --all --release --target %TARGET% --verbose - -cache: - - C:\Users\appveyor\.cargo\registry - - target diff --git a/third_party/rust/rkv/.cargo-checksum.json b/third_party/rust/rkv/.cargo-checksum.json index 5fcf47fd8959..86a9f00b8932 100644 --- a/third_party/rust/rkv/.cargo-checksum.json +++ b/third_party/rust/rkv/.cargo-checksum.json @@ -1 +1 @@ 
-{"files":{".appveyor.yml":"053bc7a827d759dcdc6ef8a8299432c80bf5b2970167a0add2bbaa83e77d3c7d",".rustfmt.toml":"b484c99708d8cdb01be0ef680a15b8897112942d041fc656fc5622816b6b3412",".travis.yml":"8a452fdc9dc79c68f42917887eb9c71218511bb57f6ca28e9e0fbef6cec952f4","CODE_OF_CONDUCT.md":"902d5357af363426631d907e641e220b3ec89039164743f8442b3f120479b7cf","Cargo.toml":"12d1eb7b6c5a4536aa08a2c5bf2ee9d44192407b4df56376e28a3f878572b63f","LICENSE":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30","README.md":"601d14735f759e16c3295980aa188a1ab8c3accfde40cec288fb580adc419811","examples/README.md":"143767fc145bf167ce269a65138cb3f7086cb715b8bc4f73626da82966e646f4","examples/iterator.rs":"4c38a8cea1dfecc79387b7f347a9938f2d3301b4d063dca4e940966650a777ec","examples/simple-store.rs":"cf3bf22bde53ceeb668736e5b8d8c9193a7599d5f4f701dc864ebfddc1a5cc86","run-all-examples.sh":"7f9d11d01017f77e1c9d26e3e82dfca8c6930deaec85e864458e33a7fa267de0","src/backend.rs":"5f7972a3d16f66320c5b44e28835c39a524cb268949f160a8e7607f6386dc123","src/backend/common.rs":"43e12f967ad73209c8c44da133765395eab661a7ae25d4db05acabade6c8bc9c","src/backend/impl_lmdb.rs":"2fa6da1aff5bbfc9915e6d14117ddb8580470d420f9167bcecd282f9afb3178d","src/backend/impl_lmdb/cursor.rs":"77a7611f8638c5358f74f79cccde18581d3f6078f72c3a8419abde144791bfdf","src/backend/impl_lmdb/database.rs":"c52ab76a4389c525a998eef0302e709d57a22f2627a967b2246e98ae15f4a999","src/backend/impl_lmdb/environment.rs":"c121408c115d575d68b17680efa28bb6fda78d81b32e93011b0a48cb4183bb7c","src/backend/impl_lmdb/error.rs":"11fd2587ee12af6b779373da06cf4c46c7b2ed6e0d9a682a7a58c913f27f366b","src/backend/impl_lmdb/flags.rs":"90b06968029e8d3533a3f4579f2c6737699b0d6556c23ba1413540496847cf20","src/backend/impl_lmdb/info.rs":"e44d9100c0acc179263f41c70d2d139faa1b19efe6948c951958986c5fc90dbf","src/backend/impl_lmdb/iter.rs":"433b15f907a513d41c50538c35f469bf18f0e6aef1baa64bf522ab70d03bdd11","src/backend/impl_lmdb/stat.rs":"ec3100fee914cfe4805a7c588344c707c027bad7b37b43b7455baa0772cb64f9","src/backend/impl_lmdb/transaction.rs":"2d2f4a77b382f59eaaa683c5f34725b3f44babe1e21870448bfe94078e7d8138","src/backend/impl_safe.rs":"df796cd3b43bd973f213f7616c396ac4f006759bef3be8fd5f1792a82e26baeb","src/backend/impl_safe/cursor.rs":"7fb0d39ee8b2ea69f9ac8b733c1a8f487dfa814486821bc6a8bb1b656539942c","src/backend/impl_safe/database.rs":"935a551d127c61f561d3a0a86c4b671e4b025c162f1dcbd147b15e79d8ad32bb","src/backend/impl_safe/environment.rs":"74f8e55e91b1b3faff8cfe98c09672b5dcb27d54ae02770f9cb5998fd20364f3","src/backend/impl_safe/error.rs":"1056dd32609e2cd795b086eb59ef82e1c73185cecda548ff82577707f49370b8","src/backend/impl_safe/flags.rs":"6a116c08a56b468e57e97138dfd541267e99bcb5b3e7ebe00686ddc46498c580","src/backend/impl_safe/info.rs":"c9dc67d989394dd7a25e99681e4a25f19b4ca3992eb18b719fb89742fae635b2","src/backend/impl_safe/iter.rs":"b98b54b51b474cb1e31f90136b64871baff6c31261d60bd4f79faa329768f2e8","src/backend/impl_safe/snapshot.rs":"b5b60d5366a9c041444ca2924d12ef8bced410cb1f510edb8e7a8f3be845755a","src/backend/impl_safe/stat.rs":"77ea9937c2ff839cba4ed5597b4804550915d5c50fce0fc86133bf23cff49d95","src/backend/impl_safe/transaction.rs":"d658499c3850c6f63e57f9325d22cdc1a3eec26f6af5deda5cd1be40486692ad","src/backend/traits.rs":"18cbb99dcd5348b5fa561c5b78af4954fae8d07220c0d9e79416d72dd0df2568","src/bin/dump.rs":"58b4f36fa1a51dd42099ee94043c1af09c7fe1091307634bc6e777c77185fec5","src/bin/rand.rs":"ca357b19caa142d5016fc4400c8016067a88ca1766346a4f5f0177fc4958a6af","src/env.rs":"36b1dc9a5f1ceeb3a1ec4763eb522a7331d7f11937ee7
6605899954643ee8997","src/error.rs":"6f1d2dd84f366e2abe5949bde7c54cbfe2207d1bac326c58c8227ae27f301750","src/helpers.rs":"457923fd6b263cc749730e656ee887463cd13b3474d7f085e8779c0ae9d04420","src/lib.rs":"b6c0824064ddb18c8f08502c4f2530349273221850422322d74d110cf62ab271","src/manager.rs":"64d711d1764daada1f6d60b0c0409b88bca0e1c6e08964053036e4e3a0705366","src/migrate.rs":"674cee0d027fc2eed3b09cebe686c837a97725099c967d8c2f49d19e793e6bfd","src/readwrite.rs":"ce3b05e15d6df07ea283656d7ac1510c2119be9805e6fecf5b3a9e4832e83185","src/store.rs":"f6758b4ee9c61ec98468b778cc19bd91b66945819ce5bba11e7b3a8ffe85090e","src/store/integer.rs":"429b1e99386c975ce7c0878ff0beb6dc5646aacd6f13c45b4004bce4001847f6","src/store/integermulti.rs":"e63cfd0210866db094e5c91ae5775ffeee48e4b4f6e29c74be0994c1269c3af5","src/store/keys.rs":"584bf897df7a0a10dd2841cf31cb693124748757902374d215b4d7caf79baae5","src/store/keys/encodables.rs":"d8b5dd6f49cab4a44b6f638c473ad372f589669e7ef9bd6935aa960840b95063","src/store/keys/primitives.rs":"f714e54dd6507e048cf867deecd15a5a452298b8255575aa9eb1c67317fff5dd","src/store/multi.rs":"9cc56a7343fb1ffa2735aebe8cbd2425490f90a7effce87c3d4a0f0361b35728","src/store/single.rs":"c716252d8cd420d038ea8d8f1f5d2f67cbe91606b2e829a843a98332ec22a0fb","src/value.rs":"494deb8903a1ac0a2f884b0d0f851a836b18cd2c641024694888a08344fe2036","tests/env-all.rs":"846b727fafbe98cfac2ba710335b3d4ba0f2a5c7ff8698eb4bdc2cfc08b24315","tests/env-lmdb.rs":"db896ddb7c0ad5208dbf5296a19792bb2a4c24aa544077b831d542bdfbe24b93","tests/env-safe.rs":"fb3d14f66dd53bbfad33d39536e26a6830a12e12f774598ac0112b109b0e19eb","tests/integer-store.rs":"94d3bbb33be98cd917eae19812de6c735b9eb7bda2cba31e49d96c553d077e40","tests/manager.rs":"1fad3557ed12f1213547925d18e72b07c3eccee44b86810fafcc40790accfbfc","tests/multi-integer-store.rs":"d542e7f338cdac61e2e11b1f88581e0813ea453b219ee9ee78507894cd6359d8","tests/test_txn.rs":"e793af3a6a0d6c3f177f741562bd7fb02a44b165fb13e53d3b40009a5921925d"},"package":null} \ No newline at end of file 
+{"files":{"CODE_OF_CONDUCT.md":"902d5357af363426631d907e641e220b3ec89039164743f8442b3f120479b7cf","Cargo.lock":"617a73cbb78075706f477ab985ca9dd6cbd21514b4cbb6373b2a3fa81192350c","Cargo.toml":"353177937a7ba9d301966b2c13ac6e31d1135986df4aeff88b14fe98e88f7ca8","LICENSE":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30","README.md":"f80ce3b29b0cf54927d2bb4457e0b5691a2e6b56a5fc140c551c3cee68f242ae","examples/README.md":"143767fc145bf167ce269a65138cb3f7086cb715b8bc4f73626da82966e646f4","examples/iterator.rs":"844acdb7b81b95547502dddead0095dd98c4d6be63a7a765c97c354c02ee38d1","examples/simple-store.rs":"56c403307cd8a7644baa7831e7a400df0b2d2df25a854b2f3441abff78a54227","run-all-examples.sh":"7f9d11d01017f77e1c9d26e3e82dfca8c6930deaec85e864458e33a7fa267de0","src/backend.rs":"091d6c8aed4782f7e19079aaf2a25db0db02c969edc8058f882dde47137cc983","src/backend/common.rs":"3dd1b7fbe0c62b0731904358b11f82054c918044de6271c502fb0d38b813b67d","src/backend/impl_lmdb.rs":"2ad9749017613defe11d854480d6d7b3db04ec5d2f43f099be30d01da2c59a16","src/backend/impl_lmdb/arch_migrator.rs":"eeaafcb328f7b9e6deb24d5f8ac7c27d114a8e08bdfcc3f2df588178ee234f83","src/backend/impl_lmdb/arch_migrator_error.rs":"ee15d7bee9e12a037e6b7423acf05040672913e0d86ff632d826eaaa3188eecb","src/backend/impl_lmdb/cursor.rs":"77a7611f8638c5358f74f79cccde18581d3f6078f72c3a8419abde144791bfdf","src/backend/impl_lmdb/database.rs":"c52ab76a4389c525a998eef0302e709d57a22f2627a967b2246e98ae15f4a999","src/backend/impl_lmdb/environment.rs":"cb2e60561c2e18b897286a5a0639aeda7bfbf9b7d2cca7160ad793f61a936898","src/backend/impl_lmdb/error.rs":"856782cb1ffda0c0cdd0f0c783762412db35b8b6b6a0aefaf36974fe169ec5a2","src/backend/impl_lmdb/flags.rs":"861144973a397372cc572999ba8f87a538f48d3b9d4f8233ad78ce76cd2d40b1","src/backend/impl_lmdb/info.rs":"e44d9100c0acc179263f41c70d2d139faa1b19efe6948c951958986c5fc90dbf","src/backend/impl_lmdb/iter.rs":"ba4fd8b287b785e2567dd819d52ce58cf5bd73096ac59675ac11b21a06885d8a","src/backend/impl_lmdb/stat.rs":"ec3100fee914cfe4805a7c588344c707c027bad7b37b43b7455baa0772cb64f9","src/backend/impl_lmdb/transaction.rs":"5ecf5c86148e7c2cc62f89abb1e571f6f355989d6bb48af44a3435e5222260e7","src/backend/impl_safe.rs":"df796cd3b43bd973f213f7616c396ac4f006759bef3be8fd5f1792a82e26baeb","src/backend/impl_safe/cursor.rs":"7fb0d39ee8b2ea69f9ac8b733c1a8f487dfa814486821bc6a8bb1b656539942c","src/backend/impl_safe/database.rs":"7b03bbe7ec8183af06f2376028049ad638e8e778b9686364e13f06a63f7102c4","src/backend/impl_safe/environment.rs":"5c88f15d926e5c0fbbc3ea4b6a0bd1b5d7c0bc221828228b0e3f921898e5861c","src/backend/impl_safe/error.rs":"5a41b7b8cc059abd485c8f740593e35d2d3a4e90466995e954711113f79da200","src/backend/impl_safe/flags.rs":"8775cfab62a78466184310bffb7c0f16c51c4b6d941571348a1ac5ece76a6de0","src/backend/impl_safe/info.rs":"c9dc67d989394dd7a25e99681e4a25f19b4ca3992eb18b719fb89742fae635b2","src/backend/impl_safe/iter.rs":"b98b54b51b474cb1e31f90136b64871baff6c31261d60bd4f79faa329768f2e8","src/backend/impl_safe/snapshot.rs":"de83b5feffcb2603e64c4f53314c4b033fbc3289c88076be85cc33eec88b1d43","src/backend/impl_safe/stat.rs":"77ea9937c2ff839cba4ed5597b4804550915d5c50fce0fc86133bf23cff49d95","src/backend/impl_safe/transaction.rs":"316174d204bab807ac317cf031c1b9e431dc216f231e705acd80ecc060a04616","src/backend/traits.rs":"e1991586760ca12fe34fa6dedb3e936ad26260dce3b3b173da9712b28d12462c","src/bin/dump.rs":"78929424ec2e9d9f155e4eab9b118a6f478caae422db734302fa254a816e5be1","src/bin/rand.rs":"e3a2da9bb449aa9b54e8930c2a54c8a509300283a612360802f9182ae9db5ce4","src
/env.rs":"ce4efb94152e47443f1e01789289cdb16a7d92c16e44296d380bd94b0154c744","src/error.rs":"4c519b489057626fe6ce48453cd4fe4b5e14a6c6af2ffb6a764767a438157aab","src/helpers.rs":"2565e271d6edac3e2551d9fdede00a4348c98ddd2df6d95ef08112ced4397f28","src/lib.rs":"f9dbcc6eb3ea169685c30cb6cf34600c5a69e68006cdab51c56e5c7f2f1a60dd","src/manager.rs":"8f6db0298168c57c0dc08b8bd942399181e144719bd93ad9ae385ee0fdc5fae2","src/migrator.rs":"60322b5e331d11e368f2f3e15103045fc514bb0c3d5e19e22d9294dee87e63ed","src/readwrite.rs":"d4296a27458119c47275b230e0d94740c249e05921b8ac7ecbae4c91c92bca0a","src/store.rs":"f6758b4ee9c61ec98468b778cc19bd91b66945819ce5bba11e7b3a8ffe85090e","src/store/integer.rs":"d72ffc052bc3f3d91987ae4afaae4fca819f0ffa7155c83c64b78eb6081055a3","src/store/integermulti.rs":"b807b896582dca59d341a99cd5c7539ea8cebbffadc85072c81dde1f15d0ee43","src/store/keys.rs":"584bf897df7a0a10dd2841cf31cb693124748757902374d215b4d7caf79baae5","src/store/keys/encodables.rs":"d8b5dd6f49cab4a44b6f638c473ad372f589669e7ef9bd6935aa960840b95063","src/store/keys/primitives.rs":"f714e54dd6507e048cf867deecd15a5a452298b8255575aa9eb1c67317fff5dd","src/store/multi.rs":"6337401b68ac61022e4f1668764cd7d4fa00357653db488f61eb7d3ed5424145","src/store/single.rs":"3dc8cab214af5169cb1e34072621e55e40043da0ef6609138a9df1d3f1415a3b","src/value.rs":"4ccf8de44934b8c1baaff29b7993e6c442ecfa2380e73ee37d2eca5aad310a60","tests/env-all.rs":"3ad08161ae79e793241180b0f716f2e9981504cf24a9f348de958944b9e54653","tests/env-lmdb.rs":"8f48bd097f1b18fc9e61518d82086246853ff9cd965241ccf765371e7e668273","tests/env-migration.rs":"a71525fec63a37e1d2db098ba13fbc42e631fc653deabaee47f5856da45477a8","tests/env-safe.rs":"4b768a29f68f6d832204e0130bcd058bf1d9f684641238e5f713c3fc7e908ddf","tests/integer-store.rs":"2deaeee18ea945f54ef5ba7d85917e13e56e46b7af5de03f678f1c4a99d67292","tests/manager.rs":"2e825363cbb322e91ecfa9ff2e6ea2f2ead1603ea0d2b956bbe328a50fa5c846","tests/multi-integer-store.rs":"e53f4753fa3fd8891404048aa533fafe3c1d58230adf1a1a23d31d3c421c82b2","tests/test_txn.rs":"4ff987baab7c29db32d472e47279c06832663aa10c67268b23639e96f76a4dcd"},"package":"e97d1b6321740ce36d77d67d22ff84ac8a996cf69dbd0727b8bcae52f1c98aaa"} \ No newline at end of file diff --git a/third_party/rust/rkv/.rustfmt.toml b/third_party/rust/rkv/.rustfmt.toml deleted file mode 100644 index 377a00864c1e..000000000000 --- a/third_party/rust/rkv/.rustfmt.toml +++ /dev/null @@ -1,4 +0,0 @@ -imports_layout = "Vertical" -max_width = 120 -match_block_trailing_comma = true -use_small_heuristics = "Off" diff --git a/third_party/rust/rkv/.travis.yml b/third_party/rust/rkv/.travis.yml deleted file mode 100644 index 374cabb72946..000000000000 --- a/third_party/rust/rkv/.travis.yml +++ /dev/null @@ -1,48 +0,0 @@ -language: rust -sudo: false - -cache: cargo - -rust: - - 1.37.0 - - stable - - beta - - nightly - -os: - - linux - - osx - -matrix: - allow_failures: - - rust: nightly - fast_finish: true - -before_script: - # We install a known-to-have-rustfmt version of the nightly toolchain - # in order to run the nightly version of rustfmt, which supports rules - # that we depend upon. When updating, pick a suitable nightly version - # from https://rust-lang.github.io/rustup-components-history/ - - rustup toolchain install nightly-2019-09-11 - - rustup component add rustfmt --toolchain nightly-2019-09-11 - - rustup component add clippy --toolchain nightly-2019-09-11 - # Use official clang in order to test out building on osx. 
- - if [[ "$TRAVIS_OS_NAME" = "osx" ]]; then - brew update; - brew install llvm; - export PATH="/usr/local/opt/llvm/bin:$PATH"; - export LDFLAGS="-L/usr/local/opt/llvm/lib"; - export CPPFLAGS="-I/usr/local/opt/llvm/include"; - fi - -script: - - cargo +nightly-2019-09-11 fmt --all -- --check - - CC="clang" cargo +nightly-2019-09-11 clippy --all-features -- -D warnings - - cargo build --verbose - - export RUST_BACKTRACE=1 - - cargo test --all --verbose - - cargo test --lib --no-default-features --verbose - - cargo test --lib --no-default-features --features "db-dup-sort" --verbose - - cargo test --lib --no-default-features --features "db-int-key" --verbose - - cargo test --release --all --verbose - - ./run-all-examples.sh diff --git a/third_party/rust/rkv/Cargo.lock b/third_party/rust/rkv/Cargo.lock new file mode 100644 index 000000000000..392fe97fcf90 --- /dev/null +++ b/third_party/rust/rkv/Cargo.lock @@ -0,0 +1,492 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +[[package]] +name = "addr2line" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b6a2d3371669ab3ca9797670853d61402b03d0b4b9ebf33d677dfa720203072" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e" + +[[package]] +name = "arrayref" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" + +[[package]] +name = "autocfg" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" + +[[package]] +name = "backtrace" +version = "0.3.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46254cf2fdcdf1badb5934448c1bcbe046a56537b3987d96c51a7afc5d03f293" +dependencies = [ + "addr2line", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] + +[[package]] +name = "bincode" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f30d3a39baa26f9651f17b375061f3233dde33424a8b72b0dbe93a68a0bc896d" +dependencies = [ + "byteorder", + "serde", +] + +[[package]] +name = "bitflags" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" + +[[package]] +name = "byteorder" +version = "1.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" + +[[package]] +name = "cc" +version = "1.0.58" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9a06fb2e53271d7c279ec1efea6ab691c35a2ae67ec0d91d7acec0caf13b518" + +[[package]] +name = "cfg-if" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" + +[[package]] +name = "failure" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d32e9bd16cc02eae7db7ef620b392808b89f6a5e16bb3497d159c6b92a0f4f86" +dependencies = [ + "backtrace", + "failure_derive", +] + +[[package]] +name = "failure_derive" +version = "0.1.8" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] + +[[package]] +name = "getrandom" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "gimli" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aaf91faf136cb47367fa430cd46e37a788775e7fa104f8b4bcb3861dc389b724" + +[[package]] +name = "id-arena" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25a2bc672d1148e28034f176e01fffebb08b35768468cc954630da77a1449005" + +[[package]] +name = "idna" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" +dependencies = [ + "matches", + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.72" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9f8082297d534141b30c8d39e9b1773713ab50fdbe4ff30f750d063b3bfd701" + +[[package]] +name = "lmdb-rkv" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "447a296f7aca299cfbb50f4e4f3d49451549af655fb7215d7f8c0c3d64bad42b" +dependencies = [ + "bitflags", + "byteorder", + "libc", + "lmdb-rkv-sys", +] + +[[package]] +name = "lmdb-rkv-sys" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b27470ac25167b3afdfb6af8fcd3bc1be67de50ffbdaf4073378cfded6ae24a5" +dependencies = [ + "cc", + "libc", + "pkg-config", +] + +[[package]] +name = "log" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "matches" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" + +[[package]] +name = "miniz_oxide" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be0f75932c1f6cfae3c04000e40114adf955636e19040f9c0a2c380702aa1c7f" +dependencies = [ + "adler", +] + +[[package]] +name = "num-traits" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac267bcc07f48ee5f8935ab0d24f316fb722d7a1292e2913f0cc196b29ffd611" +dependencies = [ + "autocfg", +] + +[[package]] +name = "object" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ab52be62400ca80aa00285d25253d7f7c437b7375c4de678f5405d3afe82ca5" + +[[package]] +name = "ordered-float" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3741934be594d77de1c8461ebcbbe866f585ea616a9753aa78f2bdc69f0e4579" +dependencies = [ + "num-traits", +] + +[[package]] +name = "paste" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880" +dependencies = [ + "paste-impl", + "proc-macro-hack", +] + +[[package]] +name = "paste-impl" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6" +dependencies = [ + "proc-macro-hack", +] + +[[package]] +name = "percent-encoding" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" + +[[package]] +name = "pkg-config" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d36492546b6af1463394d46f0c834346f31548646f6ba10849802c9c9a27ac33" + +[[package]] +name = "ppv-lite86" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea" + +[[package]] +name = "proc-macro-hack" +version = "0.5.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e0456befd48169b9f13ef0f0ad46d492cf9d2dbb918bcf38e01eed4ce3ec5e4" + +[[package]] +name = "proc-macro2" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "beae6331a816b1f65d04c45b078fd8e6c93e8071771f41b8163255bbd8d7c8fa" +dependencies = [ + "unicode-xid", +] + +[[package]] +name = "quote" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +dependencies = [ + "getrandom", + "libc", + "rand_chacha", + "rand_core", + "rand_hc", +] + +[[package]] +name = "rand_chacha" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" +dependencies = [ + "getrandom", +] + +[[package]] +name = "rand_hc" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" +dependencies = [ + "rand_core", +] + +[[package]] +name = "redox_syscall" +version = "0.1.57" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" + +[[package]] +name = "remove_dir_all" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +dependencies = [ + "winapi", +] + +[[package]] +name = "rkv" +version = "0.15.0" +dependencies = [ + "arrayref", + "bincode", + "bitflags", + "byteorder", + "failure", + "id-arena", + "lazy_static", + "lmdb-rkv", + "log", + "ordered-float", + "paste", + "serde", + "serde_derive", + "tempfile", + "url", + "uuid", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783" + +[[package]] +name = "serde" +version = "1.0.114" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5317f7588f0a5078ee60ef675ef96735a1442132dc645eb1d12c018620ed8cd3" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.114" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a0be94b04690fbaed37cddffc5c134bf537c8e3329d53e982fe04c374978f8e" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "syn" +version = "1.0.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "936cae2873c940d92e697597c5eee105fb570cd5689c695806f672883653349b" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + +[[package]] +name = "synstructure" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "unicode-xid", +] + +[[package]] +name = "tempfile" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" +dependencies = [ + "cfg-if", + "libc", + "rand", + "redox_syscall", + "remove_dir_all", + "winapi", +] + +[[package]] +name = "tinyvec" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53953d2d3a5ad81d9f844a32f14ebb121f50b650cd59d0ee2a07cf13c617efed" + +[[package]] +name = "unicode-bidi" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" +dependencies = [ + "matches", +] + +[[package]] +name = "unicode-normalization" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fb19cf769fa8c6a80a162df694621ebeb4dafb606470b2b2fce0be40a98a977" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "unicode-xid" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" + +[[package]] +name = "url" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "829d4a8476c35c9bf0bbce5a3b23f4106f79728039b726d292bb93bc106787cb" +dependencies = [ + "idna", + "matches", + "percent-encoding", +] + +[[package]] +name = "uuid" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fde2f6a4bea1d6e007c4ad38c6839fa71cbb63b6dbf5b595aa38dc9b1093c11" + +[[package]] +name = "wasi" +version = "0.9.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" diff --git a/third_party/rust/rkv/Cargo.toml b/third_party/rust/rkv/Cargo.toml index c6798ca68159..6eb3eebf9519 100644 --- a/third_party/rust/rkv/Cargo.toml +++ b/third_party/rust/rkv/Cargo.toml @@ -1,49 +1,88 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + [package] -name = "rkv" -version = "0.11.1" -authors = ["Richard Newman ", "Nan Jiang ", "Myk Melez ", "Victor Porof "] edition = "2018" -license = "Apache-2.0" -description = "a simple, humane, typed Rust interface to LMDB" -documentation = "https://docs.rs/rkv" +name = "rkv" +version = "0.15.0" +authors = ["Richard Newman ", "Nan Jiang ", "Myk Melez ", "Victor Porof "] +exclude = ["/tests/envs/*"] +description = "A simple, humane, typed key-value storage solution" homepage = "https://github.com/mozilla/rkv" -repository = "https://github.com/mozilla/rkv" +documentation = "https://docs.rs/rkv" readme = "README.md" keywords = ["lmdb", "database", "storage"] categories = ["database"] -exclude = ["/tests/envs/*"] +license = "Apache-2.0" +repository = "https://github.com/mozilla/rkv" +[dependencies.arrayref] +version = "0.3" + +[dependencies.bincode] +version = "1.0" + +[dependencies.bitflags] +version = "1" + +[dependencies.byteorder] +version = "1" + +[dependencies.failure] +version = "0.1" +features = ["derive"] +default_features = false + +[dependencies.id-arena] +version = "2.2" + +[dependencies.lazy_static] +version = "1.0" + +[dependencies.lmdb-rkv] +version = "0.14" + +[dependencies.log] +version = "0.4" + +[dependencies.ordered-float] +version = "1.0" + +[dependencies.paste] +version = "0.1" + +[dependencies.serde] +version = "1.0" +features = ["derive", "rc"] + +[dependencies.serde_derive] +version = "1.0" + +[dependencies.url] +version = "2.0" + +[dependencies.uuid] +version = "0.8" +[dev-dependencies.byteorder] +version = "1" + +[dev-dependencies.tempfile] +version = "3" [features] -default = ["db-dup-sort", "db-int-key"] backtrace = ["failure/backtrace", "failure/std"] db-dup-sort = [] db-int-key = [] +default = ["db-dup-sort", "db-int-key"] +no-canonicalize-path = [] with-asan = ["lmdb-rkv/with-asan"] with-fuzzer = ["lmdb-rkv/with-fuzzer"] with-fuzzer-no-link = ["lmdb-rkv/with-fuzzer-no-link"] - -[dependencies] -arrayref = "0.3" -bincode = "1.0" -bitflags = "1" -byteorder = "1" -id-arena = "2.2" -lazy_static = "1.0" -lmdb-rkv = "0.14" -log = "0.4" -ordered-float = "1.0" -serde = { version = "1.0", features = ["derive", "rc"] } -serde_derive = "1.0" -url = "2.0" -uuid = "0.8" - -# Get rid of failure's dependency on backtrace. Eventually -# backtrace will move into Rust core, but we don't need it here. 
-[dependencies.failure] -version = "0.1" -default_features = false -features = ["derive"] - -[dev-dependencies] -byteorder = "1" -tempfile = "3" diff --git a/third_party/rust/rkv/README.md b/third_party/rust/rkv/README.md index 4627b10e4005..59383a4d3491 100644 --- a/third_party/rust/rkv/README.md +++ b/third_party/rust/rkv/README.md @@ -9,8 +9,6 @@ The [rkv Rust crate](https://crates.io/crates/rkv) is a simple, humane, typed ke ## ⚠️ Warning ⚠️ -The LMDB backend is currently unstable and crash-prone. We're attempting to fix these crashes in bugs [1538539](https://bugzilla.mozilla.org/show_bug.cgi?id=1538539), [1538541](https://bugzilla.mozilla.org/show_bug.cgi?id=1538541) and [1550174](https://bugzilla.mozilla.org/show_bug.cgi?id=1550174). - To use rkv in production/release environments at Mozilla, you may do so with the "SafeMode" backend, for example: ```rust @@ -23,9 +21,9 @@ let shared_rkv = manager.get_or_create(path, Rkv::new::).unwrap(); ... ``` -The "SafeMode` backend performs well, with two caveats: the entire database is stored in memory, and write transactions are synchronously written to disk on commit. +The "SafeMode" backend performs well, with two caveats: the entire database is stored in memory, and write transactions are synchronously written to disk (only on commit). -In the future, it will be advisable to switch to a different backend with better performance guarantees. We're working on either fixing the LMDB crashes, or offering more choices of backend engines (e.g. SQLite). +In the future, it will be advisable to switch to a different backend with better performance guarantees. We're working on either fixing some LMDB crashes, or offering more choices of backend engines (e.g. SQLite). ## Use @@ -49,8 +47,7 @@ There are several features that you can opt-in and out of when using rkv: By default, `db-dup-sort` and `db-int-key` features offer high level database APIs which allow multiple values per key, and optimizations around integer-based keys respectively. Opt out of these default features when specifying the rkv dependency in your Cargo.toml file to disable them; doing so avoids a certain amount of overhead required to support them. -If you specify the `backtrace` feature, backtraces will be enabled in "failure" -errors. This feature is disabled by default. +If you specify the `backtrace` feature, backtraces will be enabled in "failure" errors. This feature is disabled by default. To aid fuzzing efforts, `with-asan`, `with-fuzzer`, and `with-fuzzer-no-link` configure the build scripts responsible with compiling the underlying backing engines (e.g. LMDB) to build with these LLMV features enabled. Please refer to the official LLVM/Clang documentation on them for more informatiuon. These features are also disabled by default. diff --git a/third_party/rust/rkv/examples/iterator.rs b/third_party/rust/rkv/examples/iterator.rs index 8aad8d460f06..6ae060588fcf 100644 --- a/third_party/rust/rkv/examples/iterator.rs +++ b/third_party/rust/rkv/examples/iterator.rs @@ -7,17 +7,19 @@ //! //! 
cargo run --example iterator -use std::fs; -use std::str; +use std::{ + fs, + str, +}; use tempfile::Builder; -use rkv::backend::{ - Lmdb, - LmdbDatabase, - LmdbEnvironment, -}; use rkv::{ + backend::{ + Lmdb, + LmdbDatabase, + LmdbEnvironment, + }, Manager, Rkv, SingleStore, diff --git a/third_party/rust/rkv/examples/simple-store.rs b/third_party/rust/rkv/examples/simple-store.rs index d2c1ff1060fe..620181d25a7c 100644 --- a/third_party/rust/rkv/examples/simple-store.rs +++ b/third_party/rust/rkv/examples/simple-store.rs @@ -11,14 +11,14 @@ use std::fs; use tempfile::Builder; -use rkv::backend::{ - BackendStat, - Lmdb, - LmdbDatabase, - LmdbEnvironment, - LmdbRwTransaction, -}; use rkv::{ + backend::{ + BackendStat, + Lmdb, + LmdbDatabase, + LmdbEnvironment, + LmdbRwTransaction, + }, Manager, Rkv, StoreOptions, @@ -35,7 +35,7 @@ fn getput<'w, 's>(store: MultiStore, writer: &'w mut Writer, ids: &'s mut Vec`, where `MigrateError` -//! is an enum whose variants identify specific kinds of migration failures. +//! Both `Migrator::new()` and `migrate()` return a `MigrateResult` that is either an +//! `Ok()` result or an `Err`, where `MigrateError` is an enum whose +//! variants identify specific kinds of migration failures. -pub use crate::error::MigrateError; -use bitflags::bitflags; -use byteorder::{ - LittleEndian, - ReadBytesExt, -}; -use lmdb::{ - DatabaseFlags, - Environment, - Transaction, - WriteFlags, -}; use std::{ collections::{ BTreeMap, @@ -92,12 +77,25 @@ use std::{ str, }; +use bitflags::bitflags; +use byteorder::{ + LittleEndian, + ReadBytesExt, +}; +use lmdb::{ + DatabaseFlags, + Environment, + Transaction, + WriteFlags, +}; + +pub use super::arch_migrator_error::MigrateError; + const PAGESIZE: u16 = 4096; -// The magic number is 0xBEEFC0DE, which is 0xDEC0EFBE in little-endian. -// It appears at offset 12 on 32-bit systems and 16 on 64-bit systems. -// We don't support big-endian migration, but presumably we could do so -// by detecting the order of the bytes. +// The magic number is 0xBEEFC0DE, which is 0xDEC0EFBE in little-endian. It appears at +// offset 12 on 32-bit systems and 16 on 64-bit systems. We don't support big-endian +// migration, but presumably we could do so by detecting the order of the bytes. const MAGIC: [u8; 4] = [0xDE, 0xC0, 0xEF, 0xBE]; pub type MigrateResult = Result; @@ -126,9 +124,8 @@ bitflags! { } } -// The bit depth of the executable that created an LMDB environment. -// The Migrator determines this automatically based on the location of -// the magic number in the data.mdb file. +// The bit depth of the executable that created an LMDB environment. The Migrator +// determines this automatically based on the location of the magic number in data.mdb. #[derive(Clone, Copy, PartialEq)] enum Bits { U32, @@ -369,8 +366,8 @@ impl Page { } fn parse_leaf_node(cursor: &mut Cursor<&[u8]>, bits: Bits) -> MigrateResult { - // The order of the mn_lo and mn_hi fields is endian-dependent and would - // be reversed in an LMDB environment created on a big-endian system. + // The order of the mn_lo and mn_hi fields is endian-dependent and would be + // reversed in an LMDB environment created on a big-endian system. 
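The comment above explains that the arch migrator infers whether a 32-bit or 64-bit build produced `data.mdb` from where the little-endian magic `0xDEC0EFBE` appears (offset 12 vs. 16). A simplified, self-contained sketch of that probe; this is not the module's actual header parser, and `detect_bits` is a hypothetical helper:

```rust
use std::io::{Cursor, Read, Seek, SeekFrom};

const MAGIC: [u8; 4] = [0xDE, 0xC0, 0xEF, 0xBE];

// Probe the two possible magic-number offsets and report the matching bit depth.
fn detect_bits<R: Read + Seek>(file: &mut R) -> Option<u8> {
    let mut buf = [0u8; 4];
    for &(offset, bits) in &[(12u64, 32u8), (16u64, 64u8)] {
        file.seek(SeekFrom::Start(offset)).ok()?;
        if file.read_exact(&mut buf).is_ok() && buf == MAGIC {
            return Some(bits);
        }
    }
    None
}

fn main() {
    // A fake 64-bit header: 16 bytes of padding, then the little-endian magic.
    let mut header = vec![0u8; 16];
    header.extend_from_slice(&MAGIC);
    assert_eq!(detect_bits(&mut Cursor::new(header)), Some(64));
}
```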
let mn_lo = cursor.read_u16::()?; let mn_hi = cursor.read_u16::()?; @@ -385,7 +382,6 @@ impl Page { let mv_size = Self::leaf_node_size(mn_lo, mn_hi); if mn_flags.contains(NodeFlags::BIGDATA) { let overflow_pgno = cursor.read_uint::(bits.size())?; - Ok(LeafNode::BigData { mn_lo, mn_hi, @@ -402,7 +398,6 @@ impl Page { let mut cursor = std::io::Cursor::new(&value[..]); let db = Database::new(&mut cursor, bits)?; validate_page_num(db.md_root, bits)?; - Ok(LeafNode::SubData { mn_lo, mn_hi, @@ -417,7 +412,6 @@ impl Page { let start = usize::try_from(cursor.position())?; let end = usize::try_from(cursor.position() + u64::from(mv_size))?; let value = cursor.get_ref()[start..end].to_vec(); - Ok(LeafNode::Regular { mn_lo, mn_hi, @@ -449,15 +443,15 @@ impl Page { } fn parse_branch_node(cursor: &mut Cursor<&[u8]>, bits: Bits) -> MigrateResult { - // The order of the mn_lo and mn_hi fields is endian-dependent and would - // be reversed in an LMDB environment created on a big-endian system. + // The order of the mn_lo and mn_hi fields is endian-dependent and would be + // reversed in an LMDB environment created on a big-endian system. let mn_lo = cursor.read_u16::()?; let mn_hi = cursor.read_u16::()?; let mn_flags = cursor.read_u16::()?; - // Branch nodes overload the mn_lo, mn_hi, and mn_flags fields - // to store the page number, so we derive the number from those fields. + // Branch nodes overload the mn_lo, mn_hi, and mn_flags fields to store the page + // number, so we derive the number from those fields. let mp_pgno = Self::branch_node_page_num(mn_lo, mn_hi, mn_flags, bits); let mn_ksize = cursor.read_u16::()?; @@ -502,10 +496,10 @@ pub struct Migrator { } impl Migrator { - /// Create a new Migrator for the LMDB environment at the given path. - /// This tries to open the data.mdb file in the environment and determine - /// the bit depth of the executable that created it, so it can fail - /// and return an Err if the file can't be opened or the depth determined. + /// Create a new Migrator for the LMDB environment at the given path. This tries to + /// open the data.mdb file in the environment and determine the bit depth of the + /// executable that created it, so it can fail and return an Err if the file can't be + /// opened or the depth determined. pub fn new(path: &Path) -> MigrateResult { let mut path = PathBuf::from(path); path.push("data.mdb"); @@ -533,20 +527,18 @@ impl Migrator { }) } - /// Dump the data in one of the databases in the LMDB environment. - /// If the `database` paremeter is None, then we dump the data in the main - /// database. If it's the name of a subdatabase, then we dump the data - /// in that subdatabase. + /// Dump the data in one of the databases in the LMDB environment. If the `database` + /// paremeter is None, then we dump the data in the main database. If it's the name + /// of a subdatabase, then we dump the data in that subdatabase. /// - /// Note that the output isn't identical to that of the mdb_dump utility, - /// since mdb_dump includes subdatabase key/value pairs when dumping - /// the main database, and those values are architecture-dependent, since - /// they contain pointer-sized data. 
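`Migrator::new()` and `dump()` as documented above could be exercised roughly as follows. This is a sketch written as if it sat alongside this module, so `Migrator` and `MigrateResult` are assumed to be in scope; the subdatabase name and output file names are illustrative:

```rust
use std::{fs::File, path::Path};

// `MigrateError` implements `From<io::Error>`, so `?` also covers `File::create`.
fn dump_env(env_dir: &Path, out_dir: &Path) -> MigrateResult<()> {
    let mut migrator = Migrator::new(env_dir)?;
    // None selects the main database; Some("...") selects a named subdatabase.
    migrator.dump(None, File::create(out_dir.join("main.dump"))?)?;
    migrator.dump(Some("example-subdb"), File::create(out_dir.join("sub.dump"))?)?;
    Ok(())
}
```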
- /// - /// If we wanted to support identical output, we could parameterize - /// inclusion of subdatabase pairs in get_pairs() and include them - /// when dumping data, while continuing to exclude them when migrating + /// Note that the output isn't identical to that of the `mdb_dump` utility, since + /// `mdb_dump` includes subdatabase key/value pairs when dumping the main database, + /// and those values are architecture-dependent, since they contain pointer-sized /// data. + /// + /// If we wanted to support identical output, we could parameterize inclusion of + /// subdatabase pairs in get_pairs() and include them when dumping data, while + /// continuing to exclude them when migrating data. pub fn dump(&mut self, database: Option<&str>, mut out: T) -> MigrateResult<()> { let meta_data = self.get_meta_data()?; let root_page_num = meta_data.mm_dbs.main.md_root; @@ -593,20 +585,18 @@ impl Migrator { Ok(()) } - /// Migrate all data in all of databases in the existing LMDB environment - /// to a new environment. This includes all key/value pairs in the main - /// database that aren't metadata about subdatabases and all key/value pairs - /// in all subdatabases. + /// Migrate all data in all of databases in the existing LMDB environment to a new + /// environment. This includes all key/value pairs in the main database that aren't + /// metadata about subdatabases and all key/value pairs in all subdatabases. /// - /// We also set the map size and maximum databases of the new environment - /// to their values for the existing environment. But we don't set - /// other metadata, and we don't check that the new environment is empty - /// before migrating data. + /// We also set the map size and maximum databases of the new environment to their + /// values for the existing environment. But we don't set other metadata, and we + /// don't check that the new environment is empty before migrating data. /// - /// Thus it's possible for this to overwrite existing data or fail - /// to migrate data if the new environment isn't empty. It's the consumer's - /// responsibility to ensure that data can be safely migrated to the new - /// environment. In general, this means that environment should be empty. + /// Thus it's possible for this to overwrite existing data or fail to migrate data if + /// the new environment isn't empty. It's the consumer's responsibility to ensure + /// that data can be safely migrated to the new environment. In general, this means + /// that environment should be empty. pub fn migrate(&mut self, dest: &Path) -> MigrateResult<()> { let meta_data = self.get_meta_data()?; let root_page_num = meta_data.mm_dbs.main.md_root; @@ -619,24 +609,23 @@ impl Migrator { .set_max_dbs(subdbs.len() as u32) .open(dest)?; - // Create the databases before we open a read-write transaction, - // since database creation requires its own read-write transaction, - // which would hang while awaiting completion of an existing one. + // Create the databases before we open a read-write transaction, since database + // creation requires its own read-write transaction, which would hang while + // awaiting completion of an existing one. env.create_db(None, meta_data.mm_dbs.main.md_flags)?; for (subdb_name, subdb_info) in &subdbs { env.create_db(Some(str::from_utf8(&subdb_name)?), subdb_info.md_flags)?; } - // Now open the read-write transaction that we'll use to migrate - // all the data. + // Now open the read-write transaction that we'll use to migrate all the data. 
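The `migrate()` documentation above can be turned into a small helper along these lines; again a sketch with `Migrator` and `MigrateResult` assumed in scope, and illustrative paths:

```rust
use std::{fs, path::Path};

fn port_env(old_env: &Path, new_env: &Path) -> MigrateResult<()> {
    // The destination directory must exist and, per the docs above, should be
    // empty; MigrateError: From<io::Error> lets `?` cover the filesystem call.
    fs::create_dir_all(new_env)?;

    let mut migrator = Migrator::new(old_env)?;
    // Copies the main database plus every subdatabase into the new environment.
    migrator.migrate(new_env)?;
    Ok(())
}
```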
let mut txn = env.begin_rw_txn()?; // Migrate the main database. let pairs = self.get_pairs(root_page)?; let db = env.open_db(None)?; for (key, value) in pairs { - // If we knew that the target database was empty, we could - // specify WriteFlags::APPEND to speed up the migration. + // If we knew that the target database was empty, we could specify + // WriteFlags::APPEND to speed up the migration. txn.put(db, &key, &value, WriteFlags::empty())?; } @@ -646,8 +635,8 @@ impl Migrator { let pairs = self.get_pairs(root_page)?; let db = env.open_db(Some(str::from_utf8(&subdb_name)?))?; for (key, value) in pairs { - // If we knew that the target database was empty, we could - // specify WriteFlags::APPEND to speed up the migration. + // If we knew that the target database was empty, we could specify + // WriteFlags::APPEND to speed up the migration. txn.put(db, &key, &value, WriteFlags::empty())?; } } @@ -716,9 +705,9 @@ impl Migrator { overflow_pgno, .. } => { - // XXX perhaps we could reduce memory consumption - // during a migration by waiting to read big data - // until it's time to write it to the new database. + // Perhaps we could reduce memory consumption during a + // migration by waiting to read big data until it's time + // to write it to the new database. let value = self.read_data( *overflow_pgno * u64::from(PAGESIZE) + page_header_size(self.bits), *mv_size as usize, @@ -728,16 +717,15 @@ impl Migrator { LeafNode::SubData { .. } => { - // We don't include subdatabase leaves in pairs, - // since there's no architecture-neutral - // representation of them, and in any case they're - // meta-data that should get recreated when we - // migrate the subdatabases themselves. + // We don't include subdatabase leaves in pairs, since + // there's no architecture-neutral representation of them, + // and in any case they're meta-data that should get + // recreated when we migrate the subdatabases themselves. // // If we wanted to create identical dumps to those - // produced by mdb_dump, however, we could allow - // consumers to specify that they'd like to include - // these records. + // produced by `mdb_dump`, however, we could allow + // consumers to specify that they'd like to include these + // records. }, }; } @@ -787,27 +775,18 @@ impl Migrator { #[cfg(test)] mod tests { - use super::MigrateResult; - use super::Migrator; - use crate::error::MigrateError; + use super::*; + + use std::{ + env, + fs, + mem::size_of, + }; + use lmdb::{ Environment, Error as LmdbError, }; - use std::{ - env, - fs::{ - self, - File, - }, - io::{ - Read, - Seek, - SeekFrom, - }, - mem::size_of, - path::PathBuf, - }; use tempfile::{ tempdir, tempfile, @@ -823,15 +802,17 @@ mod tests { loop { match ref_file.read(ref_buf) { Err(err) => panic!(err), - Ok(ref_len) => match new_file.read(new_buf) { - Err(err) => panic!(err), - Ok(new_len) => { - assert_eq!(ref_len, new_len); - if ref_len == 0 { - break; - }; - assert_eq!(ref_buf[0..ref_len], new_buf[0..new_len]); - }, + Ok(ref_len) => { + match new_file.read(new_buf) { + Err(err) => panic!(err), + Ok(new_len) => { + assert_eq!(ref_len, new_len); + if ref_len == 0 { + break; + }; + assert_eq!(ref_buf[0..ref_len], new_buf[0..new_len]); + }, + } }, } } @@ -1017,8 +998,8 @@ mod tests { // Compare the new dump file to the reference dump file. compare_files(&mut ref_dump_file, &mut new_dump_file)?; - // Overwrite the old env's files with the new env's files and confirm - // that it's now possible to open the old env with LMDB. 
+ // Overwrite the old env's files with the new env's files and confirm that it's now
+ // possible to open the old env with LMDB.
fs::copy(new_env.path().join("data.mdb"), old_env.path().join("data.mdb"))?;
fs::copy(new_env.path().join("lock.mdb"), old_env.path().join("lock.mdb"))?;
assert!(Environment::new().open(&old_env.path()).is_ok());
diff --git a/third_party/rust/rkv/src/backend/impl_lmdb/arch_migrator_error.rs b/third_party/rust/rkv/src/backend/impl_lmdb/arch_migrator_error.rs
new file mode 100644
index 000000000000..7b56f5e96b69
--- /dev/null
+++ b/third_party/rust/rkv/src/backend/impl_lmdb/arch_migrator_error.rs
@@ -0,0 +1,107 @@
+// Copyright 2018-2019 Mozilla
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+use std::{
+ io,
+ num,
+ str,
+};
+
+use failure::Fail;
+
+#[derive(Debug, Fail)]
+pub enum MigrateError {
+ #[fail(display = "database not found: {:?}", _0)]
+ DatabaseNotFound(String),
+
+ #[fail(display = "{}", _0)]
+ FromString(String),
+
+ #[fail(display = "couldn't determine bit depth")]
+ IndeterminateBitDepth,
+
+ #[fail(display = "I/O error: {:?}", _0)]
+ IoError(io::Error),
+
+ #[fail(display = "invalid DatabaseFlags bits")]
+ InvalidDatabaseBits,
+
+ #[fail(display = "invalid data version")]
+ InvalidDataVersion,
+
+ #[fail(display = "invalid magic number")]
+ InvalidMagicNum,
+
+ #[fail(display = "invalid NodeFlags bits")]
+ InvalidNodeBits,
+
+ #[fail(display = "invalid PageFlags bits")]
+ InvalidPageBits,
+
+ #[fail(display = "invalid page number")]
+ InvalidPageNum,
+
+ #[fail(display = "lmdb backend error: {}", _0)]
+ LmdbError(lmdb::Error),
+
+ #[fail(display = "string conversion error")]
+ StringConversionError,
+
+ #[fail(display = "TryFromInt error: {:?}", _0)]
+ TryFromIntError(num::TryFromIntError),
+
+ #[fail(display = "unexpected Page variant")]
+ UnexpectedPageVariant,
+
+ #[fail(display = "unexpected PageHeader variant")]
+ UnexpectedPageHeaderVariant,
+
+ #[fail(display = "unsupported PageHeader variant")]
+ UnsupportedPageHeaderVariant,
+
+ #[fail(display = "UTF8 error: {:?}", _0)]
+ Utf8Error(str::Utf8Error),
+}
+
+impl From<io::Error> for MigrateError {
+ fn from(e: io::Error) -> MigrateError {
+ MigrateError::IoError(e)
+ }
+}
+
+impl From<str::Utf8Error> for MigrateError {
+ fn from(e: str::Utf8Error) -> MigrateError {
+ MigrateError::Utf8Error(e)
+ }
+}
+
+impl From<num::TryFromIntError> for MigrateError {
+ fn from(e: num::TryFromIntError) -> MigrateError {
+ MigrateError::TryFromIntError(e)
+ }
+}
+
+impl From<&str> for MigrateError {
+ fn from(e: &str) -> MigrateError {
+ MigrateError::FromString(e.to_string())
+ }
+}
+
+impl From<String> for MigrateError {
+ fn from(e: String) -> MigrateError {
+ MigrateError::FromString(e)
+ }
+}
+
+impl From<lmdb::Error> for MigrateError {
+ fn from(e: lmdb::Error) -> MigrateError {
+ MigrateError::LmdbError(e)
+ }
+}
diff --git a/third_party/rust/rkv/src/backend/impl_lmdb/environment.rs b/third_party/rust/rkv/src/backend/impl_lmdb/environment.rs
index 78104c93cba2..e25678430c5f 100644
--- a/third_party/rust/rkv/src/backend/impl_lmdb/environment.rs
+++ 
b/third_party/rust/rkv/src/backend/impl_lmdb/environment.rs @@ -8,7 +8,15 @@ // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. -use std::path::Path; +use std::{ + fs, + path::{ + Path, + PathBuf, + }, +}; + +use lmdb::Error as LmdbError; use super::{ DatabaseFlagsImpl, @@ -23,93 +31,239 @@ use super::{ use crate::backend::traits::{ BackendEnvironment, BackendEnvironmentBuilder, + BackendInfo, + BackendIter, + BackendRoCursor, + BackendRoCursorTransaction, + BackendStat, }; #[derive(Debug, PartialEq, Eq, Copy, Clone)] -pub struct EnvironmentBuilderImpl(lmdb::EnvironmentBuilder); +pub struct EnvironmentBuilderImpl { + builder: lmdb::EnvironmentBuilder, + env_path_type: EnvironmentPathType, + env_lock_type: EnvironmentLockType, + env_db_type: EnvironmentDefaultDbType, + make_dir: bool, +} impl<'b> BackendEnvironmentBuilder<'b> for EnvironmentBuilderImpl { - type Error = ErrorImpl; type Environment = EnvironmentImpl; + type Error = ErrorImpl; type Flags = EnvironmentFlagsImpl; fn new() -> EnvironmentBuilderImpl { - EnvironmentBuilderImpl(lmdb::Environment::new()) + EnvironmentBuilderImpl { + builder: lmdb::Environment::new(), + env_path_type: EnvironmentPathType::SubDir, + env_lock_type: EnvironmentLockType::Lockfile, + env_db_type: EnvironmentDefaultDbType::SingleDatabase, + make_dir: false, + } } fn set_flags(&mut self, flags: T) -> &mut Self where T: Into, { - self.0.set_flags(flags.into().0); + let flags = flags.into(); + if flags.0 == lmdb::EnvironmentFlags::NO_SUB_DIR { + self.env_path_type = EnvironmentPathType::NoSubDir; + } + if flags.0 == lmdb::EnvironmentFlags::NO_LOCK { + self.env_lock_type = EnvironmentLockType::NoLockfile; + } + self.builder.set_flags(flags.0); self } fn set_max_readers(&mut self, max_readers: u32) -> &mut Self { - self.0.set_max_readers(max_readers); + self.builder.set_max_readers(max_readers); self } fn set_max_dbs(&mut self, max_dbs: u32) -> &mut Self { - self.0.set_max_dbs(max_dbs); + if max_dbs > 0 { + self.env_db_type = EnvironmentDefaultDbType::MultipleNamedDatabases + } + self.builder.set_max_dbs(max_dbs); self } fn set_map_size(&mut self, size: usize) -> &mut Self { - self.0.set_map_size(size); + self.builder.set_map_size(size); + self + } + + fn set_make_dir_if_needed(&mut self, make_dir: bool) -> &mut Self { + self.make_dir = make_dir; self } fn open(&self, path: &Path) -> Result { - self.0.open(path).map(EnvironmentImpl).map_err(ErrorImpl) + match self.env_path_type { + EnvironmentPathType::NoSubDir => { + if !path.is_file() { + return Err(ErrorImpl::UnsuitableEnvironmentPath(path.into())); + } + }, + EnvironmentPathType::SubDir => { + if !path.is_dir() { + if !self.make_dir { + return Err(ErrorImpl::UnsuitableEnvironmentPath(path.into())); + } + fs::create_dir_all(path)?; + } + }, + } + + self.builder.open(path).map_err(ErrorImpl::LmdbError).and_then(|lmdbenv| { + EnvironmentImpl::new(path, self.env_path_type, self.env_lock_type, self.env_db_type, lmdbenv) + }) } } +#[derive(Debug, PartialEq, Eq, Copy, Clone)] +pub enum EnvironmentPathType { + SubDir, + NoSubDir, +} + +#[derive(Debug, PartialEq, Eq, Copy, Clone)] +pub enum EnvironmentLockType { + Lockfile, + NoLockfile, +} + +#[derive(Debug, PartialEq, Eq, Copy, Clone)] +pub enum EnvironmentDefaultDbType { + SingleDatabase, + MultipleNamedDatabases, +} + #[derive(Debug)] -pub struct EnvironmentImpl(lmdb::Environment); +pub struct EnvironmentImpl { + path: PathBuf, + env_path_type: 
EnvironmentPathType, + env_lock_type: EnvironmentLockType, + env_db_type: EnvironmentDefaultDbType, + lmdbenv: lmdb::Environment, +} + +impl EnvironmentImpl { + pub(crate) fn new( + path: &Path, + env_path_type: EnvironmentPathType, + env_lock_type: EnvironmentLockType, + env_db_type: EnvironmentDefaultDbType, + lmdbenv: lmdb::Environment, + ) -> Result { + Ok(EnvironmentImpl { + path: path.to_path_buf(), + env_path_type, + env_lock_type, + env_db_type, + lmdbenv, + }) + } +} impl<'e> BackendEnvironment<'e> for EnvironmentImpl { - type Error = ErrorImpl; type Database = DatabaseImpl; + type Error = ErrorImpl; type Flags = DatabaseFlagsImpl; - type Stat = StatImpl; type Info = InfoImpl; type RoTransaction = RoTransactionImpl<'e>; type RwTransaction = RwTransactionImpl<'e>; + type Stat = StatImpl; + + fn get_dbs(&self) -> Result>, Self::Error> { + if self.env_db_type == EnvironmentDefaultDbType::SingleDatabase { + return Ok(vec![None]); + } + let db = self.lmdbenv.open_db(None).map(DatabaseImpl).map_err(ErrorImpl::LmdbError)?; + let reader = self.begin_ro_txn()?; + let cursor = reader.open_ro_cursor(&db)?; + let mut iter = cursor.into_iter(); + let mut store = vec![]; + while let Some(result) = iter.next() { + let (key, _) = result?; + let name = String::from_utf8(key.to_owned()).map_err(|_| ErrorImpl::LmdbError(lmdb::Error::Corrupted))?; + store.push(Some(name)); + } + Ok(store) + } fn open_db(&self, name: Option<&str>) -> Result { - self.0.open_db(name).map(DatabaseImpl).map_err(ErrorImpl) + self.lmdbenv.open_db(name).map(DatabaseImpl).map_err(ErrorImpl::LmdbError) } fn create_db(&self, name: Option<&str>, flags: Self::Flags) -> Result { - self.0.create_db(name, flags.0).map(DatabaseImpl).map_err(ErrorImpl) + self.lmdbenv.create_db(name, flags.0).map(DatabaseImpl).map_err(ErrorImpl::LmdbError) } fn begin_ro_txn(&'e self) -> Result { - self.0.begin_ro_txn().map(RoTransactionImpl).map_err(ErrorImpl) + self.lmdbenv.begin_ro_txn().map(RoTransactionImpl).map_err(ErrorImpl::LmdbError) } fn begin_rw_txn(&'e self) -> Result { - self.0.begin_rw_txn().map(RwTransactionImpl).map_err(ErrorImpl) + self.lmdbenv.begin_rw_txn().map(RwTransactionImpl).map_err(ErrorImpl::LmdbError) } fn sync(&self, force: bool) -> Result<(), Self::Error> { - self.0.sync(force).map_err(ErrorImpl) + self.lmdbenv.sync(force).map_err(ErrorImpl::LmdbError) } fn stat(&self) -> Result { - self.0.stat().map(StatImpl).map_err(ErrorImpl) + self.lmdbenv.stat().map(StatImpl).map_err(ErrorImpl::LmdbError) } fn info(&self) -> Result { - self.0.info().map(InfoImpl).map_err(ErrorImpl) + self.lmdbenv.info().map(InfoImpl).map_err(ErrorImpl::LmdbError) } fn freelist(&self) -> Result { - self.0.freelist().map_err(ErrorImpl) + self.lmdbenv.freelist().map_err(ErrorImpl::LmdbError) + } + + fn load_ratio(&self) -> Result, Self::Error> { + let stat = self.stat()?; + let info = self.info()?; + let freelist = self.freelist()?; + + let last_pgno = info.last_pgno() + 1; // pgno is 0 based. 
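The `load_ratio()` method being added here reports how full the memory map is: used pages (last allocated page minus freelist pages) over the total pages the map can hold. A self-contained restatement of that arithmetic, with illustrative numbers rather than values from a real environment:

```rust
fn load_ratio(last_pgno: usize, freelist: usize, map_size: usize, page_size: usize) -> Option<f32> {
    let last_pgno = last_pgno + 1; // pgno is 0-based
    let total_pgs = map_size / page_size; // pages the map can hold in total
    if freelist > last_pgno {
        return None; // the real code reports this as a corruption error
    }
    let used_pgs = last_pgno - freelist;
    Some(used_pgs as f32 / total_pgs as f32)
}

fn main() {
    // 1 MiB map, 4 KiB pages, 100 pages ever allocated, 20 of them back on the freelist.
    assert_eq!(load_ratio(99, 20, 1 << 20, 4096), Some(0.3125));
}
```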
+ let total_pgs = info.map_size() / stat.page_size(); + if freelist > last_pgno { + return Err(ErrorImpl::LmdbError(LmdbError::Corrupted)); + } + let used_pgs = last_pgno - freelist; + Ok(Some(used_pgs as f32 / total_pgs as f32)) } fn set_map_size(&self, size: usize) -> Result<(), Self::Error> { - self.0.set_map_size(size).map_err(ErrorImpl) + self.lmdbenv.set_map_size(size).map_err(ErrorImpl::LmdbError) + } + + fn get_files_on_disk(&self) -> Vec { + let mut store = vec![]; + + if self.env_path_type == EnvironmentPathType::NoSubDir { + // The option NO_SUB_DIR could change the default directory layout; therefore this should + // probably return the path used to create environment, along with the custom lockfile + // when available. + unimplemented!(); + } + + let mut db_filename = self.path.clone(); + db_filename.push("data.mdb"); + store.push(db_filename); + + if self.env_lock_type == EnvironmentLockType::Lockfile { + let mut lock_filename = self.path.clone(); + lock_filename.push("lock.mdb"); + store.push(lock_filename); + } + + store } } diff --git a/third_party/rust/rkv/src/backend/impl_lmdb/error.rs b/third_party/rust/rkv/src/backend/impl_lmdb/error.rs index f70d5e076db9..646e8f3fe3d2 100644 --- a/third_party/rust/rkv/src/backend/impl_lmdb/error.rs +++ b/third_party/rust/rkv/src/backend/impl_lmdb/error.rs @@ -8,32 +8,55 @@ // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. -use std::fmt; +use std::{ + fmt, + io, + path::PathBuf, +}; -use crate::backend::traits::BackendError; -use crate::error::StoreError; +use crate::{ + backend::traits::BackendError, + error::StoreError, +}; #[derive(Debug)] -pub struct ErrorImpl(pub(crate) lmdb::Error); +pub enum ErrorImpl { + LmdbError(lmdb::Error), + UnsuitableEnvironmentPath(PathBuf), + IoError(io::Error), +} impl BackendError for ErrorImpl {} impl fmt::Display for ErrorImpl { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - self.0.fmt(fmt) + match self { + ErrorImpl::LmdbError(e) => e.fmt(fmt), + ErrorImpl::UnsuitableEnvironmentPath(_) => write!(fmt, "UnsuitableEnvironmentPath"), + ErrorImpl::IoError(e) => e.fmt(fmt), + } } } impl Into for ErrorImpl { fn into(self) -> StoreError { - match self.0 { - lmdb::Error::NotFound => StoreError::KeyValuePairNotFound, - lmdb::Error::BadValSize => StoreError::KeyValuePairBadSize, - lmdb::Error::Invalid => StoreError::FileInvalid, - lmdb::Error::MapFull => StoreError::MapFull, - lmdb::Error::DbsFull => StoreError::DbsFull, - lmdb::Error::ReadersFull => StoreError::ReadersFull, - _ => StoreError::LmdbError(self.0), + match self { + ErrorImpl::LmdbError(lmdb::Error::Corrupted) => StoreError::DatabaseCorrupted, + ErrorImpl::LmdbError(lmdb::Error::NotFound) => StoreError::KeyValuePairNotFound, + ErrorImpl::LmdbError(lmdb::Error::BadValSize) => StoreError::KeyValuePairBadSize, + ErrorImpl::LmdbError(lmdb::Error::Invalid) => StoreError::FileInvalid, + ErrorImpl::LmdbError(lmdb::Error::MapFull) => StoreError::MapFull, + ErrorImpl::LmdbError(lmdb::Error::DbsFull) => StoreError::DbsFull, + ErrorImpl::LmdbError(lmdb::Error::ReadersFull) => StoreError::ReadersFull, + ErrorImpl::LmdbError(error) => StoreError::LmdbError(error), + ErrorImpl::UnsuitableEnvironmentPath(path) => StoreError::UnsuitableEnvironmentPath(path), + ErrorImpl::IoError(error) => StoreError::IoError(error), } } } + +impl From for ErrorImpl { + fn from(e: io::Error) -> ErrorImpl { + ErrorImpl::IoError(e) + } +} diff --git 
a/third_party/rust/rkv/src/backend/impl_lmdb/flags.rs b/third_party/rust/rkv/src/backend/impl_lmdb/flags.rs index c4012aae33dd..d4f19c8c9ff5 100644 --- a/third_party/rust/rkv/src/backend/impl_lmdb/flags.rs +++ b/third_party/rust/rkv/src/backend/impl_lmdb/flags.rs @@ -8,16 +8,18 @@ // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. -use crate::backend::common::{ - DatabaseFlags, - EnvironmentFlags, - WriteFlags, -}; -use crate::backend::traits::{ - BackendDatabaseFlags, - BackendEnvironmentFlags, - BackendFlags, - BackendWriteFlags, +use crate::backend::{ + common::{ + DatabaseFlags, + EnvironmentFlags, + WriteFlags, + }, + traits::{ + BackendDatabaseFlags, + BackendEnvironmentFlags, + BackendFlags, + BackendWriteFlags, + }, }; #[derive(Debug, Eq, PartialEq, Copy, Clone, Default)] @@ -86,9 +88,10 @@ impl Into for DatabaseFlags { DatabaseFlags::REVERSE_KEY => lmdb::DatabaseFlags::REVERSE_KEY, #[cfg(feature = "db-dup-sort")] DatabaseFlags::DUP_SORT => lmdb::DatabaseFlags::DUP_SORT, + #[cfg(feature = "db-dup-sort")] + DatabaseFlags::DUP_FIXED => lmdb::DatabaseFlags::DUP_FIXED, #[cfg(feature = "db-int-key")] DatabaseFlags::INTEGER_KEY => lmdb::DatabaseFlags::INTEGER_KEY, - DatabaseFlags::DUP_FIXED => lmdb::DatabaseFlags::DUP_FIXED, DatabaseFlags::INTEGER_DUP => lmdb::DatabaseFlags::INTEGER_DUP, DatabaseFlags::REVERSE_DUP => lmdb::DatabaseFlags::REVERSE_DUP, } diff --git a/third_party/rust/rkv/src/backend/impl_lmdb/iter.rs b/third_party/rust/rkv/src/backend/impl_lmdb/iter.rs index ff20e1ccc964..c7df66b0bbf8 100644 --- a/third_party/rust/rkv/src/backend/impl_lmdb/iter.rs +++ b/third_party/rust/rkv/src/backend/impl_lmdb/iter.rs @@ -36,6 +36,6 @@ impl<'i, C> BackendIter<'i> for IterImpl<'i, C> { #[allow(clippy::type_complexity)] fn next(&mut self) -> Option> { - self.iter.next().map(|e| e.map_err(ErrorImpl)) + self.iter.next().map(|e| e.map_err(ErrorImpl::LmdbError)) } } diff --git a/third_party/rust/rkv/src/backend/impl_lmdb/transaction.rs b/third_party/rust/rkv/src/backend/impl_lmdb/transaction.rs index ac41ea5b0e1f..d63c5cb4c5e1 100644 --- a/third_party/rust/rkv/src/backend/impl_lmdb/transaction.rs +++ b/third_party/rust/rkv/src/backend/impl_lmdb/transaction.rs @@ -27,11 +27,11 @@ use crate::backend::traits::{ pub struct RoTransactionImpl<'t>(pub(crate) lmdb::RoTransaction<'t>); impl<'t> BackendRoTransaction for RoTransactionImpl<'t> { - type Error = ErrorImpl; type Database = DatabaseImpl; + type Error = ErrorImpl; fn get(&self, db: &Self::Database, key: &[u8]) -> Result<&[u8], Self::Error> { - self.0.get(db.0, &key).map_err(ErrorImpl) + self.0.get(db.0, &key).map_err(ErrorImpl::LmdbError) } fn abort(self) { @@ -43,7 +43,7 @@ impl<'t> BackendRoCursorTransaction<'t> for RoTransactionImpl<'t> { type RoCursor = RoCursorImpl<'t>; fn open_ro_cursor(&'t self, db: &Self::Database) -> Result { - self.0.open_ro_cursor(db.0).map(RoCursorImpl).map_err(ErrorImpl) + self.0.open_ro_cursor(db.0).map(RoCursorImpl).map_err(ErrorImpl::LmdbError) } } @@ -51,34 +51,34 @@ impl<'t> BackendRoCursorTransaction<'t> for RoTransactionImpl<'t> { pub struct RwTransactionImpl<'t>(pub(crate) lmdb::RwTransaction<'t>); impl<'t> BackendRwTransaction for RwTransactionImpl<'t> { - type Error = ErrorImpl; type Database = DatabaseImpl; + type Error = ErrorImpl; type Flags = WriteFlagsImpl; fn get(&self, db: &Self::Database, key: &[u8]) -> Result<&[u8], Self::Error> { - self.0.get(db.0, &key).map_err(ErrorImpl) + self.0.get(db.0, 
&key).map_err(ErrorImpl::LmdbError) } fn put(&mut self, db: &Self::Database, key: &[u8], value: &[u8], flags: Self::Flags) -> Result<(), Self::Error> { - self.0.put(db.0, &key, &value, flags.0).map_err(ErrorImpl) + self.0.put(db.0, &key, &value, flags.0).map_err(ErrorImpl::LmdbError) } #[cfg(not(feature = "db-dup-sort"))] fn del(&mut self, db: &Self::Database, key: &[u8]) -> Result<(), Self::Error> { - self.0.del(db.0, &key, None).map_err(ErrorImpl) + self.0.del(db.0, &key, None).map_err(ErrorImpl::LmdbError) } #[cfg(feature = "db-dup-sort")] fn del(&mut self, db: &Self::Database, key: &[u8], value: Option<&[u8]>) -> Result<(), Self::Error> { - self.0.del(db.0, &key, value).map_err(ErrorImpl) + self.0.del(db.0, &key, value).map_err(ErrorImpl::LmdbError) } fn clear_db(&mut self, db: &Self::Database) -> Result<(), Self::Error> { - self.0.clear_db(db.0).map_err(ErrorImpl) + self.0.clear_db(db.0).map_err(ErrorImpl::LmdbError) } fn commit(self) -> Result<(), Self::Error> { - self.0.commit().map_err(ErrorImpl) + self.0.commit().map_err(ErrorImpl::LmdbError) } fn abort(self) { @@ -90,6 +90,6 @@ impl<'t> BackendRwCursorTransaction<'t> for RwTransactionImpl<'t> { type RoCursor = RoCursorImpl<'t>; fn open_ro_cursor(&'t self, db: &Self::Database) -> Result { - self.0.open_ro_cursor(db.0).map(RoCursorImpl).map_err(ErrorImpl) + self.0.open_ro_cursor(db.0).map(RoCursorImpl).map_err(ErrorImpl::LmdbError) } } diff --git a/third_party/rust/rkv/src/backend/impl_safe/database.rs b/third_party/rust/rkv/src/backend/impl_safe/database.rs index 6fd4edc68cd5..9e883d3cfdd6 100644 --- a/third_party/rust/rkv/src/backend/impl_safe/database.rs +++ b/third_party/rust/rkv/src/backend/impl_safe/database.rs @@ -14,8 +14,10 @@ use serde_derive::{ Serialize, }; -use super::snapshot::Snapshot; -use super::DatabaseFlagsImpl; +use super::{ + snapshot::Snapshot, + DatabaseFlagsImpl, +}; use crate::backend::traits::BackendDatabase; #[derive(Debug, Eq, PartialEq, Copy, Clone, Hash)] diff --git a/third_party/rust/rkv/src/backend/impl_safe/environment.rs b/third_party/rust/rkv/src/backend/impl_safe/environment.rs index 7b39de061cbb..4334977f1c95 100644 --- a/third_party/rust/rkv/src/backend/impl_safe/environment.rs +++ b/third_party/rust/rkv/src/backend/impl_safe/environment.rs @@ -8,18 +8,20 @@ // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. 
-use std::borrow::Cow; -use std::collections::HashMap; -use std::fs; -use std::path::{ - Path, - PathBuf, -}; -use std::sync::Arc; -use std::sync::{ - RwLock, - RwLockReadGuard, - RwLockWriteGuard, +use std::{ + borrow::Cow, + collections::HashMap, + fs, + path::{ + Path, + PathBuf, + }, + sync::{ + Arc, + RwLock, + RwLockReadGuard, + RwLockWriteGuard, + }, }; use id_arena::Arena; @@ -52,11 +54,12 @@ pub struct EnvironmentBuilderImpl { max_readers: Option, max_dbs: Option, map_size: Option, + make_dir: bool, } impl<'b> BackendEnvironmentBuilder<'b> for EnvironmentBuilderImpl { - type Error = ErrorImpl; type Environment = EnvironmentImpl; + type Error = ErrorImpl; type Flags = EnvironmentFlagsImpl; fn new() -> EnvironmentBuilderImpl { @@ -65,6 +68,7 @@ impl<'b> BackendEnvironmentBuilder<'b> for EnvironmentBuilderImpl { max_readers: None, max_dbs: None, map_size: None, + make_dir: false, } } @@ -91,7 +95,20 @@ impl<'b> BackendEnvironmentBuilder<'b> for EnvironmentBuilderImpl { self } + fn set_make_dir_if_needed(&mut self, make_dir: bool) -> &mut Self { + self.make_dir = make_dir; + self + } + fn open(&self, path: &Path) -> Result { + // Technically NO_SUB_DIR should change these checks here, but they're both currently + // unimplemented with this storage backend. + if !path.is_dir() { + if !self.make_dir { + return Err(ErrorImpl::UnsuitableEnvironmentPath(path.into())); + } + fs::create_dir_all(path)?; + } let mut env = EnvironmentImpl::new(path, self.flags, self.max_readers, self.max_dbs, self.map_size)?; env.read_from_disk()?; Ok(env) @@ -188,13 +205,18 @@ impl EnvironmentImpl { } impl<'e> BackendEnvironment<'e> for EnvironmentImpl { - type Error = ErrorImpl; type Database = DatabaseImpl; + type Error = ErrorImpl; type Flags = DatabaseFlagsImpl; - type Stat = StatImpl; type Info = InfoImpl; type RoTransaction = RoTransactionImpl<'e>; type RwTransaction = RwTransactionImpl<'e>; + type Stat = StatImpl; + + fn get_dbs(&self) -> Result>, Self::Error> { + let dbs = self.dbs.read().map_err(|_| ErrorImpl::EnvPoisonError)?; + Ok(dbs.keys().map(|key| key.to_owned()).collect()) + } fn open_db(&self, name: Option<&str>) -> Result { if Arc::strong_count(&self.ro_txns) > 1 { @@ -215,7 +237,7 @@ impl<'e> BackendEnvironment<'e> for EnvironmentImpl { let key = name.map(String::from); let mut dbs = self.dbs.write().map_err(|_| ErrorImpl::EnvPoisonError)?; let mut arena = self.arena.write().map_err(|_| ErrorImpl::EnvPoisonError)?; - if dbs.keys().filter_map(|k| k.as_ref()).count() >= self.max_dbs { + if dbs.keys().filter_map(|k| k.as_ref()).count() >= self.max_dbs && name != None { return Err(ErrorImpl::DbsFull); } let id = dbs.entry(key).or_insert_with(|| DatabaseImpl(arena.alloc(Database::new(Some(flags), None)))); @@ -247,8 +269,21 @@ impl<'e> BackendEnvironment<'e> for EnvironmentImpl { unimplemented!() } + fn load_ratio(&self) -> Result, Self::Error> { + warn!("`load_ratio()` is irrelevant for this storage backend."); + Ok(None) + } + fn set_map_size(&self, size: usize) -> Result<(), Self::Error> { - warn!("Ignoring `set_map_size({})`", size); + warn!("`set_map_size({})` is ignored by this storage backend.", size); Ok(()) } + + fn get_files_on_disk(&self) -> Vec { + // Technically NO_SUB_DIR and NO_LOCK should change this output, but + // they're both currently unimplemented with this storage backend. 
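As an aside, here is a minimal sketch (not part of this patch) of how a consumer opts into the directory-creation behaviour wired through both backends above; it assumes the `rkv` re-exports shown elsewhere in this diff (`Rkv`, `StoreError`, and the `backend::SafeMode` builder type).

use std::path::Path;

use rkv::{
    backend::{SafeMode, SafeModeEnvironment},
    Rkv, StoreError,
};

// Without set_make_dir_if_needed(true), opening a path whose directory does not exist
// now fails with StoreError::UnsuitableEnvironmentPath instead of being created.
fn open_env(path: &Path) -> Result<Rkv<SafeModeEnvironment>, StoreError> {
    let mut builder = Rkv::<SafeModeEnvironment>::environment_builder::<SafeMode>();
    builder.set_max_dbs(2);
    builder.set_make_dir_if_needed(true);
    Rkv::from_builder(path, builder)
}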
+ let mut db_filename = self.path.clone(); + db_filename.push(DEFAULT_DB_FILENAME); + return vec![db_filename]; + } } diff --git a/third_party/rust/rkv/src/backend/impl_safe/error.rs b/third_party/rust/rkv/src/backend/impl_safe/error.rs index 537345500e63..df48d590460e 100644 --- a/third_party/rust/rkv/src/backend/impl_safe/error.rs +++ b/third_party/rust/rkv/src/backend/impl_safe/error.rs @@ -8,13 +8,18 @@ // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. -use std::fmt; -use std::io; +use std::{ + fmt, + io, + path::PathBuf, +}; use bincode::Error as BincodeError; -use crate::backend::traits::BackendError; -use crate::error::StoreError; +use crate::{ + backend::traits::BackendError, + error::StoreError, +}; #[derive(Debug)] pub enum ErrorImpl { @@ -24,6 +29,7 @@ pub enum ErrorImpl { DbsIllegalOpen, DbNotFoundError, DbIsForeignError, + UnsuitableEnvironmentPath(PathBuf), IoError(io::Error), BincodeError(BincodeError), } @@ -39,6 +45,7 @@ impl fmt::Display for ErrorImpl { ErrorImpl::DbsIllegalOpen => write!(fmt, "DbIllegalOpen (safe mode)"), ErrorImpl::DbNotFoundError => write!(fmt, "DbNotFoundError (safe mode)"), ErrorImpl::DbIsForeignError => write!(fmt, "DbIsForeignError (safe mode)"), + ErrorImpl::UnsuitableEnvironmentPath(_) => write!(fmt, "UnsuitableEnvironmentPath (safe mode)"), ErrorImpl::IoError(e) => e.fmt(fmt), ErrorImpl::BincodeError(e) => e.fmt(fmt), } @@ -55,6 +62,8 @@ impl Into for ErrorImpl { ErrorImpl::KeyValuePairNotFound => StoreError::KeyValuePairNotFound, ErrorImpl::BincodeError(_) => StoreError::FileInvalid, ErrorImpl::DbsFull => StoreError::DbsFull, + ErrorImpl::UnsuitableEnvironmentPath(path) => StoreError::UnsuitableEnvironmentPath(path), + ErrorImpl::IoError(error) => StoreError::IoError(error), _ => StoreError::SafeModeError(self), } } diff --git a/third_party/rust/rkv/src/backend/impl_safe/flags.rs b/third_party/rust/rkv/src/backend/impl_safe/flags.rs index 12106f431192..e3fde1522be2 100644 --- a/third_party/rust/rkv/src/backend/impl_safe/flags.rs +++ b/third_party/rust/rkv/src/backend/impl_safe/flags.rs @@ -14,16 +14,18 @@ use serde_derive::{ Serialize, }; -use crate::backend::common::{ - DatabaseFlags, - EnvironmentFlags, - WriteFlags, -}; -use crate::backend::traits::{ - BackendDatabaseFlags, - BackendEnvironmentFlags, - BackendFlags, - BackendWriteFlags, +use crate::backend::{ + common::{ + DatabaseFlags, + EnvironmentFlags, + WriteFlags, + }, + traits::{ + BackendDatabaseFlags, + BackendEnvironmentFlags, + BackendFlags, + BackendWriteFlags, + }, }; bitflags! { @@ -92,9 +94,10 @@ impl Into for DatabaseFlags { DatabaseFlags::REVERSE_KEY => unimplemented!(), #[cfg(feature = "db-dup-sort")] DatabaseFlags::DUP_SORT => DatabaseFlagsImpl::DUP_SORT, + #[cfg(feature = "db-dup-sort")] + DatabaseFlags::DUP_FIXED => unimplemented!(), #[cfg(feature = "db-int-key")] DatabaseFlags::INTEGER_KEY => DatabaseFlagsImpl::INTEGER_KEY, - DatabaseFlags::DUP_FIXED => unimplemented!(), DatabaseFlags::INTEGER_DUP => unimplemented!(), DatabaseFlags::REVERSE_DUP => unimplemented!(), } diff --git a/third_party/rust/rkv/src/backend/impl_safe/snapshot.rs b/third_party/rust/rkv/src/backend/impl_safe/snapshot.rs index 727ba80ca63e..938d5886b5b8 100644 --- a/third_party/rust/rkv/src/backend/impl_safe/snapshot.rs +++ b/third_party/rust/rkv/src/backend/impl_safe/snapshot.rs @@ -8,11 +8,13 @@ // CONDITIONS OF ANY KIND, either express or implied. 
See the License for the // specific language governing permissions and limitations under the License. -use std::collections::{ - BTreeMap, - BTreeSet, +use std::{ + collections::{ + BTreeMap, + BTreeSet, + }, + sync::Arc, }; -use std::sync::Arc; use serde_derive::{ Deserialize, diff --git a/third_party/rust/rkv/src/backend/impl_safe/transaction.rs b/third_party/rust/rkv/src/backend/impl_safe/transaction.rs index 773973820070..7f0beff71312 100644 --- a/third_party/rust/rkv/src/backend/impl_safe/transaction.rs +++ b/third_party/rust/rkv/src/backend/impl_safe/transaction.rs @@ -8,8 +8,10 @@ // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. -use std::collections::HashMap; -use std::sync::Arc; +use std::{ + collections::HashMap, + sync::Arc, +}; use super::{ snapshot::Snapshot, @@ -45,8 +47,8 @@ impl<'t> RoTransactionImpl<'t> { } impl<'t> BackendRoTransaction for RoTransactionImpl<'t> { - type Error = ErrorImpl; type Database = DatabaseImpl; + type Error = ErrorImpl; fn get(&self, db: &Self::Database, key: &[u8]) -> Result<&[u8], Self::Error> { let snapshot = self.snapshots.get(db).ok_or_else(|| ErrorImpl::DbIsForeignError)?; @@ -86,8 +88,8 @@ impl<'t> RwTransactionImpl<'t> { } impl<'t> BackendRwTransaction for RwTransactionImpl<'t> { - type Error = ErrorImpl; type Database = DatabaseImpl; + type Error = ErrorImpl; type Flags = WriteFlagsImpl; fn get(&self, db: &Self::Database, key: &[u8]) -> Result<&[u8], Self::Error> { diff --git a/third_party/rust/rkv/src/backend/traits.rs b/third_party/rust/rkv/src/backend/traits.rs index 104d950b3695..1e35f6e39713 100644 --- a/third_party/rust/rkv/src/backend/traits.rs +++ b/third_party/rust/rkv/src/backend/traits.rs @@ -8,18 +8,25 @@ // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. 
-use std::fmt::{ - Debug, - Display, +use std::{ + fmt::{ + Debug, + Display, + }, + path::{ + Path, + PathBuf, + }, }; -use std::path::Path; -use crate::backend::common::{ - DatabaseFlags, - EnvironmentFlags, - WriteFlags, +use crate::{ + backend::common::{ + DatabaseFlags, + EnvironmentFlags, + WriteFlags, + }, + error::StoreError, }; -use crate::error::StoreError; pub trait BackendError: Debug + Display + Into {} @@ -84,6 +91,8 @@ pub trait BackendEnvironmentBuilder<'b>: Debug + Eq + PartialEq + Copy + Clone { fn set_map_size(&mut self, size: usize) -> &mut Self; + fn set_make_dir_if_needed(&mut self, make_dir: bool) -> &mut Self; + fn open(&self, path: &Path) -> Result; } @@ -96,6 +105,8 @@ pub trait BackendEnvironment<'e>: Debug { type RoTransaction: BackendRoCursorTransaction<'e, Database = Self::Database>; type RwTransaction: BackendRwCursorTransaction<'e, Database = Self::Database>; + fn get_dbs(&self) -> Result>, Self::Error>; + fn open_db(&self, name: Option<&str>) -> Result; fn create_db(&self, name: Option<&str>, flags: Self::Flags) -> Result; @@ -112,7 +123,11 @@ pub trait BackendEnvironment<'e>: Debug { fn freelist(&self) -> Result; + fn load_ratio(&self) -> Result, Self::Error>; + fn set_map_size(&self, size: usize) -> Result<(), Self::Error>; + + fn get_files_on_disk(&self) -> Vec; } pub trait BackendRoTransaction: Debug { diff --git a/third_party/rust/rkv/src/bin/dump.rs b/third_party/rust/rkv/src/bin/dump.rs index 32bb8fadfb23..04ae824c5d2f 100644 --- a/third_party/rust/rkv/src/bin/dump.rs +++ b/third_party/rust/rkv/src/bin/dump.rs @@ -8,14 +8,18 @@ // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. -use std::env::args; -use std::io; -use std::path::Path; +use std::{ + env::args, + io, + path::Path, +}; -use rkv::migrate::Migrator; -use rkv::MigrateError; +use rkv::migrator::{ + LmdbArchMigrateError, + LmdbArchMigrator, +}; -fn main() -> Result<(), MigrateError> { +fn main() -> Result<(), LmdbArchMigrateError> { let mut cli_args = args(); let mut db_name = None; let mut env_path = None; @@ -43,8 +47,8 @@ fn main() -> Result<(), MigrateError> { } let env_path = env_path.ok_or("must provide a path to the LMDB environment")?; - let mut migrator: Migrator = Migrator::new(Path::new(&env_path))?; - migrator.dump(db_name.as_ref().map(String::as_str), io::stdout()).unwrap(); + let mut migrator = LmdbArchMigrator::new(Path::new(&env_path))?; + migrator.dump(db_name.as_deref(), io::stdout()).unwrap(); Ok(()) } diff --git a/third_party/rust/rkv/src/bin/rand.rs b/third_party/rust/rkv/src/bin/rand.rs index f1da01e5c21e..54492d8b92c6 100644 --- a/third_party/rust/rkv/src/bin/rand.rs +++ b/third_party/rust/rkv/src/bin/rand.rs @@ -14,17 +14,19 @@ //! the number of key/value pairs to create via the `-n ` flag //! (for which the default value is 50). -use std::env::args; -use std::fs; -use std::fs::File; -use std::io::Read; -use std::path::Path; - -use rkv::backend::{ - BackendEnvironmentBuilder, - Lmdb, +use std::{ + env::args, + fs, + fs::File, + io::Read, + path::Path, }; + use rkv::{ + backend::{ + BackendEnvironmentBuilder, + Lmdb, + }, Rkv, StoreOptions, Value, @@ -78,7 +80,7 @@ fn main() { // of the pairs (assuming maximum key and value sizes). 
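To make the widened `BackendEnvironment` trait above easier to read, here is a hypothetical generic helper (not in the patch) that exercises only the newly declared methods, `get_dbs`, `load_ratio`, and `get_files_on_disk`.

use rkv::backend::BackendEnvironment;

// Print basic diagnostics for any backend; relies solely on the trait methods
// added in this patch.
fn describe_env<'e, E: BackendEnvironment<'e>>(env: &E) -> Result<(), E::Error> {
    for db in env.get_dbs()? {
        println!("database: {:?}", db); // `None` is the default, unnamed database
    }
    if let Some(ratio) = env.load_ratio()? {
        println!("load ratio: {:.2}", ratio); // `None` means resizing is never needed
    }
    for file in env.get_files_on_disk() {
        println!("backing file: {}", file.display());
    }
    Ok(())
}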
builder.set_map_size((511 + 65535) * num_pairs * 2); let rkv = Rkv::from_builder(Path::new(&path), builder).expect("Rkv"); - let store = rkv.open_single(database.as_ref().map(|x| x.as_str()), StoreOptions::create()).expect("opened"); + let store = rkv.open_single(database.as_deref(), StoreOptions::create()).expect("opened"); let mut writer = rkv.write().expect("writer"); // Generate random values for the number of keys and key/value lengths. diff --git a/third_party/rust/rkv/src/env.rs b/third_party/rust/rkv/src/env.rs index c5a722bf312c..3edd23a3514c 100644 --- a/third_party/rust/rkv/src/env.rs +++ b/third_party/rust/rkv/src/env.rs @@ -8,10 +8,13 @@ // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. -use std::os::raw::c_uint; -use std::path::{ - Path, - PathBuf, +use std::{ + fs, + os::raw::c_uint, + path::{ + Path, + PathBuf, + }, }; #[cfg(any(feature = "db-dup-sort", feature = "db-int-key"))] @@ -19,22 +22,24 @@ use crate::backend::{ BackendDatabaseFlags, DatabaseFlags, }; -use crate::backend::{ - BackendEnvironment, - BackendEnvironmentBuilder, - BackendInfo, - BackendRoCursorTransaction, - BackendRwCursorTransaction, - BackendStat, - SafeModeError, +use crate::{ + backend::{ + BackendEnvironment, + BackendEnvironmentBuilder, + BackendRoCursorTransaction, + BackendRwCursorTransaction, + SafeModeError, + }, + error::StoreError, + readwrite::{ + Reader, + Writer, + }, + store::{ + single::SingleStore, + Options as StoreOptions, + }, }; -use crate::error::StoreError; -use crate::readwrite::{ - Reader, - Writer, -}; -use crate::store::single::SingleStore; -use crate::store::Options as StoreOptions; #[cfg(feature = "db-dup-sort")] use crate::store::multi::MultiStore; @@ -49,7 +54,7 @@ use crate::store::integermulti::MultiIntegerStore; pub static DEFAULT_MAX_DBS: c_uint = 5; -/// Wrapper around an `Environment` (e.g. an LMDB environment). +/// Wrapper around an `Environment` (e.g. such as an `LMDB` or `SafeMode` environment). #[derive(Debug)] pub struct Rkv { path: PathBuf, @@ -82,10 +87,6 @@ where where B: BackendEnvironmentBuilder<'e, Environment = E>, { - if !path.is_dir() { - return Err(StoreError::DirectoryDoesNotExistError(path.into())); - } - let mut builder = B::new(); builder.set_max_dbs(max_dbs); @@ -98,16 +99,9 @@ where where B: BackendEnvironmentBuilder<'e, Environment = E>, { - if !path.is_dir() { - return Err(StoreError::DirectoryDoesNotExistError(path.into())); - } - Ok(Rkv { path: path.into(), - env: builder.open(path).map_err(|e| match e.into() { - StoreError::OtherError(2) => StoreError::DirectoryDoesNotExistError(path.into()), - e => e, - })?, + env: builder.open(path).map_err(|e| e.into())?, }) } } @@ -117,9 +111,14 @@ impl<'e, E> Rkv where E: BackendEnvironment<'e>, { + /// Return all created databases. + pub fn get_dbs(&self) -> Result>, StoreError> { + self.env.get_dbs().map_err(|e| e.into()) + } + /// Create or Open an existing database in (&[u8] -> Single Value) mode. - /// Note: that create=true cannot be called concurrently with other operations - /// so if you are sure that the database exists, call this with create=false. + /// Note: that create=true cannot be called concurrently with other operations so if + /// you are sure that the database exists, call this with create=false. pub fn open_single<'s, T>( &self, name: T, @@ -132,8 +131,8 @@ where } /// Create or Open an existing database in (Integer -> Single Value) mode. 
- /// Note: that create=true cannot be called concurrently with other operations - /// so if you are sure that the database exists, call this with create=false. + /// Note: that create=true cannot be called concurrently with other operations so if + /// you are sure that the database exists, call this with create=false. #[cfg(feature = "db-int-key")] pub fn open_integer<'s, T, K>( &self, @@ -149,8 +148,8 @@ where } /// Create or Open an existing database in (&[u8] -> Multiple Values) mode. - /// Note: that create=true cannot be called concurrently with other operations - /// so if you are sure that the database exists, call this with create=false. + /// Note: that create=true cannot be called concurrently with other operations so if + /// you are sure that the database exists, call this with create=false. #[cfg(feature = "db-dup-sort")] pub fn open_multi<'s, T>( &self, @@ -165,8 +164,8 @@ where } /// Create or Open an existing database in (Integer -> Multiple Values) mode. - /// Note: that create=true cannot be called concurrently with other operations - /// so if you are sure that the database exists, call this with create=false. + /// Note: that create=true cannot be called concurrently with other operations so if + /// you are sure that the database exists, call this with create=false. #[cfg(all(feature = "db-dup-sort", feature = "db-int-key"))] pub fn open_multi_integer<'s, T, K>( &self, @@ -187,16 +186,20 @@ where T: Into>, { if opts.create { - self.env.create_db(name.into(), opts.flags).map_err(|e| match e.into() { - StoreError::LmdbError(lmdb::Error::BadRslot) => StoreError::open_during_transaction(), - StoreError::SafeModeError(SafeModeError::DbsIllegalOpen) => StoreError::open_during_transaction(), - e => e, + self.env.create_db(name.into(), opts.flags).map_err(|e| { + match e.into() { + StoreError::LmdbError(lmdb::Error::BadRslot) => StoreError::open_during_transaction(), + StoreError::SafeModeError(SafeModeError::DbsIllegalOpen) => StoreError::open_during_transaction(), + e => e, + } }) } else { - self.env.open_db(name.into()).map_err(|e| match e.into() { - StoreError::LmdbError(lmdb::Error::BadRslot) => StoreError::open_during_transaction(), - StoreError::SafeModeError(SafeModeError::DbsIllegalOpen) => StoreError::open_during_transaction(), - e => e, + self.env.open_db(name.into()).map_err(|e| { + match e.into() { + StoreError::LmdbError(lmdb::Error::BadRslot) => StoreError::open_during_transaction(), + StoreError::SafeModeError(SafeModeError::DbsIllegalOpen) => StoreError::open_during_transaction(), + e => e, + } }) } } @@ -207,9 +210,9 @@ impl<'e, E> Rkv where E: BackendEnvironment<'e>, { - /// Create a read transaction. There can be multiple concurrent readers - /// for an environment, up to the maximum specified by LMDB (default 126), - /// and you can open readers while a write transaction is active. + /// Create a read transaction. There can be multiple concurrent readers for an + /// environment, up to the maximum specified by LMDB (default 126), and you can open + /// readers while a write transaction is active. pub fn read(&'e self) -> Result, StoreError> where E: BackendEnvironment<'e, RoTransaction = T>, @@ -218,9 +221,9 @@ where Ok(Reader::new(self.env.begin_ro_txn().map_err(|e| e.into())?)) } - /// Create a write transaction. There can be only one write transaction - /// active at any given time, so trying to create a second one will block - /// until the first is committed or aborted. + /// Create a write transaction. 
There can be only one write transaction active at any + /// given time, so trying to create a second one will block until the first is + /// committed or aborted. pub fn write(&'e self) -> Result, StoreError> where E: BackendEnvironment<'e, RwTransaction = T>, @@ -235,18 +238,18 @@ impl<'e, E> Rkv where E: BackendEnvironment<'e>, { - /// Flush the data buffers to disk. This call is only useful, when the environment - /// was open with either `NO_SYNC`, `NO_META_SYNC` or `MAP_ASYNC` (see below). - /// The call is not valid if the environment was opened with `READ_ONLY`. + /// Flush the data buffers to disk. This call is only useful, when the environment was + /// open with either `NO_SYNC`, `NO_META_SYNC` or `MAP_ASYNC` (see below). The call is + /// not valid if the environment was opened with `READ_ONLY`. /// - /// Data is always written to disk when `transaction.commit()` is called, - /// but the operating system may keep it buffered. - /// LMDB always flushes the OS buffers upon commit as well, - /// unless the environment was opened with `NO_SYNC` or in part `NO_META_SYNC`. + /// Data is always written to disk when `transaction.commit()` is called, but the + /// operating system may keep it buffered. LMDB always flushes the OS buffers upon + /// commit as well, unless the environment was opened with `NO_SYNC` or in part + /// `NO_META_SYNC`. /// - /// `force`: if true, force a synchronous flush. - /// Otherwise if the environment has the `NO_SYNC` flag set the flushes will be omitted, - /// and with `MAP_ASYNC` they will be asynchronous. + /// `force`: if true, force a synchronous flush. Otherwise if the environment has the + /// `NO_SYNC` flag set the flushes will be omitted, and with `MAP_ASYNC` they will + /// be asynchronous. pub fn sync(&self, force: bool) -> Result<(), StoreError> { self.env.sync(force).map_err(|e| e.into()) } @@ -278,41 +281,46 @@ where /// Retrieve the load ratio (# of used pages / total pages) about this environment. /// - /// With the formular: (last_page_no - freelist_pages) / total_pages - pub fn load_ratio(&self) -> Result { - let stat = self.stat()?; - let info = self.info()?; - let freelist = self.env.freelist().map_err(|e| e.into())?; - - let last_pgno = info.last_pgno() + 1; // pgno is 0 based. - let total_pgs = info.map_size() / stat.page_size(); - if freelist > last_pgno { - return Err(StoreError::DatabaseCorrupted); - } - let used_pgs = last_pgno - freelist; - Ok(used_pgs as f32 / total_pgs as f32) + /// With the formular: (last_page_no - freelist_pages) / total_pages. + /// A value of `None` means that the backend doesn't ever need to be resized. + pub fn load_ratio(&self) -> Result, StoreError> { + self.env.load_ratio().map_err(|e| e.into()) } /// Sets the size of the memory map to use for the environment. /// - /// This can be used to resize the map when the environment is already open. - /// You can also use `Rkv::environment_builder()` to set the map size during - /// the `Rkv` initialization. + /// This can be used to resize the map when the environment is already open. You can + /// also use `Rkv::environment_builder()` to set the map size during the `Rkv` + /// initialization. /// /// Note: /// - /// * No active transactions allowed when performing resizing in this process. - /// It's up to the consumer to enforce that. + /// * No active transactions allowed when performing resizing in this process. It's up + /// to the consumer to enforce that. /// - /// * The size should be a multiple of the OS page size. 
Any attempt to set - /// a size smaller than the space already consumed by the environment will - /// be silently changed to the current size of the used space. + /// * The size should be a multiple of the OS page size. Any attempt to set a size + /// smaller than the space already consumed by the environment will be silently + /// changed to the current size of the used space. /// - /// * In the multi-process case, once a process resizes the map, other - /// processes need to either re-open the environment, or call set_map_size - /// with size 0 to update the environment. Otherwise, new transaction creation - /// will fail with `LmdbError::MapResized`. + /// * In the multi-process case, once a process resizes the map, other processes need + /// to either re-open the environment, or call set_map_size with size 0 to update + /// the environment. Otherwise, new transaction creation will fail with + /// `LmdbError::MapResized`. pub fn set_map_size(&self, size: usize) -> Result<(), StoreError> { self.env.set_map_size(size).map_err(Into::into) } + + /// Closes this environment and deletes all its files from disk. Doesn't delete the + /// folder used when opening the environment. + pub fn close_and_delete(self) -> Result<(), StoreError> { + let files = self.env.get_files_on_disk(); + self.sync(true)?; + drop(self); + + for file in files { + fs::remove_file(file)?; + } + + Ok(()) + } } diff --git a/third_party/rust/rkv/src/error.rs b/third_party/rust/rkv/src/error.rs index 1f547799fa5a..18332b714a7b 100644 --- a/third_party/rust/rkv/src/error.rs +++ b/third_party/rust/rkv/src/error.rs @@ -8,12 +8,14 @@ // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. 
-use std::io; -use std::num; -use std::path::PathBuf; -use std::str; -use std::thread; -use std::thread::ThreadId; +use std::{ + io, + path::PathBuf, + str, + sync, + thread, + thread::ThreadId, +}; use failure::Fail; @@ -55,6 +57,9 @@ impl From> for DataError { #[derive(Debug, Fail)] pub enum StoreError { + #[fail(display = "manager poisoned")] + ManagerPoisonError, + #[fail(display = "database corrupted")] DatabaseCorrupted, @@ -79,8 +84,8 @@ pub enum StoreError { #[fail(display = "I/O error: {:?}", _0)] IoError(io::Error), - #[fail(display = "directory does not exist or not a directory: {:?}", _0)] - DirectoryDoesNotExistError(PathBuf), + #[fail(display = "environment path does not exist or not the right type: {:?}", _0)] + UnsuitableEnvironmentPath(PathBuf), #[fail(display = "data error: {:?}", _0)] DataError(DataError), @@ -96,9 +101,6 @@ pub enum StoreError { #[fail(display = "attempted to open DB during transaction in thread {:?}", _0)] OpenAttemptedDuringTransaction(ThreadId), - - #[fail(display = "other backing store error: {}", _0)] - OtherError(i32), } impl StoreError { @@ -123,101 +125,35 @@ impl From for StoreError { } } +impl From> for StoreError { + fn from(_: sync::PoisonError) -> StoreError { + StoreError::ManagerPoisonError + } +} + #[derive(Debug, Fail)] pub enum MigrateError { - #[fail(display = "database not found: {:?}", _0)] - DatabaseNotFound(String), + #[fail(display = "store error: {}", _0)] + StoreError(StoreError), - #[fail(display = "{}", _0)] - FromString(String), + #[fail(display = "manager poisoned")] + ManagerPoisonError, - #[fail(display = "couldn't determine bit depth")] - IndeterminateBitDepth, + #[fail(display = "source is empty")] + SourceEmpty, - #[fail(display = "I/O error: {:?}", _0)] - IoError(io::Error), - - #[fail(display = "invalid DatabaseFlags bits")] - InvalidDatabaseBits, - - #[fail(display = "invalid data version")] - InvalidDataVersion, - - #[fail(display = "invalid magic number")] - InvalidMagicNum, - - #[fail(display = "invalid NodeFlags bits")] - InvalidNodeBits, - - #[fail(display = "invalid PageFlags bits")] - InvalidPageBits, - - #[fail(display = "invalid page number")] - InvalidPageNum, - - #[fail(display = "lmdb backend error: {}", _0)] - LmdbError(lmdb::Error), - - #[fail(display = "safe mode backend error: {}", _0)] - SafeModeError(SafeModeError), - - #[fail(display = "string conversion error")] - StringConversionError, - - #[fail(display = "TryFromInt error: {:?}", _0)] - TryFromIntError(num::TryFromIntError), - - #[fail(display = "unexpected Page variant")] - UnexpectedPageVariant, - - #[fail(display = "unexpected PageHeader variant")] - UnexpectedPageHeaderVariant, - - #[fail(display = "unsupported PageHeader variant")] - UnsupportedPageHeaderVariant, - - #[fail(display = "UTF8 error: {:?}", _0)] - Utf8Error(str::Utf8Error), + #[fail(display = "destination is not empty")] + DestinationNotEmpty, } -impl From for MigrateError { - fn from(e: io::Error) -> MigrateError { - MigrateError::IoError(e) +impl From for MigrateError { + fn from(e: StoreError) -> MigrateError { + MigrateError::StoreError(e) } } -impl From for MigrateError { - fn from(e: str::Utf8Error) -> MigrateError { - MigrateError::Utf8Error(e) - } -} - -impl From for MigrateError { - fn from(e: num::TryFromIntError) -> MigrateError { - MigrateError::TryFromIntError(e) - } -} - -impl From<&str> for MigrateError { - fn from(e: &str) -> MigrateError { - MigrateError::FromString(e.to_string()) - } -} - -impl From for MigrateError { - fn from(e: String) -> MigrateError { - 
MigrateError::FromString(e) - } -} - -impl From for MigrateError { - fn from(e: lmdb::Error) -> MigrateError { - MigrateError::LmdbError(e) - } -} - -impl From for MigrateError { - fn from(e: SafeModeError) -> MigrateError { - MigrateError::SafeModeError(e) +impl From> for MigrateError { + fn from(_: sync::PoisonError) -> MigrateError { + MigrateError::ManagerPoisonError } } diff --git a/third_party/rust/rkv/src/helpers.rs b/third_party/rust/rkv/src/helpers.rs index e5e69f0e95b1..6f6cd9c774b7 100644 --- a/third_party/rust/rkv/src/helpers.rs +++ b/third_party/rust/rkv/src/helpers.rs @@ -8,21 +8,24 @@ // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. -use std::io; -use std::path::{ - Path, - PathBuf, +use std::{ + io, + path::{ + Path, + PathBuf, + }, }; use url::Url; -use crate::error::StoreError; -use crate::value::Value; +use crate::{ + error::StoreError, + value::Value, +}; -pub(crate) fn read_transform(value: Result<&[u8], StoreError>) -> Result, StoreError> { +pub(crate) fn read_transform(value: Result<&[u8], StoreError>) -> Result { match value { - Ok(bytes) => Value::from_tagged_slice(bytes).map(Some).map_err(StoreError::DataError), - Err(StoreError::KeyValuePairNotFound) => Ok(None), + Ok(bytes) => Value::from_tagged_slice(bytes).map_err(StoreError::DataError), Err(e) => Err(e), } } @@ -36,8 +39,8 @@ where let canonical = path.into().canonicalize()?; Ok(if cfg!(target_os = "windows") { - let url = Url::from_file_path(&canonical).map_err(|_| io::Error::new(io::ErrorKind::Other, "passing error"))?; - url.to_file_path().map_err(|_| io::Error::new(io::ErrorKind::Other, "path canonicalization error"))? + let map_err = |_| io::Error::new(io::ErrorKind::Other, "path canonicalization error"); + Url::from_file_path(&canonical).and_then(|url| url.to_file_path()).map_err(map_err)? } else { canonical }) diff --git a/third_party/rust/rkv/src/lib.rs b/third_party/rust/rkv/src/lib.rs index f1b9d37cc354..0650bea51c22 100644 --- a/third_party/rust/rkv/src/lib.rs +++ b/third_party/rust/rkv/src/lib.rs @@ -8,27 +8,32 @@ // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. -//! a simple, humane, typed Rust interface to [LMDB](http://www.lmdb.tech/doc/) +//! A simple, humane, typed key-value storage solution. It supports multiple backend +//! engines with varying guarantees, such as [LMDB](http://www.lmdb.tech/doc/) for +//! performance, or "SafeMode" for reliability. //! //! It aims to achieve the following: //! -//! - Avoid LMDB's sharp edges (e.g., obscure error codes for common situations). +//! - Avoid sharp edges (e.g., obscure error codes for common situations). //! - Report errors via [failure](https://docs.rs/failure/). -//! - Correctly restrict access to one handle per process via a [Manager](struct.Manager.html). -//! - Use Rust's type system to make single-typed key stores (including LMDB's own integer-keyed stores) -//! safe and ergonomic. +//! - Correctly restrict access to one handle per process via a +//! [Manager](struct.Manager.html). +//! - Use Rust's type system to make single-typed key stores safe and ergonomic. //! - Encode and decode values via [bincode](https://docs.rs/bincode/)/[serde](https://docs.rs/serde/) //! and type tags, achieving platform-independent storage and input/output flexibility. //! //! It exposes these primary abstractions: //! -//! 
- [Manager](struct.Manager.html): a singleton that controls access to LMDB environments -//! - [Rkv](struct.Rkv.html): an LMDB environment that contains a set of key/value databases -//! - [SingleStore](store/single/struct.SingleStore.html): an LMDB database that contains a set of key/value pairs +//! - [Manager](struct.Manager.html): a singleton that controls access to environments +//! - [Rkv](struct.Rkv.html): an environment contains a set of key/value databases +//! - [SingleStore](store/single/struct.SingleStore.html): a database contains a set of +//! key/value pairs //! //! Keys can be anything that implements `AsRef<[u8]>` or integers //! (when accessing an [IntegerStore](store/integer/struct.IntegerStore.html)). -//! Values can be any of the types defined by the [Value](value/enum.Value.html) enum, including: +//! +//! Values can be any of the types defined by the [Value](value/enum.Value.html) enum, +//! including: //! //! - booleans (`Value::Bool`) //! - integers (`Value::I64`, `Value::U64`) @@ -45,8 +50,8 @@ //! use std::fs; //! use tempfile::Builder; //! -//! // First determine the path to the environment, which is represented -//! // on disk as a directory containing two files: +//! // First determine the path to the environment, which is represented on disk as a +//! // directory containing two files: //! // //! // * a data file containing the key/value stores //! // * a lock file containing metadata about current transactions @@ -57,10 +62,9 @@ //! fs::create_dir_all(root.path()).unwrap(); //! let path = root.path(); //! -//! // The Manager enforces that each process opens the same environment -//! // at most once by caching a handle to each environment that it opens. -//! // Use it to retrieve the handle to an opened environment—or create one -//! // if it hasn't already been opened: +//! // The `Manager` enforces that each process opens the same environment at most once by +//! // caching a handle to each environment that it opens. Use it to retrieve the handle +//! // to an opened environment—or create one if it hasn't already been opened: //! let mut manager = Manager::::singleton().write().unwrap(); //! let created_arc = manager.get_or_create(path, Rkv::new::).unwrap(); //! let env = created_arc.read().unwrap(); @@ -69,15 +73,15 @@ //! let store = env.open_single("mydb", StoreOptions::create()).unwrap(); //! //! { -//! // Use a write transaction to mutate the store via a `Writer`. -//! // There can be only one writer for a given environment, so opening -//! // a second one will block until the first completes. +//! // Use a write transaction to mutate the store via a `Writer`. There can be only +//! // one writer for a given environment, so opening a second one will block until +//! // the first completes. //! let mut writer = env.write().unwrap(); //! -//! // Keys are `AsRef<[u8]>`, while values are `Value` enum instances. -//! // Use the `Blob` variant to store arbitrary collections of bytes. -//! // Putting data returns a `Result<(), StoreError>`, where StoreError -//! // is an enum identifying the reason for a failure. +//! // Keys are `AsRef<[u8]>`, while values are `Value` enum instances. Use the `Blob` +//! // variant to store arbitrary collections of bytes. Putting data returns a +//! // `Result<(), StoreError>`, where StoreError is an enum identifying the reason +//! // for a failure. //! store.put(&mut writer, "int", &Value::I64(1234)).unwrap(); //! store.put(&mut writer, "uint", &Value::U64(1234_u64)).unwrap(); //! 
store.put(&mut writer, "float", &Value::F64(1234.0.into())).unwrap(); @@ -87,15 +91,15 @@ //! store.put(&mut writer, "json", &Value::Json(r#"{"foo":"bar", "number": 1}"#)).unwrap(); //! store.put(&mut writer, "blob", &Value::Blob(b"blob")).unwrap(); //! -//! // You must commit a write transaction before the writer goes out -//! // of scope, or the transaction will abort and the data won't persist. +//! // You must commit a write transaction before the writer goes out of scope, or the +//! // transaction will abort and the data won't persist. //! writer.commit().unwrap(); //! } //! //! { -//! // Use a read transaction to query the store via a `Reader`. -//! // There can be multiple concurrent readers for a store, and readers -//! // never block on a writer nor other readers. +//! // Use a read transaction to query the store via a `Reader`. There can be multiple +//! // concurrent readers for a store, and readers never block on a writer nor other +//! // readers. //! let reader = env.read().expect("reader"); //! //! // Keys are `AsRef`, and the return value is `Result, StoreError>`. @@ -111,9 +115,9 @@ //! // Retrieving a non-existent value returns `Ok(None)`. //! println!("Get non-existent value {:?}", store.get(&reader, "non-existent").unwrap()); //! -//! // A read transaction will automatically close once the reader -//! // goes out of scope, so isn't necessary to close it explicitly, -//! // although you can do so by calling `Reader.abort()`. +//! // A read transaction will automatically close once the reader goes out of scope, +//! // so isn't necessary to close it explicitly, although you can do so by calling +//! // `Reader.abort()`. //! } //! //! { @@ -126,9 +130,9 @@ //! } //! //! { -//! // Explicitly aborting a transaction is not required unless an early -//! // abort is desired, since both read and write transactions will -//! // implicitly be aborted once they go out of scope. +//! // Explicitly aborting a transaction is not required unless an early abort is +//! // desired, since both read and write transactions will implicitly be aborted once +//! // they go out of scope. //! { //! let mut writer = env.write().unwrap(); //! store.put(&mut writer, "foo", &Value::Str("bar")).unwrap(); @@ -144,18 +148,17 @@ //! store.put(&mut writer, "bar", &Value::Str("baz")).unwrap(); //! store.delete(&mut writer, "foo").unwrap(); //! -//! // A write transaction also supports reading, and the version of the -//! // store that it reads includes the changes it has made regardless of -//! // the commit state of that transaction. +//! // A write transaction also supports reading, and the version of the store that it +//! // reads includes the changes it has made regardless of the commit state of that +//! // transaction. -//! // In the code above, "foo" and "bar" were put into the store, -//! // then "foo" was deleted so only "bar" will return a result when the -//! // database is queried via the writer. +//! // In the code above, "foo" and "bar" were put into the store, then "foo" was +//! // deleted so only "bar" will return a result when the database is queried via the +//! // writer. //! println!("It should be None! ({:?})", store.get(&writer, "foo").unwrap()); //! println!("Get bar ({:?})", store.get(&writer, "bar").unwrap()); //! -//! // But a reader won't see that change until the write transaction -//! // is committed. +//! // But a reader won't see that change until the write transaction is committed. //! { //! let reader = env.read().expect("reader"); //! 
println!("Get foo {:?}", store.get(&reader, "foo").unwrap()); @@ -168,9 +171,9 @@ //! println!("Get bar {:?}", store.get(&reader, "bar").unwrap()); //! } //! -//! // Committing a transaction consumes the writer, preventing you -//! // from reusing it by failing at compile time with an error. -//! // This line would report error[E0382]: borrow of moved value: `writer`. +//! // Committing a transaction consumes the writer, preventing you from reusing it by +//! // failing at compile time with an error. This line would report "error[E0382]: +//! // borrow of moved value: `writer`". //! // store.put(&mut writer, "baz", &Value::Str("buz")).unwrap(); //! } //! @@ -206,7 +209,7 @@ mod manager; mod readwrite; pub mod backend; -pub mod migrate; +pub mod migrator; pub mod store; pub mod value; @@ -222,14 +225,17 @@ pub use error::{ StoreError, }; pub use manager::Manager; +pub use migrator::Migrator; pub use readwrite::{ Readable, Reader, Writer, }; -pub use store::keys::EncodableKey; -pub use store::single::SingleStore; -pub use store::Options as StoreOptions; +pub use store::{ + keys::EncodableKey, + single::SingleStore, + Options as StoreOptions, +}; pub use value::{ OwnedValue, Value, diff --git a/third_party/rust/rkv/src/manager.rs b/third_party/rust/rkv/src/manager.rs index 66c81d4d1571..fc696efbf40e 100644 --- a/third_party/rust/rkv/src/manager.rs +++ b/third_party/rust/rkv/src/manager.rs @@ -8,46 +8,64 @@ // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. -use std::collections::btree_map::Entry; -use std::collections::BTreeMap; -use std::os::raw::c_uint; -use std::path::{ - Path, - PathBuf, -}; -use std::result; -use std::sync::{ - Arc, - RwLock, +use std::{ + collections::{ + btree_map::Entry, + BTreeMap, + }, + os::raw::c_uint, + path::{ + Path, + PathBuf, + }, + result, + sync::{ + Arc, + RwLock, + }, }; use lazy_static::lazy_static; -use crate::backend::{ - LmdbEnvironment, - SafeModeEnvironment, +use crate::{ + backend::{ + BackendEnvironment, + BackendEnvironmentBuilder, + LmdbEnvironment, + SafeModeEnvironment, + }, + error::StoreError, + helpers::canonicalize_path, + Rkv, }; -use crate::error::StoreError; -use crate::helpers::canonicalize_path; -use crate::Rkv; type Result = result::Result; type SharedRkv = Arc>>; lazy_static! { - /// A process is only permitted to have one open handle to each Rkv environment. - /// This manager exists to enforce that constraint: don't open environments directly. static ref MANAGER_LMDB: RwLock> = RwLock::new(Manager::new()); static ref MANAGER_SAFE_MODE: RwLock> = RwLock::new(Manager::new()); } -/// A process is only permitted to have one open handle to each Rkv environment. -/// This manager exists to enforce that constraint: don't open environments directly. +/// A process is only permitted to have one open handle to each Rkv environment. This +/// manager exists to enforce that constraint: don't open environments directly. +/// +/// By default, path canonicalization is enabled for identifying RKV instances. This +/// is true by default, because it helps enforce the constraints guaranteed by +/// this manager. However, path canonicalization might crash in some fringe +/// circumstances, so the `no-canonicalize-path` feature offers the possibility of +/// disabling it. 
See: https://bugzilla.mozilla.org/show_bug.cgi?id=1531887 +/// +/// When path canonicalization is disabled, you *must* ensure an RKV environment is +/// always created or retrieved with the same path. pub struct Manager { environments: BTreeMap>, } -impl Manager { +impl<'e, E> Manager +where + E: BackendEnvironment<'e>, +{ fn new() -> Manager { Manager { environments: Default::default(), @@ -59,7 +77,11 @@ impl Manager { where P: Into<&'p Path>, { - let canonical = canonicalize_path(path)?; + let canonical = if cfg!(feature = "no-canonicalize-path") { + path.into().to_path_buf() + } else { + canonicalize_path(path)? + }; Ok(self.environments.get(&canonical).cloned()) } @@ -69,7 +91,11 @@ impl Manager { F: FnOnce(&Path) -> Result>, P: Into<&'p Path>, { - let canonical = canonicalize_path(path)?; + let canonical = if cfg!(feature = "no-canonicalize-path") { + path.into().to_path_buf() + } else { + canonicalize_path(path)? + }; Ok(match self.environments.entry(canonical) { Entry::Occupied(e) => e.get().clone(), Entry::Vacant(e) => { @@ -79,14 +105,17 @@ impl Manager { }) } - /// Return the open env at `path` with capacity `capacity`, - /// or create it by calling `f`. + /// Return the open env at `path` with `capacity`, or create it by calling `f`. pub fn get_or_create_with_capacity<'p, F, P>(&mut self, path: P, capacity: c_uint, f: F) -> Result> where F: FnOnce(&Path, c_uint) -> Result>, P: Into<&'p Path>, { - let canonical = canonicalize_path(path)?; + let canonical = if cfg!(feature = "no-canonicalize-path") { + path.into().to_path_buf() + } else { + canonicalize_path(path)? + }; Ok(match self.environments.entry(canonical) { Entry::Occupied(e) => e.get().clone(), Entry::Vacant(e) => { @@ -95,6 +124,52 @@ impl Manager { }, }) } + + /// Return a new Rkv environment from the builder, or create it by calling `f`. + pub fn get_or_create_from_builder<'p, F, P, B>(&mut self, path: P, builder: B, f: F) -> Result> + where + F: FnOnce(&Path, B) -> Result>, + P: Into<&'p Path>, + B: BackendEnvironmentBuilder<'e, Environment = E>, + { + let canonical = if cfg!(feature = "no-canonicalize-path") { + path.into().to_path_buf() + } else { + canonicalize_path(path)? + }; + Ok(match self.environments.entry(canonical) { + Entry::Occupied(e) => e.get().clone(), + Entry::Vacant(e) => { + let k = Arc::new(RwLock::new(f(e.key().as_path(), builder)?)); + e.insert(k).clone() + }, + }) + } + + /// Tries to close the specified environment and delete all its files from disk. + /// Doesn't delete the folder used when opening the environment. + /// This will only work if there's no other users of this environment. + pub fn try_close_and_delete<'p, P>(&mut self, path: P) -> Result<()> + where + P: Into<&'p Path>, + { + let canonical = if cfg!(feature = "no-canonicalize-path") { + path.into().to_path_buf() + } else { + canonicalize_path(path)? + }; + match self.environments.entry(canonical) { + Entry::Vacant(_) => {}, // noop + Entry::Occupied(e) => { + if Arc::strong_count(e.get()) == 1 { + if let Ok(env) = Arc::try_unwrap(e.remove()) { + env.into_inner()?.close_and_delete()?; + } + } + }, + } + Ok(()) + } } impl Manager { @@ -111,12 +186,13 @@ impl Manager { #[cfg(test)] mod tests { - use std::fs; - use tempfile::Builder; - use super::*; use crate::*; + use std::fs; + + use tempfile::Builder; + use backend::Lmdb; /// Test that one can mutate managed Rkv instances in surprising ways. 
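Before the tests, a sketch (not from the patch) of the two `Manager` additions above; the builder settings are arbitrary, and the error handling leans on the new `ManagerPoisonError` conversion introduced in `error.rs`.

use std::path::Path;

use rkv::{
    backend::{Lmdb, LmdbEnvironment},
    Manager, Rkv, StoreError,
};

fn open_then_discard(path: &Path) -> Result<(), StoreError> {
    let mut manager = Manager::<LmdbEnvironment>::singleton().write()?;

    // The closure only runs if no environment is cached for this path yet.
    let mut builder = Rkv::<LmdbEnvironment>::environment_builder::<Lmdb>();
    builder.set_max_dbs(3);
    let shared = manager.get_or_create_from_builder(path, builder, Rkv::from_builder::<Lmdb>)?;
    drop(shared);

    // Removes the environment's files only if no one else holds the Arc.
    manager.try_close_and_delete(path)
}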
@@ -129,8 +205,8 @@ mod tests { let path1 = root1.path(); let arc = manager.get_or_create(path1, Rkv::new::).expect("created"); - // Arc> has interior mutability, so we can replace arc's Rkv - // instance with a new instance that has a different path. + // Arc> has interior mutability, so we can replace arc's Rkv instance with a new + // instance that has a different path. let root2 = Builder::new().prefix("test_mutate_managed_rkv_2").tempdir().expect("tempdir"); fs::create_dir_all(root2.path()).expect("dir created"); let path2 = root2.path(); @@ -140,14 +216,13 @@ mod tests { *rkv = rkv2; } - // arc now has a different internal Rkv with path2, but it's still - // mapped to path1 in manager, so its pointer is equal to a new Arc - // for path1. + // Arc now has a different internal Rkv with path2, but it's still mapped to path1 in + // manager, so its pointer is equal to a new Arc for path1. let path1_arc = manager.get(path1).expect("success").expect("existed"); assert!(Arc::ptr_eq(&path1_arc, &arc)); - // Meanwhile, a new Arc for path2 has a different pointer, even though - // its Rkv's path is the same as arc's current path. + // Meanwhile, a new Arc for path2 has a different pointer, even though its Rkv's path is + // the same as arc's current path. let path2_arc = manager.get_or_create(path2, Rkv::new::).expect("success"); assert!(!Arc::ptr_eq(&path2_arc, &arc)); } diff --git a/third_party/rust/rkv/src/migrator.rs b/third_party/rust/rkv/src/migrator.rs new file mode 100644 index 000000000000..068004024109 --- /dev/null +++ b/third_party/rust/rkv/src/migrator.rs @@ -0,0 +1,168 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +//! A simple utility for migrating data from one RKV environment to another. Notably, this +//! tool can migrate data from an environment created with a different backend than the +//! current RKV consumer (e.g. from Lmdb to SafeMode). +//! +//! The utility doesn't support migrating between 32-bit and 64-bit LMDB environments yet, +//! see `arch_migrator` if this is needed. However, this utility is ultimately intended to +//! handle all possible migrations. +//! +//! The destination environment should be empty of data, otherwise an error is returned. +//! +//! There are 3 versions of the migration methods: +//! * `migrate__to_`, where `` and `` are the source and destination +//! environment types. You're responsible for opening both these environments, handling +//! all errors, and performing any cleanup if necessary. +//! * `open_and_migrate__to_`, which is similar to the above, but automatically +//! attempts to open the source environment and delete all of its supporting files if +//! there's no other environment open at that path. You're still responsible for +//! handling all errors. +//! * `easy_migrate__to_`, which is similar to the above, but ignores the +//! migration and doesn't delete any files if the source environment is invalid +//! (corrupted), unavailable (path not found or incompatible with configuration), or 
empty (database has no records). +//! +//! The tool currently has these limitations: +//! +//! 1. It doesn't support migration from environments created with +//! `EnvironmentFlags::NO_SUB_DIR`. To migrate such an environment, create a temporary +//! directory, copy the environment's data files in the temporary directory, then +//! migrate the temporary directory as the source environment. +//! 2. It doesn't support migration from databases created with `DatabaseFlags::DUP_SORT` +//! (with or without `DatabaseFlags::DUP_FIXED`) nor with `DatabaseFlags::INTEGER_KEY`. +//! This effectively means that migration is limited to `SingleStore`s. +//! 3. It doesn't allow for existing data in the destination environment, which means that +//! it cannot overwrite nor append data. + +use crate::{ + backend::{ + LmdbEnvironment, + SafeModeEnvironment, + }, + error::MigrateError, + Rkv, + StoreOptions, +}; + +pub use crate::backend::{ + LmdbArchMigrateError, + LmdbArchMigrateResult, + LmdbArchMigrator, +}; + +// FIXME: should parametrize this instead. + +macro_rules! fn_migrator { + ($name:tt, $src_env:ty, $dst_env:ty) => { + /// Migrate all data in all of the databases from the source environment to the destination + /// environment. This includes all key/value pairs in the main database that aren't + /// metadata about subdatabases and all key/value pairs in all subdatabases. + /// + /// Other backend-specific metadata, such as map size or maximum databases, is left intact on + /// the given environments. + /// + /// The destination environment should be empty of data, otherwise an error is returned. + pub fn $name(src_env: S, dst_env: D) -> Result<(), MigrateError> + where + S: std::ops::Deref>, + D: std::ops::Deref>, + { + let src_dbs = src_env.get_dbs().unwrap(); + if src_dbs.is_empty() { + return Err(MigrateError::SourceEmpty); + } + let dst_dbs = dst_env.get_dbs().unwrap(); + if !dst_dbs.is_empty() { + return Err(MigrateError::DestinationNotEmpty); + } + for name in src_dbs { + let src_store = src_env.open_single(name.as_deref(), StoreOptions::default())?; + let dst_store = dst_env.open_single(name.as_deref(), StoreOptions::create())?; + let reader = src_env.read()?; + let mut writer = dst_env.write()?; + let mut iter = src_store.iter_start(&reader)?; + while let Some(Ok((key, value))) = iter.next() { + dst_store.put(&mut writer, key, &value).expect("wrote"); + } + writer.commit()?; + } + Ok(()) + } + }; + + (open $migrate:tt, $name:tt, $builder:tt, $src_env:ty, $dst_env:ty) => { + /// Same as the non `open_*` migration method, but automatically attempts to open the + /// source environment. Finally, it deletes all of its supporting files if there's no other + /// environment open at that path.
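For orientation, a minimal sketch (not part of the patch) of how the flavours described in the module documentation above might be called from a consumer, following the patterns in the new `tests/env-migration.rs` further down. The `easy_*` flavour is the forgiving one: it treats a missing, corrupt, or empty source as a no-op. The store name and data below are illustrative only.

```rust
use tempfile::Builder;

use rkv::{
    backend::{Lmdb, SafeMode},
    Migrator, Rkv, StoreOptions, Value,
};

fn main() {
    let root = Builder::new().prefix("migrator-example").tempdir().expect("tempdir");

    // Write some data with the LMDB backend and flush it to disk.
    {
        let src_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
        let store = src_env.open_single("store", StoreOptions::create()).expect("opened");
        let mut writer = src_env.write().expect("writer");
        store.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote");
        writer.commit().expect("committed");
        src_env.sync(true).expect("synced");
    }

    // "Easy" migration into a safe-mode environment: a missing, corrupt, or empty
    // LMDB source is silently skipped instead of being reported as an error.
    let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
    Migrator::easy_migrate_lmdb_to_safe_mode(root.path(), &dst_env).expect("migrated");

    // The migrated data is now readable through the safe-mode backend, and the
    // LMDB files have been deleted since no one else had the source open.
    let store = dst_env.open_single("store", StoreOptions::default()).expect("opened");
    let reader = dst_env.read().expect("reader");
    assert_eq!(store.get(&reader, "foo").expect("read"), Some(Value::I64(1234)));
}
```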
+ pub fn $name(path: &std::path::Path, build: F, dst_env: D) -> Result<(), MigrateError> + where + F: FnOnce(crate::backend::$builder) -> crate::backend::$builder, + D: std::ops::Deref>, + { + use crate::backend::*; + + let mut manager = crate::Manager::<$src_env>::singleton().write()?; + let mut builder = Rkv::<$src_env>::environment_builder::<$builder>(); + builder.set_max_dbs(crate::env::DEFAULT_MAX_DBS); + builder = build(builder); + + let src_env = manager.get_or_create_from_builder(path, builder, Rkv::from_builder::<$builder>)?; + Migrator::$migrate(src_env.read()?, dst_env)?; + + drop(src_env); + manager.try_close_and_delete(path)?; + + Ok(()) + } + }; + + (easy $migrate:tt, $name:tt, $src_env:ty, $dst_env:ty) => { + /// Same as the `open_*` migration method, but ignores the migration and doesn't delete + /// any files if the source environment is invalid (corrupted), unavailable, or empty. + pub fn $name(path: &std::path::Path, dst_env: D) -> Result<(), MigrateError> + where + D: std::ops::Deref>, + { + match Migrator::$migrate(path, |builder| builder, dst_env) { + Err(crate::MigrateError::StoreError(crate::StoreError::FileInvalid)) => Ok(()), + Err(crate::MigrateError::StoreError(crate::StoreError::IoError(_))) => Ok(()), + Err(crate::MigrateError::StoreError(crate::StoreError::UnsuitableEnvironmentPath(_))) => Ok(()), + Err(crate::MigrateError::SourceEmpty) => Ok(()), + result => result, + }?; + + Ok(()) + } + }; +} + +macro_rules! fns_migrator { + ($src:tt, $dst:tt) => { + paste::item! { + fns_migrator!([], $src, $dst); + fns_migrator!([], $dst, $src); + } + }; + ($name:tt, $src:tt, $dst:tt) => { + paste::item! { + fn_migrator!($name, [<$src:camel Environment>], [<$dst:camel Environment>]); + fn_migrator!(open $name, [], [<$src:camel>], [<$src:camel Environment>], [<$dst:camel Environment>]); + fn_migrator!(easy [], [], [<$src:camel Environment>], [<$dst:camel Environment>]); + } + }; +} + +pub struct Migrator; + +impl Migrator { + fns_migrator!(lmdb, safe_mode); +} diff --git a/third_party/rust/rkv/src/readwrite.rs b/third_party/rust/rkv/src/readwrite.rs index db7c3aafffde..50ed2a1d88fd 100644 --- a/third_party/rust/rkv/src/readwrite.rs +++ b/third_party/rust/rkv/src/readwrite.rs @@ -8,17 +8,19 @@ // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. 
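The `readwrite.rs` hunks below fold the `KeyValuePairNotFound` case into the return value, so the caller-facing contract stays `Ok(Some(value))` for a hit and `Ok(None)` for a miss. A small sketch of that contract (not part of the patch), assuming the SafeMode backend shipped in this patch and a scratch `tempfile` directory:

```rust
use tempfile::Builder;

use rkv::{backend::SafeMode, Rkv, StoreOptions, Value};

fn main() {
    let root = Builder::new().prefix("get-example").tempdir().expect("tempdir");
    let k = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
    let store = k.open_single("store", StoreOptions::create()).expect("opened");

    let mut writer = k.write().expect("writer");
    store.put(&mut writer, "present", &Value::Bool(true)).expect("wrote");
    writer.commit().expect("committed");

    // A hit comes back as Ok(Some(Value)) and a miss as Ok(None); only genuine
    // backend failures surface as Err(StoreError).
    let reader = k.read().expect("reader");
    assert_eq!(store.get(&reader, "present").expect("read"), Some(Value::Bool(true)));
    assert_eq!(store.get(&reader, "missing").expect("read"), None);
}
```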
-use crate::backend::{ - BackendDatabase, - BackendRoCursor, - BackendRoCursorTransaction, - BackendRoTransaction, - BackendRwCursorTransaction, - BackendRwTransaction, +use crate::{ + backend::{ + BackendDatabase, + BackendRoCursor, + BackendRoCursorTransaction, + BackendRoTransaction, + BackendRwCursorTransaction, + BackendRwTransaction, + }, + error::StoreError, + helpers::read_transform, + value::Value, }; -use crate::error::StoreError; -use crate::helpers::read_transform; -use crate::value::Value; pub struct Reader(T); pub struct Writer(T); @@ -46,7 +48,10 @@ where K: AsRef<[u8]>, { let bytes = self.0.get(db, k.as_ref()).map_err(|e| e.into()); - read_transform(bytes) + match read_transform(bytes).map(Some) { + Err(StoreError::KeyValuePairNotFound) => Ok(None), + result => result, + } } fn open_ro_cursor(&'r self, db: &T::Database) -> Result { @@ -81,7 +86,10 @@ where K: AsRef<[u8]>, { let bytes = self.0.get(db, k.as_ref()).map_err(|e| e.into()); - read_transform(bytes) + match read_transform(bytes).map(Some) { + Err(StoreError::KeyValuePairNotFound) => Ok(None), + result => result, + } } fn open_ro_cursor(&'r self, db: &T::Database) -> Result { diff --git a/third_party/rust/rkv/src/store/integer.rs b/third_party/rust/rkv/src/store/integer.rs index 9ace6ef683d2..6a64afb807dd 100644 --- a/third_party/rust/rkv/src/store/integer.rs +++ b/third_party/rust/rkv/src/store/integer.rs @@ -10,21 +10,25 @@ use std::marker::PhantomData; -use crate::backend::{ - BackendDatabase, - BackendRwTransaction, +use crate::{ + backend::{ + BackendDatabase, + BackendRwTransaction, + }, + error::StoreError, + readwrite::{ + Readable, + Writer, + }, + store::{ + keys::{ + Key, + PrimitiveInt, + }, + single::SingleStore, + }, + value::Value, }; -use crate::error::StoreError; -use crate::readwrite::{ - Readable, - Writer, -}; -use crate::store::keys::{ - Key, - PrimitiveInt, -}; -use crate::store::single::SingleStore; -use crate::value::Value; type EmptyResult = Result<(), StoreError>; @@ -77,12 +81,13 @@ where #[cfg(test)] mod tests { - use std::fs; - use tempfile::Builder; - use super::*; use crate::*; + use std::fs; + + use tempfile::Builder; + #[test] fn test_integer_keys() { let root = Builder::new().prefix("test_integer_keys").tempdir().expect("tempdir"); @@ -310,12 +315,13 @@ mod tests { #[cfg(test)] mod tests_safe { - use std::fs; - use tempfile::Builder; - use super::*; use crate::*; + use std::fs; + + use tempfile::Builder; + #[test] fn test_integer_keys() { let root = Builder::new().prefix("test_integer_keys").tempdir().expect("tempdir"); diff --git a/third_party/rust/rkv/src/store/integermulti.rs b/third_party/rust/rkv/src/store/integermulti.rs index 3bde0797f0b5..f157c62d9963 100644 --- a/third_party/rust/rkv/src/store/integermulti.rs +++ b/third_party/rust/rkv/src/store/integermulti.rs @@ -10,26 +10,30 @@ use std::marker::PhantomData; -use crate::backend::{ - BackendDatabase, - BackendIter, - BackendRoCursor, - BackendRwTransaction, +use crate::{ + backend::{ + BackendDatabase, + BackendIter, + BackendRoCursor, + BackendRwTransaction, + }, + error::StoreError, + readwrite::{ + Readable, + Writer, + }, + store::{ + keys::{ + Key, + PrimitiveInt, + }, + multi::{ + Iter, + MultiStore, + }, + }, + value::Value, }; -use crate::error::StoreError; -use crate::readwrite::{ - Readable, - Writer, -}; -use crate::store::keys::{ - Key, - PrimitiveInt, -}; -use crate::store::multi::{ - Iter, - MultiStore, -}; -use crate::value::Value; type EmptyResult = Result<(), StoreError>; @@ -106,12 +110,13 @@ where 
#[cfg(test)] mod tests { - use std::fs; - use tempfile::Builder; - use super::*; use crate::*; + use std::fs; + + use tempfile::Builder; + #[test] fn test_integer_keys() { let root = Builder::new().prefix("test_integer_keys").tempdir().expect("tempdir"); @@ -214,8 +219,8 @@ mod tests { s.put(&mut writer, 1, &Value::Str("hello1!")).expect("write"); let mut iter = s.get(&writer, 1).expect("read"); - assert_eq!(iter.next().expect("first").expect("ok").1, Some(Value::Str("hello!"))); - assert_eq!(iter.next().expect("second").expect("ok").1, Some(Value::Str("hello1!"))); + assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello!")); + assert_eq!(iter.next().expect("second").expect("ok").1, Value::Str("hello1!")); assert!(iter.next().is_none()); } } @@ -235,8 +240,8 @@ mod tests { s.put(&mut writer, 1, &Value::Str("hello1!")).expect("write"); { let mut iter = s.get(&writer, 1).expect("read"); - assert_eq!(iter.next().expect("first").expect("ok").1, Some(Value::Str("hello!"))); - assert_eq!(iter.next().expect("second").expect("ok").1, Some(Value::Str("hello1!"))); + assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello!")); + assert_eq!(iter.next().expect("second").expect("ok").1, Value::Str("hello1!")); assert!(iter.next().is_none()); } writer.commit().expect("committed"); @@ -249,7 +254,7 @@ mod tests { let reader = k.read().expect("reader"); let mut iter = s.get(&reader, 1).expect("read"); - assert_eq!(iter.next().expect("first").expect("ok").1, Some(Value::Str("hello1!"))); + assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello1!")); assert!(iter.next().is_none()); } @@ -260,7 +265,7 @@ mod tests { let reader = k.read().expect("reader"); let mut iter = s.get(&reader, 1).expect("read"); - assert_eq!(iter.next().expect("first").expect("ok").1, Some(Value::Str("hello1!"))); + assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello1!")); assert!(iter.next().is_none()); } @@ -300,8 +305,8 @@ mod tests { s.put(&mut writer, 2, &Value::Str("hello!")).expect("write"); { let mut iter = s.get(&writer, 1).expect("read"); - assert_eq!(iter.next().expect("first").expect("ok").1, Some(Value::Str("hello!"))); - assert_eq!(iter.next().expect("second").expect("ok").1, Some(Value::Str("hello1!"))); + assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello!")); + assert_eq!(iter.next().expect("second").expect("ok").1, Value::Str("hello1!")); assert!(iter.next().is_none()); } writer.commit().expect("committed"); @@ -313,8 +318,8 @@ mod tests { let reader = k.read().expect("reader"); let mut iter = s.get(&reader, 1).expect("read"); - assert_eq!(iter.next().expect("first").expect("ok").1, Some(Value::Str("hello!"))); - assert_eq!(iter.next().expect("second").expect("ok").1, Some(Value::Str("hello1!"))); + assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello!")); + assert_eq!(iter.next().expect("second").expect("ok").1, Value::Str("hello1!")); assert!(iter.next().is_none()); } } @@ -322,12 +327,13 @@ mod tests { #[cfg(test)] mod tests_safe { - use std::fs; - use tempfile::Builder; - use super::*; use crate::*; + use std::fs; + + use tempfile::Builder; + #[test] fn test_integer_keys() { let root = Builder::new().prefix("test_integer_keys").tempdir().expect("tempdir"); @@ -430,8 +436,8 @@ mod tests_safe { s.put(&mut writer, 1, &Value::Str("hello1!")).expect("write"); let mut iter = s.get(&writer, 1).expect("read"); - assert_eq!(iter.next().expect("first").expect("ok").1, Some(Value::Str("hello!"))); - 
assert_eq!(iter.next().expect("second").expect("ok").1, Some(Value::Str("hello1!"))); + assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello!")); + assert_eq!(iter.next().expect("second").expect("ok").1, Value::Str("hello1!")); assert!(iter.next().is_none()); } } @@ -451,8 +457,8 @@ mod tests_safe { s.put(&mut writer, 1, &Value::Str("hello1!")).expect("write"); { let mut iter = s.get(&writer, 1).expect("read"); - assert_eq!(iter.next().expect("first").expect("ok").1, Some(Value::Str("hello!"))); - assert_eq!(iter.next().expect("second").expect("ok").1, Some(Value::Str("hello1!"))); + assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello!")); + assert_eq!(iter.next().expect("second").expect("ok").1, Value::Str("hello1!")); assert!(iter.next().is_none()); } writer.commit().expect("committed"); @@ -465,7 +471,7 @@ mod tests_safe { let reader = k.read().expect("reader"); let mut iter = s.get(&reader, 1).expect("read"); - assert_eq!(iter.next().expect("first").expect("ok").1, Some(Value::Str("hello1!"))); + assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello1!")); assert!(iter.next().is_none()); } @@ -476,7 +482,7 @@ mod tests_safe { let reader = k.read().expect("reader"); let mut iter = s.get(&reader, 1).expect("read"); - assert_eq!(iter.next().expect("first").expect("ok").1, Some(Value::Str("hello1!"))); + assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello1!")); assert!(iter.next().is_none()); } @@ -516,8 +522,8 @@ mod tests_safe { s.put(&mut writer, 2, &Value::Str("hello!")).expect("write"); { let mut iter = s.get(&writer, 1).expect("read"); - assert_eq!(iter.next().expect("first").expect("ok").1, Some(Value::Str("hello!"))); - assert_eq!(iter.next().expect("second").expect("ok").1, Some(Value::Str("hello1!"))); + assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello!")); + assert_eq!(iter.next().expect("second").expect("ok").1, Value::Str("hello1!")); assert!(iter.next().is_none()); } writer.commit().expect("committed"); @@ -529,8 +535,8 @@ mod tests_safe { let reader = k.read().expect("reader"); let mut iter = s.get(&reader, 1).expect("read"); - assert_eq!(iter.next().expect("first").expect("ok").1, Some(Value::Str("hello!"))); - assert_eq!(iter.next().expect("second").expect("ok").1, Some(Value::Str("hello1!"))); + assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello!")); + assert_eq!(iter.next().expect("second").expect("ok").1, Value::Str("hello1!")); assert!(iter.next().is_none()); } } diff --git a/third_party/rust/rkv/src/store/multi.rs b/third_party/rust/rkv/src/store/multi.rs index fad9109bba92..04714badcb62 100644 --- a/third_party/rust/rkv/src/store/multi.rs +++ b/third_party/rust/rkv/src/store/multi.rs @@ -10,20 +10,22 @@ use std::marker::PhantomData; -use crate::backend::{ - BackendDatabase, - BackendFlags, - BackendIter, - BackendRoCursor, - BackendRwTransaction, +use crate::{ + backend::{ + BackendDatabase, + BackendFlags, + BackendIter, + BackendRoCursor, + BackendRwTransaction, + }, + error::StoreError, + helpers::read_transform, + readwrite::{ + Readable, + Writer, + }, + value::Value, }; -use crate::error::StoreError; -use crate::helpers::read_transform; -use crate::readwrite::{ - Readable, - Writer, -}; -use crate::value::Value; type EmptyResult = Result<(), StoreError>; @@ -47,7 +49,8 @@ where } } - /// Provides a cursor to all of the values for the duplicate entries that match this key + /// Provides a cursor to all of the values for the duplicate 
entries that match this + /// key pub fn get<'r, R, I, C, K>(&self, reader: &'r R, k: K) -> Result, StoreError> where R: Readable<'r, Database = D, RoCursor = C>, @@ -120,14 +123,16 @@ impl<'i, I> Iterator for Iter<'i, I> where I: BackendIter<'i>, { - type Item = Result<(&'i [u8], Option>), StoreError>; + type Item = Result<(&'i [u8], Value<'i>), StoreError>; fn next(&mut self) -> Option { match self.iter.next() { None => None, - Some(Ok((key, bytes))) => match read_transform(Ok(bytes)) { - Ok(val) => Some(Ok((key, val))), - Err(err) => Some(Err(err)), + Some(Ok((key, bytes))) => { + match read_transform(Ok(bytes)) { + Ok(val) => Some(Ok((key, val))), + Err(err) => Some(Err(err)), + } }, Some(Err(err)) => Some(Err(err.into())), } diff --git a/third_party/rust/rkv/src/store/single.rs b/third_party/rust/rkv/src/store/single.rs index a7f4aeaecc80..bb7a5ab75580 100644 --- a/third_party/rust/rkv/src/store/single.rs +++ b/third_party/rust/rkv/src/store/single.rs @@ -10,20 +10,22 @@ use std::marker::PhantomData; -use crate::backend::{ - BackendDatabase, - BackendFlags, - BackendIter, - BackendRoCursor, - BackendRwTransaction, +use crate::{ + backend::{ + BackendDatabase, + BackendFlags, + BackendIter, + BackendRoCursor, + BackendRwTransaction, + }, + error::StoreError, + helpers::read_transform, + readwrite::{ + Readable, + Writer, + }, + value::Value, }; -use crate::error::StoreError; -use crate::helpers::read_transform; -use crate::readwrite::{ - Readable, - Writer, -}; -use crate::value::Value; type EmptyResult = Result<(), StoreError>; @@ -126,14 +128,16 @@ impl<'i, I> Iterator for Iter<'i, I> where I: BackendIter<'i>, { - type Item = Result<(&'i [u8], Option>), StoreError>; + type Item = Result<(&'i [u8], Value<'i>), StoreError>; fn next(&mut self) -> Option { match self.iter.next() { None => None, - Some(Ok((key, bytes))) => match read_transform(Ok(bytes)) { - Ok(val) => Some(Ok((key, val))), - Err(err) => Some(Err(err)), + Some(Ok((key, bytes))) => { + match read_transform(Ok(bytes)) { + Ok(val) => Some(Ok((key, val))), + Err(err) => Some(Err(err)), + } }, Some(Err(err)) => Some(Err(err.into())), } diff --git a/third_party/rust/rkv/src/value.rs b/third_party/rust/rkv/src/value.rs index 211b55b17120..8d60ea21f7df 100644 --- a/third_party/rust/rkv/src/value.rs +++ b/third_party/rust/rkv/src/value.rs @@ -24,10 +24,9 @@ use uuid::{ use crate::error::DataError; -/// We define a set of types, associated with simple integers, to annotate values -/// stored in LMDB. This is to avoid an accidental 'cast' from a value of one type -/// to another. For this reason we don't simply use `deserialize` from the `bincode` -/// crate. +/// We define a set of types, associated with simple integers, to annotate values stored +/// in LMDB. This is to avoid an accidental 'cast' from a value of one type to another. +/// For this reason we don't simply use `deserialize` from the `bincode` crate. 
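The typed `Value` described above is also what the store iterators now yield directly: per the `single.rs` and `multi.rs` hunks earlier, each item is `(key, Value)` rather than `(key, Option<Value>)`. A short sketch of reading values back under the new signature (not part of the patch), with the same assumptions as the other sketches (rkv 0.15 API, `tempfile` scratch directory):

```rust
use std::str;

use tempfile::Builder;

use rkv::{backend::SafeMode, Rkv, StoreOptions, Value};

fn main() {
    let root = Builder::new().prefix("iter-example").tempdir().expect("tempdir");
    let k = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
    let store = k.open_single("store", StoreOptions::create()).expect("opened");

    let mut writer = k.write().expect("writer");
    store.put(&mut writer, "bar", &Value::Bool(true)).expect("wrote");
    store.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote");
    writer.commit().expect("committed");

    let reader = k.read().expect("reader");
    // Each item is now (key, Value) rather than (key, Option<Value>), so there is
    // no inner Option left to unwrap before using the value.
    for item in store.iter_start(&reader).expect("iter") {
        let (key, value) = item.expect("entry");
        println!("{} = {:?}", str::from_utf8(key).expect("utf8 key"), value);
    }
}
```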
#[repr(u8)] #[derive(Debug, PartialEq, Eq)] pub enum Type { @@ -129,9 +128,11 @@ impl<'v> Value<'v> { fn from_type_and_data(t: Type, data: &'v [u8]) -> Result, DataError> { if t == Type::Uuid { return deserialize(data) - .map_err(|e| DataError::DecodingError { - value_type: t, - err: e, + .map_err(|e| { + DataError::DecodingError { + value_type: t, + err: e, + } }) .map(uuid)?; } @@ -150,9 +151,11 @@ impl<'v> Value<'v> { unreachable!() }, } - .map_err(|e| DataError::DecodingError { - value_type: t, - err: e, + .map_err(|e| { + DataError::DecodingError { + value_type: t, + err: e, + } }) } @@ -221,8 +224,6 @@ impl<'v> From<&'v OwnedValue> for Value<'v> { #[cfg(test)] mod tests { - use ordered_float::OrderedFloat; - use super::*; #[test] diff --git a/third_party/rust/rkv/tests/env-all.rs b/third_party/rust/rkv/tests/env-all.rs index 9ef4bdda9a40..325e20481e4b 100644 --- a/third_party/rust/rkv/tests/env-all.rs +++ b/third_party/rust/rkv/tests/env-all.rs @@ -12,11 +12,11 @@ use std::fs; use tempfile::Builder; -use rkv::backend::{ - Lmdb, - SafeMode, -}; use rkv::{ + backend::{ + Lmdb, + SafeMode, + }, Rkv, StoreOptions, Value, @@ -67,7 +67,7 @@ fn test_open_safe_same_dir_as_lmdb() { assert_eq!(sk.get(&reader, "bar").expect("read"), Some(Value::Bool(true))); assert_eq!(sk.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu"))); } - // Create database of type B and save to disk (database of type A exists at the same path). + // Create database of type B and save to disk (type A exists at the same path). { let k = Rkv::new::(root.path()).expect("new succeeded"); let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); @@ -149,7 +149,7 @@ fn test_open_lmdb_same_dir_as_safe() { assert_eq!(sk.get(&reader, "bar").expect("read"), Some(Value::Bool(true))); assert_eq!(sk.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu"))); } - // Create database of type B and save to disk (database of type A exists at the same path). + // Create database of type B and save to disk (type A exists at the same path). { let k = Rkv::new::(root.path()).expect("new succeeded"); let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); diff --git a/third_party/rust/rkv/tests/env-lmdb.rs b/third_party/rust/rkv/tests/env-lmdb.rs index cd6cd17e802d..f9376d2ccd89 100644 --- a/third_party/rust/rkv/tests/env-lmdb.rs +++ b/third_party/rust/rkv/tests/env-lmdb.rs @@ -12,13 +12,16 @@ // deprecates `clippy::cyclomatic_complexity`. 
#![allow(clippy::complexity)] -use std::fs; -use std::str; -use std::sync::{ - Arc, - RwLock, +use std::{ + fs, + path::Path, + str, + sync::{ + Arc, + RwLock, + }, + thread, }; -use std::thread; use byteorder::{ ByteOrder, @@ -26,16 +29,16 @@ use byteorder::{ }; use tempfile::Builder; -use rkv::backend::{ - BackendEnvironmentBuilder, - BackendInfo, - BackendStat, - Lmdb, - LmdbDatabase, - LmdbEnvironment, - LmdbRwTransaction, -}; use rkv::{ + backend::{ + BackendEnvironmentBuilder, + BackendInfo, + BackendStat, + Lmdb, + LmdbDatabase, + LmdbEnvironment, + LmdbRwTransaction, + }, EnvironmentFlags, Rkv, SingleStore, @@ -69,7 +72,7 @@ fn test_open_fails() { let pb = nope.to_path_buf(); match Rkv::new::(nope.as_path()).err() { - Some(StoreError::DirectoryDoesNotExistError(p)) => { + Some(StoreError::UnsuitableEnvironmentPath(p)) => { assert_eq!(pb, p); }, _ => panic!("expected error"), @@ -101,9 +104,124 @@ fn test_open_from_builder() { check_rkv(&k); } +#[test] +fn test_open_from_builder_with_no_subdir_1() { + let root = Builder::new().prefix("test_open_from_builder").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + { + let mut builder = Rkv::environment_builder::(); + builder.set_max_dbs(2); + + let k = Rkv::from_builder(root.path(), builder).expect("rkv"); + check_rkv(&k); + } + { + let mut builder = Rkv::environment_builder::(); + builder.set_flags(EnvironmentFlags::NO_SUB_DIR); + builder.set_max_dbs(2); + + let mut datamdb = root.path().to_path_buf(); + datamdb.push("data.mdb"); + + let k = Rkv::from_builder(&datamdb, builder).expect("rkv"); + check_rkv(&k); + } +} + +#[test] +#[should_panic(expected = "rkv: UnsuitableEnvironmentPath")] +fn test_open_from_builder_with_no_subdir_2() { + let root = Builder::new().prefix("test_open_from_builder").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + { + let mut builder = Rkv::environment_builder::(); + builder.set_max_dbs(2); + + let k = Rkv::from_builder(root.path(), builder).expect("rkv"); + check_rkv(&k); + } + { + let mut builder = Rkv::environment_builder::(); + builder.set_flags(EnvironmentFlags::NO_SUB_DIR); + builder.set_max_dbs(2); + + let mut datamdb = root.path().to_path_buf(); + datamdb.push("bogus.mdb"); + + let k = Rkv::from_builder(&datamdb, builder).expect("rkv"); + check_rkv(&k); + } +} + +#[test] +fn test_open_from_builder_with_dir_1() { + let root = Builder::new().prefix("test_open_from_builder").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + + let mut builder = Rkv::environment_builder::(); + builder.set_max_dbs(2); + builder.set_make_dir_if_needed(true); + + let k = Rkv::from_builder(root.path(), builder).expect("rkv"); + check_rkv(&k); +} + +#[test] +#[should_panic(expected = "rkv: UnsuitableEnvironmentPath(\"bogus\")")] +fn test_open_from_builder_with_dir_2() { + let root = Path::new("bogus"); + println!("Root path: {:?}", root); + assert!(!root.is_dir()); + + let mut builder = Rkv::environment_builder::(); + builder.set_max_dbs(2); + + let k = Rkv::from_builder(root, builder).expect("rkv"); + check_rkv(&k); +} + #[test] #[should_panic(expected = "opened: DbsFull")] -fn test_open_with_capacity() { +fn test_create_with_capacity_1() { + let root = Builder::new().prefix("test_create_with_capacity").tempdir().expect("tempdir"); + println!("Root path: {:?}", 
root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + let k = Rkv::with_capacity::(root.path(), 1).expect("rkv"); + check_rkv(&k); + + // This errors with "opened: DbsFull" because we specified a capacity of one (database), + // and check_rkv already opened one (plus the default database, which doesn't count + // against the limit). + let _zzz = k.open_single("zzz", StoreOptions::create()).expect("opened"); +} + +#[test] +fn test_create_with_capacity_2() { + let root = Builder::new().prefix("test_create_with_capacity").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + let k = Rkv::with_capacity::(root.path(), 1).expect("rkv"); + check_rkv(&k); + + // This doesn't error with "opened: DbsFull" with because even though we specified a + // capacity of one (database), and check_rkv already opened one, the default database + // doesn't count against the limit. + let _zzz = k.open_single(None, StoreOptions::create()).expect("opened"); +} + +#[test] +#[should_panic(expected = "opened: DbsFull")] +fn test_open_with_capacity_1() { let root = Builder::new().prefix("test_open_with_capacity").tempdir().expect("tempdir"); println!("Root path: {:?}", root.path()); fs::create_dir_all(root.path()).expect("dir created"); @@ -112,12 +230,65 @@ fn test_open_with_capacity() { let k = Rkv::with_capacity::(root.path(), 1).expect("rkv"); check_rkv(&k); - // This panics with "opened: LmdbError(DbsFull)" because we specified - // a capacity of one (database), and check_rkv already opened one - // (plus the default database, which doesn't count against the limit). - // This should really return an error rather than panicking, per - // . 
- let _zzz = k.open_single("zzz", StoreOptions::create()).expect("opened"); + let _zzz = k.open_single("zzz", StoreOptions::default()).expect("opened"); +} + +#[test] +fn test_open_with_capacity_2() { + let root = Builder::new().prefix("test_open_with_capacity").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + let k = Rkv::with_capacity::(root.path(), 1).expect("rkv"); + check_rkv(&k); + + let _zzz = k.open_single(None, StoreOptions::default()).expect("opened"); +} + +#[test] +fn test_list_dbs_1() { + let root = Builder::new().prefix("test_list_dbs").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + let k = Rkv::with_capacity::(root.path(), 1).expect("rkv"); + check_rkv(&k); + + let dbs = k.get_dbs().unwrap(); + assert_eq!(dbs, vec![Some("s".to_owned())]); +} + +#[test] +fn test_list_dbs_2() { + let root = Builder::new().prefix("test_list_dbs").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + let k = Rkv::with_capacity::(root.path(), 2).expect("rkv"); + check_rkv(&k); + + let _ = k.open_single("zzz", StoreOptions::create()).expect("opened"); + + let dbs = k.get_dbs().unwrap(); + assert_eq!(dbs, vec![Some("s".to_owned()), Some("zzz".to_owned())]); +} + +#[test] +fn test_list_dbs_3() { + let root = Builder::new().prefix("test_list_dbs").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + let k = Rkv::with_capacity::(root.path(), 0).expect("rkv"); + + let _ = k.open_single(None, StoreOptions::create()).expect("opened"); + + let dbs = k.get_dbs().unwrap(); + assert_eq!(dbs, vec![None]); } fn get_larger_than_default_map_size_value() -> usize { @@ -358,9 +529,9 @@ fn test_multi_put_get_del() { { let mut iter = multistore.get(&writer, "str1").unwrap(); let (id, val) = iter.next().unwrap().unwrap(); - assert_eq!((id, val), (&b"str1"[..], Some(Value::Str("str1 bar")))); + assert_eq!((id, val), (&b"str1"[..], Value::Str("str1 bar"))); let (id, val) = iter.next().unwrap().unwrap(); - assert_eq!((id, val), (&b"str1"[..], Some(Value::Str("str1 foo")))); + assert_eq!((id, val), (&b"str1"[..], Value::Str("str1 foo"))); } writer.commit().unwrap(); @@ -723,14 +894,14 @@ fn test_load_ratio() { let mut writer = k.write().expect("writer"); sk.put(&mut writer, "foo", &Value::Str("bar")).expect("wrote"); writer.commit().expect("commited"); - let ratio = k.load_ratio().expect("ratio"); + let ratio = k.load_ratio().expect("ratio").unwrap(); assert!(ratio > 0.0_f32 && ratio < 1.0_f32); // Put data to database should increase the load ratio. 
let mut writer = k.write().expect("writer"); sk.put(&mut writer, "bar", &Value::Str(&"more-than-4KB".repeat(1000))).expect("wrote"); writer.commit().expect("commited"); - let new_ratio = k.load_ratio().expect("ratio"); + let new_ratio = k.load_ratio().expect("ratio").unwrap(); assert!(new_ratio > ratio); // Clear the database so that all the used pages should go to freelist, hence the ratio @@ -738,7 +909,7 @@ fn test_load_ratio() { let mut writer = k.write().expect("writer"); sk.clear(&mut writer).expect("clear"); writer.commit().expect("commited"); - let after_clear_ratio = k.load_ratio().expect("ratio"); + let after_clear_ratio = k.load_ratio().expect("ratio").unwrap(); assert!(after_clear_ratio < new_ratio); } @@ -792,22 +963,22 @@ fn test_iter() { let mut iter = sk.iter_start(&reader).unwrap(); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "bar"); - assert_eq!(val, Some(Value::Bool(true))); + assert_eq!(val, Value::Bool(true)); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "baz"); - assert_eq!(val, Some(Value::Str("héllo, yöu"))); + assert_eq!(val, Value::Str("héllo, yöu")); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "foo"); - assert_eq!(val, Some(Value::I64(1234))); + assert_eq!(val, Value::I64(1234)); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "héllò, töűrîst"); - assert_eq!(val, Some(Value::Str("Emil.RuleZ!"))); + assert_eq!(val, Value::Str("Emil.RuleZ!")); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "noo"); - assert_eq!(val, Some(Value::F64(1234.0.into()))); + assert_eq!(val, Value::F64(1234.0.into())); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客"); - assert_eq!(val, Some(Value::Str("米克規則"))); + assert_eq!(val, Value::Str("米克規則")); assert!(iter.next().is_none()); // Iterators don't loop. 
Once one returns None, additional calls @@ -819,10 +990,10 @@ fn test_iter() { let mut iter = sk.iter_from(&reader, "moo").unwrap(); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "noo"); - assert_eq!(val, Some(Value::F64(1234.0.into()))); + assert_eq!(val, Value::F64(1234.0.into())); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客"); - assert_eq!(val, Some(Value::Str("米克規則"))); + assert_eq!(val, Value::Str("米克規則")); assert!(iter.next().is_none()); // Reader.iter_from() works as expected when the given key is a prefix @@ -830,10 +1001,10 @@ fn test_iter() { let mut iter = sk.iter_from(&reader, "no").unwrap(); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "noo"); - assert_eq!(val, Some(Value::F64(1234.0.into()))); + assert_eq!(val, Value::F64(1234.0.into())); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客"); - assert_eq!(val, Some(Value::Str("米克規則"))); + assert_eq!(val, Value::Str("米克規則")); assert!(iter.next().is_none()); } @@ -928,84 +1099,84 @@ fn test_multiple_store_iter() { let mut iter = s1.iter_start(&reader).unwrap(); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "bar"); - assert_eq!(val, Some(Value::Bool(true))); + assert_eq!(val, Value::Bool(true)); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "baz"); - assert_eq!(val, Some(Value::Str("héllo, yöu"))); + assert_eq!(val, Value::Str("héllo, yöu")); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "foo"); - assert_eq!(val, Some(Value::I64(1234))); + assert_eq!(val, Value::I64(1234)); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "héllò, töűrîst"); - assert_eq!(val, Some(Value::Str("Emil.RuleZ!"))); + assert_eq!(val, Value::Str("Emil.RuleZ!")); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "noo"); - assert_eq!(val, Some(Value::F64(1234.0.into()))); + assert_eq!(val, Value::F64(1234.0.into())); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客"); - assert_eq!(val, Some(Value::Str("米克規則"))); + assert_eq!(val, Value::Str("米克規則")); assert!(iter.next().is_none()); // Iterate through the whole store in "s2" let mut iter = s2.iter_start(&reader).unwrap(); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "bar"); - assert_eq!(val, Some(Value::Bool(true))); + assert_eq!(val, Value::Bool(true)); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "baz"); - assert_eq!(val, Some(Value::Str("héllo, yöu"))); + assert_eq!(val, Value::Str("héllo, yöu")); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "foo"); - assert_eq!(val, Some(Value::I64(1234))); + assert_eq!(val, Value::I64(1234)); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "héllò, töűrîst"); - assert_eq!(val, Some(Value::Str("Emil.RuleZ!"))); + assert_eq!(val, Value::Str("Emil.RuleZ!")); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "noo"); - assert_eq!(val, Some(Value::F64(1234.0.into()))); + assert_eq!(val, Value::F64(1234.0.into())); let (key, val) = iter.next().unwrap().unwrap(); 
assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客"); - assert_eq!(val, Some(Value::Str("米克規則"))); + assert_eq!(val, Value::Str("米克規則")); assert!(iter.next().is_none()); // Iterate from a given key in "s1" let mut iter = s1.iter_from(&reader, "moo").unwrap(); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "noo"); - assert_eq!(val, Some(Value::F64(1234.0.into()))); + assert_eq!(val, Value::F64(1234.0.into())); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客"); - assert_eq!(val, Some(Value::Str("米克規則"))); + assert_eq!(val, Value::Str("米克規則")); assert!(iter.next().is_none()); // Iterate from a given key in "s2" let mut iter = s2.iter_from(&reader, "moo").unwrap(); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "noo"); - assert_eq!(val, Some(Value::F64(1234.0.into()))); + assert_eq!(val, Value::F64(1234.0.into())); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客"); - assert_eq!(val, Some(Value::Str("米克規則"))); + assert_eq!(val, Value::Str("米克規則")); assert!(iter.next().is_none()); // Iterate from a given prefix in "s1" let mut iter = s1.iter_from(&reader, "no").unwrap(); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "noo"); - assert_eq!(val, Some(Value::F64(1234.0.into()))); + assert_eq!(val, Value::F64(1234.0.into())); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客"); - assert_eq!(val, Some(Value::Str("米克規則"))); + assert_eq!(val, Value::Str("米克規則")); assert!(iter.next().is_none()); // Iterate from a given prefix in "s2" let mut iter = s2.iter_from(&reader, "no").unwrap(); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "noo"); - assert_eq!(val, Some(Value::F64(1234.0.into()))); + assert_eq!(val, Value::F64(1234.0.into())); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客"); - assert_eq!(val, Some(Value::Str("米克規則"))); + assert_eq!(val, Value::Str("米克規則")); assert!(iter.next().is_none()); } diff --git a/third_party/rust/rkv/tests/env-migration.rs b/third_party/rust/rkv/tests/env-migration.rs new file mode 100644 index 000000000000..4bb8da8c1c63 --- /dev/null +++ b/third_party/rust/rkv/tests/env-migration.rs @@ -0,0 +1,356 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +use std::{ + fs, + path::Path, +}; + +use tempfile::Builder; + +use rkv::{ + backend::{ + Lmdb, + SafeMode, + }, + Migrator, + Rkv, + StoreOptions, + Value, +}; + +macro_rules! 
populate_store { + ($env:expr) => { + let store = $env.open_single("store", StoreOptions::create()).expect("opened"); + let mut writer = $env.write().expect("writer"); + store.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote"); + store.put(&mut writer, "bar", &Value::Bool(true)).expect("wrote"); + store.put(&mut writer, "baz", &Value::Str("héllo, yöu")).expect("wrote"); + writer.commit().expect("committed"); + }; +} + +#[test] +fn test_simple_migrator_lmdb_to_safe() { + let root = Builder::new().prefix("test_simple_migrator_lmdb_to_safe").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + // Populate source environment and persist to disk. + { + let src_env = Rkv::new::(root.path()).expect("new succeeded"); + populate_store!(&src_env); + src_env.sync(true).expect("synced"); + } + // Check if the files were written to disk. + { + let mut datamdb = root.path().to_path_buf(); + let mut lockmdb = root.path().to_path_buf(); + datamdb.push("data.mdb"); + lockmdb.push("lock.mdb"); + assert!(datamdb.exists()); + assert!(lockmdb.exists()); + } + // Verify that database was written to disk. + { + let src_env = Rkv::new::(root.path()).expect("new succeeded"); + let store = src_env.open_single("store", StoreOptions::default()).expect("opened"); + let reader = src_env.read().expect("reader"); + assert_eq!(store.get(&reader, "foo").expect("read"), Some(Value::I64(1234))); + assert_eq!(store.get(&reader, "bar").expect("read"), Some(Value::Bool(true))); + assert_eq!(store.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu"))); + } + // Open and migrate. + { + let dst_env = Rkv::new::(root.path()).expect("new succeeded"); + Migrator::open_and_migrate_lmdb_to_safe_mode(root.path(), |builder| builder, &dst_env).expect("migrated"); + } + // Verify that the database was indeed migrated. + { + let dst_env = Rkv::new::(root.path()).expect("new succeeded"); + let store = dst_env.open_single("store", StoreOptions::default()).expect("opened"); + let reader = dst_env.read().expect("reader"); + assert_eq!(store.get(&reader, "foo").expect("read"), Some(Value::I64(1234))); + assert_eq!(store.get(&reader, "bar").expect("read"), Some(Value::Bool(true))); + assert_eq!(store.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu"))); + } + // Check if the old files were deleted from disk. + { + let mut datamdb = root.path().to_path_buf(); + let mut lockmdb = root.path().to_path_buf(); + datamdb.push("data.mdb"); + lockmdb.push("lock.mdb"); + assert!(!datamdb.exists()); + assert!(!lockmdb.exists()); + } +} + +#[test] +fn test_simple_migrator_safe_to_lmdb() { + let root = Builder::new().prefix("test_simple_migrator_safe_to_lmdb").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + // Populate source environment and persist to disk. + { + let src_env = Rkv::new::(root.path()).expect("new succeeded"); + populate_store!(&src_env); + src_env.sync(true).expect("synced"); + } + // Check if the files were written to disk. + { + let mut safebin = root.path().to_path_buf(); + safebin.push("data.safe.bin"); + assert!(safebin.exists()); + } + // Verify that database was written to disk. 
+ { + let src_env = Rkv::new::(root.path()).expect("new succeeded"); + let store = src_env.open_single("store", StoreOptions::default()).expect("opened"); + let reader = src_env.read().expect("reader"); + assert_eq!(store.get(&reader, "foo").expect("read"), Some(Value::I64(1234))); + assert_eq!(store.get(&reader, "bar").expect("read"), Some(Value::Bool(true))); + assert_eq!(store.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu"))); + } + // Open and migrate. + { + let dst_env = Rkv::new::(root.path()).expect("new succeeded"); + Migrator::open_and_migrate_safe_mode_to_lmdb(root.path(), |builder| builder, &dst_env).expect("migrated"); + } + // Verify that the database was indeed migrated. + { + let dst_env = Rkv::new::(root.path()).expect("new succeeded"); + let store = dst_env.open_single("store", StoreOptions::default()).expect("opened"); + let reader = dst_env.read().expect("reader"); + assert_eq!(store.get(&reader, "foo").expect("read"), Some(Value::I64(1234))); + assert_eq!(store.get(&reader, "bar").expect("read"), Some(Value::Bool(true))); + assert_eq!(store.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu"))); + } + // Check if the old files were deleted from disk. + { + let mut safebin = root.path().to_path_buf(); + safebin.push("data.safe.bin"); + assert!(!safebin.exists()); + } +} + +#[test] +fn test_migrator_round_trip() { + let root = Builder::new().prefix("test_simple_migrator_lmdb_to_safe").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + // Populate source environment and persist to disk. + { + let src_env = Rkv::new::(root.path()).expect("new succeeded"); + populate_store!(&src_env); + src_env.sync(true).expect("synced"); + } + // Open and migrate. + { + let dst_env = Rkv::new::(root.path()).expect("new succeeded"); + Migrator::open_and_migrate_lmdb_to_safe_mode(root.path(), |builder| builder, &dst_env).expect("migrated"); + } + // Open and migrate back. + { + let dst_env = Rkv::new::(root.path()).expect("new succeeded"); + Migrator::open_and_migrate_safe_mode_to_lmdb(root.path(), |builder| builder, &dst_env).expect("migrated"); + } + // Verify that the database was indeed migrated twice. + { + let dst_env = Rkv::new::(root.path()).expect("new succeeded"); + let store = dst_env.open_single("store", StoreOptions::default()).expect("opened"); + let reader = dst_env.read().expect("reader"); + assert_eq!(store.get(&reader, "foo").expect("read"), Some(Value::I64(1234))); + assert_eq!(store.get(&reader, "bar").expect("read"), Some(Value::Bool(true))); + assert_eq!(store.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu"))); + } + // Check if the right files are finally present on disk. + { + let mut datamdb = root.path().to_path_buf(); + let mut lockmdb = root.path().to_path_buf(); + let mut safebin = root.path().to_path_buf(); + datamdb.push("data.mdb"); + lockmdb.push("lock.mdb"); + safebin.push("data.safe.bin"); + assert!(datamdb.exists()); + assert!(lockmdb.exists()); + assert!(!safebin.exists()); + } +} + +#[test] +fn test_migrator_no_dir_1() { + let root = Builder::new().prefix("test_migrator_no_dir").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + // This won't fail with IoError even though the path is a bogus path, because this + // is the "easy mode" migration which automatically handles (ignores) this error. 
+ let dst_env = Rkv::new::(root.path()).expect("new succeeded"); + Migrator::easy_migrate_lmdb_to_safe_mode(Path::new("bogus"), &dst_env).expect("migrated"); + + let mut datamdb = root.path().to_path_buf(); + let mut lockmdb = root.path().to_path_buf(); + let mut safebin = root.path().to_path_buf(); + datamdb.push("data.mdb"); + lockmdb.push("lock.mdb"); + safebin.push("data.safe.bin"); + assert!(!datamdb.exists()); + assert!(!lockmdb.exists()); + assert!(!safebin.exists()); // safe mode doesn't write an empty db to disk +} + +#[test] +fn test_migrator_no_dir_2() { + let root = Builder::new().prefix("test_migrator_no_dir").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + // This won't fail with IoError even though the path is a bogus path, because this + // is the "easy mode" migration which automatically handles (ignores) this error. + let dst_env = Rkv::new::(root.path()).expect("new succeeded"); + Migrator::easy_migrate_safe_mode_to_lmdb(Path::new("bogus"), &dst_env).expect("migrated"); + + let mut datamdb = root.path().to_path_buf(); + let mut lockmdb = root.path().to_path_buf(); + let mut safebin = root.path().to_path_buf(); + datamdb.push("data.mdb"); + lockmdb.push("lock.mdb"); + safebin.push("data.safe.bin"); + assert!(datamdb.exists()); // lmdb writes an empty db to disk + assert!(lockmdb.exists()); + assert!(!safebin.exists()); +} + +#[test] +fn test_migrator_invalid_1() { + let root = Builder::new().prefix("test_migrator_invalid").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let dbfile = root.path().join("data.mdb"); + fs::write(dbfile, "bogus").expect("dbfile created"); + + // This won't fail with FileInvalid even though the database is a bogus file, because this + // is the "easy mode" migration which automatically handles (ignores) this error. + let dst_env = Rkv::new::(root.path()).expect("new succeeded"); + Migrator::easy_migrate_lmdb_to_safe_mode(root.path(), &dst_env).expect("migrated"); + + let mut datamdb = root.path().to_path_buf(); + let mut lockmdb = root.path().to_path_buf(); + let mut safebin = root.path().to_path_buf(); + datamdb.push("data.mdb"); + lockmdb.push("lock.mdb"); + safebin.push("data.safe.bin"); + assert!(datamdb.exists()); // corrupted db isn't deleted + assert!(lockmdb.exists()); + assert!(!safebin.exists()); +} + +#[test] +fn test_migrator_invalid_2() { + let root = Builder::new().prefix("test_migrator_invalid").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let dbfile = root.path().join("data.safe.bin"); + fs::write(dbfile, "bogus").expect("dbfile created"); + + // This won't fail with FileInvalid even though the database is a bogus file, because this + // is the "easy mode" migration which automatically handles (ignores) this error. 
+ let dst_env = Rkv::new::(root.path()).expect("new succeeded"); + Migrator::easy_migrate_safe_mode_to_lmdb(root.path(), &dst_env).expect("migrated"); + + let mut datamdb = root.path().to_path_buf(); + let mut lockmdb = root.path().to_path_buf(); + let mut safebin = root.path().to_path_buf(); + datamdb.push("data.mdb"); + lockmdb.push("lock.mdb"); + safebin.push("data.safe.bin"); + assert!(datamdb.exists()); // lmdb writes an empty db to disk + assert!(lockmdb.exists()); + assert!(safebin.exists()); // corrupted db isn't deleted +} + +#[test] +#[should_panic(expected = "migrated: SourceEmpty")] +fn test_migrator_lmdb_to_safe_1() { + let root = Builder::new().prefix("test_migrate_lmdb_to_safe").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let src_env = Rkv::new::(root.path()).expect("new succeeded"); + let dst_env = Rkv::new::(root.path()).expect("new succeeded"); + Migrator::migrate_lmdb_to_safe_mode(&src_env, &dst_env).expect("migrated"); +} + +#[test] +#[should_panic(expected = "migrated: DestinationNotEmpty")] +fn test_migrator_lmdb_to_safe_2() { + let root = Builder::new().prefix("test_migrate_lmdb_to_safe").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let src_env = Rkv::new::(root.path()).expect("new succeeded"); + populate_store!(&src_env); + let dst_env = Rkv::new::(root.path()).expect("new succeeded"); + populate_store!(&dst_env); + Migrator::migrate_lmdb_to_safe_mode(&src_env, &dst_env).expect("migrated"); +} + +#[test] +fn test_migrator_lmdb_to_safe_3() { + let root = Builder::new().prefix("test_migrate_lmdb_to_safe").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let src_env = Rkv::new::(root.path()).expect("new succeeded"); + populate_store!(&src_env); + let dst_env = Rkv::new::(root.path()).expect("new succeeded"); + Migrator::migrate_lmdb_to_safe_mode(&src_env, &dst_env).expect("migrated"); + + let store = dst_env.open_single("store", StoreOptions::default()).expect("opened"); + let reader = dst_env.read().expect("reader"); + assert_eq!(store.get(&reader, "foo").expect("read"), Some(Value::I64(1234))); + assert_eq!(store.get(&reader, "bar").expect("read"), Some(Value::Bool(true))); + assert_eq!(store.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu"))); +} + +#[test] +#[should_panic(expected = "migrated: SourceEmpty")] +fn test_migrator_safe_to_lmdb_1() { + let root = Builder::new().prefix("test_migrate_safe_to_lmdb").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let src_env = Rkv::new::(root.path()).expect("new succeeded"); + let dst_env = Rkv::new::(root.path()).expect("new succeeded"); + Migrator::migrate_safe_mode_to_lmdb(&src_env, &dst_env).expect("migrated"); +} + +#[test] +#[should_panic(expected = "migrated: DestinationNotEmpty")] +fn test_migrator_safe_to_lmdb_2() { + let root = Builder::new().prefix("test_migrate_safe_to_lmdb").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let src_env = Rkv::new::(root.path()).expect("new succeeded"); + populate_store!(&src_env); + let dst_env = Rkv::new::(root.path()).expect("new succeeded"); + populate_store!(&dst_env); + Migrator::migrate_safe_mode_to_lmdb(&src_env, &dst_env).expect("migrated"); +} + +#[test] +fn test_migrator_safe_to_lmdb_3() { + let root = Builder::new().prefix("test_migrate_safe_to_lmdb").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + 
let src_env = Rkv::new::(root.path()).expect("new succeeded"); + populate_store!(&src_env); + let dst_env = Rkv::new::(root.path()).expect("new succeeded"); + Migrator::migrate_safe_mode_to_lmdb(&src_env, &dst_env).expect("migrated"); + + let store = dst_env.open_single("store", StoreOptions::default()).expect("opened"); + let reader = dst_env.read().expect("reader"); + assert_eq!(store.get(&reader, "foo").expect("read"), Some(Value::I64(1234))); + assert_eq!(store.get(&reader, "bar").expect("read"), Some(Value::Bool(true))); + assert_eq!(store.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu"))); +} diff --git a/third_party/rust/rkv/tests/env-safe.rs b/third_party/rust/rkv/tests/env-safe.rs index 3a82ee7fe2b0..65dad4014e1c 100644 --- a/third_party/rust/rkv/tests/env-safe.rs +++ b/third_party/rust/rkv/tests/env-safe.rs @@ -12,13 +12,16 @@ // deprecates `clippy::cyclomatic_complexity`. #![allow(clippy::complexity)] -use std::fs; -use std::str; -use std::sync::{ - Arc, - RwLock, +use std::{ + fs, + path::Path, + str, + sync::{ + Arc, + RwLock, + }, + thread, }; -use std::thread; use byteorder::{ ByteOrder, @@ -26,14 +29,14 @@ use byteorder::{ }; use tempfile::Builder; -use rkv::backend::{ - BackendEnvironmentBuilder, - SafeMode, - SafeModeDatabase, - SafeModeEnvironment, - SafeModeRwTransaction, -}; use rkv::{ + backend::{ + BackendEnvironmentBuilder, + SafeMode, + SafeModeDatabase, + SafeModeEnvironment, + SafeModeRwTransaction, + }, Rkv, SingleStore, StoreError, @@ -63,7 +66,7 @@ fn test_open_fails_safe() { let pb = nope.to_path_buf(); match Rkv::new::(nope.as_path()).err() { - Some(StoreError::DirectoryDoesNotExistError(p)) => { + Some(StoreError::UnsuitableEnvironmentPath(p)) => { assert_eq!(pb, p); }, _ => panic!("expected error"), @@ -95,10 +98,37 @@ fn test_open_from_builder_safe() { check_rkv(&k); } +#[test] +fn test_open_from_builder_with_dir_safe_1() { + let root = Builder::new().prefix("test_open_from_builder_safe").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + + let mut builder = Rkv::environment_builder::(); + builder.set_max_dbs(2); + builder.set_make_dir_if_needed(true); + + let k = Rkv::from_builder(root.path(), builder).expect("rkv"); + check_rkv(&k); +} + +#[test] +#[should_panic(expected = "rkv: UnsuitableEnvironmentPath(\"bogus\")")] +fn test_open_from_builder_with_dir_safe_2() { + let root = Path::new("bogus"); + println!("Root path: {:?}", root); + assert!(!root.is_dir()); + + let mut builder = Rkv::environment_builder::(); + builder.set_max_dbs(2); + + let k = Rkv::from_builder(root, builder).expect("rkv"); + check_rkv(&k); +} + #[test] #[should_panic(expected = "opened: DbsFull")] -fn test_open_with_capacity_safe() { - let root = Builder::new().prefix("test_open_with_capacity").tempdir().expect("tempdir"); +fn test_create_with_capacity_safe_1() { + let root = Builder::new().prefix("test_create_with_capacity_safe").tempdir().expect("tempdir"); println!("Root path: {:?}", root.path()); fs::create_dir_all(root.path()).expect("dir created"); assert!(root.path().is_dir()); @@ -106,9 +136,103 @@ fn test_open_with_capacity_safe() { let k = Rkv::with_capacity::(root.path(), 1).expect("rkv"); check_rkv(&k); + // This errors with "opened: DbsFull" because we specified a capacity of one (database), + // and check_rkv already opened one (plus the default database, which doesn't count + // against the limit). 
let _zzz = k.open_single("zzz", StoreOptions::create()).expect("opened"); } +#[test] +fn test_create_with_capacity_safe_2() { + let root = Builder::new().prefix("test_create_with_capacity_safe").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + let k = Rkv::with_capacity::(root.path(), 1).expect("rkv"); + check_rkv(&k); + + // This doesn't error with "opened: DbsFull" because even though we specified a capacity + // of one (database), and check_rkv already opened one, the default database doesn't + // count against the limit). + let _zzz = k.open_single(None, StoreOptions::create()).expect("opened"); +} + +#[test] +#[should_panic(expected = "opened: SafeModeError(DbNotFoundError)")] +fn test_open_with_capacity_safe_1() { + let root = Builder::new().prefix("test_open_with_capacity_safe").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + let k = Rkv::with_capacity::(root.path(), 1).expect("rkv"); + check_rkv(&k); + + let _zzz = k.open_single("zzz", StoreOptions::default()).expect("opened"); +} + +#[test] +fn test_open_with_capacity_safe_2() { + let root = Builder::new().prefix("test_open_with_capacity_safe").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + let k = Rkv::with_capacity::(root.path(), 1).expect("rkv"); + check_rkv(&k); + + let _zzz = k.open_single(None, StoreOptions::default()).expect("opened"); +} + +#[test] +fn test_list_dbs_safe_1() { + let root = Builder::new().prefix("test_list_dbs_safe").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + let k = Rkv::with_capacity::(root.path(), 1).expect("rkv"); + check_rkv(&k); + + let mut dbs = k.get_dbs().unwrap(); + dbs.sort(); + assert_eq!(dbs, vec![None, Some("s".to_owned())]); +} + +#[test] +fn test_list_dbs_safe_2() { + let root = Builder::new().prefix("test_list_dbs_safe").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + let k = Rkv::with_capacity::(root.path(), 2).expect("rkv"); + check_rkv(&k); + + let _ = k.open_single("zzz", StoreOptions::create()).expect("opened"); + + let mut dbs = k.get_dbs().unwrap(); + dbs.sort(); + assert_eq!(dbs, vec![None, Some("s".to_owned()), Some("zzz".to_owned())]); +} + +#[test] +fn test_list_dbs_safe_3() { + let root = Builder::new().prefix("test_list_dbs_safe").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + let k = Rkv::with_capacity::(root.path(), 0).expect("rkv"); + + let _ = k.open_single(None, StoreOptions::create()).expect("opened"); + + let mut dbs = k.get_dbs().unwrap(); + dbs.sort(); + assert_eq!(dbs, vec![None]); +} + #[test] fn test_round_trip_and_transactions_safe() { let root = Builder::new().prefix("test_round_trip_and_transactions_safe").tempdir().expect("tempdir"); @@ -276,9 +400,9 @@ fn test_multi_put_get_del_safe() { { let mut iter = multistore.get(&writer, "str1").unwrap(); let (id, val) = iter.next().unwrap().unwrap(); - assert_eq!((id, val), (&b"str1"[..], Some(Value::Str("str1 
bar")))); + assert_eq!((id, val), (&b"str1"[..], Value::Str("str1 bar"))); let (id, val) = iter.next().unwrap().unwrap(); - assert_eq!((id, val), (&b"str1"[..], Some(Value::Str("str1 foo")))); + assert_eq!((id, val), (&b"str1"[..], Value::Str("str1 foo"))); } writer.commit().unwrap(); @@ -609,22 +733,22 @@ fn test_iter_safe() { let mut iter = sk.iter_start(&reader).unwrap(); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "bar"); - assert_eq!(val, Some(Value::Bool(true))); + assert_eq!(val, Value::Bool(true)); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "baz"); - assert_eq!(val, Some(Value::Str("héllo, yöu"))); + assert_eq!(val, Value::Str("héllo, yöu")); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "foo"); - assert_eq!(val, Some(Value::I64(1234))); + assert_eq!(val, Value::I64(1234)); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "héllò, töűrîst"); - assert_eq!(val, Some(Value::Str("Emil.RuleZ!"))); + assert_eq!(val, Value::Str("Emil.RuleZ!")); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "noo"); - assert_eq!(val, Some(Value::F64(1234.0.into()))); + assert_eq!(val, Value::F64(1234.0.into())); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客"); - assert_eq!(val, Some(Value::Str("米克規則"))); + assert_eq!(val, Value::Str("米克規則")); assert!(iter.next().is_none()); // Iterators don't loop. Once one returns None, additional calls @@ -636,10 +760,10 @@ fn test_iter_safe() { let mut iter = sk.iter_from(&reader, "moo").unwrap(); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "noo"); - assert_eq!(val, Some(Value::F64(1234.0.into()))); + assert_eq!(val, Value::F64(1234.0.into())); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客"); - assert_eq!(val, Some(Value::Str("米克規則"))); + assert_eq!(val, Value::Str("米克規則")); assert!(iter.next().is_none()); // Reader.iter_from() works as expected when the given key is a prefix @@ -647,10 +771,10 @@ fn test_iter_safe() { let mut iter = sk.iter_from(&reader, "no").unwrap(); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "noo"); - assert_eq!(val, Some(Value::F64(1234.0.into()))); + assert_eq!(val, Value::F64(1234.0.into())); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客"); - assert_eq!(val, Some(Value::Str("米克規則"))); + assert_eq!(val, Value::Str("米克規則")); assert!(iter.next().is_none()); } @@ -746,84 +870,84 @@ fn test_multiple_store_iter_safe() { let mut iter = s1.iter_start(&reader).unwrap(); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "bar"); - assert_eq!(val, Some(Value::Bool(true))); + assert_eq!(val, Value::Bool(true)); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "baz"); - assert_eq!(val, Some(Value::Str("héllo, yöu"))); + assert_eq!(val, Value::Str("héllo, yöu")); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "foo"); - assert_eq!(val, Some(Value::I64(1234))); + assert_eq!(val, Value::I64(1234)); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "héllò, töűrîst"); - assert_eq!(val, 
Some(Value::Str("Emil.RuleZ!"))); + assert_eq!(val, Value::Str("Emil.RuleZ!")); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "noo"); - assert_eq!(val, Some(Value::F64(1234.0.into()))); + assert_eq!(val, Value::F64(1234.0.into())); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客"); - assert_eq!(val, Some(Value::Str("米克規則"))); + assert_eq!(val, Value::Str("米克規則")); assert!(iter.next().is_none()); // Iterate through the whole store in "s2" let mut iter = s2.iter_start(&reader).unwrap(); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "bar"); - assert_eq!(val, Some(Value::Bool(true))); + assert_eq!(val, Value::Bool(true)); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "baz"); - assert_eq!(val, Some(Value::Str("héllo, yöu"))); + assert_eq!(val, Value::Str("héllo, yöu")); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "foo"); - assert_eq!(val, Some(Value::I64(1234))); + assert_eq!(val, Value::I64(1234)); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "héllò, töűrîst"); - assert_eq!(val, Some(Value::Str("Emil.RuleZ!"))); + assert_eq!(val, Value::Str("Emil.RuleZ!")); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "noo"); - assert_eq!(val, Some(Value::F64(1234.0.into()))); + assert_eq!(val, Value::F64(1234.0.into())); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客"); - assert_eq!(val, Some(Value::Str("米克規則"))); + assert_eq!(val, Value::Str("米克規則")); assert!(iter.next().is_none()); // Iterate from a given key in "s1" let mut iter = s1.iter_from(&reader, "moo").unwrap(); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "noo"); - assert_eq!(val, Some(Value::F64(1234.0.into()))); + assert_eq!(val, Value::F64(1234.0.into())); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客"); - assert_eq!(val, Some(Value::Str("米克規則"))); + assert_eq!(val, Value::Str("米克規則")); assert!(iter.next().is_none()); // Iterate from a given key in "s2" let mut iter = s2.iter_from(&reader, "moo").unwrap(); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "noo"); - assert_eq!(val, Some(Value::F64(1234.0.into()))); + assert_eq!(val, Value::F64(1234.0.into())); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客"); - assert_eq!(val, Some(Value::Str("米克規則"))); + assert_eq!(val, Value::Str("米克規則")); assert!(iter.next().is_none()); // Iterate from a given prefix in "s1" let mut iter = s1.iter_from(&reader, "no").unwrap(); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "noo"); - assert_eq!(val, Some(Value::F64(1234.0.into()))); + assert_eq!(val, Value::F64(1234.0.into())); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客"); - assert_eq!(val, Some(Value::Str("米克規則"))); + assert_eq!(val, Value::Str("米克規則")); assert!(iter.next().is_none()); // Iterate from a given prefix in "s2" let mut iter = s2.iter_from(&reader, "no").unwrap(); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "noo"); - assert_eq!(val, 
Some(Value::F64(1234.0.into()))); + assert_eq!(val, Value::F64(1234.0.into())); let (key, val) = iter.next().unwrap().unwrap(); assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客"); - assert_eq!(val, Some(Value::Str("米克規則"))); + assert_eq!(val, Value::Str("米克規則")); assert!(iter.next().is_none()); } diff --git a/third_party/rust/rkv/tests/integer-store.rs b/third_party/rust/rkv/tests/integer-store.rs index 3a0ca93371d6..680afc1da33d 100644 --- a/third_party/rust/rkv/tests/integer-store.rs +++ b/third_party/rust/rkv/tests/integer-store.rs @@ -8,14 +8,15 @@ // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #![cfg(feature = "db-int-key")] +#![allow(clippy::many_single_char_names)] use std::fs; use serde_derive::Serialize; use tempfile::Builder; -use rkv::backend::Lmdb; use rkv::{ + backend::Lmdb, PrimitiveInt, Rkv, StoreOptions, diff --git a/third_party/rust/rkv/tests/manager.rs b/third_party/rust/rkv/tests/manager.rs index 221498dd0954..0dee9b9869ef 100644 --- a/third_party/rust/rkv/tests/manager.rs +++ b/third_party/rust/rkv/tests/manager.rs @@ -8,21 +8,26 @@ // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. -use std::fs; -use std::sync::Arc; +use std::{ + fs, + sync::Arc, +}; use tempfile::Builder; -use rkv::backend::{ - Lmdb, - LmdbEnvironment, - SafeMode, - SafeModeEnvironment, +use rkv::{ + backend::{ + Lmdb, + LmdbEnvironment, + SafeMode, + SafeModeEnvironment, + }, + Rkv, }; -use rkv::Rkv; /// Test that a manager can be created with simple type inference. #[test] +#[allow(clippy::let_underscore_lock)] fn test_simple() { type Manager = rkv::Manager<LmdbEnvironment>; @@ -31,6 +36,7 @@ fn test_simple() { /// Test that a manager can be created with simple type inference. #[test] +#[allow(clippy::let_underscore_lock)] fn test_simple_safe() { type Manager = rkv::Manager<SafeModeEnvironment>; diff --git a/third_party/rust/rkv/tests/multi-integer-store.rs b/third_party/rust/rkv/tests/multi-integer-store.rs index 57111f0f92d7..204d52f93d36 100644 --- a/third_party/rust/rkv/tests/multi-integer-store.rs +++ b/third_party/rust/rkv/tests/multi-integer-store.rs @@ -8,14 +8,15 @@ // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License.
#![cfg(all(feature = "db-dup-sort", feature = "db-int-key"))] +#![allow(clippy::many_single_char_names)] use std::fs; use serde_derive::Serialize; use tempfile::Builder; -use rkv::backend::Lmdb; use rkv::{ + backend::Lmdb, PrimitiveInt, Rkv, StoreOptions, @@ -41,7 +42,7 @@ fn test_multi_integer_keys() { .get(&writer, $key) .expect("read") .map(|result| result.expect("ok")) - .map(|(_, v)| v.expect("multi read")) + .map(|(_, v)| v) .collect::<Vec<Value>>(); assert_eq!(vals, vec![Value::Str("hello1"), Value::Str("hello2"), Value::Str("hello3")]); writer.commit().expect("committed"); @@ -51,7 +52,7 @@ fn test_multi_integer_keys() { .get(&reader, $key) .expect("read") .map(|result| result.expect("ok")) - .map(|(_, v)| v.expect("multi read")) + .map(|(_, v)| v) .collect::<Vec<Value>>(); assert_eq!(vals, vec![Value::Str("hello1"), Value::Str("hello2"), Value::Str("hello3")]); }}; diff --git a/third_party/rust/rkv/tests/test_txn.rs b/third_party/rust/rkv/tests/test_txn.rs index 15e2e961da1f..8e63924e0f0e 100644 --- a/third_party/rust/rkv/tests/test_txn.rs +++ b/third_party/rust/rkv/tests/test_txn.rs @@ -13,13 +13,13 @@ use std::fs; use tempfile::Builder; -use rkv::backend::{ - Lmdb, - LmdbDatabase, - LmdbRoCursor, - LmdbRwTransaction, -}; use rkv::{ + backend::{ + Lmdb, + LmdbDatabase, + LmdbRoCursor, + LmdbRwTransaction, + }, Readable, Rkv, StoreOptions, @@ -33,10 +33,10 @@ use rkv::{ /// value: String, /// date: String, /// } -/// We would like to index all of the fields so that we can search for the struct not only by ID -/// but also by value and date. When we index the fields individually in their own tables, it -/// is important that we run all operations within a single transaction to ensure coherence of -/// the indices. +/// We would like to index all of the fields so that we can search for the struct not only +/// by ID but also by value and date. When we index the fields individually in their own +/// tables, it is important that we run all operations within a single transaction to +/// ensure coherence of the indices. /// This test features helper functions for reading and writing the parts of the struct. /// Note that the reader functions take `Readable` because they might run within a Read /// Transaction or a Write Transaction. The test demonstrates fetching values via both. @@ -97,9 +97,11 @@ where store .get(txn, field) .expect("get iterator") - .map(|id| match id.expect("field") { - (_, Some(Value::U64(id))) => id, - _ => panic!("getting value in iter"), + .map(|id| { + match id.expect("field") { + (_, Value::U64(id)) => id, + _ => panic!("getting value in iter"), + } }) .collect::<Vec<u64>>() }
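Note on the API changes the vendored test updates above exercise: with rkv 0.15, multi-value and iterator items yield a `Value` directly instead of an `Option<Value>`, a missing directory is reported as `StoreError::UnsuitableEnvironmentPath` (formerly `DirectoryDoesNotExistError`), environment builders gain `set_make_dir_if_needed`, and the capacity passed to `Rkv::with_capacity`/`set_max_dbs` counts named databases only (the default, unnamed database is exempt). The following stand-alone Rust sketch illustrates those calling conventions; it is not part of the patch, and the temporary "subdir" path, the store name "example", and the choice of the SafeMode backend are illustrative assumptions.

use tempfile::Builder;

use rkv::{
    backend::{
        BackendEnvironmentBuilder,
        SafeMode,
        SafeModeEnvironment,
    },
    Rkv,
    StoreError,
    StoreOptions,
    Value,
};

fn main() -> Result<(), StoreError> {
    // "subdir" does not exist yet; set_make_dir_if_needed(true) covers exactly this case,
    // otherwise Rkv::from_builder would fail with StoreError::UnsuitableEnvironmentPath.
    let root = Builder::new().prefix("rkv-0.15-sketch").tempdir().expect("tempdir");
    let path = root.path().join("subdir");

    let mut builder = Rkv::environment_builder::<SafeMode>();
    builder.set_max_dbs(1); // counts named databases only; the default (None) database is exempt
    builder.set_make_dir_if_needed(true);
    let env: Rkv<SafeModeEnvironment> = Rkv::from_builder(&path, builder)?;

    // One named store fits within the capacity of 1; a second named store would fail
    // with DbsFull, while open_single(None, ...) would still succeed.
    let store = env.open_single("example", StoreOptions::create())?;

    {
        let mut writer = env.write()?;
        store.put(&mut writer, "foo", &Value::I64(1234))?;
        store.put(&mut writer, "bar", &Value::Bool(true))?;
        writer.commit()?;
    }

    // In rkv 0.15 iterator items are (key, Value) pairs, not (key, Option<Value>).
    let reader = env.read()?;
    for item in store.iter_start(&reader)? {
        let (key, value) = item?;
        println!("{:?} => {:?}", key, value);
    }

    Ok(())
}

This mirrors what the new test_create_with_capacity_safe_*, test_list_dbs_safe_*, and the updated iterator assertions in env-safe.rs check against the SafeMode backend.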