Backed out changeset e99833064d36 (bug 1529681) for build bustages on a CLOSED TREE

Andreea Pavel 2019-02-25 18:44:17 +02:00
parent 906d3332da
commit 6d82da21f3
83 changed files with 914 additions and 11595 deletions

Cargo.lock (generated)
View File

@@ -1,5 +1,7 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
[[package]]
name = "Inflector"
version = "0.11.2"
@@ -162,7 +164,7 @@ dependencies = [
name = "baldrdash"
version = "0.1.0"
dependencies = [
"bindgen 0.47.2 (registry+https://github.com/rust-lang/crates.io-index)",
"bindgen 0.43.2 (registry+https://github.com/rust-lang/crates.io-index)",
"cranelift-codegen 0.28.0 (registry+https://github.com/rust-lang/crates.io-index)",
"cranelift-wasm 0.28.0 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -215,21 +217,20 @@ dependencies = [
[[package]]
name = "bindgen"
version = "0.47.2"
version = "0.43.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
"cexpr 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"clang-sys 0.26.4 (registry+https://github.com/rust-lang/crates.io-index)",
"clang-sys 0.26.1 (registry+https://github.com/rust-lang/crates.io-index)",
"clap 2.31.2 (registry+https://github.com/rust-lang/crates.io-index)",
"hashbrown 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"peeking_take_while 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"proc-macro2 0.4.24 (registry+https://github.com/rust-lang/crates.io-index)",
"quote 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)",
"proc-macro2 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
"quote 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)",
"regex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"which 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"which 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -390,7 +391,7 @@ dependencies = [
[[package]]
name = "clang-sys"
version = "0.26.4"
version = "0.26.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1200,15 +1201,6 @@ dependencies = [
"tokio-io 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "hashbrown"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
"scopeguard 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "hashglobe"
version = "0.1.0"
@@ -1310,7 +1302,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
name = "js"
version = "0.1.4"
dependencies = [
"bindgen 0.47.2 (registry+https://github.com/rust-lang/crates.io-index)",
"bindgen 0.43.2 (registry+https://github.com/rust-lang/crates.io-index)",
"cmake 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)",
"glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2516,7 +2508,7 @@ dependencies = [
"app_units 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"arrayvec 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"atomic_refcell 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"bindgen 0.47.2 (registry+https://github.com/rust-lang/crates.io-index)",
"bindgen 0.43.2 (registry+https://github.com/rust-lang/crates.io-index)",
"bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
"cssparser 0.25.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -3172,10 +3164,9 @@ dependencies = [
[[package]]
name = "which"
version = "2.0.1"
version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"failure 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -3312,7 +3303,7 @@ dependencies = [
"checksum base64 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "621fc7ecb8008f86d7fb9b95356cd692ce9514b80a86d85b397f32a22da7b9e2"
"checksum binary-space-partition 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "88ceb0d16c4fd0e42876e298d7d3ce3780dd9ebdcbe4199816a32c77e08597ff"
"checksum bincode 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bda13183df33055cbb84b847becce220d392df502ebe7a4a78d7021771ed94d0"
"checksum bindgen 0.47.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2162f593f5f3636eb727875b3fb93a4448dd41f84de5f228c9f4c9f5fc873270"
"checksum bindgen 0.43.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6d52d263eacd15d26cbcf215d254b410bd58212aaa2d3c453a04b2d3b3adcf41"
"checksum binjs_meta 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "430239e4551e42b80fa5d92322ac80ea38c9dda56e5d5582e057e2288352b71a"
"checksum bit-set 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6f1efcc46c18245a69c38fcc5cc650f16d3a59d034f3106e9ed63748f695730a"
"checksum bit-vec 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4440d5cb623bb7390ae27fec0bb6c61111969860f8e3ae198bfa0663645e67cf"
@@ -3335,7 +3326,7 @@ dependencies = [
"checksum cexpr 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8fc0086be9ca82f7fc89fc873435531cb898b86e850005850de1f820e2db6e9b"
"checksum cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d4c819a1287eb618df47cc647173c5c4c66ba19d888a6e50d605672aed3140de"
"checksum chrono 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "45912881121cb26fad7c38c17ba7daa18764771836b34fab7d3fbd93ed633878"
"checksum clang-sys 0.26.4 (registry+https://github.com/rust-lang/crates.io-index)" = "6ef0c1bcf2e99c649104bd7a7012d8f8802684400e03db0ec0af48583c6fa0e4"
"checksum clang-sys 0.26.1 (registry+https://github.com/rust-lang/crates.io-index)" = "481e42017c1416b1c0856ece45658ecbb7c93d8a93455f7e5fa77f3b35455557"
"checksum clap 2.31.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f0f16b89cbb9ee36d87483dc939fe9f1e13c05898d56d7b230a0d4dff033a536"
"checksum cmake 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)" = "56d741ea7a69e577f6d06b36b7dff4738f680593dc27a701ffa8506b73ce28bb"
"checksum constant_time_eq 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8ff012e225ce166d4422e0e78419d901719760f62ae2b7969ca6b564d1b54a9e"
@@ -3411,7 +3402,6 @@ dependencies = [
"checksum glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8be18de09a56b60ed0edf84bc9df007e30040691af7acd1c41874faac5895bfb"
"checksum goblin 0.0.17 (registry+https://github.com/rust-lang/crates.io-index)" = "5911d7df7b8f65ab676c5327b50acea29d3c6a1a4ad05e444cf5dce321b26db2"
"checksum h2 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "a27e7ed946e8335bdf9a191bc1b9b14a03ba822d013d2f58437f4fabcbd7fc2c"
"checksum hashbrown 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "3bae29b6653b3412c2e71e9d486db9f9df5d701941d86683005efb9f2d28e3da"
"checksum http 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "dca621d0fa606a5ff2850b6e337b57ad6137ee4d67e940449643ff45af6874c6"
"checksum httparse 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "af2f2dd97457e8fb1ae7c5a420db346af389926e36f43768b96f101546b04a07"
"checksum humantime 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0484fda3e7007f2a4a0d9c3a703ca38c71c54c55602ce4660c419fd32e188c9e"
@@ -3587,7 +3577,7 @@ dependencies = [
"checksum want 0.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "797464475f30ddb8830cc529aaaae648d581f99e2036a928877dfde027ddf6b3"
"checksum wasmparser 0.23.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b5e01c420bc7d36e778bd242e1167b079562ba8b34087122cc9057187026d060"
"checksum webidl 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d0f807f7488d680893f7188aa09d7672a3a0a8461975a098a2edf0a52e3fee29"
"checksum which 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b57acb10231b9493c8472b20cb57317d0679a49e0bdbee44b3b803a6473af164"
"checksum which 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4be6cfa54dab45266e98b5d7be2f8ce959ddd49abd141a05d52dce4b07f803bb"
"checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a"
"checksum winapi 0.3.6 (git+https://github.com/froydnj/winapi-rs?branch=aarch64)" = "<none>"
"checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc"

View File

@@ -7,7 +7,7 @@ license = "MPL-2.0"
[build-dependencies]
env_logger = {version = "0.5", default-features = false} # disable `regex` to reduce code size
bindgen = {version = "0.47", default-features = false} # disable `logging` to reduce code size
bindgen = {version = "0.43", default-features = false} # disable `logging` to reduce code size
cmake = "0.1"
glob = "0.2.11"

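(Illustration, not part of this commit: the manifest above declares `bindgen` as a build-dependency, meaning it runs from a build script at compile time rather than being linked into the shipped library. A minimal `build.rs` sketch of that pattern follows; the `wrapper.h` header name is made up, and the `Builder` calls shown exist in both bindgen 0.43 and 0.47.)

// build.rs sketch: run bindgen at build time and write the generated
// bindings into OUT_DIR for the crate to include!().
fn main() {
    let bindings = bindgen::Builder::default()
        .header("wrapper.h") // hypothetical C header
        .generate()
        .expect("unable to generate bindings");

    let out_dir = std::path::PathBuf::from(std::env::var("OUT_DIR").unwrap());
    bindings
        .write_to_file(out_dir.join("bindings.rs"))
        .expect("couldn't write bindings.rs");
}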
View File

@@ -15,7 +15,7 @@ log = { version = "0.4.6", default-features = false, features = ["release_max_le
env_logger = "0.5.6"
[build-dependencies]
bindgen = {version = "0.47", default-features = false} # disable `logging` to reduce code size
bindgen = {version = "0.43", default-features = false} # disable `logging` to reduce code size
# Uncomment this to enable perf support in release mode.
#[profile.release]

View File

@@ -78,7 +78,7 @@ void = "1.0.2"
[build-dependencies]
lazy_static = "1"
log = "0.4"
bindgen = {version = "0.47", optional = true, default-features = false}
bindgen = { version = "0.43", optional = true, default-features = false }
regex = {version = "1.0", optional = true}
walkdir = "2.1.4"
toml = {version = "0.4.5", optional = true, default-features = false}

View File

@@ -1 +1 @@
{"files":{"Cargo.toml":"72ad97e8fe0d94862507c796dbc9f4d36aaaa3aa4609a1f827b9936a8800fb37","LICENSE":"c23953d9deb0a3312dbeaf6c128a657f3591acee45067612fa68405eaa4525db","README.md":"0b50adc1da2d15211d61cab2ff8b9f1e8eccc37ae25695ba7a0c21b77389aa4c","build.rs":"a9f6915c54d75f357ce32f96327bf4df53dc81a505b70831978f9dac6f43841d","src/callbacks.rs":"b24d7982332c6a35928f134184ddf4072fe4545a45546b97b9b0e0c1fbb77c08","src/clang.rs":"4afb2865ac815c72b613a5ca4cb1289218d4e08abc6345b3e250023d7d23c0fb","src/codegen/bitfield_unit.rs":"88b0604322dc449fc9284850eadc1f5d14b42fa747d4258bae0b6b9535f52dfd","src/codegen/bitfield_unit_tests.rs":"2073ac6a36e0bc9afaef5b1207966817c8fb7a1a9f6368c3b1b8f79822efbfba","src/codegen/error.rs":"2613af1d833377fd4a70719f4a09951d9d45dc9227827b9a2a938a1bcaaea2dd","src/codegen/helpers.rs":"58cd5ad72425f766eb0560528a3953b05c58bc35b6637a331cd7403fdc0b3116","src/codegen/impl_debug.rs":"19a8f75a1513efb523f33fd02246976e81842d38f249261b0d1a671365f78caf","src/codegen/impl_partialeq.rs":"d40d9ee2849c4d3d557b033c4d3af5e6de4a44347f67c0f016198086338811af","src/codegen/mod.rs":"1a3f1fd56572e1f7257995c0f3fc92c6b0565a7feb35df509f0f9c92349cf64c","src/codegen/struct_layout.rs":"43132726f981b2d90f957fa6a0909fe48c07ca3e19b83886f24c876a33c61848","src/extra_assertions.rs":"494534bd4f18b80d89b180c8a93733e6617edcf7deac413e9a73fd6e7bc9ced7","src/features.rs":"288202e0d8a330a0fc7be624a0bce586d00f00733ccff608e61a71a08f37a2ae","src/ir/analysis/derive_copy.rs":"a4c0db650162ac9353051dd6718edab9ed8f67348c8c67e3202cc3cf05b84cac","src/ir/analysis/derive_debug.rs":"395912b60c701e5214425ee9c371da1af51c4c7934cb2d795a262752553b5caa","src/ir/analysis/derive_default.rs":"c2e44d72b53eb61376ba281696f2d12e9dea30dee4c5655c538b20f3927d1ee8","src/ir/analysis/derive_hash.rs":"08972280c609a5018b2fc318f71b0542b2e1a0e05d4978add3edb25852744705","src/ir/analysis/derive_partialeq_or_partialord.rs":"cace3eb52fabb2c6441e2b91697caa785b40360e0719417138b46f5005900941","src/ir/analysis/has_destructor.rs":"63644f479738df35e531d3324ff892614083c3656e0747aa34d9f20dada878ec","src/ir/analysis/has_float.rs":"76162a309e4285a806755a08c687a3e7bc894a100a63da4e88584035e215b11d","src/ir/analysis/has_type_param_in_array.rs":"fdbc0af28a144c88ea2de83e6e6da5e1ffb40e3dd63fd7a708095d085bb06f94","src/ir/analysis/has_vtable.rs":"5788372d27bdbaaf0454bc17be31a5480918bc41a8a1c4832e8c61185c07f9cd","src/ir/analysis/mod.rs":"532213b1ac2f995b41e44e9c2dbd607eeacebfdf2543d45251df3b4b7c32477f","src/ir/analysis/sizedness.rs":"8dc10043d872e68e660ef96edca4d9733f95be45cdad4893462fa929b335014f","src/ir/analysis/template_params.rs":"6312c008bbc80f50e72a766756c8daddea0b6eeb31ec924b83a231df931e170e","src/ir/annotations.rs":"39a5ab19f4d5dfa617577e4a0d0d2b67b5369d480c7cca4b14d172458c9843f0","src/ir/comment.rs":"c48abe01c5af0f09f583a89f1394bc6c161b40f6c8f0f600bbfe3c907b47969b","src/ir/comp.rs":"67bb94ab81c731739295a7ae91f29326c909c4635abc41c09c714ebfca887494","src/ir/context.rs":"6c382f28d4d5e5019fc328c289a66d36c739833419e6a6a7112c35b293bb6ee7","src/ir/derive.rs":"19601e76528d6cce8e04a66572e75da4e9efdecc4d60a983fc68c11958e9f3ec","src/ir/dot.rs":"95ed2968fc3239d87892e9f1edf1ed6dd18630d949564961765967ea1d16960c","src/ir/enum_ty.rs":"9cc242d6b3c1866665594e8b306860ee39c0ea42d22198d46b7fded473fe3e84","src/ir/function.rs":"b0b7355b5ad5fb6e5bf783ae0984ddbcf6d9c16bbbb63334f21e3649957d60b9","src/ir/int.rs":"07e0c7dbd2dd977177fae3acd2a14adf271c6cf9ff4b57cddc11d50734fd4801","src/ir/item.rs":"4dd447d43ff09bd424e518395340ebc2cb3b1819cef318124a359ca2ce59481a","src/ir/item_kind.rs":"dbeae8c4f
d0e5c9485d325aea040e056a1f2cd6d43fc927dee8fe1c0c59a7197","src/ir/layout.rs":"e722edffcd34914b534813da5af6fe8ba69927a54e0ec88ae1733f5ddf0e50b1","src/ir/mod.rs":"2eae90f207fad2e45957ec9287064992a419e3fc916aba84faff2ea25cbeb5ee","src/ir/module.rs":"c4d90bf38fe3672e01923734ccbdb7951ea929949d5f413a9c2aee12395a5094","src/ir/objc.rs":"758aa955a0c5d6ad82606c88a1f4cd1d93e666b71e82d43b18b1aaae96cf888a","src/ir/template.rs":"c0f8570b927dfd6a421fc4ce3094ec837a3ed936445225dbfac961e8e0842ae5","src/ir/traversal.rs":"ea751379a5aec02f93f8d2c61e18232776b1f000dbeae64b9a7195ba21a19dd6","src/ir/ty.rs":"1068a7e4916d69b5034a76c47b67e6257db906cc16dad6d8af4bdb39ad52cd84","src/ir/var.rs":"8bdafb6d02f2c55ae11c28d88b19fb7a65ba8466da12ff039ae4c16c790b291e","src/lib.rs":"11afe51d093bce1cedfcdfc45d150816f4d6379f30dafe190104b9a2f106f9cf","src/log_stubs.rs":"6dfdd908b7c6453da416cf232893768f9480e551ca4add0858ef88bf71ee6ceb","src/main.rs":"e519053bcdde6bc88f60f955246a02d53b3db1cc5ccd1612e6675b790b7460b0","src/options.rs":"4aeef74c25fbb1ed0f12b81f3e340078359168d396755458fd4a4bdb0a2b22c0","src/parse.rs":"be7d13cc84fae79ec7b3aa9e77063fa475a48d74a854423e2c72d75006a25202","src/regex_set.rs":"5cb72fc3714c0d79e9e942d003349c0775fafd7cd0c9603c65f5261883bbf9cf","src/time.rs":"3b763e6fee51d0eb01228dfe28bc28a9f692aff73b2a7b90a030902e0238fca6"},"package":"2162f593f5f3636eb727875b3fb93a4448dd41f84de5f228c9f4c9f5fc873270"}
{"files":{"Cargo.toml":"e0559de35f6564bbfc4779f43d9104d604befb7ff7de5baf591379c285544d3c","LICENSE":"c23953d9deb0a3312dbeaf6c128a657f3591acee45067612fa68405eaa4525db","README.md":"630d1a1d123c131bad0fec23173e263ba8ecc064b5cd8446d4cab7ffd197db45","build.rs":"032a1c51963894a421b0535f9227796d88768ac5f665a81d2edced69dc6d106a","src/callbacks.rs":"936198c967ca4205ab043ce2264d8188d0716ad7c294cebdaacde2b486224450","src/clang.rs":"b25f8d455e3cd89d416a4c5e55d828db9691f4def82109c1dd12457e5ca2c13c","src/codegen/bitfield_unit.rs":"88b0604322dc449fc9284850eadc1f5d14b42fa747d4258bae0b6b9535f52dfd","src/codegen/bitfield_unit_tests.rs":"2073ac6a36e0bc9afaef5b1207966817c8fb7a1a9f6368c3b1b8f79822efbfba","src/codegen/error.rs":"2613af1d833377fd4a70719f4a09951d9d45dc9227827b9a2a938a1bcaaea2dd","src/codegen/helpers.rs":"2c890c96a1a6b72ada63593cb544f005476fb176d7181553713e83710dc8eefd","src/codegen/impl_debug.rs":"43b977b8d16073d021977ce57f3c22eb5b1083493905ae19a171e2271939f574","src/codegen/impl_partialeq.rs":"671dd0eac712bf8281e11a7b3e545a443c6e9e2c8ee7fbebeb03c76667ca206b","src/codegen/mod.rs":"57a6c0dc52af70b08f54e744b629df67c5528a8d63ccb9485cc1af91d02dadc0","src/codegen/struct_layout.rs":"b77f03dfbbed408a5fa6e693560aea8dc902fe7d10d847ce39122e6961078515","src/extra_assertions.rs":"449549c4a7a50c3f0b06332452b2fb6c9b23f31ca8e5e1656fe6c7f21e8ef7fa","src/features.rs":"be74e03d4f00582fa8970439da52057b04204b450193833953ed84772933bd46","src/ir/analysis/derive_copy.rs":"b7e12cdc74937909529e4cefe9f43b3ee0a5590f07392b73481811ac9fddedd2","src/ir/analysis/derive_debug.rs":"cf9346ecb3afd4e94094a2723e4d76c76c55f42a13dc1d5ec6564d25d3a46cf4","src/ir/analysis/derive_default.rs":"87332eccd5accbfbf7fad2e1511be4f8945b0538ae3e0628c8af17d16068691f","src/ir/analysis/derive_hash.rs":"521ea1dbe221755042a95e8e8dcb594e427e54be2eb869c61ebbdb27fec5aa77","src/ir/analysis/derive_partialeq_or_partialord.rs":"3c5d051f69401fe50b56143143eca3e71674d6a87d0013c31745b75d0f3d584f","src/ir/analysis/has_destructor.rs":"d9aaaceba580b48eb0df4e5537b34b417c51ccdfeb8f6b72484f3bf4992317fe","src/ir/analysis/has_float.rs":"5f7ee1b834978817041d884fee4648b31ecb66c62aafb8e7a9a17e5ac434bfe5","src/ir/analysis/has_type_param_in_array.rs":"abf74468b923c015aaf67599e50857267516010472819a79ca494fe02dd6ac93","src/ir/analysis/has_vtable.rs":"37765e954ef792e369a58ccfe1d827a00fe9bce680466da1d6523671b94b6c92","src/ir/analysis/mod.rs":"ea5ace45c77e855674bb565ba0fef556f60e3293b0ddcf11d3a5a6ec15ab0648","src/ir/analysis/sizedness.rs":"4f788bff0ceb0e008d70145510340ab636e5203787316f0be41f789ce9b2f73d","src/ir/analysis/template_params.rs":"6554dd1240142ec0e7299e678b696725f5cba99243d1c3d1cbf58d4764082fd6","src/ir/annotations.rs":"ef106afcbe6084c18bd13a37ee3c1cdc9596bfb055db8c773d81f8f15fec3208","src/ir/comment.rs":"1b068d5834da7360aec4cb80d9c55219cedbb2ae8b9727a39ec7d156c88fe0b5","src/ir/comp.rs":"7b22f3ff19ca45a6fbfe7ea015109d43f4ddf65b33b47b1c37829fcb87cdff9b","src/ir/context.rs":"925ba08ad614c40b0578be524206a662aec53f959d47d3b2cc84389718fc485b","src/ir/derive.rs":"19601e76528d6cce8e04a66572e75da4e9efdecc4d60a983fc68c11958e9f3ec","src/ir/dot.rs":"d01f1621ab67e368d854a82bd6bb0b8dd52f3c2c733de8eaf81aece9543818cb","src/ir/enum_ty.rs":"9cc242d6b3c1866665594e8b306860ee39c0ea42d22198d46b7fded473fe3e84","src/ir/function.rs":"c497a6e07e95dc65be73f12396e344929973243d5cf7808a97c5309b0b090ef8","src/ir/int.rs":"07e0c7dbd2dd977177fae3acd2a14adf271c6cf9ff4b57cddc11d50734fd4801","src/ir/item.rs":"d626a0054df8254a504b44019dc531a933ec1bd3961b1465a602f0d767e0ad4e","src/ir/item_kind.rs":"dbeae8c4f
d0e5c9485d325aea040e056a1f2cd6d43fc927dee8fe1c0c59a7197","src/ir/layout.rs":"e722edffcd34914b534813da5af6fe8ba69927a54e0ec88ae1733f5ddf0e50b1","src/ir/mod.rs":"2eae90f207fad2e45957ec9287064992a419e3fc916aba84faff2ea25cbeb5ee","src/ir/module.rs":"c4d90bf38fe3672e01923734ccbdb7951ea929949d5f413a9c2aee12395a5094","src/ir/objc.rs":"828a890acdc8b10c44e69e2ed4a4f5d8c0e734606d3a8cc71658dcf43a49acf4","src/ir/template.rs":"c0f8570b927dfd6a421fc4ce3094ec837a3ed936445225dbfac961e8e0842ae5","src/ir/traversal.rs":"ea751379a5aec02f93f8d2c61e18232776b1f000dbeae64b9a7195ba21a19dd6","src/ir/ty.rs":"1068a7e4916d69b5034a76c47b67e6257db906cc16dad6d8af4bdb39ad52cd84","src/ir/var.rs":"5c0caaa505faef18e334c6198b3634b6f390d14cf9da629226cd78617fd3594b","src/lib.rs":"994d8495557cadc8c4a748e2643b35c6850f2c7130e35c8abf4ae02b83cfeff7","src/log_stubs.rs":"6dfdd908b7c6453da416cf232893768f9480e551ca4add0858ef88bf71ee6ceb","src/main.rs":"e519053bcdde6bc88f60f955246a02d53b3db1cc5ccd1612e6675b790b7460b0","src/options.rs":"a4b4028542d6292363fc97621c704bf1b4e7eb149e9cb86b52e30aad0be13b99","src/parse.rs":"be7d13cc84fae79ec7b3aa9e77063fa475a48d74a854423e2c72d75006a25202","src/regex_set.rs":"c417889726b5e3325f9375551bf23fd54c9b40020151c364741ea6126ede386b","src/time.rs":"3b763e6fee51d0eb01228dfe28bc28a9f692aff73b2a7b90a030902e0238fca6"},"package":"6d52d263eacd15d26cbcf215d254b410bd58212aaa2d3c453a04b2d3b3adcf41"}

View File

@@ -12,18 +12,18 @@
[package]
name = "bindgen"
version = "0.47.2"
version = "0.43.2"
authors = ["Jyun-Yan You <jyyou.tw@gmail.com>", "Emilio Cobos Álvarez <emilio@crisal.io>", "Nick Fitzgerald <fitzgen@gmail.com>", "The Servo project developers"]
build = "build.rs"
include = ["LICENSE", "README.md", "Cargo.toml", "build.rs", "src/*.rs", "src/**/*.rs"]
description = "Automatically generates Rust FFI bindings to C and C++ libraries."
homepage = "https://rust-lang.github.io/rust-bindgen/"
homepage = "https://rust-lang-nursery.github.io/rust-bindgen/"
documentation = "https://docs.rs/bindgen"
readme = "README.md"
keywords = ["bindings", "ffi", "code-generation"]
categories = ["external-ffi-bindings", "development-tools::ffi"]
license = "BSD-3-Clause"
repository = "https://github.com/rust-lang/rust-bindgen"
repository = "https://github.com/rust-lang-nursery/rust-bindgen"
[lib]
path = "src/lib.rs"
@@ -42,7 +42,7 @@ version = "0.3.3"
version = "0.1.0"
[dependencies.clang-sys]
version = "0.26.4"
version = "0.26"
features = ["runtime", "clang_6_0"]
[dependencies.clap]
@@ -52,9 +52,6 @@ version = "2"
version = "0.6"
optional = true
[dependencies.hashbrown]
version = "0.1"
[dependencies.lazy_static]
version = "1"
@@ -66,18 +63,18 @@ optional = true
version = "0.1.2"
[dependencies.proc-macro2]
version = "0.4"
version = "0.3.2, < 0.3.6"
default-features = false
[dependencies.quote]
version = "0.6"
version = "0.5"
default-features = false
[dependencies.regex]
version = "1.0"
[dependencies.which]
version = "2.0"
version = "1.0.2"
[dev-dependencies.clap]
version = "2"
@@ -98,4 +95,4 @@ testing_only_libclang_3_9 = []
testing_only_libclang_4 = []
testing_only_libclang_5 = []
[badges.travis-ci]
repository = "rust-lang/rust-bindgen"
repository = "rust-lang-nursery/rust-bindgen"

View File

@@ -1,5 +1,7 @@
# `bindgen`
[`impl period`](https://blog.rust-lang.org/2017/09/18/impl-future-for-rust.html) has been started! Join us at [Gitter.im](https://gitter.im/rust-impl-period/WG-dev-tools-bindgen).
**`bindgen` automatically generates Rust FFI bindings to C (and some C++) libraries.**
For example, given the C header `doggo.h`:
@@ -32,7 +34,7 @@ extern "C" {
## Users Guide
[📚 Read the `bindgen` users guide here! 📚](https://rust-lang.github.io/rust-bindgen)
[📚 Read the `bindgen` users guide here! 📚](https://rust-lang-nursery.github.io/rust-bindgen)
## API Reference

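(Illustration: the README's `doggo.h` example is elided by the hunk above. As a generic sketch of the kind of output `bindgen` produces, for a hypothetical C header containing `struct Point { int x; int y; };` rather than the README's actual example, the generated Rust looks like:)

#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct Point {
    pub x: ::std::os::raw::c_int,
    pub y: ::std::os::raw::c_int,
}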
View File

@@ -56,7 +56,7 @@ mod testgen {
dst,
"test_header!(header_{}, {:?});",
func,
entry.path(),
entry.path()
).unwrap();
}
_ => {}

View File

@@ -35,11 +35,6 @@ pub trait ParseCallbacks: fmt::Debug + UnwindSafe {
None
}
/// This will be run on every string macro. The callback can not influence the further
/// treatment of the macro, but may use the value to generate additional code or configuration.
fn str_macro(&self, _name: &str, _value: &[u8]) {
}
/// This function should return whether, given an enum variant
/// name, and value, this enum variant will forcibly be a constant.
fn enum_variant_behavior(

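(Illustration: the `str_macro` hook removed above belongs to the 0.47 `ParseCallbacks` trait, which consumers implement and hand to the builder. A hedged sketch against that pre-backout API; `MacroLogger` is a hypothetical type, not from this tree.)

use bindgen::callbacks::ParseCallbacks;

#[derive(Debug)]
struct MacroLogger;

impl ParseCallbacks for MacroLogger {
    // Runs on every string macro; it cannot change how the macro is
    // treated, but the value can drive extra generated code or config.
    fn str_macro(&self, name: &str, value: &[u8]) {
        println!("string macro {} = {}", name, String::from_utf8_lossy(value));
    }
}

// Wired up via: bindgen::Builder::default().parse_callbacks(Box::new(MacroLogger))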
View File

@@ -499,29 +499,6 @@ impl Cursor {
}
}
/// Does this cursor have the given simple attribute?
///
/// Note that this will only work for attributes that don't have an existing libclang
/// CursorKind, e.g. pure, const, etc.
pub fn has_simple_attr(&self, attr: &str) -> bool {
let mut found_attr = false;
self.visit(|cur| {
if cur.kind() == CXCursor_UnexposedAttr {
found_attr = cur.tokens().iter().any(|t| {
t.kind == CXToken_Identifier && t.spelling() == attr.as_bytes()
});
if found_attr {
return CXChildVisit_Break;
}
}
CXChildVisit_Continue
});
found_attr
}
/// Given that this cursor's referent is a `typedef`, get the `Type` that is
/// being aliased.
pub fn typedef_type(&self) -> Option<Type> {
@@ -550,27 +527,33 @@ impl Cursor {
/// Given that this cursor's referent is a function, return cursors to its
/// parameters.
///
/// Returns None if the cursor's referent is not a function/method call or
/// declaration.
pub fn args(&self) -> Option<Vec<Cursor>> {
// XXX: We might want to use and keep num_args
// match self.kind() {
// CXCursor_FunctionDecl |
// CXCursor_CXXMethod => {
self.num_args().ok().map(|num| {
(0..num).map(|i| {
Cursor {
x: unsafe { clang_Cursor_getArgument(self.x, i as c_uint) },
unsafe {
let w = clang_Cursor_getNumArguments(self.x);
if w == -1 {
None
} else {
let num = w as u32;
let mut args = vec![];
for i in 0..num {
args.push(Cursor {
x: clang_Cursor_getArgument(self.x, i as c_uint),
});
}
})
.collect()
})
Some(args)
}
}
}
/// Given that this cursor's referent is a function/method call or
/// declaration, return the number of arguments it takes.
///
/// Returns Err if the cursor's referent is not a function/method call or
/// Returns -1 if the cursor's referent is not a function/method call or
/// declaration.
pub fn num_args(&self) -> Result<u32, ()> {
unsafe {
@@ -645,126 +628,64 @@
}
/// Gets the tokens that correspond to that cursor.
pub fn tokens(&self) -> RawTokens {
RawTokens::new(self)
pub fn tokens(&self) -> Option<Vec<Token>> {
let range = self.extent();
let mut tokens = vec![];
unsafe {
let tu = clang_Cursor_getTranslationUnit(self.x);
let mut token_ptr = ptr::null_mut();
let mut num_tokens: c_uint = 0;
clang_tokenize(tu, range, &mut token_ptr, &mut num_tokens);
if token_ptr.is_null() {
return None;
}
let token_array =
slice::from_raw_parts(token_ptr, num_tokens as usize);
for &token in token_array.iter() {
let kind = clang_getTokenKind(token);
let spelling =
cxstring_into_string(clang_getTokenSpelling(tu, token));
tokens.push(Token {
kind: kind,
spelling: spelling,
});
}
clang_disposeTokens(tu, token_ptr, num_tokens);
}
Some(tokens)
}
/// Gets the tokens that correspond to that cursor as `cexpr` tokens.
pub fn cexpr_tokens(self) -> Vec<cexpr::token::Token> {
pub fn cexpr_tokens(self) -> Option<Vec<cexpr::token::Token>> {
use cexpr::token;
self.tokens().iter().filter_map(|token| {
let kind = match token.kind {
CXToken_Punctuation => token::Kind::Punctuation,
CXToken_Literal => token::Kind::Literal,
CXToken_Identifier => token::Kind::Identifier,
CXToken_Keyword => token::Kind::Keyword,
// NB: cexpr is not too happy about comments inside
// expressions, so we strip them down here.
CXToken_Comment => return None,
_ => {
error!("Found unexpected token kind: {:?}", token);
return None;
}
};
self.tokens().map(|tokens| {
tokens
.into_iter()
.filter_map(|token| {
let kind = match token.kind {
CXToken_Punctuation => token::Kind::Punctuation,
CXToken_Literal => token::Kind::Literal,
CXToken_Identifier => token::Kind::Identifier,
CXToken_Keyword => token::Kind::Keyword,
// NB: cexpr is not too happy about comments inside
// expressions, so we strip them down here.
CXToken_Comment => return None,
_ => {
error!("Found unexpected token kind: {:?}", token);
return None;
}
};
Some(token::Token {
kind,
raw: token.spelling().to_vec().into_boxed_slice(),
})
}).collect()
}
}
/// A struct that owns the tokenizer result from a given cursor.
pub struct RawTokens<'a> {
cursor: &'a Cursor,
tu: CXTranslationUnit,
tokens: *mut CXToken,
token_count: c_uint,
}
impl<'a> RawTokens<'a> {
fn new(cursor: &'a Cursor) -> Self {
let mut tokens = ptr::null_mut();
let mut token_count = 0;
let range = cursor.extent();
let tu = unsafe {
clang_Cursor_getTranslationUnit(cursor.x)
};
unsafe { clang_tokenize(tu, range, &mut tokens, &mut token_count) };
Self { cursor, tu, tokens, token_count }
}
fn as_slice(&self) -> &[CXToken] {
if self.tokens.is_null() {
return &[];
}
unsafe { slice::from_raw_parts(self.tokens, self.token_count as usize) }
}
/// Get an iterator over these tokens.
pub fn iter(&self) -> ClangTokenIterator {
ClangTokenIterator {
tu: self.tu,
raw: self.as_slice().iter(),
}
}
}
impl<'a> Drop for RawTokens<'a> {
fn drop(&mut self) {
if !self.tokens.is_null() {
unsafe {
clang_disposeTokens(self.tu, self.tokens, self.token_count as c_uint);
}
}
}
}
/// A raw clang token, that exposes only the kind and spelling. This is a
/// slightly more convenient version of `CXToken` which owns the spelling
/// string.
#[derive(Debug)]
pub struct ClangToken {
spelling: CXString,
/// The kind of token, this is the same as the relevant member from
/// `CXToken`.
pub kind: CXTokenKind,
}
impl ClangToken {
/// Get the token spelling, without being converted to utf-8.
pub fn spelling(&self) -> &[u8] {
let c_str = unsafe {
CStr::from_ptr(clang_getCString(self.spelling) as *const _)
};
c_str.to_bytes()
}
}
impl Drop for ClangToken {
fn drop(&mut self) {
unsafe { clang_disposeString(self.spelling) }
}
}
/// An iterator over a set of Tokens.
pub struct ClangTokenIterator<'a> {
tu: CXTranslationUnit,
raw: slice::Iter<'a, CXToken>,
}
impl<'a> Iterator for ClangTokenIterator<'a> {
type Item = ClangToken;
fn next(&mut self) -> Option<Self::Item> {
let raw = self.raw.next()?;
unsafe {
let kind = clang_getTokenKind(*raw);
let spelling = clang_getTokenSpelling(self.tu, *raw);
Some(ClangToken { kind, spelling })
}
Some(token::Token {
kind: kind,
raw: token.spelling.into_bytes().into_boxed_slice(),
})
})
.collect::<Vec<_>>()
})
}
}
@@ -931,37 +852,18 @@ impl Type {
unsafe { clang_isConstQualifiedType(self.x) != 0 }
}
#[inline]
fn is_non_deductible_auto_type(&self) -> bool {
self.kind() == CXType_Auto && self.canonical_type() == *self
}
#[inline]
fn clang_size_of(&self) -> c_longlong {
if self.is_non_deductible_auto_type() {
return -6; // Work-around https://bugs.llvm.org/show_bug.cgi?id=40813
}
unsafe { clang_Type_getSizeOf(self.x) }
}
#[inline]
fn clang_align_of(&self) -> c_longlong {
if self.is_non_deductible_auto_type() {
return -6; // Work-around https://bugs.llvm.org/show_bug.cgi?id=40813
}
unsafe { clang_Type_getAlignOf(self.x) }
}
/// What is the size of this type? Paper over invalid types by returning `0`
/// for them.
pub fn size(&self) -> usize {
let val = self.clang_size_of();
if val < 0 { 0 } else { val as usize }
unsafe {
let val = clang_Type_getSizeOf(self.x);
if val < 0 { 0 } else { val as usize }
}
}
/// What is the size of this type?
pub fn fallible_size(&self) -> Result<usize, LayoutError> {
let val = self.clang_size_of();
let val = unsafe { clang_Type_getSizeOf(self.x) };
if val < 0 {
Err(LayoutError::from(val as i32))
} else {
@@ -972,17 +874,21 @@
/// What is the alignment of this type? Paper over invalid types by
/// returning `0`.
pub fn align(&self) -> usize {
let val = self.clang_align_of();
if val < 0 { 0 } else { val as usize }
unsafe {
let val = clang_Type_getAlignOf(self.x);
if val < 0 { 0 } else { val as usize }
}
}
/// What is the alignment of this type?
pub fn fallible_align(&self) -> Result<usize, LayoutError> {
let val = self.clang_align_of();
if val < 0 {
Err(LayoutError::from(val as i32))
} else {
Ok(val as usize)
unsafe {
let val = clang_Type_getAlignOf(self.x);
if val < 0 {
Err(LayoutError::from(val as i32))
} else {
Ok(val as usize)
}
}
}
@@ -1026,31 +932,6 @@ impl Type {
})
}
/// Given that this type is a function prototype, return the types of its parameters.
///
/// Returns None if the type is not a function prototype.
pub fn args(&self) -> Option<Vec<Type>> {
self.num_args().ok().map(|num| {
(0..num).map(|i| {
Type {
x: unsafe { clang_getArgType(self.x, i as c_uint) },
}
})
.collect()
})
}
/// Given that this type is a function prototype, return the number of arguments it takes.
///
/// Returns Err if the type is not a function prototype.
pub fn num_args(&self) -> Result<u32, ()> {
unsafe {
let w = clang_getNumArgTypes(self.x);
if w == -1 { Err(()) } else { Ok(w as u32) }
}
}
/// Given that this type is a pointer type, return the type that it points
/// to.
pub fn pointee_type(&self) -> Option<Type> {

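(Illustration: the `RawTokens` type introduced by the patch being backed out is an RAII wrapper: acquire in `new` via `clang_tokenize`, borrow via `as_slice`, release exactly once in `Drop` via `clang_disposeTokens`. A standalone sketch of the same idiom, with a plain heap buffer standing in for the clang token array; all names here are made up.)

struct OwnedSlice {
    ptr: *mut u32,
    len: usize,
}

impl OwnedSlice {
    // "Acquire": take over a heap allocation, like RawTokens::new.
    fn new(data: Vec<u32>) -> Self {
        let mut data = data.into_boxed_slice(); // capacity == len from here on
        let (ptr, len) = (data.as_mut_ptr(), data.len());
        std::mem::forget(data); // ownership is now tracked manually
        OwnedSlice { ptr, len }
    }

    // Borrow safely, like RawTokens::as_slice.
    fn as_slice(&self) -> &[u32] {
        if self.ptr.is_null() {
            return &[];
        }
        unsafe { std::slice::from_raw_parts(self.ptr, self.len) }
    }
}

impl Drop for OwnedSlice {
    // "Release" exactly once, like RawTokens::drop calling clang_disposeTokens.
    fn drop(&mut self) {
        if !self.ptr.is_null() {
            unsafe { drop(Vec::from_raw_parts(self.ptr, self.len, self.len)) }
        }
    }
}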
View File

@@ -2,55 +2,52 @@
use ir::context::BindgenContext;
use ir::layout::Layout;
use proc_macro2::{Ident, Span, TokenStream};
use quote::TokenStreamExt;
use quote;
use proc_macro2::{Term, Span};
pub mod attributes {
use proc_macro2::{Ident, Span, TokenStream};
use std::str::FromStr;
use quote;
use proc_macro2::{Term, Span};
pub fn repr(which: &str) -> TokenStream {
let which = Ident::new(which, Span::call_site());
pub fn repr(which: &str) -> quote::Tokens {
let which = Term::new(which, Span::call_site());
quote! {
#[repr( #which )]
}
}
pub fn repr_list(which_ones: &[&str]) -> TokenStream {
let which_ones = which_ones.iter().cloned().map(|one| TokenStream::from_str(one).expect("repr to be valid"));
pub fn repr_list(which_ones: &[&str]) -> quote::Tokens {
let which_ones = which_ones.iter().cloned().map(|one| Term::new(one, Span::call_site()));
quote! {
#[repr( #( #which_ones ),* )]
}
}
pub fn derives(which_ones: &[&str]) -> TokenStream {
let which_ones = which_ones.iter().cloned().map(|one| Ident::new(one, Span::call_site()));
pub fn derives(which_ones: &[&str]) -> quote::Tokens {
let which_ones = which_ones.iter().cloned().map(|one| Term::new(one, Span::call_site()));
quote! {
#[derive( #( #which_ones ),* )]
}
}
pub fn inline() -> TokenStream {
pub fn inline() -> quote::Tokens {
quote! {
#[inline]
}
}
pub fn must_use() -> TokenStream {
quote! {
#[must_use]
}
pub fn doc(comment: String) -> quote::Tokens {
// Doc comments are already preprocessed into nice `///` formats by the
// time they get here. Just make sure that we have newlines around it so
// that nothing else gets wrapped into the comment.
let mut tokens = quote! {};
tokens.append(Term::new("\n", Span::call_site()));
tokens.append(Term::new(&comment, Span::call_site()));
tokens.append(Term::new("\n", Span::call_site()));
tokens
}
pub fn doc(comment: String) -> TokenStream {
use std::str::FromStr;
// NOTE(emilio): By this point comments are already preprocessed and in
// `///` form. Quote turns them into `#[doc]` comments, but oh well.
TokenStream::from_str(&comment).unwrap()
}
pub fn link_name(name: &str) -> TokenStream {
pub fn link_name(name: &str) -> quote::Tokens {
// LLVM mangles the name by default but it's already mangled.
// Prefixing the name with \u{1} should tell LLVM to not mangle it.
let name = format!("\u{1}{}", name);
@@ -62,7 +59,7 @@ pub mod attributes {
/// Generates a proper type for a field or type with a given `Layout`, that is,
/// a type with the correct size and alignment restrictions.
pub fn blob(ctx: &BindgenContext, layout: Layout) -> TokenStream {
pub fn blob(ctx: &BindgenContext, layout: Layout) -> quote::Tokens {
let opaque = layout.opaque();
// FIXME(emilio, #412): We fall back to byte alignment, but there are
@@ -77,7 +74,7 @@ pub fn blob(ctx: &BindgenContext, layout: Layout) -> TokenStream {
}
};
let ty_name = Ident::new(ty_name, Span::call_site());
let ty_name = Term::new(ty_name, Span::call_site());
let data_len = opaque.array_size(ctx).unwrap_or(layout.size);
@@ -93,14 +90,14 @@ pub fn blob(ctx: &BindgenContext, layout: Layout) -> TokenStream {
}
/// Integer type of the same size as the given `Layout`.
pub fn integer_type(ctx: &BindgenContext, layout: Layout) -> Option<TokenStream> {
pub fn integer_type(ctx: &BindgenContext, layout: Layout) -> Option<quote::Tokens> {
let name = Layout::known_type_for_size(ctx, layout.size)?;
let name = Ident::new(name, Span::call_site());
let name = Term::new(name, Span::call_site());
Some(quote! { #name })
}
/// Generates a bitfield allocation unit type for a type with the given `Layout`.
pub fn bitfield_unit(ctx: &BindgenContext, layout: Layout) -> TokenStream {
pub fn bitfield_unit(ctx: &BindgenContext, layout: Layout) -> quote::Tokens {
let mut tokens = quote! {};
if ctx.options().enable_cxx_namespaces {
@@ -127,14 +124,14 @@ pub mod ast_ty {
use ir::function::FunctionSig;
use ir::layout::Layout;
use ir::ty::FloatKind;
use std::str::FromStr;
use proc_macro2::{self, TokenStream};
use quote;
use proc_macro2;
pub fn raw_type(ctx: &BindgenContext, name: &str) -> TokenStream {
pub fn raw_type(ctx: &BindgenContext, name: &str) -> quote::Tokens {
let ident = ctx.rust_ident_raw(name);
match ctx.options().ctypes_prefix {
Some(ref prefix) => {
let prefix = TokenStream::from_str(prefix.as_str()).unwrap();
let prefix = ctx.rust_ident_raw(prefix.as_str());
quote! {
#prefix::#ident
}
@@ -149,7 +146,7 @@ ctx: &BindgenContext,
ctx: &BindgenContext,
fk: FloatKind,
layout: Option<Layout>,
) -> TokenStream {
) -> quote::Tokens {
// TODO: we probably should take the type layout into account more
// often?
//
@@ -189,25 +186,25 @@ }
}
}
pub fn int_expr(val: i64) -> TokenStream {
pub fn int_expr(val: i64) -> quote::Tokens {
// Don't use quote! { #val } because that adds the type suffix.
let val = proc_macro2::Literal::i64_unsuffixed(val);
quote!(#val)
}
pub fn uint_expr(val: u64) -> TokenStream {
pub fn uint_expr(val: u64) -> quote::Tokens {
// Don't use quote! { #val } because that adds the type suffix.
let val = proc_macro2::Literal::u64_unsuffixed(val);
quote!(#val)
}
pub fn byte_array_expr(bytes: &[u8]) -> TokenStream {
pub fn byte_array_expr(bytes: &[u8]) -> quote::Tokens {
let mut bytes: Vec<_> = bytes.iter().cloned().collect();
bytes.push(0);
quote! { [ #(#bytes),* ] }
}
pub fn cstr_expr(mut string: String) -> TokenStream {
pub fn cstr_expr(mut string: String) -> quote::Tokens {
string.push('\0');
let b = proc_macro2::Literal::byte_string(&string.as_bytes());
quote! {
@@ -218,7 +215,7 @@ pub mod ast_ty {
pub fn float_expr(
ctx: &BindgenContext,
f: f64,
) -> Result<TokenStream, ()> {
) -> Result<quote::Tokens, ()> {
if f.is_finite() {
let val = proc_macro2::Literal::f64_unsuffixed(f);
@@ -252,7 +249,7 @@ pub mod ast_ty {
pub fn arguments_from_signature(
signature: &FunctionSig,
ctx: &BindgenContext,
) -> Vec<TokenStream> {
) -> Vec<quote::Tokens> {
let mut unnamed_arguments = 0;
signature
.argument_types()

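(Illustration: the helpers.rs churn above is a migration between proc-macro2 APIs, 0.4's `Ident`/`TokenStream` being backed out versus 0.3's `Term`/`quote::Tokens` being restored. The core pattern is identical in both: build an identifier, splice it into a `quote!` template. A minimal sketch in the 0.4-era style, which also compiles against proc-macro2/quote 1.0:)

use proc_macro2::{Ident, Span, TokenStream};
use quote::quote;

// Build an attribute like #[repr(C)] or #[repr(packed)] from a string.
fn repr(which: &str) -> TokenStream {
    let which = Ident::new(which, Span::call_site());
    quote! {
        #[repr( #which )]
    }
}

fn main() {
    println!("{}", repr("C")); // prints the token stream for #[repr(C)]
}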
View File

@@ -3,14 +3,14 @@ use ir::context::BindgenContext;
use ir::derive::CanTriviallyDeriveDebug;
use ir::item::{HasTypeParamInArray, IsOpaque, Item, ItemCanonicalName};
use ir::ty::{RUST_DERIVE_IN_ARRAY_LIMIT, TypeKind};
use proc_macro2;
use quote;
pub fn gen_debug_impl(
ctx: &BindgenContext,
fields: &[Field],
item: &Item,
kind: CompKind,
) -> proc_macro2::TokenStream {
) -> quote::Tokens {
let struct_name = item.canonical_name(ctx);
let mut format_string = format!("{} {{{{ ", struct_name);
let mut tokens = vec![];
@@ -63,7 +63,7 @@ pub trait ImplDebug<'a> {
&self,
ctx: &BindgenContext,
extra: Self::Extra,
) -> Option<(String, Vec<proc_macro2::TokenStream>)>;
) -> Option<(String, Vec<quote::Tokens>)>;
}
impl<'a> ImplDebug<'a> for FieldData {
@@ -73,7 +73,7 @@ impl<'a> ImplDebug<'a> for FieldData {
&self,
ctx: &BindgenContext,
_: Self::Extra,
) -> Option<(String, Vec<proc_macro2::TokenStream>)> {
) -> Option<(String, Vec<quote::Tokens>)> {
if let Some(name) = self.name() {
ctx.resolve_item(self.ty()).impl_debug(ctx, name)
} else {
@@ -89,7 +89,7 @@ impl<'a> ImplDebug<'a> for BitfieldUnit {
&self,
ctx: &BindgenContext,
_: Self::Extra,
) -> Option<(String, Vec<proc_macro2::TokenStream>)> {
) -> Option<(String, Vec<quote::Tokens>)> {
let mut format_string = String::new();
let mut tokens = vec![];
for (i, bitfield) in self.bitfields().iter().enumerate() {
@@ -118,7 +118,7 @@ impl<'a> ImplDebug<'a> for Item {
&self,
ctx: &BindgenContext,
name: &str,
) -> Option<(String, Vec<proc_macro2::TokenStream>)> {
) -> Option<(String, Vec<quote::Tokens>)> {
let name_ident = ctx.rust_ident(name);
// We don't know if blacklisted items `impl Debug` or not, so we can't
@@ -136,8 +136,8 @@
fn debug_print(
name: &str,
name_ident: proc_macro2::TokenStream,
) -> Option<(String, Vec<proc_macro2::TokenStream>)> {
name_ident: quote::Tokens,
) -> Option<(String, Vec<quote::Tokens>)> {
Some((
format!("{}: {{:?}}", name),
vec![quote! {

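(Illustration: `gen_debug_impl` above assembles one `write!` format string for the whole struct plus a token stream per field. For a hypothetical generated struct `Foo { x: i32, y: i32 }`, not taken from this diff, the emitted impl has roughly this shape:)

impl ::std::fmt::Debug for Foo {
    fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
        write!(f, "Foo {{ x: {:?}, y: {:?} }}", self.x, self.y)
    }
}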
View File

@@ -3,6 +3,7 @@ use ir::comp::{CompInfo, CompKind, Field, FieldMethods};
use ir::context::BindgenContext;
use ir::item::{IsOpaque, Item};
use ir::ty::{TypeKind, RUST_DERIVE_IN_ARRAY_LIMIT};
use quote;
use proc_macro2;
/// Generate a manual implementation of `PartialEq` trait for the
@@ -11,8 +12,8 @@ pub fn gen_partialeq_impl(
ctx: &BindgenContext,
comp_info: &CompInfo,
item: &Item,
ty_for_impl: &proc_macro2::TokenStream,
) -> Option<proc_macro2::TokenStream> {
ty_for_impl: &quote::Tokens,
) -> Option<quote::Tokens> {
let mut tokens = vec![];
if item.is_opaque(ctx, &()) {
@@ -70,8 +71,8 @@ pub fn gen_partialeq_impl(
})
}
fn gen_field(ctx: &BindgenContext, ty_item: &Item, name: &str) -> proc_macro2::TokenStream {
fn quote_equals(name_ident: proc_macro2::Ident) -> proc_macro2::TokenStream {
fn gen_field(ctx: &BindgenContext, ty_item: &Item, name: &str) -> quote::Tokens {
fn quote_equals(name_ident: proc_macro2::Term) -> quote::Tokens {
quote! { self.#name_ident == other.#name_ident }
}

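(Illustration: `quote_equals` above expands to one `self.field == other.field` comparison per field, and `gen_partialeq_impl` chains them together. For the same hypothetical `Foo` with fields `x` and `y`, the generated impl looks like:)

impl PartialEq for Foo {
    fn eq(&self, other: &Foo) -> bool {
        self.x == other.x && self.y == other.y
    }
}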
View File

@@ -37,23 +37,22 @@ use ir::template::{AsTemplateParam, TemplateInstantiation, TemplateParameters};
use ir::ty::{Type, TypeKind};
use ir::var::Var;
use quote::TokenStreamExt;
use proc_macro2::{self, Ident, Span};
use quote;
use proc_macro2::{self, Term, Span};
use std;
use std::borrow::Cow;
use std::cell::Cell;
use std::collections::VecDeque;
use std::collections::{HashSet, VecDeque};
use std::collections::hash_map::{Entry, HashMap};
use std::fmt::Write;
use std::iter;
use std::ops;
use std::str::FromStr;
use {HashMap, HashSet, Entry};
// Name of type defined in constified enum module
pub static CONSTIFIED_ENUM_MODULE_REPR_NAME: &'static str = "Type";
fn top_level_path(ctx: &BindgenContext, item: &Item) -> Vec<proc_macro2::TokenStream> {
fn top_level_path(ctx: &BindgenContext, item: &Item) -> Vec<quote::Tokens> {
let mut path = vec![quote! { self }];
if ctx.options().enable_cxx_namespaces {
@@ -65,7 +64,7 @@ fn top_level_path(ctx: &BindgenContext, item: &Item) -> Vec<proc_macro2::TokenSt
path
}
fn root_import(ctx: &BindgenContext, module: &Item) -> proc_macro2::TokenStream {
fn root_import(ctx: &BindgenContext, module: &Item) -> quote::Tokens {
assert!(ctx.options().enable_cxx_namespaces, "Somebody messed it up");
assert!(module.is_module());
@@ -77,7 +76,7 @@ fn root_import(ctx: &BindgenContext, module: &Item) -> proc_macro2::TokenStream
let mut tokens = quote! {};
tokens.append_separated(path, quote!(::));
tokens.append_separated(path, Term::new("::", Span::call_site()));
quote! {
#[allow(unused_imports)]
@@ -86,7 +85,7 @@ }
}
struct CodegenResult<'a> {
items: Vec<proc_macro2::TokenStream>,
items: Vec<quote::Tokens>,
/// A monotonic counter used to add stable unique id's to stuff that doesn't
/// need to be referenced by anything.
@@ -95,6 +94,9 @@ struct CodegenResult<'a> {
/// Whether a bindgen union has been generated at least once.
saw_bindgen_union: bool,
/// Whether an union has been generated at least once.
saw_union: bool,
/// Whether an incomplete array has been generated at least once.
saw_incomplete_array: bool,
@@ -137,6 +139,7 @@ impl<'a> CodegenResult<'a> {
fn new(codegen_id: &'a Cell<usize>) -> Self {
CodegenResult {
items: vec![],
saw_union: false,
saw_bindgen_union: false,
saw_incomplete_array: false,
saw_objc: false,
@@ -150,7 +153,12 @@ impl<'a> CodegenResult<'a> {
}
}
fn saw_union(&mut self) {
self.saw_union = true;
}
fn saw_bindgen_union(&mut self) {
self.saw_union();
self.saw_bindgen_union = true;
}
@@ -204,7 +212,7 @@ impl<'a> CodegenResult<'a> {
self.vars_seen.insert(name.into());
}
fn inner<F>(&mut self, cb: F) -> Vec<proc_macro2::TokenStream>
fn inner<F>(&mut self, cb: F) -> Vec<quote::Tokens>
where
F: FnOnce(&mut Self),
{
@@ -212,18 +220,18 @@ impl<'a> CodegenResult<'a> {
cb(&mut new);
self.saw_union |= new.saw_union;
self.saw_incomplete_array |= new.saw_incomplete_array;
self.saw_objc |= new.saw_objc;
self.saw_block |= new.saw_block;
self.saw_bitfield_unit |= new.saw_bitfield_unit;
self.saw_bindgen_union |= new.saw_bindgen_union;
new.items
}
}
impl<'a> ops::Deref for CodegenResult<'a> {
type Target = Vec<proc_macro2::TokenStream>;
type Target = Vec<quote::Tokens>;
fn deref(&self) -> &Self::Target {
&self.items
@@ -239,11 +247,11 @@ impl<'a> ops::DerefMut for CodegenResult<'a> {
/// A trait to convert a rust type into a pointer, optionally const, to the same
/// type.
trait ToPtr {
fn to_ptr(self, is_const: bool) -> proc_macro2::TokenStream;
fn to_ptr(self, is_const: bool) -> quote::Tokens;
}
impl ToPtr for proc_macro2::TokenStream {
fn to_ptr(self, is_const: bool) -> proc_macro2::TokenStream {
impl ToPtr for quote::Tokens {
fn to_ptr(self, is_const: bool) -> quote::Tokens {
if is_const {
quote! { *const #self }
} else {
@@ -252,7 +260,7 @@ impl ToPtr for proc_macro2::TokenStream {
}
}
/// An extension trait for `proc_macro2::TokenStream` that lets us append any implicit
/// An extension trait for `quote::Tokens` that lets us append any implicit
/// template parameters that exist for some type, if necessary.
trait AppendImplicitTemplateParams {
fn append_implicit_template_params(
@@ -262,7 +270,7 @@ trait AppendImplicitTemplateParams {
);
}
impl AppendImplicitTemplateParams for proc_macro2::TokenStream {
impl AppendImplicitTemplateParams for quote::Tokens {
fn append_implicit_template_params(
&mut self,
ctx: &BindgenContext,
@@ -431,7 +439,10 @@ impl CodeGenerator for Module {
if let Some(raw_lines) = ctx.options().module_lines.get(&path) {
for raw_line in raw_lines {
found_any = true;
result.push(proc_macro2::TokenStream::from_str(raw_line).unwrap());
// FIXME(emilio): The use of `Term` is an abuse, but we abuse it
// in a bunch more places.
let line = Term::new(raw_line, Span::call_site());
result.push(quote! { #line });
}
}
@@ -745,7 +756,7 @@ impl CodeGenerator for Type {
pub use
});
let path = top_level_path(ctx, item);
tokens.append_separated(path, quote!(::));
tokens.append_separated(path, Term::new("::", Span::call_site()));
tokens.append_all(quote! {
:: #inner_rust_type as #rust_name ;
});
@@ -857,7 +868,7 @@ impl<'a> TryToRustTy for Vtable<'a> {
&self,
ctx: &BindgenContext,
_: &(),
) -> error::Result<proc_macro2::TokenStream> {
) -> error::Result<quote::Tokens> {
let name = ctx.rust_ident(self.canonical_name(ctx));
Ok(quote! {
#name
@@ -952,8 +963,8 @@ trait FieldCodegen<'a> {
methods: &mut M,
extra: Self::Extra,
) where
F: Extend<proc_macro2::TokenStream>,
M: Extend<proc_macro2::TokenStream>;
F: Extend<quote::Tokens>,
M: Extend<quote::Tokens>;
}
impl<'a> FieldCodegen<'a> for Field {
@@ -972,8 +983,8 @@ impl<'a> FieldCodegen<'a> for Field {
methods: &mut M,
_: (),
) where
F: Extend<proc_macro2::TokenStream>,
M: Extend<proc_macro2::TokenStream>,
F: Extend<quote::Tokens>,
M: Extend<quote::Tokens>,
{
match *self {
Field::DataMember(ref data) => {
@@ -1024,8 +1035,8 @@ impl<'a> FieldCodegen<'a> for FieldData {
methods: &mut M,
_: (),
) where
F: Extend<proc_macro2::TokenStream>,
M: Extend<proc_macro2::TokenStream>,
F: Extend<quote::Tokens>,
M: Extend<quote::Tokens>,
{
// Bitfields are handled by `FieldCodegen` implementations for
// `BitfieldUnit` and `Bitfield`.
@@ -1038,7 +1049,6 @@ impl<'a> FieldCodegen<'a> for FieldData {
// NB: If supported, we use proper `union` types.
let ty = if parent.is_union() && !parent.can_be_rust_union(ctx) {
result.saw_bindgen_union();
if ctx.options().enable_cxx_namespaces {
quote! {
root::__BindgenUnionField<#ty>
@@ -1160,8 +1170,8 @@ impl<'a> FieldCodegen<'a> for FieldData {
impl BitfieldUnit {
/// Get the constructor name for this bitfield unit.
fn ctor_name(&self) -> proc_macro2::TokenStream {
let ctor_name = Ident::new(&format!("new_bitfield_{}", self.nth()), Span::call_site());
fn ctor_name(&self) -> quote::Tokens {
let ctor_name = Term::new(&format!("new_bitfield_{}", self.nth()), Span::call_site());
quote! {
#ctor_name
}
@@ -1179,9 +1189,9 @@ impl Bitfield {
fn extend_ctor_impl(
&self,
ctx: &BindgenContext,
param_name: proc_macro2::TokenStream,
mut ctor_impl: proc_macro2::TokenStream,
) -> proc_macro2::TokenStream {
param_name: quote::Tokens,
mut ctor_impl: quote::Tokens,
) -> quote::Tokens {
let bitfield_ty = ctx.resolve_type(self.ty());
let bitfield_ty_layout = bitfield_ty.layout(ctx).expect(
"Bitfield without layout? Gah!",
@@ -1225,15 +1235,14 @@ impl<'a> FieldCodegen<'a> for BitfieldUnit {
methods: &mut M,
_: (),
) where
F: Extend<proc_macro2::TokenStream>,
M: Extend<proc_macro2::TokenStream>,
F: Extend<quote::Tokens>,
M: Extend<quote::Tokens>,
{
result.saw_bitfield_unit();
let field_ty = {
let ty = helpers::bitfield_unit(ctx, self.layout());
if parent.is_union() && !parent.can_be_rust_union(ctx) {
result.saw_bindgen_union();
if ctx.options().enable_cxx_namespaces {
quote! {
root::__BindgenUnionField<#ty>
@@ -1324,7 +1333,7 @@ impl<'a> FieldCodegen<'a> for BitfieldUnit {
fn bitfield_getter_name(
ctx: &BindgenContext,
bitfield: &Bitfield,
) -> proc_macro2::TokenStream {
) -> quote::Tokens {
let name = bitfield.getter_name();
let name = ctx.rust_ident_raw(name);
quote! { #name }
@@ -1333,7 +1342,7 @@ fn bitfield_getter_name(
fn bitfield_setter_name(
ctx: &BindgenContext,
bitfield: &Bitfield,
) -> proc_macro2::TokenStream {
) -> quote::Tokens {
let setter = bitfield.setter_name();
let setter = ctx.rust_ident_raw(setter);
quote! { #setter }
@@ -1355,13 +1364,13 @@ impl<'a> FieldCodegen<'a> for Bitfield {
methods: &mut M,
(unit_field_name, bitfield_representable_as_int): (&'a str, &mut bool),
) where
F: Extend<proc_macro2::TokenStream>,
M: Extend<proc_macro2::TokenStream>,
F: Extend<quote::Tokens>,
M: Extend<quote::Tokens>,
{
let prefix = ctx.trait_prefix();
let getter_name = bitfield_getter_name(ctx, self);
let setter_name = bitfield_setter_name(ctx, self);
let unit_field_ident = Ident::new(unit_field_name, Span::call_site());
let unit_field_ident = Term::new(unit_field_name, Span::call_site());
let bitfield_ty_item = ctx.resolve_item(self.ty());
let bitfield_ty = bitfield_ty_item.expect_type();
@@ -1504,15 +1513,14 @@ impl CodeGenerator for CompInfo {
continue;
}
let inner_item = ctx.resolve_item(base.ty);
let mut inner = inner_item.to_rust_ty_or_opaque(ctx, &());
inner.append_implicit_template_params(ctx, &inner_item);
let inner = base.ty.to_rust_ty_or_opaque(ctx, &());
let field_name = ctx.rust_ident(&base.field_name);
struct_layout.saw_base(inner_item.expect_type());
let base_ty = ctx.resolve_type(base.ty);
struct_layout.saw_base(base_ty);
fields.push(quote! {
pub #field_name: #inner,
pub #field_name : #inner ,
});
}
}
@@ -1586,6 +1594,11 @@ impl CodeGenerator for CompInfo {
}
}
} else if is_union && !self.is_forward_declaration() {
result.saw_union();
if !self.can_be_rust_union(ctx) {
result.saw_bindgen_union();
}
// TODO(emilio): It'd be nice to unify this with the struct path
// above somehow.
let layout = layout.expect("Unable to get layout information?");
@@ -1674,10 +1687,7 @@ impl CodeGenerator for CompInfo {
attributes.push(attributes::doc(comment));
}
if packed && !is_opaque {
let n = layout.map_or(1, |l| l.align);
assert!(ctx.options().rust_features().repr_packed_n || n == 1);
let packed_repr = if n == 1 { "packed".to_string() } else { format!("packed({})", n) };
attributes.push(attributes::repr_list(&["C", &packed_repr]));
attributes.push(attributes::repr_list(&["C", "packed"]));
} else {
attributes.push(attributes::repr("C"));
}
@@ -1754,8 +1764,6 @@ impl CodeGenerator for CompInfo {
derives.push("Eq");
}
derives.extend(item.annotations().derives().iter().map(String::as_str));
if !derives.is_empty() {
attributes.push(attributes::derives(&derives))
}
@@ -1795,7 +1803,7 @@ impl CodeGenerator for CompInfo {
if self.found_unknown_attr() {
warn!(
"Type {} has an unknown attribute that may affect layout",
canonical_ident
canonical_ident.as_str()
);
}
@@ -1809,7 +1817,7 @@ impl CodeGenerator for CompInfo {
if ctx.options().layout_tests && !self.is_forward_declaration() {
if let Some(layout) = layout {
let fn_name =
format!("bindgen_test_layout_{}", canonical_ident);
format!("bindgen_test_layout_{}", canonical_ident.as_str());
let fn_name = ctx.rust_ident_raw(fn_name);
let prefix = ctx.trait_prefix();
let size_of_expr = quote! {
@@ -1871,7 +1879,7 @@ impl CodeGenerator for CompInfo {
})
})
})
.collect::<Vec<proc_macro2::TokenStream>>();
.collect::<Vec<quote::Tokens>>();
asserts
};
@@ -2011,7 +2019,7 @@ trait MethodCodegen {
fn codegen_method<'a>(
&self,
ctx: &BindgenContext,
methods: &mut Vec<proc_macro2::TokenStream>,
methods: &mut Vec<quote::Tokens>,
method_names: &mut HashMap<String, usize>,
result: &mut CodegenResult<'a>,
parent: &CompInfo,
@@ -2022,7 +2030,7 @@ impl MethodCodegen for Method {
fn codegen_method<'a>(
&self,
ctx: &BindgenContext,
methods: &mut Vec<proc_macro2::TokenStream>,
methods: &mut Vec<quote::Tokens>,
method_names: &mut HashMap<String, usize>,
result: &mut CodegenResult<'a>,
_parent: &CompInfo,
@@ -2146,13 +2154,9 @@ impl MethodCodegen for Method {
let mut attrs = vec![];
attrs.push(attributes::inline());
if signature.must_use() && ctx.options().rust_features().must_use_function {
attrs.push(attributes::must_use());
}
let name = ctx.rust_ident(&name);
methods.push(quote! {
#(#attrs)*
#[inline]
pub unsafe fn #name ( #( #args ),* ) #ret {
#block
}
@@ -2227,24 +2231,24 @@ impl std::str::FromStr for EnumVariation {
enum EnumBuilder<'a> {
Rust {
codegen_depth: usize,
attrs: Vec<proc_macro2::TokenStream>,
ident: Ident,
tokens: proc_macro2::TokenStream,
attrs: Vec<quote::Tokens>,
ident: Term,
tokens: quote::Tokens,
emitted_any_variants: bool,
},
Bitfield {
codegen_depth: usize,
canonical_name: &'a str,
tokens: proc_macro2::TokenStream,
tokens: quote::Tokens,
},
Consts {
variants: Vec<proc_macro2::TokenStream>,
variants: Vec<quote::Tokens>,
codegen_depth: usize,
},
ModuleConsts {
codegen_depth: usize,
module_name: &'a str,
module_items: Vec<proc_macro2::TokenStream>,
module_items: Vec<quote::Tokens>,
},
}
@@ -2263,12 +2267,12 @@ impl<'a> EnumBuilder<'a> {
/// the representation, and which variation it should be generated as.
fn new(
name: &'a str,
attrs: Vec<proc_macro2::TokenStream>,
repr: proc_macro2::TokenStream,
attrs: Vec<quote::Tokens>,
repr: quote::Tokens,
enum_variation: EnumVariation,
enum_codegen_depth: usize,
) -> Self {
let ident = Ident::new(name, Span::call_site());
let ident = Term::new(name, Span::call_site());
match enum_variation {
EnumVariation::Bitfield => {
@@ -2306,7 +2310,7 @@ impl<'a> EnumBuilder<'a> {
}
EnumVariation::ModuleConsts => {
let ident = Ident::new(CONSTIFIED_ENUM_MODULE_REPR_NAME, Span::call_site());
let ident = Term::new(CONSTIFIED_ENUM_MODULE_REPR_NAME, Span::call_site());
let type_definition = quote! {
#( #attrs )*
pub type #ident = #repr;
@@ -2327,7 +2331,7 @@ impl<'a> EnumBuilder<'a> {
ctx: &BindgenContext,
variant: &EnumVariant,
mangling_prefix: Option<&str>,
rust_ty: proc_macro2::TokenStream,
rust_ty: quote::Tokens,
result: &mut CodegenResult<'b>,
is_ty_named: bool,
) -> Self {
@@ -2429,9 +2433,9 @@ impl<'a> EnumBuilder<'a> {
fn build<'b>(
self,
ctx: &BindgenContext,
rust_ty: proc_macro2::TokenStream,
rust_ty: quote::Tokens,
result: &mut CodegenResult<'b>,
) -> proc_macro2::TokenStream {
) -> quote::Tokens {
match self {
EnumBuilder::Rust { attrs, ident, tokens, emitted_any_variants, .. } => {
let variants = if !emitted_any_variants {
@@ -2577,11 +2581,7 @@ impl CodeGenerator for Enum {
if variation.is_rust() {
attrs.push(attributes::repr(repr_name));
} else if variation.is_bitfield() {
if ctx.options().rust_features.repr_transparent {
attrs.push(attributes::repr("transparent"));
} else {
attrs.push(attributes::repr("C"));
}
attrs.push(attributes::repr("C"));
}
if let Some(comment) = item.comment(ctx) {
@@ -2606,23 +2606,23 @@
ctx: &BindgenContext,
enum_: &Type,
// Only to avoid recomputing every time.
enum_canonical_name: &Ident,
enum_canonical_name: &Term,
// May be the same as "variant" if it's because the
// enum is unnamed and we still haven't seen the
// value.
variant_name: &Ident,
referenced_name: &Ident,
enum_rust_ty: proc_macro2::TokenStream,
variant_name: &str,
referenced_name: &Term,
enum_rust_ty: quote::Tokens,
result: &mut CodegenResult<'a>,
) {
let constant_name = if enum_.name().is_some() {
if ctx.options().prepend_enum_name {
format!("{}_{}", enum_canonical_name, variant_name)
format!("{}_{}", enum_canonical_name.as_str(), variant_name)
} else {
format!("{}", variant_name)
variant_name.into()
}
} else {
format!("{}", variant_name)
variant_name.into()
};
let constant_name = ctx.rust_ident(constant_name);
@@ -2646,7 +2646,7 @@
);
// A map where we keep a value -> variant relation.
let mut seen_values = HashMap::<_, Ident>::default();
let mut seen_values = HashMap::<_, Term>::new();
let enum_rust_ty = item.to_rust_ty_or_opaque(ctx, &());
let is_toplevel = item.is_toplevel(ctx);
@@ -2718,7 +2718,7 @@
ctx,
enum_ty,
&ident,
&Ident::new(&*mangled_name, Span::call_site()),
&*mangled_name,
existing_variant_name,
enum_rust_ty.clone(),
result,
@ -2759,11 +2759,11 @@ impl CodeGenerator for Enum {
let parent_name =
parent_canonical_name.as_ref().unwrap();
Ident::new(
Term::new(
&format!(
"{}_{}",
parent_name,
variant_name
variant_name.as_str()
),
Span::call_site()
)
@ -2773,7 +2773,7 @@ impl CodeGenerator for Enum {
ctx,
enum_ty,
&ident,
&mangled_name,
mangled_name.as_str(),
&variant_name,
enum_rust_ty.clone(),
result,
@ -2810,7 +2810,7 @@ trait TryToOpaque {
&self,
ctx: &BindgenContext,
extra: &Self::Extra,
) -> error::Result<proc_macro2::TokenStream> {
) -> error::Result<quote::Tokens> {
self.try_get_layout(ctx, extra).map(|layout| {
helpers::blob(ctx, layout)
})
@ -2837,7 +2837,7 @@ trait ToOpaque: TryToOpaque {
&self,
ctx: &BindgenContext,
extra: &Self::Extra,
) -> proc_macro2::TokenStream {
) -> quote::Tokens {
let layout = self.get_layout(ctx, extra);
helpers::blob(ctx, layout)
}
@ -2863,7 +2863,7 @@ trait TryToRustTy {
&self,
ctx: &BindgenContext,
extra: &Self::Extra,
) -> error::Result<proc_macro2::TokenStream>;
) -> error::Result<quote::Tokens>;
}
/// Fallible conversion to a Rust type or an opaque blob with the correct size
@ -2878,7 +2878,7 @@ trait TryToRustTyOrOpaque: TryToRustTy + TryToOpaque {
&self,
ctx: &BindgenContext,
extra: &<Self as TryToRustTyOrOpaque>::Extra,
) -> error::Result<proc_macro2::TokenStream>;
) -> error::Result<quote::Tokens>;
}
impl<E, T> TryToRustTyOrOpaque for T
@ -2892,7 +2892,7 @@ where
&self,
ctx: &BindgenContext,
extra: &E,
) -> error::Result<proc_macro2::TokenStream> {
) -> error::Result<quote::Tokens> {
self.try_to_rust_ty(ctx, extra).or_else(
|_| if let Ok(layout) =
self.try_get_layout(ctx, extra)
@ -2929,7 +2929,7 @@ trait ToRustTyOrOpaque: TryToRustTy + ToOpaque {
&self,
ctx: &BindgenContext,
extra: &<Self as ToRustTyOrOpaque>::Extra,
) -> proc_macro2::TokenStream;
) -> quote::Tokens;
}
impl<E, T> ToRustTyOrOpaque for T
@ -2942,7 +2942,7 @@ where
&self,
ctx: &BindgenContext,
extra: &E,
) -> proc_macro2::TokenStream {
) -> quote::Tokens {
self.try_to_rust_ty(ctx, extra).unwrap_or_else(|_| {
self.to_opaque(ctx, extra)
})
@ -2974,7 +2974,7 @@ where
&self,
ctx: &BindgenContext,
_: &(),
) -> error::Result<proc_macro2::TokenStream> {
) -> error::Result<quote::Tokens> {
ctx.resolve_item((*self).into()).try_to_rust_ty(ctx, &())
}
}
@ -2998,7 +2998,7 @@ impl TryToRustTy for Item {
&self,
ctx: &BindgenContext,
_: &(),
) -> error::Result<proc_macro2::TokenStream> {
) -> error::Result<quote::Tokens> {
self.kind().expect_type().try_to_rust_ty(ctx, self)
}
}
@ -3022,7 +3022,7 @@ impl TryToRustTy for Type {
&self,
ctx: &BindgenContext,
item: &Item,
) -> error::Result<proc_macro2::TokenStream> {
) -> error::Result<quote::Tokens> {
use self::helpers::ast_ty::*;
match *self.kind() {
@ -3124,7 +3124,7 @@ impl TryToRustTy for Type {
}
TypeKind::Enum(..) => {
let path = item.namespace_aware_canonical_path(ctx);
let path = proc_macro2::TokenStream::from_str(&path.join("::")).unwrap();
let path = Term::new(&path.join("::"), Span::call_site());
Ok(quote!(#path))
}
TypeKind::TemplateInstantiation(ref inst) => {
@ -3227,7 +3227,7 @@ impl TryToRustTy for TemplateInstantiation {
&self,
ctx: &BindgenContext,
item: &Item,
) -> error::Result<proc_macro2::TokenStream> {
) -> error::Result<quote::Tokens> {
if self.is_opaque(ctx, item) {
return Err(error::Error::InstantiationOfOpaqueType);
}
@ -3239,7 +3239,7 @@ impl TryToRustTy for TemplateInstantiation {
let mut ty = quote! {};
let def_path = def.namespace_aware_canonical_path(ctx);
ty.append_separated(def_path.into_iter().map(|p| ctx.rust_ident(p)), quote!(::));
ty.append_separated(def_path.into_iter().map(|p| ctx.rust_ident(p)), Term::new("::", Span::call_site()));
let def_params = def.self_template_params(ctx);
if def_params.is_empty() {
@ -3291,7 +3291,7 @@ impl TryToRustTy for FunctionSig {
&self,
ctx: &BindgenContext,
_: &(),
) -> error::Result<proc_macro2::TokenStream> {
) -> error::Result<quote::Tokens> {
// TODO: we might want to consider ignoring the reference return value.
let ret = utils::fnsig_return_ty(ctx, &self);
let arguments = utils::fnsig_arguments(ctx, &self);
@ -3300,7 +3300,7 @@ impl TryToRustTy for FunctionSig {
match abi {
Abi::ThisCall if !ctx.options().rust_features().thiscall_abi => {
warn!("Skipping function with thiscall ABI that isn't supported by the configured Rust target");
Ok(proc_macro2::TokenStream::new())
Ok(quote::Tokens::new())
}
_ => {
Ok(quote! {
@ -3374,10 +3374,6 @@ impl CodeGenerator for Function {
let mut attributes = vec![];
if signature.must_use() && ctx.options().rust_features().must_use_function {
attributes.push(attributes::must_use());
}
if let Some(comment) = item.comment(ctx) {
attributes.push(attributes::doc(comment));
}
@ -3432,7 +3428,7 @@ fn objc_method_codegen(
method: &ObjCMethod,
class_name: Option<&str>,
prefix: &str,
) -> (proc_macro2::TokenStream, proc_macro2::TokenStream) {
) -> (quote::Tokens, quote::Tokens) {
let signature = method.signature();
let fn_args = utils::fnsig_arguments(ctx, signature);
let fn_ret = utils::fnsig_return_ty(ctx, signature);
@ -3546,7 +3542,7 @@ impl CodeGenerator for ObjCInterface {
}
}
pub(crate) fn codegen(context: BindgenContext) -> (Vec<proc_macro2::TokenStream>, BindgenOptions) {
pub(crate) fn codegen(context: BindgenContext) -> (Vec<quote::Tokens>, BindgenOptions) {
context.gen(|context| {
let _t = context.timer("codegen");
let counter = Cell::new(0);
@ -3554,12 +3550,11 @@ pub(crate) fn codegen(context: BindgenContext) -> (Vec<proc_macro2::TokenStream>
debug!("codegen: {:?}", context.options());
let codegen_items = context.codegen_items();
if context.options().emit_ir {
let codegen_items = context.codegen_items();
for (id, item) in context.items() {
if codegen_items.contains(&id) {
println!("ir: {:?} = {:#?}", id, item);
}
for &id in codegen_items {
let item = context.resolve_item(id);
println!("ir: {:?} = {:#?}", id, item);
}
}
@ -3583,12 +3578,12 @@ mod utils {
use ir::function::FunctionSig;
use ir::item::{Item, ItemCanonicalPath};
use ir::ty::TypeKind;
use proc_macro2;
use quote;
use proc_macro2::{Term, Span};
use std::mem;
use std::str::FromStr;
pub fn prepend_bitfield_unit_type(result: &mut Vec<proc_macro2::TokenStream>) {
let bitfield_unit_type = proc_macro2::TokenStream::from_str(include_str!("./bitfield_unit.rs")).unwrap();
pub fn prepend_bitfield_unit_type(result: &mut Vec<quote::Tokens>) {
let bitfield_unit_type = Term::new(include_str!("./bitfield_unit.rs"), Span::call_site());
let bitfield_unit_type = quote!(#bitfield_unit_type);
let items = vec![bitfield_unit_type];
@ -3598,7 +3593,7 @@ mod utils {
pub fn prepend_objc_header(
ctx: &BindgenContext,
result: &mut Vec<proc_macro2::TokenStream>,
result: &mut Vec<quote::Tokens>,
) {
let use_objc = if ctx.options().objc_extern_crate {
quote! {
@ -3623,7 +3618,7 @@ mod utils {
pub fn prepend_block_header(
ctx: &BindgenContext,
result: &mut Vec<proc_macro2::TokenStream>,
result: &mut Vec<quote::Tokens>,
) {
let use_block = if ctx.options().block_extern_crate {
quote! {
@ -3642,7 +3637,7 @@ mod utils {
pub fn prepend_union_types(
ctx: &BindgenContext,
result: &mut Vec<proc_macro2::TokenStream>,
result: &mut Vec<quote::Tokens>,
) {
let prefix = ctx.trait_prefix();
@ -3741,7 +3736,7 @@ mod utils {
pub fn prepend_incomplete_array_types(
ctx: &BindgenContext,
result: &mut Vec<proc_macro2::TokenStream>,
result: &mut Vec<quote::Tokens>,
) {
let prefix = ctx.trait_prefix();
@ -3749,14 +3744,14 @@ mod utils {
#[repr(C)]
#[derive(Default)]
pub struct __IncompleteArrayField<T>(
::#prefix::marker::PhantomData<T>, [T; 0]);
::#prefix::marker::PhantomData<T>);
};
let incomplete_array_impl = quote! {
impl<T> __IncompleteArrayField<T> {
#[inline]
pub fn new() -> Self {
__IncompleteArrayField(::#prefix::marker::PhantomData, [])
__IncompleteArrayField(::#prefix::marker::PhantomData)
}
#[inline]
@ -3799,17 +3794,22 @@ mod utils {
}
};
let incomplete_array_copy_impl = quote! {
impl<T> ::#prefix::marker::Copy for __IncompleteArrayField<T> {}
};
let items = vec![incomplete_array_decl,
incomplete_array_impl,
incomplete_array_debug_impl,
incomplete_array_clone_impl];
incomplete_array_clone_impl,
incomplete_array_copy_impl];
let old_items = mem::replace(result, items);
result.extend(old_items.into_iter());
}
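The restored `__IncompleteArrayField` drops the `[T; 0]` tail member and adds back a hand-written `Copy` impl. A self-contained sketch of that restored shape follows; the `Clone` body is elided in the hunk above, so the one here is an assumption:

#![allow(non_camel_case_types)]

use std::marker::PhantomData;

#[repr(C)]
#[derive(Default)]
pub struct __IncompleteArrayField<T>(PhantomData<T>);

impl<T> __IncompleteArrayField<T> {
    #[inline]
    pub fn new() -> Self {
        __IncompleteArrayField(PhantomData)
    }
}

// Written by hand rather than derived: `#[derive(Copy, Clone)]` would add a
// `T: Copy` bound that a zero-sized marker field does not need.
impl<T> Copy for __IncompleteArrayField<T> {}

impl<T> Clone for __IncompleteArrayField<T> {
    fn clone(&self) -> Self {
        Self::new()
    }
}

fn main() {
    let field = __IncompleteArrayField::<u8>::new();
    let _copy = field;          // Copy: `field` stays usable afterwards
    let _clone = field.clone();
}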
pub fn prepend_complex_type(
result: &mut Vec<proc_macro2::TokenStream>,
result: &mut Vec<quote::Tokens>,
) {
let complex_type = quote! {
#[derive(PartialEq, Copy, Clone, Hash, Debug, Default)]
@ -3828,14 +3828,18 @@ mod utils {
pub fn build_path(
item: &Item,
ctx: &BindgenContext,
) -> error::Result<proc_macro2::TokenStream> {
) -> error::Result<quote::Tokens> {
use proc_macro2::{Term, Span};
let path = item.namespace_aware_canonical_path(ctx);
let tokens = proc_macro2::TokenStream::from_str(&path.join("::")).unwrap();
let path = Term::new(&path.join("::"), Span::call_site());
let tokens = quote! {#path};
//tokens.append_separated(path, "::");
Ok(tokens)
}
fn primitive_ty(ctx: &BindgenContext, name: &str) -> proc_macro2::TokenStream {
fn primitive_ty(ctx: &BindgenContext, name: &str) -> quote::Tokens {
let ident = ctx.rust_ident_raw(name);
quote! {
#ident
@ -3845,7 +3849,7 @@ mod utils {
pub fn type_from_named(
ctx: &BindgenContext,
name: &str,
) -> Option<proc_macro2::TokenStream> {
) -> Option<quote::Tokens> {
// FIXME: We could use the inner item to check this is really a
// primitive type but, who the heck overrides these anyway?
Some(match name {
@ -3868,7 +3872,7 @@ mod utils {
pub fn fnsig_return_ty(
ctx: &BindgenContext,
sig: &FunctionSig,
) -> proc_macro2::TokenStream {
) -> quote::Tokens {
let return_item = ctx.resolve_item(sig.return_type());
if let TypeKind::Void = *return_item.kind().expect_type().kind() {
quote! { }
@ -3883,7 +3887,7 @@ mod utils {
pub fn fnsig_arguments(
ctx: &BindgenContext,
sig: &FunctionSig,
) -> Vec<proc_macro2::TokenStream> {
) -> Vec<quote::Tokens> {
use super::ToPtr;
let mut unnamed_arguments = 0;
@ -3946,7 +3950,7 @@ mod utils {
pub fn fnsig_block(
ctx: &BindgenContext,
sig: &FunctionSig,
) -> proc_macro2::TokenStream {
) -> quote::Tokens {
let args = sig.argument_types().iter().map(|&(_, ty)| {
let arg_item = ctx.resolve_item(ty);
@ -3961,7 +3965,7 @@ mod utils {
};
quote! {
*const ::block::Block<(#(#args,)*), #ret_ty>
*const ::block::Block<(#(#args),*), #ret_ty>
}
}
}



@ -6,7 +6,8 @@ use ir::comp::CompInfo;
use ir::context::BindgenContext;
use ir::layout::Layout;
use ir::ty::{Type, TypeKind};
use proc_macro2::{self, Ident, Span};
use quote;
use proc_macro2::{Term, Span};
use std::cmp;
/// Trace the layout of struct.
@ -153,7 +154,7 @@ impl<'a> StructLayoutTracker<'a> {
field_name: &str,
field_ty: &Type,
field_offset: Option<usize>,
) -> Option<proc_macro2::TokenStream> {
) -> Option<quote::Tokens> {
let mut field_layout = field_ty.layout(self.ctx)?;
if let TypeKind::Array(inner, len) =
@ -235,7 +236,7 @@ impl<'a> StructLayoutTracker<'a> {
padding_layout.map(|layout| self.padding_field(layout))
}
pub fn pad_struct(&mut self, layout: Layout) -> Option<proc_macro2::TokenStream> {
pub fn pad_struct(&mut self, layout: Layout) -> Option<quote::Tokens> {
debug!(
"pad_struct:\n\tself = {:#?}\n\tlayout = {:#?}",
self,
@ -309,13 +310,13 @@ impl<'a> StructLayoutTracker<'a> {
align_to(self.latest_offset, layout.align) - self.latest_offset
}
fn padding_field(&mut self, layout: Layout) -> proc_macro2::TokenStream {
fn padding_field(&mut self, layout: Layout) -> quote::Tokens {
let ty = helpers::blob(self.ctx, layout);
let padding_count = self.padding_count;
self.padding_count += 1;
let padding_field_name = Ident::new(&format!("__bindgen_padding_{}", padding_count), Span::call_site());
let padding_field_name = Term::new(&format!("__bindgen_padding_{}", padding_count), Span::call_site());
self.max_field_align = cmp::max(self.max_field_align, layout.align);


@ -1,8 +1,6 @@
//! Macros for defining extra assertions that should only be checked in testing
//! and/or CI when the `testing_only_extra_assertions` feature is enabled.
/// Simple macro that forwards to assert! when using
/// testing_only_extra_assertions.
#[macro_export]
macro_rules! extra_assert {
( $cond:expr ) => {
@ -17,8 +15,6 @@ macro_rules! extra_assert {
};
}
/// Simple macro that forwards to assert_eq! when using
/// testing_only_extra_assertions.
#[macro_export]
macro_rules! extra_assert_eq {
( $lhs:expr , $rhs:expr ) => {
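A usage sketch of the forwarding pattern both macros implement; the `cfg!` form is an assumption, since the macro bodies are elided in the hunks above:

macro_rules! extra_assert {
    ( $cond:expr ) => {
        if cfg!(feature = "testing_only_extra_assertions") {
            assert!($cond);
        }
    };
}

fn main() {
    extra_assert!(1 + 1 == 2); // checked only when the feature is enabled
}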


@ -98,12 +98,6 @@ macro_rules! rust_target_base {
=> Stable_1_25 => 1.25;
/// Rust stable 1.26
=> Stable_1_26 => 1.26;
/// Rust stable 1.27
=> Stable_1_27 => 1.27;
/// Rust stable 1.28
=> Stable_1_28 => 1.28;
/// Rust stable 1.33
=> Stable_1_33 => 1.33;
/// Nightly rust
=> Nightly => nightly;
);
@ -184,18 +178,6 @@ rust_feature_def!(
/// [i128 / u128 support](https://doc.rust-lang.org/std/primitive.i128.html)
=> i128_and_u128;
}
Stable_1_27 {
/// `must_use` attribute on functions ([PR](https://github.com/rust-lang/rust/pull/48925))
=> must_use_function;
}
Stable_1_28 {
/// repr(transparent) ([PR](https://github.com/rust-lang/rust/pull/51562))
=> repr_transparent;
}
Stable_1_33 {
/// repr(packed(N)) ([PR](https://github.com/rust-lang/rust/pull/57049))
=> repr_packed_n;
}
Nightly {
/// `thiscall` calling convention ([Tracking issue](https://github.com/rust-lang/rust/issues/42202))
=> thiscall_abi;
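Removing the 1.27/1.28/1.33 targets also removes the features they gated: `must_use` attributes on functions, `#[repr(transparent)]`, and `#[repr(packed(N))]`. For bitfield enums this ties back to the earlier attrs hunk: the restored code always emits `#[repr(C)]` on the generated newtype. A sketch of the two shapes, assuming the usual bindgen newtype pattern for bitfield enums:

// What the restored code emits for a bitfield enum:
#[repr(C)]
#[derive(Copy, Clone)]
pub struct FooFlags(pub u32);

// With a rust target of 1.28+, the backed-out code would have emitted
// `#[repr(transparent)]` on the same newtype instead of `#[repr(C)]`.

fn main() {
    let flags = FooFlags(0b01);
    println!("{:#x}", flags.0);
}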


@ -11,7 +11,8 @@ use ir::template::TemplateParameters;
use ir::traversal::EdgeKind;
use ir::ty::RUST_DERIVE_IN_ARRAY_LIMIT;
use ir::ty::TypeKind;
use {HashMap, HashSet};
use std::collections::HashMap;
use std::collections::HashSet;
/// An analysis that finds for each IR item whether copy cannot be derived.
///
@ -102,7 +103,7 @@ impl<'ctx> MonotoneFramework for CannotDeriveCopy<'ctx> {
type Output = HashSet<ItemId>;
fn new(ctx: &'ctx BindgenContext) -> CannotDeriveCopy<'ctx> {
let cannot_derive_copy = HashSet::default();
let cannot_derive_copy = HashSet::new();
let dependencies = generate_dependencies(ctx, Self::consider_edge);
CannotDeriveCopy {


@ -10,7 +10,8 @@ use ir::item::IsOpaque;
use ir::traversal::EdgeKind;
use ir::ty::RUST_DERIVE_IN_ARRAY_LIMIT;
use ir::ty::TypeKind;
use {HashMap, HashSet};
use std::collections::HashMap;
use std::collections::HashSet;
/// An analysis that finds for each IR item whether debug cannot be derived.
///
@ -103,7 +104,7 @@ impl<'ctx> MonotoneFramework for CannotDeriveDebug<'ctx> {
type Output = HashSet<ItemId>;
fn new(ctx: &'ctx BindgenContext) -> CannotDeriveDebug<'ctx> {
let cannot_derive_debug = HashSet::default();
let cannot_derive_debug = HashSet::new();
let dependencies = generate_dependencies(ctx, Self::consider_edge);
CannotDeriveDebug {


@ -12,7 +12,8 @@ use ir::traversal::EdgeKind;
use ir::traversal::Trace;
use ir::ty::RUST_DERIVE_IN_ARRAY_LIMIT;
use ir::ty::TypeKind;
use {HashMap, HashSet};
use std::collections::HashMap;
use std::collections::HashSet;
/// An analysis that finds for each IR item whether default cannot be derived.
///
@ -98,8 +99,8 @@ impl<'ctx> MonotoneFramework for CannotDeriveDefault<'ctx> {
type Output = HashSet<ItemId>;
fn new(ctx: &'ctx BindgenContext) -> CannotDeriveDefault<'ctx> {
let mut dependencies = HashMap::default();
let cannot_derive_default = HashSet::default();
let mut dependencies = HashMap::new();
let cannot_derive_default = HashSet::new();
let whitelisted_items: HashSet<_> =
ctx.whitelisted_items().iter().cloned().collect();


@ -10,7 +10,8 @@ use ir::item::IsOpaque;
use ir::traversal::EdgeKind;
use ir::ty::RUST_DERIVE_IN_ARRAY_LIMIT;
use ir::ty::TypeKind;
use {HashMap, HashSet};
use std::collections::HashMap;
use std::collections::HashSet;
/// An analysis that finds for each IR item whether hash cannot be derived.
///
@ -95,7 +96,7 @@ impl<'ctx> MonotoneFramework for CannotDeriveHash<'ctx> {
type Output = HashSet<ItemId>;
fn new(ctx: &'ctx BindgenContext) -> CannotDeriveHash<'ctx> {
let cannot_derive_hash = HashSet::default();
let cannot_derive_hash = HashSet::new();
let dependencies = generate_dependencies(ctx, Self::consider_edge);
CannotDeriveHash {


@ -9,7 +9,8 @@ use ir::item::{Item, IsOpaque};
use ir::traversal::{EdgeKind, Trace};
use ir::ty::RUST_DERIVE_IN_ARRAY_LIMIT;
use ir::ty::{TypeKind, Type};
use {HashMap, Entry};
use std::collections::HashMap;
use std::collections::hash_map::Entry;
/// An analysis that finds for each IR item whether `PartialEq`/`PartialOrd`
/// cannot be derived.
@ -325,7 +326,7 @@ impl<'ctx> MonotoneFramework for CannotDerivePartialEqOrPartialOrd<'ctx> {
fn new(
ctx: &'ctx BindgenContext,
) -> CannotDerivePartialEqOrPartialOrd<'ctx> {
let can_derive_partialeq_or_partialord = HashMap::default();
let can_derive_partialeq_or_partialord = HashMap::new();
let dependencies = generate_dependencies(ctx, Self::consider_edge);
CannotDerivePartialEqOrPartialOrd {


@ -5,7 +5,8 @@ use ir::context::{BindgenContext, ItemId};
use ir::traversal::EdgeKind;
use ir::comp::{CompKind, Field, FieldMethods};
use ir::ty::TypeKind;
use {HashMap, HashSet};
use std::collections::HashMap;
use std::collections::HashSet;
/// An analysis that finds for each IR item whether it has a destructor or not
///
@ -72,7 +73,7 @@ impl<'ctx> MonotoneFramework for HasDestructorAnalysis<'ctx> {
type Output = HashSet<ItemId>;
fn new(ctx: &'ctx BindgenContext) -> Self {
let have_destructor = HashSet::default();
let have_destructor = HashSet::new();
let dependencies = generate_dependencies(ctx, Self::consider_edge);
HasDestructorAnalysis {


@ -1,7 +1,8 @@
//! Determining which types has float.
use super::{ConstrainResult, MonotoneFramework, generate_dependencies};
use {HashSet, HashMap};
use std::collections::HashSet;
use std::collections::HashMap;
use ir::context::{BindgenContext, ItemId};
use ir::traversal::EdgeKind;
use ir::ty::TypeKind;
@ -83,7 +84,7 @@ impl<'ctx> MonotoneFramework for HasFloat<'ctx> {
type Output = HashSet<ItemId>;
fn new(ctx: &'ctx BindgenContext) -> HasFloat<'ctx> {
let has_float = HashSet::default();
let has_float = HashSet::new();
let dependencies = generate_dependencies(ctx, Self::consider_edge);
HasFloat {


@ -6,7 +6,8 @@ use ir::comp::FieldMethods;
use ir::context::{BindgenContext, ItemId};
use ir::traversal::EdgeKind;
use ir::ty::TypeKind;
use {HashMap, HashSet};
use std::collections::HashMap;
use std::collections::HashSet;
/// An analysis that finds for each IR item whether it has array or not.
///
@ -91,7 +92,7 @@ impl<'ctx> MonotoneFramework for HasTypeParameterInArray<'ctx> {
fn new(
ctx: &'ctx BindgenContext,
) -> HasTypeParameterInArray<'ctx> {
let has_type_parameter_in_array = HashSet::default();
let has_type_parameter_in_array = HashSet::new();
let dependencies = generate_dependencies(ctx, Self::consider_edge);
HasTypeParameterInArray {


@ -5,8 +5,9 @@ use ir::context::{BindgenContext, ItemId};
use ir::traversal::EdgeKind;
use ir::ty::TypeKind;
use std::cmp;
use std::collections::HashMap;
use std::collections::hash_map::Entry;
use std::ops;
use {HashMap, Entry};
/// The result of the `HasVtableAnalysis` for an individual item.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Ord)]
@ -147,7 +148,7 @@ impl<'ctx> MonotoneFramework for HasVtableAnalysis<'ctx> {
type Output = HashMap<ItemId, HasVtableResult>;
fn new(ctx: &'ctx BindgenContext) -> HasVtableAnalysis<'ctx> {
let have_vtable = HashMap::default();
let have_vtable = HashMap::new();
let dependencies = generate_dependencies(ctx, Self::consider_edge);
HasVtableAnalysis {


@ -64,7 +64,7 @@ pub use self::sizedness::{Sizedness, SizednessAnalysis, SizednessResult};
use ir::context::{BindgenContext, ItemId};
use ir::traversal::{EdgeKind, Trace};
use HashMap;
use std::collections::HashMap;
use std::fmt;
use std::ops;
@ -190,7 +190,7 @@ pub fn generate_dependencies<F>(
where
F: Fn(EdgeKind) -> bool,
{
let mut dependencies = HashMap::default();
let mut dependencies = HashMap::new();
for &item in ctx.whitelisted_items() {
dependencies.entry(item).or_insert(vec![]);
@ -219,7 +219,7 @@ where
#[cfg(test)]
mod tests {
use super::*;
use {HashMap, HashSet};
use std::collections::{HashMap, HashSet};
// Here we find the set of nodes that are reachable from any given
// node. This is a lattice mapping nodes to subsets of all nodes. Our join
@ -334,14 +334,14 @@ mod tests {
// implementation. Don't copy this code outside of this test!
let original_size =
self.reachable.entry(node).or_insert(HashSet::default()).len();
self.reachable.entry(node).or_insert(HashSet::new()).len();
for sub_node in self.graph.0[&node].iter() {
self.reachable.get_mut(&node).unwrap().insert(*sub_node);
let sub_reachable = self.reachable
.entry(*sub_node)
.or_insert(HashSet::default())
.or_insert(HashSet::new())
.clone();
for transitive in sub_reachable {
@ -386,7 +386,7 @@ mod tests {
nodes.as_ref().iter().cloned().map(Node).collect()
}
let mut expected = HashMap::default();
let mut expected = HashMap::new();
expected.insert(Node(1), nodes([3, 4, 5, 6, 7, 8]));
expected.insert(Node(2), nodes([2]));
expected.insert(Node(3), nodes([3, 4, 5, 6, 7, 8]));


@ -5,8 +5,10 @@ use ir::context::{BindgenContext, TypeId};
use ir::item::IsOpaque;
use ir::traversal::EdgeKind;
use ir::ty::TypeKind;
use std::{cmp, ops};
use {HashMap, Entry};
use std::cmp;
use std::collections::HashMap;
use std::collections::hash_map::Entry;
use std::ops;
/// The result of the `Sizedness` analysis for an individual item.
///
@ -192,7 +194,7 @@ impl<'ctx> MonotoneFramework for SizednessAnalysis<'ctx> {
})
.collect();
let sized = HashMap::default();
let sized = HashMap::new();
SizednessAnalysis {
ctx,


@ -94,7 +94,7 @@ use ir::item::{Item, ItemSet};
use ir::template::{TemplateInstantiation, TemplateParameters};
use ir::traversal::{EdgeKind, Trace};
use ir::ty::TypeKind;
use {HashMap, HashSet};
use std::collections::{HashMap, HashSet};
/// An analysis that finds for each IR item its set of template parameters that
/// it uses.
@ -373,8 +373,8 @@ impl<'ctx> MonotoneFramework for UsedTemplateParameters<'ctx> {
fn new(
ctx: &'ctx BindgenContext,
) -> UsedTemplateParameters<'ctx> {
let mut used = HashMap::default();
let mut dependencies = HashMap::default();
let mut used = HashMap::new();
let mut dependencies = HashMap::new();
let whitelisted_items: HashSet<_> =
ctx.whitelisted_items().iter().cloned().collect();


@ -58,8 +58,6 @@ pub struct Annotations {
/// In that case, bindgen will generate a constant for `Bar` instead of
/// `Baz`.
constify_enum_variant: bool,
/// List of explicit derives for this type.
derives: Vec<String>,
}
fn parse_accessor(s: &str) -> FieldAccessorKind {
@ -81,7 +79,6 @@ impl Default for Annotations {
private_fields: None,
accessor_kind: None,
constify_enum_variant: false,
derives: vec![],
}
}
}
@ -133,11 +130,6 @@ impl Annotations {
self.use_instead_of.as_ref().map(|s| &**s)
}
/// The list of derives that have been specified in this annotation.
pub fn derives(&self) -> &[String] {
&self.derives
}
/// Should we avoid implementing the `Copy` trait?
pub fn disallow_copy(&self) -> bool {
self.disallow_copy
@ -173,9 +165,6 @@ impl Annotations {
attr.value.split("::").map(Into::into).collect(),
)
}
"derive" => {
self.derives.push(attr.value)
}
"private" => {
self.private_fields = Some(attr.value != "false")
}


@ -48,7 +48,7 @@ fn preprocess_single_lines(comment: &str, indent: usize) -> String {
let mut is_first = true;
let lines: Vec<_> = comment
.lines()
.map(|l| l.trim().trim_start_matches('/'))
.map(|l| l.trim().trim_left_matches('/'))
.map(|l| {
let indent = if is_first { "" } else { &*indent };
is_first = false;
@ -60,15 +60,15 @@ fn preprocess_single_lines(comment: &str, indent: usize) -> String {
fn preprocess_multi_line(comment: &str, indent: usize) -> String {
let comment = comment
.trim_start_matches('/')
.trim_end_matches('/')
.trim_end_matches('*');
.trim_left_matches('/')
.trim_right_matches('/')
.trim_right_matches('*');
let indent = make_indent(indent);
// Strip any potential `*` characters preceding each line.
let mut is_first = true;
let mut lines: Vec<_> = comment.lines()
.map(|line| line.trim().trim_start_matches('*').trim_start_matches('!'))
.map(|line| line.trim().trim_left_matches('*').trim_left_matches('!'))
.skip_while(|line| line.trim().is_empty()) // Skip the first empty lines.
.map(|line| {
let indent = if is_first { "" } else { &*indent };


@ -17,7 +17,7 @@ use peeking_take_while::PeekableExt;
use std::cmp;
use std::io;
use std::mem;
use HashMap;
use std::collections::HashMap;
/// The kind of compound type.
#[derive(Debug, Copy, Clone, PartialEq)]
@ -1517,10 +1517,6 @@ impl CompInfo {
}) {
info!("Found a struct that was defined within `#pragma packed(...)`");
return true;
} else if self.has_own_virtual_method {
if parent_layout.align == 1 {
return true;
}
}
}
@ -1653,19 +1649,16 @@ impl IsOpaque for CompInfo {
return true;
}
if !ctx.options().rust_features().repr_packed_n {
// If we don't have `#[repr(packed(N)]`, the best we can
// do is make this struct opaque.
//
// See https://github.com/rust-lang-nursery/rust-bindgen/issues/537 and
// https://github.com/rust-lang/rust/issues/33158
if self.is_packed(ctx, layout) && layout.map_or(false, |l| l.align > 1) {
warn!("Found a type that is both packed and aligned to greater than \
1; Rust before version 1.33 doesn't have `#[repr(packed(N))]`, so we \
are treating it as opaque. You may wish to set bindgen's rust target \
version to 1.33 or later to enable `#[repr(packed(N))]` support.");
return true;
}
// We don't have `#[repr(packed = "N")]` in Rust yet, so the best we can
// do is make this struct opaque.
//
// See https://github.com/rust-lang-nursery/rust-bindgen/issues/537 and
// https://github.com/rust-lang/rust/issues/33158
if self.is_packed(ctx, layout) && layout.map_or(false, |l| l.align > 1) {
warn!("Found a type that is both packed and aligned to greater than \
1; Rust doesn't have `#[repr(packed = \"N\")]` yet, so we \
are treating it as opaque");
return true;
}
false


@ -10,7 +10,7 @@ use super::derive::{CanDeriveCopy, CanDeriveDebug, CanDeriveDefault,
CanDeriveHash, CanDerivePartialOrd, CanDeriveOrd,
CanDerivePartialEq, CanDeriveEq, CanDerive};
use super::int::IntKind;
use super::item::{IsOpaque, Item, ItemAncestors, ItemSet};
use super::item::{IsOpaque, Item, ItemAncestors, ItemCanonicalPath, ItemSet};
use super::item_kind::ItemKind;
use super::module::{Module, ModuleKind};
use super::template::{TemplateInstantiation, TemplateParameters};
@ -24,13 +24,13 @@ use cexpr;
use clang::{self, Cursor};
use clang_sys;
use parse::ClangItemParser;
use proc_macro2::{Ident, Span};
use proc_macro2::{Term, Span};
use std::borrow::Cow;
use std::cell::Cell;
use std::collections::{HashMap, HashSet, hash_map};
use std::collections::btree_map::{self, BTreeMap};
use std::iter::IntoIterator;
use std::mem;
use std::collections::HashMap as StdHashMap;
use {HashMap, HashSet, Entry};
/// An identifier for some kind of IR item.
#[derive(Debug, Copy, Clone, Eq, PartialOrd, Ord, Hash)]
@ -301,8 +301,14 @@ enum TypeKey {
/// A context used during parsing and generation of structs.
#[derive(Debug)]
pub struct BindgenContext {
/// The map of all the items parsed so far, keyed off ItemId.
items: Vec<Option<Item>>,
/// The map of all the items parsed so far.
///
/// It's a BTreeMap because we want the keys to be sorted to have consistent
/// output.
items: BTreeMap<ItemId, Item>,
/// The next item id to use during this bindings regeneration.
next_item_id: ItemId,
/// Clang USR to type map. This is needed to be able to associate types with
/// item ids during parsing.
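As the restored comment says, a `BTreeMap` is used so that iteration follows sorted `ItemId` order and bindings are emitted deterministically; the backed-out code got the same property from a `Vec` indexed by id. A minimal illustration of the ordering guarantee:

use std::collections::BTreeMap;

fn main() {
    let mut items = BTreeMap::new();
    items.insert(3, "c");
    items.insert(1, "a");
    items.insert(2, "b");
    // Always prints 1, 2, 3, regardless of insertion order.
    for (id, name) in &items {
        println!("{}: {}", id, name);
    }
}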
@ -342,12 +348,10 @@ pub struct BindgenContext {
/// potentially break that assumption.
currently_parsed_types: Vec<PartialType>,
/// A map with all the already parsed macro names. This is done to avoid
/// A HashSet with all the already parsed macro names. This is done to avoid
/// hard errors while parsing duplicated macros, as well to allow macro
/// expression parsing.
///
/// This needs to be an std::HashMap because the cexpr API requires it.
parsed_macros: StdHashMap<Vec<u8>, cexpr::expr::EvalResult>,
parsed_macros: HashMap<Vec<u8>, cexpr::expr::EvalResult>,
/// The active replacements collected from replaces="xxx" annotations.
replacements: HashMap<Vec<String>, ItemId>,
@ -590,11 +594,12 @@ If you encounter an error missing from this list, please file an issue or a PR!"
let root_module = Self::build_root_module(ItemId(0));
let root_module_id = root_module.id().as_module_id_unchecked();
BindgenContext {
items: vec![Some(root_module)],
let mut me = BindgenContext {
items: Default::default(),
types: Default::default(),
type_params: Default::default(),
modules: Default::default(),
next_item_id: ItemId(1),
root_module: root_module_id,
current_module: root_module_id,
semantic_parents: Default::default(),
@ -623,7 +628,11 @@ If you encounter an error missing from this list, please file an issue or a PR!"
have_destructor: None,
has_type_param_in_array: None,
has_float: None,
}
};
me.add_item(root_module, None, None);
me
}
/// Creates a timer for the current bindgen phase. If time_phases is `true`,
@ -706,7 +715,7 @@ If you encounter an error missing from this list, please file an issue or a PR!"
self.need_bitfield_allocation.push(id);
}
let old_item = mem::replace(&mut self.items[id.0], Some(item));
let old_item = self.items.insert(id, item);
assert!(
old_item.is_none(),
"should not have already associated an item with the given id"
@ -734,7 +743,7 @@ If you encounter an error missing from this list, please file an issue or a PR!"
debug!(
"Invalid declaration {:?} found for type {:?}",
declaration,
self.resolve_item_fallible(id).unwrap().kind().expect_type()
self.items.get(&id).unwrap().kind().expect_type()
);
return;
}
@ -763,9 +772,9 @@ If you encounter an error missing from this list, please file an issue or a PR!"
/// details.
fn add_item_to_module(&mut self, item: &Item) {
assert!(item.id() != self.root_module);
assert!(self.resolve_item_fallible(item.id()).is_none());
assert!(!self.items.contains_key(&item.id()));
if let Some(ref mut parent) = self.items[item.parent_id().0] {
if let Some(parent) = self.items.get_mut(&item.parent_id()) {
if let Some(module) = parent.as_module_mut() {
debug!(
"add_item_to_module: adding {:?} as child of parent module {:?}",
@ -784,8 +793,8 @@ If you encounter an error missing from this list, please file an issue or a PR!"
self.current_module
);
self.items[(self.current_module.0).0]
.as_mut()
self.items
.get_mut(&self.current_module.into())
.expect("Should always have an item for self.current_module")
.as_module_mut()
.expect("self.current_module should always be a module")
@ -813,7 +822,7 @@ If you encounter an error missing from this list, please file an issue or a PR!"
self.add_item_to_module(&item);
let id = item.id();
let old_item = mem::replace(&mut self.items[id.0], Some(item));
let old_item = self.items.insert(id, item);
assert!(
old_item.is_none(),
"should not have already associated an item with the given id"
@ -913,7 +922,7 @@ If you encounter an error missing from this list, please file an issue or a PR!"
}
/// Returns a mangled name as a rust identifier.
pub fn rust_ident<S>(&self, name: S) -> Ident
pub fn rust_ident<S>(&self, name: S) -> Term
where
S: AsRef<str>
{
@ -921,22 +930,16 @@ If you encounter an error missing from this list, please file an issue or a PR!"
}
/// Returns a mangled name as a rust identifier.
pub fn rust_ident_raw<T>(&self, name: T) -> Ident
pub fn rust_ident_raw<T>(&self, name: T) -> Term
where
T: AsRef<str>
{
Ident::new(name.as_ref(), Span::call_site())
Term::new(name.as_ref(), Span::call_site())
}
/// Iterate over all items that have been defined.
pub fn items(&self) -> impl Iterator<Item = (ItemId, &Item)> {
self.items
.iter()
.enumerate()
.filter_map(|(index, item)| {
let item = item.as_ref()?;
Some((ItemId(index), item))
})
pub fn items<'a>(&'a self) -> btree_map::Iter<'a, ItemId, Item> {
self.items.iter()
}
/// Have we collected all unresolved type references yet?
@ -951,8 +954,7 @@ If you encounter an error missing from this list, please file an issue or a PR!"
debug_assert!(!self.collected_typerefs);
self.collected_typerefs = true;
let mut typerefs = vec![];
for (id, item) in self.items() {
for (id, ref mut item) in &mut self.items {
let kind = item.kind();
let ty = match kind.as_type() {
Some(ty) => ty,
@ -961,7 +963,7 @@ If you encounter an error missing from this list, please file an issue or a PR!"
match *ty.kind() {
TypeKind::UnresolvedTypeRef(ref ty, loc, parent_id) => {
typerefs.push((id, ty.clone(), loc, parent_id));
typerefs.push((*id, ty.clone(), loc, parent_id));
}
_ => {}
};
@ -982,7 +984,7 @@ If you encounter an error missing from this list, please file an issue or a PR!"
Item::new_opaque_type(self.next_item_id(), &ty, self)
});
let item = self.items[id.0].as_mut().unwrap();
let item = self.items.get_mut(&id).unwrap();
*item.kind_mut().as_type_mut().unwrap().kind_mut() =
TypeKind::ResolvedTypeRef(resolved);
resolved
@ -1013,11 +1015,11 @@ If you encounter an error missing from this list, please file an issue or a PR!"
where
F: (FnOnce(&BindgenContext, &mut Item) -> T)
{
let mut item = self.items[id.0].take().unwrap();
let mut item = self.items.remove(&id).unwrap();
let result = f(self, &mut item);
let existing = mem::replace(&mut self.items[id.0], Some(item));
let existing = self.items.insert(id, item);
assert!(existing.is_none());
result
@ -1046,13 +1048,15 @@ If you encounter an error missing from this list, please file an issue or a PR!"
fn deanonymize_fields(&mut self) {
let _t = self.timer("deanonymize_fields");
let comp_item_ids: Vec<ItemId> = self.items()
let comp_item_ids: Vec<ItemId> = self.items
.iter()
.filter_map(|(id, item)| {
if item.kind().as_type()?.is_comp() {
return Some(id);
}
None
})
.cloned()
.collect();
for id in comp_item_ids {
@ -1083,7 +1087,7 @@ If you encounter an error missing from this list, please file an issue or a PR!"
let mut replacements = vec![];
for (id, item) in self.items() {
for (id, item) in self.items.iter() {
if item.annotations().use_instead_of().is_some() {
continue;
}
@ -1103,14 +1107,14 @@ If you encounter an error missing from this list, please file an issue or a PR!"
_ => continue,
}
let path = item.path_for_whitelisting(self);
let path = item.canonical_path(self);
let replacement = self.replacements.get(&path[1..]);
if let Some(replacement) = replacement {
if *replacement != id {
if replacement != id {
// We set this just after parsing the annotation. It's
// very unlikely, but this can happen.
if self.resolve_item_fallible(*replacement).is_some() {
if self.items.get(replacement).is_some() {
replacements.push((id.expect_type_id(self), replacement.expect_type_id(self)));
}
}
@ -1119,9 +1123,9 @@ If you encounter an error missing from this list, please file an issue or a PR!"
for (id, replacement_id) in replacements {
debug!("Replacing {:?} with {:?}", id, replacement_id);
let new_parent = {
let item_id: ItemId = id.into();
let item = self.items[item_id.0].as_mut().unwrap();
let item = self.items.get_mut(&id.into()).unwrap();
*item.kind_mut().as_type_mut().unwrap().kind_mut() =
TypeKind::ResolvedTypeRef(replacement_id);
item.parent_id()
@ -1139,9 +1143,8 @@ If you encounter an error missing from this list, please file an issue or a PR!"
continue;
}
let replacement_item_id: ItemId = replacement_id.into();
self.items[replacement_item_id.0]
.as_mut()
self.items
.get_mut(&replacement_id.into())
.unwrap()
.set_parent_for_replacement(new_parent);
@ -1177,16 +1180,16 @@ If you encounter an error missing from this list, please file an issue or a PR!"
continue;
}
self.items[old_module.0]
.as_mut()
self.items
.get_mut(&old_module)
.unwrap()
.as_module_mut()
.unwrap()
.children_mut()
.remove(&replacement_id.into());
self.items[new_module.0]
.as_mut()
self.items
.get_mut(&new_module)
.unwrap()
.as_module_mut()
.unwrap()
@ -1254,7 +1257,7 @@ If you encounter an error missing from this list, please file an issue or a PR!"
assert!(self.in_codegen_phase());
assert!(self.current_module == self.root_module);
let roots = self.items().map(|(id, _)| id);
let roots = self.items().map(|(&id, _)| id);
traversal::AssertNoDanglingItemsTraversal::new(
self,
roots,
@ -1270,7 +1273,7 @@ If you encounter an error missing from this list, please file an issue or a PR!"
assert!(self.in_codegen_phase());
assert!(self.current_module == self.root_module);
for (id, _item) in self.items() {
for (&id, _item) in self.items() {
if id == self.root_module {
continue;
}
@ -1377,7 +1380,7 @@ If you encounter an error missing from this list, please file an issue or a PR!"
} else {
// If you aren't recursively whitelisting, then we can't really make
// any sense of template parameter usage, and you're on your own.
let mut used_params = HashMap::default();
let mut used_params = HashMap::new();
for &id in self.whitelisted_items() {
used_params.entry(id).or_insert(
id.self_template_params(self).into_iter().map(|p| p.into()).collect()
@ -1461,7 +1464,7 @@ If you encounter an error missing from this list, please file an issue or a PR!"
debug_assert!(item.kind().is_type());
self.add_item_to_module(&item);
let id = item.id();
let old_item = mem::replace(&mut self.items[id.0], Some(item));
let old_item = self.items.insert(id, item);
assert!(old_item.is_none(), "Inserted type twice?");
}
@ -1496,13 +1499,13 @@ If you encounter an error missing from this list, please file an issue or a PR!"
///
/// Panics if the id resolves to an item that is not a type.
pub fn safe_resolve_type(&self, type_id: TypeId) -> Option<&Type> {
self.resolve_item_fallible(type_id).map(|t| t.kind().expect_type())
self.items.get(&type_id.into()).map(|t| t.kind().expect_type())
}
/// Resolve the given `ItemId` into an `Item`, or `None` if no such item
/// exists.
pub fn resolve_item_fallible<Id: Into<ItemId>>(&self, id: Id) -> Option<&Item> {
self.items.get(id.into().0)?.as_ref()
self.items.get(&id.into())
}
/// Resolve the given `ItemId` into an `Item`.
@ -1510,7 +1513,7 @@ If you encounter an error missing from this list, please file an issue or a PR!"
/// Panics if the given id does not resolve to any item.
pub fn resolve_item<Id: Into<ItemId>>(&self, item_id: Id) -> &Item {
let item_id = item_id.into();
match self.resolve_item_fallible(item_id) {
match self.items.get(&item_id) {
Some(item) => item,
None => panic!("Not an item: {:?}", item_id),
}
@ -1776,8 +1779,8 @@ If you encounter an error missing from this list, please file an issue or a PR!"
sub_item
);
self.add_item_to_module(&sub_item);
debug_assert_eq!(sub_id, sub_item.id());
self.items[sub_id.0] = Some(sub_item);
debug_assert!(sub_id == sub_item.id());
self.items.insert(sub_id, sub_item);
args.push(sub_id.as_type_id_unchecked());
}
}
@ -1836,8 +1839,8 @@ If you encounter an error missing from this list, please file an issue or a PR!"
// Bypass all the validations in add_item explicitly.
debug!("instantiate_template: inserting item: {:?}", item);
self.add_item_to_module(&item);
debug_assert_eq!(with_id, item.id());
self.items[with_id.0] = Some(item);
debug_assert!(with_id == item.id());
self.items.insert(with_id, item);
Some(with_id.as_type_id_unchecked())
}
@ -1993,8 +1996,8 @@ If you encounter an error missing from this list, please file an issue or a PR!"
/// Returns the next item id to be used for an item.
pub fn next_item_id(&mut self) -> ItemId {
let ret = ItemId(self.items.len());
self.items.push(None);
let ret = self.next_item_id;
self.next_item_id = ItemId(self.next_item_id.0 + 1);
ret
}
@ -2076,7 +2079,7 @@ If you encounter an error missing from this list, please file an issue or a PR!"
}
/// Get the currently parsed macros.
pub fn parsed_macros(&self) -> &StdHashMap<Vec<u8>, cexpr::expr::EvalResult> {
pub fn parsed_macros(&self) -> &HashMap<Vec<u8>, cexpr::expr::EvalResult> {
debug_assert!(!self.in_codegen_phase());
&self.parsed_macros
}
@ -2102,7 +2105,7 @@ If you encounter an error missing from this list, please file an issue or a PR!"
/// and implies that the original type is hidden.
pub fn replace(&mut self, name: &[String], potential_ty: ItemId) {
match self.replacements.entry(name.into()) {
Entry::Vacant(entry) => {
hash_map::Entry::Vacant(entry) => {
debug!(
"Defining replacement for {:?} as {:?}",
name,
@ -2110,7 +2113,7 @@ If you encounter an error missing from this list, please file an issue or a PR!"
);
entry.insert(potential_ty);
}
Entry::Occupied(occupied) => {
hash_map::Entry::Occupied(occupied) => {
warn!(
"Replacement for {:?} already defined as {:?}; \
ignoring duplicate replacement definition as {:?}",
@ -2160,17 +2163,21 @@ If you encounter an error missing from this list, please file an issue or a PR!"
let mut module_name = None;
let spelling = cursor.spelling();
if !spelling.is_empty() {
if !spelling.is_empty()
{
module_name = Some(spelling)
}
let tokens = cursor.tokens();
let tokens = match cursor.tokens() {
Some(tokens) => tokens,
None => return (module_name, ModuleKind::Normal),
};
let mut iter = tokens.iter();
let mut kind = ModuleKind::Normal;
let mut found_namespace_keyword = false;
while let Some(token) = iter.next() {
match token.spelling() {
b"inline" => {
match &*token.spelling {
"inline" => {
assert!(!found_namespace_keyword);
assert!(kind != ModuleKind::Inline);
kind = ModuleKind::Inline;
@ -2185,16 +2192,16 @@ If you encounter an error missing from this list, please file an issue or a PR!"
//
// Fortunately enough, inline nested namespace specifiers aren't
// a thing, and are invalid C++ :)
b"namespace" | b"::" => {
"namespace" | "::" => {
found_namespace_keyword = true;
}
b"{" => {
"{" => {
assert!(found_namespace_keyword);
break;
}
name if found_namespace_keyword => {
if module_name.is_none() {
module_name = Some(String::from_utf8_lossy(name).into_owned());
module_name = Some(name.to_owned());
}
break;
}
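The restored token walk matches on owned `String` spellings (`&*token.spelling`) where the backed-out code matched byte slices. A condensed, hypothetical re-statement of the loop's logic, with a simplified `Token` type assumed in place of the clang wrapper:

struct Token { spelling: String }

fn module_name_and_inline(tokens: &[Token]) -> (Option<String>, bool) {
    let mut inline = false;
    let mut saw_namespace = false;
    let mut name = None;
    for token in tokens {
        match &*token.spelling {
            "inline" => inline = true,
            "namespace" | "::" => saw_namespace = true,
            "{" => break,
            // First spelling after `namespace` is the module's name.
            other if saw_namespace => {
                name = Some(other.to_owned());
                break;
            }
            _ => {}
        }
    }
    (name, inline)
}

fn main() {
    let tokens: Vec<Token> = ["inline", "namespace", "foo", "{"]
        .iter()
        .map(|s| Token { spelling: s.to_string() })
        .collect();
    assert_eq!(module_name_and_inline(&tokens), (Some("foo".into()), true));
}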
@ -2300,7 +2307,7 @@ If you encounter an error missing from this list, please file an issue or a PR!"
return true;
}
let name = item.path_for_whitelisting(self)[1..].join("::");
let name = item.canonical_path(self)[1..].join("::");
debug!("whitelisted_items: testing {:?}", name);
match *item.kind() {
ItemKind::Module(..) => true,
@ -2315,40 +2322,35 @@ If you encounter an error missing from this list, please file an issue or a PR!"
return true;
}
// Unnamed top-level enums are special and we
// whitelist them via the `whitelisted_vars` filter,
// since they're effectively top-level constants,
// and there's no way for them to be referenced
// consistently.
let parent = self.resolve_item(item.parent_id());
if !parent.is_module() {
return false;
if parent.is_module() {
let mut prefix_path = parent.canonical_path(self);
// Unnamed top-level enums are special and we
// whitelist them via the `whitelisted_vars` filter,
// since they're effectively top-level constants,
// and there's no way for them to be referenced
// consistently.
if let TypeKind::Enum(ref enum_) = *ty.kind() {
if ty.name().is_none() &&
enum_.variants().iter().any(|variant| {
prefix_path.push(variant.name().into());
let name = prefix_path[1..].join("::");
prefix_path.pop().unwrap();
self.options()
.whitelisted_vars
.matches(&name)
}) {
return true;
}
}
}
let enum_ = match *ty.kind() {
TypeKind::Enum(ref e) => e,
_ => return false,
};
if ty.name().is_some() {
return false;
}
let mut prefix_path =
parent.path_for_whitelisting(self);
enum_.variants().iter().any(|variant| {
prefix_path.push(variant.name().into());
let name = prefix_path[1..].join("::");
prefix_path.pop().unwrap();
self.options()
.whitelisted_vars
.matches(&name)
})
false
}
}
})
.map(|(id, _)| id)
.map(|(&id, _)| id)
.collect::<Vec<_>>();
// The reversal preserves the expected ordering of traversal,
@ -2387,23 +2389,11 @@ If you encounter an error missing from this list, please file an issue or a PR!"
self.whitelisted = Some(whitelisted);
self.codegen_items = Some(codegen_items);
for item in self.options().whitelisted_functions.unmatched_items() {
error!("unused option: --whitelist-function {}", item);
}
for item in self.options().whitelisted_vars.unmatched_items() {
error!("unused option: --whitelist-var {}", item);
}
for item in self.options().whitelisted_types.unmatched_items() {
error!("unused option: --whitelist-type {}", item);
}
}
/// Convenient method for getting the prefix to use for most traits in
/// codegen depending on the `use_core` option.
pub fn trait_prefix(&self) -> Ident {
pub fn trait_prefix(&self) -> Term {
if self.options().use_core {
self.rust_ident_raw("core")
} else {
@ -2580,19 +2570,19 @@ If you encounter an error missing from this list, please file an issue or a PR!"
/// Check if `--no-partialeq` flag is enabled for this item.
pub fn no_partialeq_by_name(&self, item: &Item) -> bool {
let name = item.path_for_whitelisting(self)[1..].join("::");
let name = item.canonical_path(self)[1..].join("::");
self.options().no_partialeq_types.matches(&name)
}
/// Check if `--no-copy` flag is enabled for this item.
pub fn no_copy_by_name(&self, item: &Item) -> bool {
let name = item.path_for_whitelisting(self)[1..].join("::");
let name = item.canonical_path(self)[1..].join("::");
self.options().no_copy_types.matches(&name)
}
/// Check if `--no-hash` flag is enabled for this item.
pub fn no_hash_by_name(&self, item: &Item) -> bool {
let name = item.path_for_whitelisting(self)[1..].join("::");
let name = item.canonical_path(self)[1..].join("::");
self.options().no_hash_types.matches(&name)
}
}


@ -32,7 +32,7 @@ where
let mut err: Option<io::Result<_>> = None;
for (id, item) in ctx.items() {
let is_whitelisted = ctx.whitelisted_items().contains(&id);
let is_whitelisted = ctx.whitelisted_items().contains(id);
writeln!(
&mut dot_file,


@ -12,8 +12,6 @@ use ir::derive::{CanTriviallyDeriveDebug, CanTriviallyDeriveHash,
CanTriviallyDerivePartialEqOrPartialOrd, CanDerive};
use parse::{ClangItemParser, ClangSubItemParser, ParseError, ParseResult};
use quote;
use quote::TokenStreamExt;
use proc_macro2;
use std::io;
const RUST_DERIVE_FUNPTR_LIMIT: usize = 12;
@ -194,7 +192,7 @@ impl Abi {
}
impl quote::ToTokens for Abi {
fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) {
fn to_tokens(&self, tokens: &mut quote::Tokens) {
tokens.append_all(match *self {
Abi::C => quote! { "C" },
Abi::Stdcall => quote! { "stdcall" },
@ -223,9 +221,6 @@ pub struct FunctionSig {
/// Whether this function is variadic.
is_variadic: bool,
/// Whether this function's return value must be used.
must_use: bool,
/// The ABI of this function.
abi: Abi,
}
@ -309,45 +304,18 @@ pub fn cursor_mangling(
Some(mangling)
}
fn args_from_ty_and_cursor(
ty: &clang::Type,
cursor: &clang::Cursor,
ctx: &mut BindgenContext,
) -> Vec<(Option<String>, TypeId)> {
match (cursor.args(), ty.args()) {
(Some(cursor_args), Some(ty_args)) => {
ty_args.iter().enumerate().map(|(i, ty)| {
let name = cursor_args.get(i)
.map(|c| c.spelling())
.and_then(|name| if name.is_empty() { None } else { Some(name) });
(name, Item::from_ty_or_ref(*ty, *cursor, None, ctx))
}).collect()
}
(Some(cursor_args), None) => {
cursor_args.iter().map(|cursor| {
let name = cursor.spelling();
let name = if name.is_empty() { None } else { Some(name) };
(name, Item::from_ty_or_ref(cursor.cur_type(), *cursor, None, ctx))
}).collect()
}
_ => panic!()
}
}
impl FunctionSig {
/// Construct a new function signature.
pub fn new(
return_type: TypeId,
argument_types: Vec<(Option<String>, TypeId)>,
arguments: Vec<(Option<String>, TypeId)>,
is_variadic: bool,
must_use: bool,
abi: Abi,
) -> Self {
FunctionSig {
return_type,
argument_types,
is_variadic,
must_use,
return_type: return_type,
argument_types: arguments,
is_variadic: is_variadic,
abi: abi,
}
}
@ -362,8 +330,7 @@ impl FunctionSig {
debug!("FunctionSig::from_ty {:?} {:?}", ty, cursor);
// Skip function templates
let kind = cursor.kind();
if kind == CXCursor_FunctionTemplate {
if cursor.kind() == CXCursor_FunctionTemplate {
return Err(ParseError::Continue);
}
@ -373,29 +340,34 @@ impl FunctionSig {
return Err(ParseError::Continue);
}
// Constructors of non-type template parameter classes for some reason
// include the template parameter in their name. Just skip them, since
// we don't handle well non-type template parameters anyway.
if (kind == CXCursor_Constructor || kind == CXCursor_Destructor) &&
spelling.contains('<')
{
return Err(ParseError::Continue);
}
let cursor = if cursor.is_valid() {
*cursor
} else {
ty.declaration()
};
let mut args = match kind {
let mut args: Vec<_> = match cursor.kind() {
CXCursor_FunctionDecl |
CXCursor_Constructor |
CXCursor_CXXMethod |
CXCursor_ObjCInstanceMethodDecl |
CXCursor_ObjCClassMethodDecl => {
args_from_ty_and_cursor(&ty, &cursor, ctx)
},
// For CXCursor_FunctionDecl, cursor.args() is the reliable way
// to get parameter names and types.
cursor
.args()
.unwrap()
.iter()
.map(|arg| {
let arg_ty = arg.cur_type();
let name = arg.spelling();
let name =
if name.is_empty() { None } else { Some(name) };
let ty = Item::from_ty_or_ref(arg_ty, *arg, None, ctx);
(name, ty)
})
.collect()
}
_ => {
// For non-CXCursor_FunctionDecl, visiting the cursor's children
// is the only reliable way to get parameter names.
@ -415,12 +387,9 @@ impl FunctionSig {
}
};
let must_use =
ctx.options().enable_function_attribute_detection &&
cursor.has_simple_attr("warn_unused_result");
let is_method = kind == CXCursor_CXXMethod;
let is_constructor = kind == CXCursor_Constructor;
let is_destructor = kind == CXCursor_Destructor;
let is_method = cursor.kind() == CXCursor_CXXMethod;
let is_constructor = cursor.kind() == CXCursor_Constructor;
let is_destructor = cursor.kind() == CXCursor_Destructor;
if (is_constructor || is_destructor || is_method) &&
cursor.lexical_parent() != cursor.semantic_parent()
{
@ -463,8 +432,8 @@ impl FunctionSig {
}
}
let ty_ret_type = if kind == CXCursor_ObjCInstanceMethodDecl ||
kind == CXCursor_ObjCClassMethodDecl
let ty_ret_type = if cursor.kind() == CXCursor_ObjCInstanceMethodDecl ||
cursor.kind() == CXCursor_ObjCClassMethodDecl
{
ty.ret_type().or_else(|| cursor.ret_type()).ok_or(
ParseError::Continue,
@ -489,7 +458,7 @@ impl FunctionSig {
warn!("Unknown calling convention: {:?}", call_conv);
}
Ok(Self::new(ret.into(), args, ty.is_variadic(), must_use, abi))
Ok(Self::new(ret.into(), args, ty.is_variadic(), abi))
}
/// Get this function signature's return type.
@ -515,11 +484,6 @@ impl FunctionSig {
self.is_variadic && !self.argument_types.is_empty()
}
/// Must this function's return value be used?
pub fn must_use(&self) -> bool {
self.must_use
}
/// Are function pointers with this signature able to derive Rust traits?
/// Rust only supports deriving traits for function pointers with a limited
/// number of parameters and a couple ABIs.
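The limit behind this comment (`RUST_DERIVE_FUNPTR_LIMIT`, defined as 12 earlier in this file) mirrors the standard library, which at this point only provides impls such as `Debug` and `Clone` for function pointers of up to 12 arguments. A quick illustration:

fn main() {
    // Two parameters, well under the limit, so the fn pointer is Debug.
    let add: fn(u8, u8) -> u8 = |a, b| a + b;
    println!("{:?} -> {}", add, add(2, 3));
}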


@ -635,7 +635,7 @@ impl Item {
return true;
}
let path = self.path_for_whitelisting(ctx);
let path = self.canonical_path(ctx);
let name = path[1..].join("::");
ctx.options().blacklisted_items.matches(&name) ||
match self.kind {
@ -875,13 +875,10 @@ impl Item {
let name = names.join("_");
let name = if opt.user_mangled == UserMangled::Yes {
ctx.parse_callbacks()
.and_then(|callbacks| callbacks.item_name(&name))
.unwrap_or(name)
} else {
name
};
let name = ctx
.parse_callbacks()
.and_then(|callbacks| callbacks.item_name(&name))
.unwrap_or(name);
ctx.rust_mangle(&name).into_owned()
}
@ -975,44 +972,6 @@ impl Item {
}
}
}
/// Returns the path we should use for whitelisting / blacklisting, which
/// doesn't include user-mangling.
pub fn path_for_whitelisting(&self, ctx: &BindgenContext) -> Vec<String> {
self.compute_path(ctx, UserMangled::No)
}
fn compute_path(&self, ctx: &BindgenContext, mangled: UserMangled) -> Vec<String> {
if let Some(path) = self.annotations().use_instead_of() {
let mut ret =
vec![ctx.resolve_item(ctx.root_module()).name(ctx).get()];
ret.extend_from_slice(path);
return ret;
}
let target = ctx.resolve_item(self.name_target(ctx));
let mut path: Vec<_> = target
.ancestors(ctx)
.chain(iter::once(ctx.root_module().into()))
.map(|id| ctx.resolve_item(id))
.filter(|item| {
item.id() == target.id() ||
item.as_module().map_or(false, |module| {
!module.is_inline() ||
ctx.options().conservative_inline_namespaces
})
})
.map(|item| {
ctx.resolve_item(item.name_target(ctx))
.name(ctx)
.within_namespaces()
.user_mangled(mangled)
.get()
})
.collect();
path.reverse();
path
}
}
impl<T> IsOpaque for T
@ -1040,7 +999,7 @@ impl IsOpaque for Item {
);
self.annotations.opaque() ||
self.as_type().map_or(false, |ty| ty.is_opaque(ctx, self)) ||
ctx.opaque_by_name(&self.path_for_whitelisting(ctx))
ctx.opaque_by_name(&self.canonical_path(ctx))
}
}
@ -1449,7 +1408,6 @@ impl ClangItemParser for Item {
let is_const = ty.is_const();
let kind = TypeKind::UnresolvedTypeRef(ty, location, parent_id);
let current_module = ctx.current_module();
ctx.add_item(
Item::new(
potential_id,
@ -1863,19 +1821,35 @@ impl ItemCanonicalPath for Item {
}
fn canonical_path(&self, ctx: &BindgenContext) -> Vec<String> {
self.compute_path(ctx, UserMangled::Yes)
}
}
if let Some(path) = self.annotations().use_instead_of() {
let mut ret =
vec![ctx.resolve_item(ctx.root_module()).name(ctx).get()];
ret.extend_from_slice(path);
return ret;
}
/// Whether to use the user-mangled name (mangled by the `item_name` callback or
/// not.
///
/// Most of the callers probably want just yes, but the ones dealing with
/// whitelisting and blacklisting don't.
#[derive(Copy, Clone, Debug, PartialEq)]
enum UserMangled {
No,
Yes,
let target = ctx.resolve_item(self.name_target(ctx));
let mut path: Vec<_> = target
.ancestors(ctx)
.chain(iter::once(ctx.root_module().into()))
.map(|id| ctx.resolve_item(id))
.filter(|item| {
item.id() == target.id() ||
item.as_module().map_or(false, |module| {
!module.is_inline() ||
ctx.options().conservative_inline_namespaces
})
})
.map(|item| {
ctx.resolve_item(item.name_target(ctx))
.name(ctx)
.within_namespaces()
.get()
})
.collect();
path.reverse();
path
}
}
/// Builder struct for naming variations, which hold inside different
@ -1885,7 +1859,6 @@ pub struct NameOptions<'a> {
item: &'a Item,
ctx: &'a BindgenContext,
within_namespaces: bool,
user_mangled: UserMangled,
}
impl<'a> NameOptions<'a> {
@ -1895,7 +1868,6 @@ impl<'a> NameOptions<'a> {
item: item,
ctx: ctx,
within_namespaces: false,
user_mangled: UserMangled::Yes,
}
}
@ -1906,11 +1878,6 @@ impl<'a> NameOptions<'a> {
self
}
fn user_mangled(&mut self, user_mangled: UserMangled) -> &mut Self {
self.user_mangled = user_mangled;
self
}
/// Construct a name `String`
pub fn get(&self) -> String {
self.item.real_canonical_name(self.ctx, self)


@ -12,7 +12,8 @@ use clang_sys::CXCursor_ObjCClassRef;
use clang_sys::CXCursor_ObjCInstanceMethodDecl;
use clang_sys::CXCursor_ObjCProtocolDecl;
use clang_sys::CXCursor_ObjCProtocolRef;
use proc_macro2::{TokenStream, Ident, Span};
use quote;
use proc_macro2::{Term, Span};
/// Objective C interface as used in TypeKind
///
@ -131,9 +132,10 @@ impl ObjCInterface {
if protocol.is_protocol
{
debug!("Checking protocol {}, ty.name {:?}", protocol.name, ty.name());
if Some(needle.as_ref()) == ty.name() {
if Some(needle.as_ref()) == ty.name()
{
debug!("Found conforming protocol {:?}", item);
interface.conforms_to.push(id);
interface.conforms_to.push(*id);
break;
}
}
@ -211,11 +213,11 @@ impl ObjCMethod {
}
/// Formats the method call
pub fn format_method_call(&self, args: &[TokenStream]) -> TokenStream {
pub fn format_method_call(&self, args: &[quote::Tokens]) -> quote::Tokens {
let split_name: Vec<_> = self.name
.split(':')
.filter(|p| !p.is_empty())
.map(|name| Ident::new(name, Span::call_site()))
.map(|name| Term::new(name, Span::call_site()))
.collect();
// No arguments
@ -241,7 +243,7 @@ impl ObjCMethod {
let arg = arg.to_string();
let name_and_sig: Vec<&str> = arg.split(' ').collect();
let name = name_and_sig[0];
args_without_types.push(Ident::new(name, Span::call_site()))
args_without_types.push(Term::new(name, Span::call_site()))
};
let args = split_name


@ -199,9 +199,6 @@ impl ClangSubItemParser for Var {
true,
ctx,
);
if let Some(callbacks) = ctx.parse_callbacks() {
callbacks.str_macro(&name, &val);
}
(TypeKind::Pointer(char_ty), VarType::String(val))
}
EvalResult::Int(Wrapping(value)) => {
@ -309,7 +306,7 @@ fn parse_macro(
) -> Option<(Vec<u8>, cexpr::expr::EvalResult)> {
use cexpr::expr;
let mut cexpr_tokens = cursor.cexpr_tokens();
let mut cexpr_tokens = cursor.cexpr_tokens()?;
let parser = expr::IdentifierParser::new(ctx.parsed_macros());
@ -338,7 +335,7 @@ fn parse_int_literal_tokens(cursor: &clang::Cursor) -> Option<i64> {
use cexpr::expr;
use cexpr::expr::EvalResult;
let cexpr_tokens = cursor.cexpr_tokens();
let cexpr_tokens = cursor.cexpr_tokens()?;
// TODO(emilio): We can try to parse other kinds of literals.
match expr::expr(&cexpr_tokens) {


@ -5,7 +5,7 @@
//!
//! See the [`Builder`](./struct.Builder.html) struct for usage.
//!
//! See the [Users Guide](https://rust-lang.github.io/rust-bindgen/) for
//! See the [Users Guide](https://rust-lang-nursery.github.io/rust-bindgen/) for
//! additional documentation.
#![deny(missing_docs)]
#![deny(warnings)]
@ -23,7 +23,6 @@ extern crate cexpr;
#[allow(unused_extern_crates)]
extern crate cfg_if;
extern crate clang_sys;
extern crate hashbrown;
#[macro_use]
extern crate lazy_static;
extern crate peeking_take_while;
@ -89,6 +88,7 @@ use regex_set::RegexSet;
pub use codegen::EnumVariation;
use std::borrow::Cow;
use std::collections::HashMap;
use std::fs::{File, OpenOptions};
use std::io::{self, Write};
use std::iter;
@ -96,11 +96,6 @@ use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use std::sync::Arc;
// Some convenient typedefs for a fast hash map and hash set.
type HashMap<K, V> = ::hashbrown::HashMap<K, V>;
type HashSet<K> = ::hashbrown::HashSet<K>;
pub(crate) use ::hashbrown::hash_map::Entry;
fn args_are_cpp(clang_args: &[String]) -> bool {
return clang_args
.windows(2)
@ -238,7 +233,11 @@ impl Builder {
.iter()
.map(|item| {
output_vector.push("--bitfield-enum".into());
output_vector.push(item.to_owned());
output_vector.push(
item.trim_left_matches("^")
.trim_right_matches("$")
.into(),
);
})
.count();
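
This trim pattern repeats for each regex-based option below (the surrounding `.map(...).count()` merely forces the iterator to run for its side effects). Since the reverted RegexSet stores each pattern pre-anchored as `^pattern$` (see the regex_set.rs diff later), flag reconstruction peels the anchors back off before emitting the command line. A sketch of the stripping, using the non-deprecated `trim_start_matches`/`trim_end_matches` names:

```rust
// Strip the "^...$" anchors that the stored patterns carry.
fn strip_anchors(item: &str) -> String {
    item.trim_start_matches('^').trim_end_matches('$').to_owned()
}

fn main() {
    assert_eq!(strip_anchors("^foo.*$"), "foo.*");
    assert_eq!(strip_anchors("bar"), "bar"); // unanchored input passes through
}
```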
@ -248,7 +247,11 @@ impl Builder {
.iter()
.map(|item| {
output_vector.push("--rustified-enum".into());
output_vector.push(item.to_owned());
output_vector.push(
item.trim_left_matches("^")
.trim_right_matches("$")
.into(),
);
})
.count();
@ -258,7 +261,11 @@ impl Builder {
.iter()
.map(|item| {
output_vector.push("--constified-enum-module".into());
output_vector.push(item.to_owned());
output_vector.push(
item.trim_left_matches("^")
.trim_right_matches("$")
.into(),
);
})
.count();
@ -268,7 +275,11 @@ impl Builder {
.iter()
.map(|item| {
output_vector.push("--constified-enum".into());
output_vector.push(item.to_owned());
output_vector.push(
item.trim_left_matches("^")
.trim_right_matches("$")
.into(),
);
})
.count();
@ -278,7 +289,11 @@ impl Builder {
.iter()
.map(|item| {
output_vector.push("--blacklist-type".into());
output_vector.push(item.to_owned());
output_vector.push(
item.trim_left_matches("^")
.trim_right_matches("$")
.into(),
);
})
.count();
@ -288,7 +303,11 @@ impl Builder {
.iter()
.map(|item| {
output_vector.push("--blacklist-function".into());
output_vector.push(item.to_owned());
output_vector.push(
item.trim_left_matches("^")
.trim_right_matches("$")
.into(),
);
})
.count();
@ -298,7 +317,11 @@ impl Builder {
.iter()
.map(|item| {
output_vector.push("--blacklist-item".into());
output_vector.push(item.to_owned());
output_vector.push(
item.trim_left_matches("^")
.trim_right_matches("$")
.into(),
);
})
.count();
@ -395,9 +418,6 @@ impl Builder {
if self.options.enable_cxx_namespaces {
output_vector.push("--enable-cxx-namespaces".into());
}
if self.options.enable_function_attribute_detection {
output_vector.push("--enable-function-attribute-detection".into());
}
if self.options.disable_name_namespacing {
output_vector.push("--disable-name-namespacing".into());
}
@ -449,7 +469,11 @@ impl Builder {
.iter()
.map(|item| {
output_vector.push("--opaque-type".into());
output_vector.push(item.to_owned());
output_vector.push(
item.trim_left_matches("^")
.trim_right_matches("$")
.into(),
);
})
.count();
@ -458,7 +482,11 @@ impl Builder {
.iter()
.map(|item| {
output_vector.push("--raw-line".into());
output_vector.push(item.to_owned());
output_vector.push(
item.trim_left_matches("^")
.trim_right_matches("$")
.into(),
);
})
.count();
@ -476,7 +504,11 @@ impl Builder {
.iter()
.map(|item| {
output_vector.push("--whitelist-function".into());
output_vector.push(item.to_owned());
output_vector.push(
item.trim_left_matches("^")
.trim_right_matches("$")
.into(),
);
})
.count();
@ -486,7 +518,11 @@ impl Builder {
.iter()
.map(|item| {
output_vector.push("--whitelist-type".into());
output_vector.push(item.to_owned());
output_vector.push(
item.trim_left_matches("^")
.trim_right_matches("$")
.into(),
);
})
.count();
@ -496,7 +532,11 @@ impl Builder {
.iter()
.map(|item| {
output_vector.push("--whitelist-var".into());
output_vector.push(item.to_owned());
output_vector.push(
item.trim_left_matches("^")
.trim_right_matches("$")
.into(),
);
})
.count();
@ -514,10 +554,6 @@ impl Builder {
);
}
if !self.options.record_matches {
output_vector.push("--no-record-matches".into());
}
if !self.options.rustfmt_bindings {
output_vector.push("--no-rustfmt-bindings".into());
}
@ -537,7 +573,11 @@ impl Builder {
.iter()
.map(|item| {
output_vector.push("--no-partialeq".into());
output_vector.push(item.to_owned());
output_vector.push(
item.trim_left_matches("^")
.trim_right_matches("$")
.into(),
);
})
.count();
@ -547,7 +587,11 @@ impl Builder {
.iter()
.map(|item| {
output_vector.push("--no-copy".into());
output_vector.push(item.to_owned());
output_vector.push(
item.trim_left_matches("^")
.trim_right_matches("$")
.into(),
);
})
.count();
@ -557,7 +601,11 @@ impl Builder {
.iter()
.map(|item| {
output_vector.push("--no-hash".into());
output_vector.push(item.to_owned());
output_vector.push(
item.trim_left_matches("^")
.trim_right_matches("$")
.into(),
);
})
.count();
@ -1009,18 +1057,6 @@ impl Builder {
self
}
/// Enable detecting must_use attributes on C functions.
///
/// This is quite slow in some cases (see #1465), so it's disabled by
/// default.
///
/// Note that for this to do something meaningful for now at least, the rust
/// target version has to have support for `#[must_use]`.
pub fn enable_function_attribute_detection(mut self) -> Self {
self.options.enable_function_attribute_detection = true;
self
}
/// Disable name auto-namespacing.
///
/// By default, bindgen mangles names like `foo::bar::Baz` to look like
@ -1146,12 +1182,6 @@ impl Builder {
self
}
/// Set whether we should record matched items in our regex sets.
pub fn record_matches(mut self, doit: bool) -> Self {
self.options.record_matches = doit;
self
}
/// Set the absolute path to the rustfmt configuration file, if None, the standard rustfmt
/// options are used.
pub fn rustfmt_configuration_file(mut self, path: Option<PathBuf>) -> Self {
@ -1361,10 +1391,6 @@ struct BindgenOptions {
/// generated bindings.
enable_cxx_namespaces: bool,
/// True if we should try to find unexposed attributes in functions, in
/// order to be able to generate #[must_use] attributes in Rust.
enable_function_attribute_detection: bool,
/// True if we should avoid mangling names with namespaces.
disable_name_namespacing: bool,
@ -1501,12 +1527,6 @@ struct BindgenOptions {
/// Features to enable, derived from `rust_target`
rust_features: RustFeatures,
/// Whether we should record which items in the regex sets ever matched.
///
/// This may be a bit slower, but will enable reporting of unused whitelist
/// items via the `error!` log.
record_matches: bool,
/// Whether rustfmt should format the generated bindings.
rustfmt_bindings: bool,
@ -1532,26 +1552,20 @@ impl ::std::panic::UnwindSafe for BindgenOptions {}
impl BindgenOptions {
fn build(&mut self) {
let mut regex_sets = [
&mut self.whitelisted_vars,
&mut self.whitelisted_types,
&mut self.whitelisted_functions,
&mut self.blacklisted_types,
&mut self.blacklisted_functions,
&mut self.blacklisted_items,
&mut self.opaque_types,
&mut self.bitfield_enums,
&mut self.constified_enums,
&mut self.constified_enum_modules,
&mut self.rustified_enums,
&mut self.no_partialeq_types,
&mut self.no_copy_types,
&mut self.no_hash_types,
];
let record_matches = self.record_matches;
for regex_set in &mut regex_sets {
regex_set.build(record_matches);
}
self.whitelisted_vars.build();
self.whitelisted_types.build();
self.whitelisted_functions.build();
self.blacklisted_types.build();
self.blacklisted_functions.build();
self.blacklisted_items.build();
self.opaque_types.build();
self.bitfield_enums.build();
self.constified_enums.build();
self.constified_enum_modules.build();
self.rustified_enums.build();
self.no_partialeq_types.build();
self.no_copy_types.build();
self.no_hash_types.build();
}
/// Update rust target version
@ -1604,7 +1618,6 @@ impl Default for BindgenOptions {
derive_partialeq: false,
derive_eq: false,
enable_cxx_namespaces: false,
enable_function_attribute_detection: false,
disable_name_namespacing: false,
use_core: false,
ctypes_prefix: None,
@ -1628,7 +1641,6 @@ impl Default for BindgenOptions {
enable_mangling: true,
prepend_enum_name: true,
time_phases: false,
record_matches: true,
rustfmt_bindings: true,
rustfmt_configuration_file: None,
no_partialeq_types: Default::default(),
@ -1663,7 +1675,7 @@ fn ensure_libclang_is_loaded() {
#[derive(Debug)]
pub struct Bindings {
options: BindgenOptions,
module: proc_macro2::TokenStream,
module: quote::Tokens,
}
impl Bindings {
@ -1673,8 +1685,6 @@ impl Bindings {
) -> Result<Bindings, ()> {
ensure_libclang_is_loaded();
debug!("Generating bindings, libclang at {}", clang_sys::get_library().unwrap().path().display());
options.build();
// Filter out include paths and similar stuff, so we don't incorrectly
@ -1704,15 +1714,11 @@ impl Bindings {
}).cloned().collect::<Vec<_>>()
};
debug!("Trying to find clang with flags: {:?}", clang_args_for_clang_sys);
// TODO: Make this path fixup configurable?
if let Some(clang) = clang_sys::support::Clang::find(
None,
&clang_args_for_clang_sys,
) {
debug!("Found clang: {:?}", clang);
// If --target is specified, assume caller knows what they're doing
// and don't mess with include paths for them
let has_target_arg = options
@ -1772,8 +1778,6 @@ impl Bindings {
options.clang_args.push(f.name.to_str().unwrap().to_owned())
}
debug!("Fixed-up options: {:?}", options);
let time_phases = options.time_phases;
let mut context = BindgenContext::new(options);
@ -1835,7 +1839,7 @@ impl Bindings {
writer.write(rustfmt_bindings.as_bytes())?;
},
Err(err) => {
eprintln!("Failed to run rustfmt: {} (non-fatal, continuing)", err);
eprintln!("{:?}", err);
writer.write(bindings.as_bytes())?;
},
}
@ -1859,7 +1863,7 @@ impl Bindings {
None => {
let path = which::which("rustfmt")
.map_err(|e| {
io::Error::new(io::ErrorKind::Other, format!("{}", e))
io::Error::new(io::ErrorKind::Other, e.to_owned())
})?;
Cow::Owned(path)

View File

@ -277,10 +277,6 @@ where
Useful when debugging bindgen, using C-Reduce, or when \
filing issues. The resulting file will be named \
something like `__bindgen.i` or `__bindgen.ii`."),
Arg::with_name("no-record-matches")
.long("no-record-matches")
.help("Do not record matching items in the regex sets. \
This disables reporting of unused items."),
Arg::with_name("no-rustfmt-bindings")
.long("no-rustfmt-bindings")
.help("Do not format the generated bindings with rustfmt."),
@ -319,10 +315,6 @@ where
.takes_value(true)
.multiple(true)
.number_of_values(1),
Arg::with_name("enable-function-attribute-detection")
.long("enable-function-attribute-detection")
.help("Enables detecting unexposed attributes in functions (slow).
Used to generate #[must_use] annotations."),
]) // .args()
.get_matches_from(args);
@ -492,10 +484,6 @@ where
builder = builder.enable_cxx_namespaces();
}
if matches.is_present("enable-function-attribute-detection") {
builder = builder.enable_function_attribute_detection();
}
if matches.is_present("disable-name-namespacing") {
builder = builder.disable_name_namespacing();
}
@ -595,10 +583,6 @@ where
builder.dump_preprocessed_input()?;
}
if matches.is_present("no-record-matches") {
builder = builder.record_matches(false);
}
let no_rustfmt_bindings = matches.is_present("no-rustfmt-bindings");
if no_rustfmt_bindings {
builder = builder.rustfmt_bindings(false);

View File

@ -1,18 +1,12 @@
//! A type that represents the union of a set of regular expressions.
use regex::RegexSet as RxSet;
use std::cell::Cell;
/// A dynamic set of regular expressions.
#[derive(Debug, Default)]
#[derive(Debug)]
pub struct RegexSet {
items: Vec<String>,
/// Whether any of the items in the set was ever matched. The length of this
/// vector is exactly the length of `items`.
matched: Vec<Cell<bool>>,
set: Option<RxSet>,
/// Whether we should record matching items in the `matched` vector or not.
record_matches: bool,
}
impl RegexSet {
@ -26,8 +20,7 @@ impl RegexSet {
where
S: AsRef<str>,
{
self.items.push(string.as_ref().to_owned());
self.matched.push(Cell::new(false));
self.items.push(format!("^{}$", string.as_ref()));
self.set = None;
}
@ -36,26 +29,12 @@ impl RegexSet {
&self.items[..]
}
/// Returns an iterator over regexes in the set which didn't match any
/// strings yet.
pub fn unmatched_items(&self) -> impl Iterator<Item = &String> {
self.items.iter().enumerate().filter_map(move |(i, item)| {
if !self.record_matches || self.matched[i].get() {
return None;
}
Some(item)
})
}
/// Construct a RegexSet from the set of entries we've accumulated.
///
/// Must be called before calling `matches()`, or it will always return
/// false.
pub fn build(&mut self, record_matches: bool) {
let items = self.items.iter().map(|item| format!("^{}$", item));
self.record_matches = record_matches;
self.set = match RxSet::new(items) {
pub fn build(&mut self) {
self.set = match RxSet::new(&self.items) {
Ok(x) => Some(x),
Err(e) => {
error!("Invalid regex in {:?}: {:?}", self.items, e);
@ -70,23 +49,17 @@ impl RegexSet {
S: AsRef<str>,
{
let s = string.as_ref();
let set = match self.set {
Some(ref set) => set,
None => return false,
};
if !self.record_matches {
return set.is_match(s);
}
let matches = set.matches(s);
if !matches.matched_any() {
return false;
}
for i in matches.iter() {
self.matched[i].set(true);
}
true
self.set.as_ref().map(|set| set.is_match(s)).unwrap_or(
false,
)
}
}
impl Default for RegexSet {
fn default() -> Self {
RegexSet {
items: vec![],
set: None,
}
}
}
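
A minimal sketch of the reverted RegexSet behavior, assuming the `regex` crate as a dependency: patterns are anchored at insert time rather than at build time, and `matches()` is a plain boolean with no per-item match recording.

```rust
use regex::RegexSet as RxSet;

struct SimpleSet {
    items: Vec<String>,
    set: Option<RxSet>,
}

impl SimpleSet {
    fn insert(&mut self, s: &str) {
        self.items.push(format!("^{}$", s)); // anchor at insert time
        self.set = None; // a previously built set is now stale
    }

    fn build(&mut self) {
        self.set = RxSet::new(&self.items).ok();
    }

    fn matches(&self, s: &str) -> bool {
        self.set.as_ref().map(|set| set.is_match(s)).unwrap_or(false)
    }
}

fn main() {
    let mut set = SimpleSet { items: vec![], set: None };
    set.insert("foo.*");
    set.build();
    assert!(set.matches("foobar"));
    assert!(!set.matches("xfoobar")); // the "^" anchor rejects a prefixed name
}
```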

View File

@ -1 +1 @@
{"files":{"CHANGELOG.md":"e6c5ac39705b3eda6359b294d138e316156b6b8e0212077557930c47073d47e2","Cargo.toml":"ba1d946f62796dd8723dcefccfef073d3cb50fc2313c89e3de29d936480aab02","LICENSE.txt":"3ddf9be5c28fe27dad143a5dc76eea25222ad1dd68934a047064e56ed2fa40c5","README.md":"21ff1488c29d612cee0d10fc48dab7efbd0a8a24158ee709b88e312e939008a0","appveyor.yml":"1a5d6953fb6e373dc760d50659628f04d48f68bd3f3f8e434800b31e74ef1497","build.rs":"06ef3732108d09118f50294fbca2b90857085fc3364777fca77eb022866399ac","build/common.rs":"4532706a124c0ff40332d5a72cd1e465bee72cd118f8071fbb6e70dde00c68f8","build/dynamic.rs":"0a498915b6a171a8d42072989a6155690a1286ff8783d58bb28346f735e2aea5","build/static.rs":"ff8de756b33efff75770a5552ff4573fe1bbb68ec8bd40d57854a05adb7e9d5c","ci/before_install.sh":"cb6de6f230066004f568d0b5e9d940b3793ff8ee7eb5d35a4f9ec777ee369725","ci/install.bat":"342ce7596152b00c8217364e9475534f6c50a4e597a31fee03205eaf2a1603f0","ci/script.sh":"52db533df970f1b44c0b2663f3bfac4476f2150e94fc392b2bab4145325f418b","ci/test_script.bat":"901609adc59dab2730e16dd374d0351d6406e7559fe4d86ddd9a857ad9c84d2a","clippy.toml":"fcf54943ba571514b244cc098ce08671b4117167733e8107e799d533a12a2195","src/lib.rs":"d9952a832909e5490fca27b08d349a0d36c4004cd04fdb024ddf246cb81503d1","src/link.rs":"c4bc39dad6476d36737aece37979c50e20f6c4724cab5d97f5d1f7374a922967","src/support.rs":"3eae21722287a462921825929c48802642d28ca194d1bc43aee47739350ecd17","tests/header.h":"1b15a686d1c06561960045a26c25a34d840f26c8246f2f5e630f993b69c7492c","tests/lib.rs":"d5d39e3ffbdc7303c2f1b9ae09f60ebf546b7c2c3599ec5d0c99d23332456908"},"package":"6ef0c1bcf2e99c649104bd7a7012d8f8802684400e03db0ec0af48583c6fa0e4"}
{"files":{"CHANGELOG.md":"ed5eb852120b184fda36d06a920080e5bd377b018548f2d37eb575dee32a20b1","Cargo.toml":"ae115d5ba8ac3e9074f6f8e64a89ec88579b388db75bbbd655d010610661b19a","LICENSE.txt":"3ddf9be5c28fe27dad143a5dc76eea25222ad1dd68934a047064e56ed2fa40c5","README.md":"21ff1488c29d612cee0d10fc48dab7efbd0a8a24158ee709b88e312e939008a0","appveyor.yml":"1a5d6953fb6e373dc760d50659628f04d48f68bd3f3f8e434800b31e74ef1497","build.rs":"06ef3732108d09118f50294fbca2b90857085fc3364777fca77eb022866399ac","build/common.rs":"4532706a124c0ff40332d5a72cd1e465bee72cd118f8071fbb6e70dde00c68f8","build/dynamic.rs":"544e5cb9fe364165a641192fc3f1ab916cb1d632a92536b8ce77a941fbb3082b","build/static.rs":"ff8de756b33efff75770a5552ff4573fe1bbb68ec8bd40d57854a05adb7e9d5c","ci/before_install.sh":"efb85403bedbfc6db19a8c41c61be98eac9f6e09ac6a33c0bdaf2828b5ea73ba","ci/install.bat":"bb02414d81dd23c8597f82f390769b084c2d32129ed197475a769a25ee97249a","ci/script.sh":"52db533df970f1b44c0b2663f3bfac4476f2150e94fc392b2bab4145325f418b","ci/test_script.bat":"901609adc59dab2730e16dd374d0351d6406e7559fe4d86ddd9a857ad9c84d2a","clippy.toml":"fcf54943ba571514b244cc098ce08671b4117167733e8107e799d533a12a2195","src/lib.rs":"d9952a832909e5490fca27b08d349a0d36c4004cd04fdb024ddf246cb81503d1","src/link.rs":"625ac2a7c5d0b85122e4a68542f235f09a069e7743f67f94c433f0f4313c09db","src/support.rs":"3eae21722287a462921825929c48802642d28ca194d1bc43aee47739350ecd17","tests/header.h":"1b15a686d1c06561960045a26c25a34d840f26c8246f2f5e630f993b69c7492c","tests/lib.rs":"d5d39e3ffbdc7303c2f1b9ae09f60ebf546b7c2c3599ec5d0c99d23332456908"},"package":"481e42017c1416b1c0856ece45658ecbb7c93d8a93455f7e5fa77f3b35455557"}

View File

@ -1,18 +1,3 @@
## [0.26.4] - 2018-12-29
### Changed
- Added shared library path to `SharedLibrary` struct
## [0.26.3] - 2018-11-14
### Changed
- Disable default features of `libc` dependency
## [0.26.2] - 2018-11-03
### Fixed
- Fixed dynamic linking on macOS
## [0.26.1] - 2018-10-10
### Fixed

View File

@ -12,7 +12,7 @@
[package]
name = "clang-sys"
version = "0.26.4"
version = "0.26.1"
authors = ["Kyle Mayes <kyle@mayeses.com>"]
build = "build.rs"
links = "clang"
@ -26,7 +26,6 @@ version = "0.2.11"
[dependencies.libc]
version = "0.2.39"
default-features = false
[dependencies.libloading]
version = "0.5.0"

View File

@ -200,12 +200,12 @@ pub fn link() {
println!("cargo:rustc-link-lib=dylib=libclang");
} else {
let name = filename.trim_left_matches("lib");
let name = filename.replace("lib", "");
// Strip extensions and trailing version numbers (e.g., the `.so.7.0` in `libclang.so.7.0`).
let name = match name.find(".dylib").or(name.find(".so")) {
Some(index) => &name[0..index],
// Strip trailing version numbers (e.g., the `.7.0` in `libclang.so.7.0`).
let name = match name.find(".so") {
None => &name,
Some(index) => &name[0..index],
};
println!("cargo:rustc-link-lib=dylib={}", name);

View File

@ -6,6 +6,14 @@ if [ "${TRAVIS_OS_NAME}" == "osx" ]; then
rvm get head || true
fi
function llvm_linux_target_triple() {
if [ "$1" == "5.0" ]; then
echo "linux-x86_64-ubuntu14.04"
else
echo "x86_64-linux-gnu-ubuntu-14.04"
fi
}
function llvm_version_triple() {
if [ "$1" == "3.5" ]; then
echo "3.5.2"
@ -20,11 +28,7 @@ function llvm_version_triple() {
elif [ "$1" == "4.0" ]; then
echo "4.0.1"
elif [ "$1" == "5.0" ]; then
echo "5.0.2"
elif [ "$1" == "6.0" ]; then
echo "6.0.1"
elif [ "$1" == "7.0" ]; then
echo "7.0.0"
echo "5.0.0"
fi
}
@ -45,7 +49,7 @@ function llvm_download() {
}
if [ "${TRAVIS_OS_NAME}" == "linux" ]; then
llvm_download x86_64-linux-gnu-ubuntu-14.04
llvm_download `llvm_linux_target_triple ${LLVM_VERSION}`
export LD_LIBRARY_PATH="${LLVM_DIRECTORY}/lib":$LD_LIBRARY_PATH
else
llvm_download x86_64-apple-darwin

View File

@ -1,4 +1,4 @@
curl -sSf https://static.rust-lang.org/dist/rust-1.30.0-i686-pc-windows-msvc.exe -o rust.exe
curl -sSf https://static.rust-lang.org/dist/rust-1.24.0-i686-pc-windows-msvc.exe -o rust.exe
rust.exe /VERYSILENT /NORESTART /DIR="C:\Rust"
set PATH=%PATH%;C:\Rust\bin

View File

@ -41,7 +41,6 @@ macro_rules! link {
($($(#[cfg($cfg:meta)])* pub fn $name:ident($($pname:ident: $pty:ty), *) $(-> $ret:ty)*;)+) => (
use std::cell::{RefCell};
use std::sync::{Arc};
use std::path::{Path, PathBuf};
/// The set of functions loaded dynamically.
#[derive(Debug, Default)]
@ -53,19 +52,14 @@ macro_rules! link {
#[derive(Debug)]
pub struct SharedLibrary {
library: libloading::Library,
path: PathBuf,
pub functions: Functions,
}
impl SharedLibrary {
//- Constructors -----------------------------
fn new(library: libloading::Library, path: PathBuf) -> SharedLibrary {
SharedLibrary { library, path, functions: Functions::default() }
}
pub fn path(&self) -> &Path {
&self.path
fn new(library: libloading::Library) -> SharedLibrary {
SharedLibrary { library: library, functions: Functions::default() }
}
}
@ -136,7 +130,7 @@ macro_rules! link {
)
});
let mut library = SharedLibrary::new(try!(library), path);
let mut library = SharedLibrary::new(try!(library));
$(load::$name(&mut library);)+
Ok(library)
}

View File

@ -1 +0,0 @@
{"files":{"CHANGELOG.md":"3a67f210ecaa8765910557cdb10dec83a0005119fc6f6a7ef3a7e1a642cfba42","Cargo.toml":"a4de88ffca37c01a019dfe5caff8654542d457b5354399f980a9aaf1769518c1","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"ff8f68cb076caf8cefe7a6430d4ac086ce6af2ca8ce2c4e5a2004d4552ef52a2","README.md":"0195956b2c2433a702a16e9334ec7b2152c89df24d1f65daa6595aa9153ccdde","benches/bench.rs":"7c54f2ea943daa0699df29f3bd5a69b63dcf361f487c77909847b547e3ce709a","bors.toml":"1c81ede536a37edd30fe4e622ff0531b25372403ac9475a5d6c50f14156565a2","src/external_trait_impls/mod.rs":"d69528827794524cfd9acbeacc1ac4f6131e3c7574311e6d919f818f65fbff07","src/external_trait_impls/rayon/helpers.rs":"d4fbca4db924925548f8dab8eb94cf4a3955a53c5e1ff15f59c460546c394034","src/external_trait_impls/rayon/map.rs":"f49e7c1348d4d4a8a2b88f75619cb817bca1f7ededb63202aa2176a8865cb583","src/external_trait_impls/rayon/mod.rs":"b48139960a89ee84ed3b5e10ee5abb0259b40f2bb0ef867d0dd65784252a47c0","src/external_trait_impls/rayon/raw.rs":"bfa25de5e4184f2ab23493e72285187395bb1d81a1d0a55c03fc9fc542d91e27","src/external_trait_impls/rayon/set.rs":"cc70fb41704861d0ec58e3118305a9b80cedbdce4eeca7c96d382cb0f956cea8","src/external_trait_impls/serde.rs":"3611fd340cc18b57510823d5bba0222e98d43eee075e5157fe37f415ece909e3","src/fx.rs":"5f375fff21c94f3a7ed81bb78981d69354168e7808775d6beda20f6596e5998a","src/lib.rs":"8868a2701988f2fef82f7cd3c22f91e3717cd1cda7ff77959869c012cdbd50c4","src/map.rs":"973f217ada3698227eedca27c96955e5d540beb011281811350de7458e114857","src/raw/bitmask.rs":"67927708ecfd9f290cc1160c84d8227bdcaabd9abde70a1e5dcfe7c203545f6f","src/raw/generic.rs":"7ac56ebd531cf49df187cf6ab60e15e65bdd81be97fbea790660c1e4966ed8f3","src/raw/mod.rs":"b675b5b001ed4e922a06d1a00228b0f906e534db3f3af547866bc1a2d151ea75","src/raw/sse2.rs":"1f86578c912fd12088f5aa129ce1f73b29c4b5504b14375828789bc6fb90e8cd","src/set.rs":"8139210e9ebeaaffaf080f92b51360b7dbac02c8b250ae84f5be7dd9b9018b0e","tests/rayon.rs":"d6b5c538a7d8fac9c8dc66021a1aa7cbc67547d22d2f1aa4d8dfc34fd0b9e3f3","tests/serde.rs":"9af7e218f9a13479e60fd6b6c889da6e2d95c430e409179ea398886458a764f8","tests/set.rs":"ae3a9efe51339372d6420f9cd681141ce5c24065b3d63677b4e2ce561d3dd8c1"},"package":"3bae29b6653b3412c2e71e9d486db9f9df5d701941d86683005efb9f2d28e3da"}

View File

@ -1,74 +0,0 @@
# Change Log
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/)
and this project adheres to [Semantic Versioning](http://semver.org/).
## [Unreleased]
## [v0.1.8] - 2019-01-14
### Added
- Rayon parallel iterator support (#37)
- `raw_entry` support (#31)
- `#[may_dangle]` on nightly (#31)
- `try_reserve` support (#31)
### Fixed
- Fixed variance on `IterMut`. (#31)
## [v0.1.7] - 2018-12-05
### Fixed
- Fixed non-SSE version of convert_special_to_empty_and_full_to_deleted. (#32)
- Fixed overflow in rehash_in_place. (#33)
## [v0.1.6] - 2018-11-17
### Fixed
- Fixed compile error on nightly. (#29)
## [v0.1.5] - 2018-11-08
### Fixed
- Fixed subtraction overflow in generic::Group::match_byte. (#28)
## [v0.1.4] - 2018-11-04
### Fixed
- Fixed a bug in the `erase_no_drop` implementation. (#26)
## [v0.1.3] - 2018-11-01
### Added
- Serde support. (#14)
### Fixed
- Make the compiler inline functions more aggressively. (#20)
## [v0.1.2] - 2018-10-31
### Fixed
- `clear` segfaults when called on an empty table. (#13)
## [v0.1.1] - 2018-10-30
### Fixed
- `erase_no_drop` optimization not triggering in the SSE2 implementation. (#3)
- Missing `Send` and `Sync` for hash map and iterator types. (#7)
- Bug when inserting into a table smaller than the group width. (#5)
## v0.1.0 - 2018-10-29
- Initial release
[Unreleased]: https://github.com/Amanieu/hashbrown/compare/v0.1.8...HEAD
[v0.1.8]: https://github.com/Amanieu/hashbrown/compare/v0.1.7...v0.1.8
[v0.1.7]: https://github.com/Amanieu/hashbrown/compare/v0.1.6...v0.1.7
[v0.1.6]: https://github.com/Amanieu/hashbrown/compare/v0.1.5...v0.1.6
[v0.1.5]: https://github.com/Amanieu/hashbrown/compare/v0.1.4...v0.1.5
[v0.1.4]: https://github.com/Amanieu/hashbrown/compare/v0.1.3...v0.1.4
[v0.1.3]: https://github.com/Amanieu/hashbrown/compare/v0.1.2...v0.1.3
[v0.1.2]: https://github.com/Amanieu/hashbrown/compare/v0.1.1...v0.1.2
[v0.1.1]: https://github.com/Amanieu/hashbrown/compare/v0.1.0...v0.1.1

View File

@ -1,55 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g. crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
name = "hashbrown"
version = "0.1.8"
authors = ["Amanieu d'Antras <amanieu@gmail.com>"]
description = "A Rust port of Google's SwissTable hash map"
readme = "README.md"
keywords = ["hash", "no_std", "hashmap", "swisstable"]
categories = ["data-structures", "no-std"]
license = "Apache-2.0/MIT"
repository = "https://github.com/Amanieu/hashbrown"
[dependencies.byteorder]
version = "1.0"
default-features = false
[dependencies.rayon]
version = "1.0"
optional = true
[dependencies.scopeguard]
version = "0.3"
default-features = false
[dependencies.serde]
version = "1.0"
optional = true
default-features = false
[dev-dependencies.lazy_static]
version = "~1.2"
[dev-dependencies.rand]
version = "0.5.1"
[dev-dependencies.rayon]
version = "1.0"
[dev-dependencies.rustc-hash]
version = "1.0"
[dev-dependencies.serde_test]
version = "1.0"
[features]
nightly = []

View File

@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,25 +0,0 @@
Copyright (c) 2016 Amanieu d'Antras
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

View File

@ -1,91 +0,0 @@
hashbrown
=========
[![Build Status](https://travis-ci.com/Amanieu/hashbrown.svg?branch=master)](https://travis-ci.com/Amanieu/hashbrown) [![Crates.io](https://img.shields.io/crates/v/hashbrown.svg)](https://crates.io/crates/hashbrown)
This crate is a Rust port of Google's high-performance [SwissTable] hash
map, adapted to make it a drop-in replacement for Rust's standard `HashMap`
and `HashSet` types.
The original C++ version of SwissTable can be found [here], and this
[CppCon talk] gives an overview of how the algorithm works.
[SwissTable]: https://abseil.io/blog/20180927-swisstables
[here]: https://github.com/abseil/abseil-cpp/blob/master/absl/container/internal/raw_hash_set.h
[CppCon talk]: https://www.youtube.com/watch?v=ncHmEUmJZf4
## [Documentation](https://docs.rs/hashbrown)
## [Change log](CHANGELOG.md)
## Features
- Drop-in replacement for the standard library `HashMap` and `HashSet` types.
- Uses `FxHash` as the default hasher, which is much faster than SipHash.
- Around 2x faster than `FxHashMap` and 8x faster than the standard `HashMap`.
- Lower memory usage: only 1 byte of overhead per entry instead of 8.
- Compatible with `#[no_std]` (currently requires nightly for the `alloc` crate).
- Empty hash maps do not allocate any memory.
- SIMD lookups to scan multiple hash entries in parallel.
## Performance
Compared to `std::collections::HashMap`:
```
name stdhash ns/iter hashbrown ns/iter diff ns/iter diff % speedup
find_existing 23,831 2,935 -20,896 -87.68% x 8.12
find_nonexisting 25,326 2,283 -23,043 -90.99% x 11.09
get_remove_insert 124 25 -99 -79.84% x 4.96
grow_by_insertion 197 177 -20 -10.15% x 1.11
hashmap_as_queue 72 18 -54 -75.00% x 4.00
new_drop 14 0 -14 -100.00% x inf
new_insert_drop 78 55 -23 -29.49% x 1.42
```
Compared to `rustc_hash::FxHashMap` (standard `HashMap` using `FxHash` instead of `SipHash`):
```
name fxhash ns/iter hashbrown ns/iter diff ns/iter diff % speedup
find_existing 5,951 2,935 -3,016 -50.68% x 2.03
find_nonexisting 4,637 2,283 -2,354 -50.77% x 2.03
get_remove_insert 29 25 -4 -13.79% x 1.16
grow_by_insertion 160 177 17 10.62% x 0.90
hashmap_as_queue 22 18 -4 -18.18% x 1.22
new_drop 9 0 -9 -100.00% x inf
new_insert_drop 64 55 -9 -14.06% x 1.16
```
## Usage
Add this to your `Cargo.toml`:
```toml
[dependencies]
hashbrown = "0.1"
```
and this to your crate root:
```rust
extern crate hashbrown;
```
This crate has the following Cargo features:
- `nightly`: Enables nightly-only features: `no_std` support, `#[may_dangle]` and ~10% speedup from branch hint intrinsics.
## License
Licensed under either of:
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
### Contribution
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any
additional terms or conditions.

View File

@ -1,115 +0,0 @@
#![feature(test)]
extern crate hashbrown;
extern crate rustc_hash;
extern crate test;
use std::hash::Hash;
use test::Bencher;
use hashbrown::HashMap;
//use rustc_hash::FxHashMap as HashMap;
//use std::collections::HashMap;
fn new_map<K: Eq + Hash, V>() -> HashMap<K, V> {
HashMap::default()
}
#[bench]
fn new_drop(b: &mut Bencher) {
b.iter(|| {
let m: HashMap<i32, i32> = new_map();
assert_eq!(m.len(), 0);
})
}
#[bench]
fn new_insert_drop(b: &mut Bencher) {
b.iter(|| {
let mut m = new_map();
m.insert(0, 0);
assert_eq!(m.len(), 1);
})
}
#[bench]
fn grow_by_insertion(b: &mut Bencher) {
let mut m = new_map();
for i in 1..1001 {
m.insert(i, i);
}
let mut k = 1001;
b.iter(|| {
m.insert(k, k);
k += 1;
});
}
#[bench]
fn find_existing(b: &mut Bencher) {
let mut m = new_map();
for i in 1..1001 {
m.insert(i, i);
}
b.iter(|| {
for i in 1..1001 {
m.contains_key(&i);
}
});
}
#[bench]
fn find_nonexisting(b: &mut Bencher) {
let mut m = new_map();
for i in 1..1001 {
m.insert(i, i);
}
b.iter(|| {
for i in 1001..2001 {
m.contains_key(&i);
}
});
}
#[bench]
fn hashmap_as_queue(b: &mut Bencher) {
let mut m = new_map();
for i in 1..1001 {
m.insert(i, i);
}
let mut k = 1;
b.iter(|| {
m.remove(&k);
m.insert(k + 1000, k + 1000);
k += 1;
});
}
#[bench]
fn get_remove_insert(b: &mut Bencher) {
let mut m = new_map();
for i in 1..1001 {
m.insert(i, i);
}
let mut k = 1;
b.iter(|| {
m.get(&(k + 400));
m.get(&(k + 2000));
m.remove(&k);
m.insert(k + 1000, k + 1000);
k += 1;
})
}

View File

@ -1,3 +0,0 @@
status = [
"continuous-integration/travis-ci/push",
]

View File

@ -1,4 +0,0 @@
#[cfg(feature = "rayon")]
pub(crate) mod rayon;
#[cfg(feature = "serde")]
mod serde;

View File

@ -1,26 +0,0 @@
use alloc::collections::LinkedList;
use alloc::vec::Vec;
use rayon::iter::{IntoParallelIterator, ParallelIterator};
/// Helper for collecting parallel iterators to an intermediary
pub(super) fn collect<I: IntoParallelIterator>(iter: I) -> (LinkedList<Vec<I::Item>>, usize) {
let list = iter
.into_par_iter()
.fold(Vec::new, |mut vec, elem| {
vec.push(elem);
vec
})
.map(|vec| {
let mut list = LinkedList::new();
list.push_back(vec);
list
})
.reduce(LinkedList::new, |mut list1, mut list2| {
list1.append(&mut list2);
list1
});
let len = list.iter().map(Vec::len).sum();
(list, len)
}
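
A sequential, dependency-free sketch of what the fold/reduce above computes: per-worker Vecs chained into a LinkedList, plus the total length so the caller can reserve once. Fixed-size chunking stands in for rayon's work splitting here.

```rust
use std::collections::LinkedList;

fn collect_chunks(iter: impl Iterator<Item = i32>, chunk: usize) -> (LinkedList<Vec<i32>>, usize) {
    let mut list = LinkedList::new();
    let mut current = Vec::new();
    for item in iter {
        current.push(item);
        if current.len() == chunk {
            // Move the filled Vec into the list, leaving an empty one behind.
            list.push_back(std::mem::take(&mut current));
        }
    }
    if !current.is_empty() {
        list.push_back(current);
    }
    let len: usize = list.iter().map(Vec::len).sum();
    (list, len)
}

fn main() {
    let (list, len) = collect_chunks(0..10, 4);
    assert_eq!(len, 10);
    assert_eq!(list.len(), 3); // chunks of 4, 4 and 2
}
```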

View File

@ -1,680 +0,0 @@
//! Rayon extensions for `HashMap`.
use core::fmt;
use core::hash::{BuildHasher, Hash};
use hash_map::HashMap;
use rayon::iter::plumbing::UnindexedConsumer;
use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelExtend, ParallelIterator};
/// Parallel iterator over shared references to entries in a map.
///
/// This iterator is created by the [`par_iter`] method on [`HashMap`]
/// (provided by the [`IntoParallelRefIterator`] trait).
/// See its documentation for more.
///
/// [`par_iter`]: /hashbrown/struct.HashMap.html#method.par_iter
/// [`HashMap`]: /hashbrown/struct.HashMap.html
/// [`IntoParallelRefIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelRefIterator.html
pub struct ParIter<'a, K: 'a, V: 'a, S: 'a> {
map: &'a HashMap<K, V, S>,
}
impl<'a, K: Sync, V: Sync, S: Sync> ParallelIterator for ParIter<'a, K, V, S> {
type Item = (&'a K, &'a V);
#[inline]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.map
.table
.par_iter()
.map(|x| unsafe {
let r = x.as_ref();
(&r.0, &r.1)
})
.drive_unindexed(consumer)
}
}
impl<'a, K, V, S> Clone for ParIter<'a, K, V, S> {
#[inline]
fn clone(&self) -> Self {
ParIter { map: self.map }
}
}
impl<'a, K: fmt::Debug + Eq + Hash, V: fmt::Debug, S: BuildHasher> fmt::Debug
for ParIter<'a, K, V, S>
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.map.iter().fmt(f)
}
}
/// Parallel iterator over shared references to keys in a map.
///
/// This iterator is created by the [`par_keys`] method on [`HashMap`].
/// See its documentation for more.
///
/// [`par_keys`]: /hashbrown/struct.HashMap.html#method.par_keys
/// [`HashMap`]: /hashbrown/struct.HashMap.html
pub struct ParKeys<'a, K: 'a, V: 'a, S: 'a> {
map: &'a HashMap<K, V, S>,
}
impl<'a, K: Sync, V: Sync, S: Sync> ParallelIterator for ParKeys<'a, K, V, S> {
type Item = &'a K;
#[inline]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.map
.table
.par_iter()
.map(|x| unsafe { &x.as_ref().0 })
.drive_unindexed(consumer)
}
}
impl<'a, K, V, S> Clone for ParKeys<'a, K, V, S> {
#[inline]
fn clone(&self) -> Self {
ParKeys { map: self.map }
}
}
impl<'a, K: fmt::Debug + Eq + Hash, V, S: BuildHasher> fmt::Debug for ParKeys<'a, K, V, S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.map.keys().fmt(f)
}
}
/// Parallel iterator over shared references to values in a map.
///
/// This iterator is created by the [`par_values`] method on [`HashMap`].
/// See its documentation for more.
///
/// [`par_values`]: /hashbrown/struct.HashMap.html#method.par_values
/// [`HashMap`]: /hashbrown/struct.HashMap.html
pub struct ParValues<'a, K: 'a, V: 'a, S: 'a> {
map: &'a HashMap<K, V, S>,
}
impl<'a, K: Sync, V: Sync, S: Sync> ParallelIterator for ParValues<'a, K, V, S> {
type Item = &'a V;
#[inline]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.map
.table
.par_iter()
.map(|x| unsafe { &x.as_ref().1 })
.drive_unindexed(consumer)
}
}
impl<'a, K, V, S> Clone for ParValues<'a, K, V, S> {
#[inline]
fn clone(&self) -> Self {
ParValues { map: self.map }
}
}
impl<'a, K: Eq + Hash, V: fmt::Debug, S: BuildHasher> fmt::Debug for ParValues<'a, K, V, S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.map.values().fmt(f)
}
}
/// Parallel iterator over mutable references to entries in a map.
///
/// This iterator is created by the [`par_iter_mut`] method on [`HashMap`]
/// (provided by the [`IntoParallelRefMutIterator`] trait).
/// See its documentation for more.
///
/// [`par_iter_mut`]: /hashbrown/struct.HashMap.html#method.par_iter_mut
/// [`HashMap`]: /hashbrown/struct.HashMap.html
/// [`IntoParallelRefMutIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelRefMutIterator.html
pub struct ParIterMut<'a, K: 'a, V: 'a, S: 'a> {
map: &'a mut HashMap<K, V, S>,
}
impl<'a, K: Send + Sync, V: Send, S: Send> ParallelIterator for ParIterMut<'a, K, V, S> {
type Item = (&'a K, &'a mut V);
#[inline]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.map
.table
.par_iter()
.map(|x| unsafe {
let r = x.as_mut();
(&r.0, &mut r.1)
})
.drive_unindexed(consumer)
}
}
impl<'a, K: fmt::Debug + Eq + Hash, V: fmt::Debug, S: BuildHasher> fmt::Debug
for ParIterMut<'a, K, V, S>
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.map.iter().fmt(f)
}
}
/// Parallel iterator over mutable references to values in a map.
///
/// This iterator is created by the [`par_values_mut`] method on [`HashMap`].
/// See its documentation for more.
///
/// [`par_values_mut`]: /hashbrown/struct.HashMap.html#method.par_values_mut
/// [`HashMap`]: /hashbrown/struct.HashMap.html
pub struct ParValuesMut<'a, K: 'a, V: 'a, S: 'a> {
map: &'a mut HashMap<K, V, S>,
}
impl<'a, K: Send, V: Send, S: Send> ParallelIterator for ParValuesMut<'a, K, V, S> {
type Item = &'a mut V;
#[inline]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.map
.table
.par_iter()
.map(|x| unsafe { &mut x.as_mut().1 })
.drive_unindexed(consumer)
}
}
impl<'a, K: Eq + Hash, V: fmt::Debug, S: BuildHasher> fmt::Debug for ParValuesMut<'a, K, V, S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.map.values().fmt(f)
}
}
/// Parallel iterator over entries of a consumed map.
///
/// This iterator is created by the [`into_par_iter`] method on [`HashMap`]
/// (provided by the [`IntoParallelIterator`] trait).
/// See its documentation for more.
///
/// [`into_par_iter`]: /hashbrown/struct.HashMap.html#method.into_par_iter
/// [`HashMap`]: /hashbrown/struct.HashMap.html
/// [`IntoParallelIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelIterator.html
pub struct IntoParIter<K, V, S> {
map: HashMap<K, V, S>,
}
impl<K: Send, V: Send, S: Send> ParallelIterator for IntoParIter<K, V, S> {
type Item = (K, V);
#[inline]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.map.table.into_par_iter().drive_unindexed(consumer)
}
}
impl<'a, K: fmt::Debug + Eq + Hash, V: fmt::Debug, S: BuildHasher> fmt::Debug
for IntoParIter<K, V, S>
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.map.iter().fmt(f)
}
}
/// Parallel draining iterator over entries of a map.
///
/// This iterator is created by the [`par_drain`] method on [`HashMap`].
/// See its documentation for more.
///
/// [`par_drain`]: /hashbrown/struct.HashMap.html#method.par_drain
/// [`HashMap`]: /hashbrown/struct.HashMap.html
pub struct ParDrain<'a, K: 'a, V: 'a, S: 'a> {
map: &'a mut HashMap<K, V, S>,
}
impl<'a, K: Send, V: Send, S: Send> ParallelIterator for ParDrain<'a, K, V, S> {
type Item = (K, V);
#[inline]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.map.table.par_drain().drive_unindexed(consumer)
}
}
impl<'a, K: fmt::Debug + Eq + Hash, V: fmt::Debug, S: BuildHasher> fmt::Debug
for ParDrain<'a, K, V, S>
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.map.iter().fmt(f)
}
}
impl<K: Sync, V: Sync, S: Sync> HashMap<K, V, S> {
/// Visits (potentially in parallel) immutably borrowed keys in an arbitrary order.
#[inline]
pub fn par_keys(&self) -> ParKeys<K, V, S> {
ParKeys { map: self }
}
/// Visits (potentially in parallel) immutably borrowed values in an arbitrary order.
#[inline]
pub fn par_values(&self) -> ParValues<K, V, S> {
ParValues { map: self }
}
}
impl<K: Send, V: Send, S: Send> HashMap<K, V, S> {
/// Visits (potentially in parallel) mutably borrowed values in an arbitrary order.
#[inline]
pub fn par_values_mut(&mut self) -> ParValuesMut<K, V, S> {
ParValuesMut { map: self }
}
/// Consumes (potentially in parallel) all values in an arbitrary order,
/// while preserving the map's allocated memory for reuse.
#[inline]
pub fn par_drain(&mut self) -> ParDrain<K, V, S> {
ParDrain { map: self }
}
}
impl<K, V, S> HashMap<K, V, S>
where
K: Eq + Hash + Sync,
V: PartialEq + Sync,
S: BuildHasher + Sync,
{
/// Returns `true` if the map is equal to another,
/// i.e. both maps contain the same keys mapped to the same values.
///
/// This method runs in a potentially parallel fashion.
pub fn par_eq(&self, other: &Self) -> bool {
self.len() == other.len()
&& self
.into_par_iter()
.all(|(key, value)| other.get(key).map_or(false, |v| *value == *v))
}
}
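
A sequential sketch of the `par_eq` check above, using std's HashMap: equal sizes plus every (key, value) of self present in other with the same value.

```rust
use std::collections::HashMap;

fn map_eq(a: &HashMap<i32, i32>, b: &HashMap<i32, i32>) -> bool {
    a.len() == b.len() && a.iter().all(|(k, v)| b.get(k).map_or(false, |w| v == w))
}

fn main() {
    let a: HashMap<_, _> = [(1, 2), (2, 3)].iter().cloned().collect();
    let mut b = a.clone();
    assert!(map_eq(&a, &b));
    b.insert(3, 4);
    assert!(!map_eq(&a, &b)); // lengths now differ
}
```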
impl<K: Send, V: Send, S: Send> IntoParallelIterator for HashMap<K, V, S> {
type Item = (K, V);
type Iter = IntoParIter<K, V, S>;
#[inline]
fn into_par_iter(self) -> Self::Iter {
IntoParIter { map: self }
}
}
impl<'a, K: Sync, V: Sync, S: Sync> IntoParallelIterator for &'a HashMap<K, V, S> {
type Item = (&'a K, &'a V);
type Iter = ParIter<'a, K, V, S>;
#[inline]
fn into_par_iter(self) -> Self::Iter {
ParIter { map: self }
}
}
impl<'a, K: Send + Sync, V: Send, S: Send> IntoParallelIterator for &'a mut HashMap<K, V, S> {
type Item = (&'a K, &'a mut V);
type Iter = ParIterMut<'a, K, V, S>;
#[inline]
fn into_par_iter(self) -> Self::Iter {
ParIterMut { map: self }
}
}
/// Collect (key, value) pairs from a parallel iterator into a
/// hashmap. If multiple pairs correspond to the same key, then the
/// ones produced earlier in the parallel iterator will be
/// overwritten, just as with a sequential iterator.
impl<K, V, S> FromParallelIterator<(K, V)> for HashMap<K, V, S>
where
K: Eq + Hash + Send,
V: Send,
S: BuildHasher + Default,
{
fn from_par_iter<P>(par_iter: P) -> Self
where
P: IntoParallelIterator<Item = (K, V)>,
{
let mut map = HashMap::default();
map.par_extend(par_iter);
map
}
}
/// Extend a hash map with items from a parallel iterator.
impl<K, V, S> ParallelExtend<(K, V)> for HashMap<K, V, S>
where
K: Eq + Hash + Send,
V: Send,
S: BuildHasher,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = (K, V)>,
{
extend(self, par_iter);
}
}
/// Extend a hash map with copied items from a parallel iterator.
impl<'a, K, V, S> ParallelExtend<(&'a K, &'a V)> for HashMap<K, V, S>
where
K: Copy + Eq + Hash + Sync,
V: Copy + Sync,
S: BuildHasher,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = (&'a K, &'a V)>,
{
extend(self, par_iter);
}
}
// This is equal to the normal `HashMap` -- no custom advantage.
fn extend<K, V, S, I>(map: &mut HashMap<K, V, S>, par_iter: I)
where
K: Eq + Hash,
S: BuildHasher,
I: IntoParallelIterator,
HashMap<K, V, S>: Extend<I::Item>,
{
let (list, len) = super::helpers::collect(par_iter);
// Keys may be already present or show multiple times in the iterator.
// Reserve the entire length if the map is empty.
// Otherwise reserve half the length (rounded up), so the map
// will only resize twice in the worst case.
let reserve = if map.is_empty() { len } else { (len + 1) / 2 };
map.reserve(reserve);
for vec in list {
map.extend(vec);
}
}
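
A worked sketch of the reservation heuristic described in the comments above, as a hypothetical helper rather than hashbrown's API: reserve everything for an empty map, otherwise half the incoming length rounded up, so at most two resizes happen in the worst case.

```rust
fn reserve_hint(map_is_empty: bool, incoming: usize) -> usize {
    if map_is_empty { incoming } else { (incoming + 1) / 2 }
}

fn main() {
    assert_eq!(reserve_hint(true, 10), 10);
    assert_eq!(reserve_hint(false, 10), 5);
    assert_eq!(reserve_hint(false, 11), 6); // rounded up
}
```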
#[cfg(test)]
mod test_par_map {
use alloc::vec::Vec;
use core::hash::{Hash, Hasher};
use core::sync::atomic::{AtomicUsize, Ordering};
use rayon::prelude::*;
use hash_map::HashMap;
struct Dropable<'a> {
k: usize,
counter: &'a AtomicUsize,
}
impl<'a> Dropable<'a> {
fn new(k: usize, counter: &AtomicUsize) -> Dropable {
counter.fetch_add(1, Ordering::Relaxed);
Dropable { k, counter }
}
}
impl<'a> Drop for Dropable<'a> {
fn drop(&mut self) {
self.counter.fetch_sub(1, Ordering::Relaxed);
}
}
impl<'a> Clone for Dropable<'a> {
fn clone(&self) -> Dropable<'a> {
Dropable::new(self.k, self.counter)
}
}
impl<'a> Hash for Dropable<'a> {
fn hash<H>(&self, state: &mut H)
where
H: Hasher,
{
self.k.hash(state)
}
}
impl<'a> PartialEq for Dropable<'a> {
fn eq(&self, other: &Self) -> bool {
self.k == other.k
}
}
impl<'a> Eq for Dropable<'a> {}
#[test]
fn test_into_iter_drops() {
let key = AtomicUsize::new(0);
let value = AtomicUsize::new(0);
let hm = {
let mut hm = HashMap::new();
assert_eq!(key.load(Ordering::Relaxed), 0);
assert_eq!(value.load(Ordering::Relaxed), 0);
for i in 0..100 {
let d1 = Dropable::new(i, &key);
let d2 = Dropable::new(i + 100, &value);
hm.insert(d1, d2);
}
assert_eq!(key.load(Ordering::Relaxed), 100);
assert_eq!(value.load(Ordering::Relaxed), 100);
hm
};
// By the way, ensure that cloning doesn't screw up the dropping.
drop(hm.clone());
assert_eq!(key.load(Ordering::Relaxed), 100);
assert_eq!(value.load(Ordering::Relaxed), 100);
// Ensure that dropping the iterator does not leak anything.
drop(hm.clone().into_par_iter());
{
assert_eq!(key.load(Ordering::Relaxed), 100);
assert_eq!(value.load(Ordering::Relaxed), 100);
// retain only half
let _v: Vec<_> = hm
.into_par_iter()
.filter(|&(ref key, _)| key.k < 50)
.collect();
assert_eq!(key.load(Ordering::Relaxed), 50);
assert_eq!(value.load(Ordering::Relaxed), 50);
};
assert_eq!(key.load(Ordering::Relaxed), 0);
assert_eq!(value.load(Ordering::Relaxed), 0);
}
#[test]
fn test_drain_drops() {
let key = AtomicUsize::new(0);
let value = AtomicUsize::new(0);
let mut hm = {
let mut hm = HashMap::new();
assert_eq!(key.load(Ordering::Relaxed), 0);
assert_eq!(value.load(Ordering::Relaxed), 0);
for i in 0..100 {
let d1 = Dropable::new(i, &key);
let d2 = Dropable::new(i + 100, &value);
hm.insert(d1, d2);
}
assert_eq!(key.load(Ordering::Relaxed), 100);
assert_eq!(value.load(Ordering::Relaxed), 100);
hm
};
// By the way, ensure that cloning doesn't screw up the dropping.
drop(hm.clone());
assert_eq!(key.load(Ordering::Relaxed), 100);
assert_eq!(value.load(Ordering::Relaxed), 100);
// Ensure that dropping the drain iterator does not leak anything.
drop(hm.clone().par_drain());
{
assert_eq!(key.load(Ordering::Relaxed), 100);
assert_eq!(value.load(Ordering::Relaxed), 100);
// retain only half
let _v: Vec<_> = hm.drain().filter(|&(ref key, _)| key.k < 50).collect();
assert!(hm.is_empty());
assert_eq!(key.load(Ordering::Relaxed), 50);
assert_eq!(value.load(Ordering::Relaxed), 50);
};
assert_eq!(key.load(Ordering::Relaxed), 0);
assert_eq!(value.load(Ordering::Relaxed), 0);
}
#[test]
fn test_empty_iter() {
let mut m: HashMap<isize, bool> = HashMap::new();
assert_eq!(m.par_drain().count(), 0);
assert_eq!(m.par_keys().count(), 0);
assert_eq!(m.par_values().count(), 0);
assert_eq!(m.par_values_mut().count(), 0);
assert_eq!(m.par_iter().count(), 0);
assert_eq!(m.par_iter_mut().count(), 0);
assert_eq!(m.len(), 0);
assert!(m.is_empty());
assert_eq!(m.into_par_iter().count(), 0);
}
#[test]
fn test_iterate() {
let mut m = HashMap::with_capacity(4);
for i in 0..32 {
assert!(m.insert(i, i * 2).is_none());
}
assert_eq!(m.len(), 32);
let observed = AtomicUsize::new(0);
m.par_iter().for_each(|(k, v)| {
assert_eq!(*v, *k * 2);
observed.fetch_or(1 << *k, Ordering::Relaxed);
});
assert_eq!(observed.into_inner(), 0xFFFF_FFFF);
}
#[test]
fn test_keys() {
let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
let map: HashMap<_, _> = vec.into_par_iter().collect();
let keys: Vec<_> = map.par_keys().cloned().collect();
assert_eq!(keys.len(), 3);
assert!(keys.contains(&1));
assert!(keys.contains(&2));
assert!(keys.contains(&3));
}
#[test]
fn test_values() {
let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
let map: HashMap<_, _> = vec.into_par_iter().collect();
let values: Vec<_> = map.par_values().cloned().collect();
assert_eq!(values.len(), 3);
assert!(values.contains(&'a'));
assert!(values.contains(&'b'));
assert!(values.contains(&'c'));
}
#[test]
fn test_values_mut() {
let vec = vec![(1, 1), (2, 2), (3, 3)];
let mut map: HashMap<_, _> = vec.into_par_iter().collect();
map.par_values_mut().for_each(|value| *value = (*value) * 2);
let values: Vec<_> = map.par_values().cloned().collect();
assert_eq!(values.len(), 3);
assert!(values.contains(&2));
assert!(values.contains(&4));
assert!(values.contains(&6));
}
#[test]
fn test_eq() {
let mut m1 = HashMap::new();
m1.insert(1, 2);
m1.insert(2, 3);
m1.insert(3, 4);
let mut m2 = HashMap::new();
m2.insert(1, 2);
m2.insert(2, 3);
assert!(!m1.par_eq(&m2));
m2.insert(3, 4);
assert!(m1.par_eq(&m2));
}
#[test]
fn test_from_iter() {
let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
let map: HashMap<_, _> = xs.par_iter().cloned().collect();
for &(k, v) in &xs {
assert_eq!(map.get(&k), Some(&v));
}
}
#[test]
fn test_extend_ref() {
let mut a = HashMap::new();
a.insert(1, "one");
let mut b = HashMap::new();
b.insert(2, "two");
b.insert(3, "three");
a.par_extend(&b);
assert_eq!(a.len(), 3);
assert_eq!(a[&1], "one");
assert_eq!(a[&2], "two");
assert_eq!(a[&3], "three");
}
}

View File

@ -1,5 +0,0 @@
mod helpers;
mod raw;
pub(crate) mod map;
pub(crate) mod set;

View File

@ -1,201 +0,0 @@
use alloc::alloc::dealloc;
use core::marker::PhantomData;
use core::mem;
use core::ptr::NonNull;
use raw::Bucket;
use raw::{RawIterRange, RawTable};
use rayon::iter::{
plumbing::{self, Folder, UnindexedConsumer, UnindexedProducer},
ParallelIterator,
};
use scopeguard::guard;
/// Parallel iterator which returns a raw pointer to every full bucket in the table.
pub struct RawParIter<T> {
iter: RawIterRange<T>,
}
unsafe impl<T> Send for RawParIter<T> {}
impl<T> ParallelIterator for RawParIter<T> {
type Item = Bucket<T>;
#[inline]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let producer = ParIterProducer { iter: self.iter };
plumbing::bridge_unindexed(producer, consumer)
}
}
/// Producer which returns a `Bucket<T>` for every element.
struct ParIterProducer<T> {
iter: RawIterRange<T>,
}
unsafe impl<T> Send for ParIterProducer<T> {}
impl<T> UnindexedProducer for ParIterProducer<T> {
type Item = Bucket<T>;
#[inline]
fn split(self) -> (Self, Option<Self>) {
let (left, right) = self.iter.split();
let left = ParIterProducer { iter: left };
let right = right.map(|right| ParIterProducer { iter: right });
(left, right)
}
#[inline]
fn fold_with<F>(self, folder: F) -> F
where
F: Folder<Self::Item>,
{
folder.consume_iter(self.iter)
}
}
/// Parallel iterator which consumes a table and returns elements.
pub struct RawIntoParIter<T> {
table: RawTable<T>,
}
unsafe impl<T> Send for RawIntoParIter<T> {}
impl<T: Send> ParallelIterator for RawIntoParIter<T> {
type Item = T;
#[inline]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let iter = unsafe { self.table.iter().iter };
let _guard = guard(self.table.into_alloc(), |alloc| {
if let Some((ptr, layout)) = *alloc {
unsafe {
dealloc(ptr.as_ptr(), layout);
}
}
});
let producer = ParDrainProducer { iter };
plumbing::bridge_unindexed(producer, consumer)
}
}
/// Parallel iterator which consumes elements without freeing the table storage.
pub struct RawParDrain<'a, T> {
// We don't use a &'a RawTable<T> because we want RawParDrain to be
// covariant over 'a.
table: NonNull<RawTable<T>>,
_marker: PhantomData<&'a RawTable<T>>,
}
unsafe impl<'a, T> Send for RawParDrain<'a, T> {}
impl<'a, T: Send> ParallelIterator for RawParDrain<'a, T> {
type Item = T;
#[inline]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let _guard = guard(self.table, |table| unsafe {
table.as_mut().clear_no_drop()
});
let iter = unsafe { self.table.as_ref().iter().iter };
mem::forget(self);
let producer = ParDrainProducer { iter };
plumbing::bridge_unindexed(producer, consumer)
}
}
impl<'a, T> Drop for RawParDrain<'a, T> {
fn drop(&mut self) {
// If drive_unindexed is not called then simply clear the table.
unsafe { self.table.as_mut().clear() }
}
}
/// Producer which will consume all elements in the range, even if it is dropped
/// halfway through.
struct ParDrainProducer<T> {
iter: RawIterRange<T>,
}
unsafe impl<T: Send> Send for ParDrainProducer<T> {}
impl<T: Send> UnindexedProducer for ParDrainProducer<T> {
type Item = T;
#[inline]
fn split(self) -> (Self, Option<Self>) {
let (left, right) = self.iter.clone().split();
mem::forget(self);
let left = ParDrainProducer { iter: left };
let right = right.map(|right| ParDrainProducer { iter: right });
(left, right)
}
#[inline]
fn fold_with<F>(mut self, mut folder: F) -> F
where
F: Folder<Self::Item>,
{
// Make sure to modify the iterator in-place so that any remaining
// elements are processed in our Drop impl.
while let Some(item) = self.iter.next() {
folder = folder.consume(unsafe { item.read() });
if folder.full() {
return folder;
}
}
// If we processed all elements then we don't need to run the drop.
mem::forget(self);
folder
}
}
impl<T> Drop for ParDrainProducer<T> {
#[inline]
fn drop(&mut self) {
// Drop all remaining elements
if mem::needs_drop::<T>() {
while let Some(item) = self.iter.next() {
unsafe {
item.drop();
}
}
}
}
}
impl<T> RawTable<T> {
/// Returns a parallel iterator over the elements in a `RawTable`.
#[inline]
pub fn par_iter(&self) -> RawParIter<T> {
RawParIter {
iter: unsafe { self.iter().iter },
}
}
/// Returns a parallel iterator over the elements in a `RawTable`.
#[inline]
pub fn into_par_iter(self) -> RawIntoParIter<T> {
RawIntoParIter { table: self }
}
/// Returns a parallel iterator which consumes all elements of a `RawTable`
/// without freeing its memory allocation.
#[inline]
pub fn par_drain(&mut self) -> RawParDrain<T> {
RawParDrain {
table: NonNull::from(self),
_marker: PhantomData,
}
}
}
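A short usage sketch of the public wrapper built on these raw iterators, assuming hashbrown is compiled with its `rayon` feature (the map-level `par_drain` appears in the tests above): draining in parallel consumes every entry while keeping the table's allocation, which is exactly the contract `RawParDrain` provides.

extern crate hashbrown;
extern crate rayon;

use hashbrown::HashMap;
use rayon::prelude::*;

fn main() {
    let mut m: HashMap<u32, u32> = (0..1000).map(|i| (i, i * 2)).collect();
    let cap_before = m.capacity();
    // Every element is consumed in parallel; the Drop impls above
    // guarantee the remainder is dropped even if a consumer bails early.
    let sum: u32 = m.par_drain().map(|(_k, v)| v).sum();
    assert_eq!(sum, 999_000);
    // The table is empty afterwards, but its allocation is retained.
    assert!(m.is_empty());
    assert!(m.capacity() >= cap_before);
}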

View File

@ -1,642 +0,0 @@
//! Rayon extensions for `HashSet`.
use core::hash::{BuildHasher, Hash};
use hash_set::HashSet;
use rayon::iter::plumbing::UnindexedConsumer;
use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelExtend, ParallelIterator};
/// Parallel iterator over elements of a consumed set.
///
/// This iterator is created by the [`into_par_iter`] method on [`HashSet`]
/// (provided by the [`IntoParallelIterator`] trait).
/// See its documentation for more.
///
/// [`into_par_iter`]: /hashbrown/struct.HashSet.html#method.into_par_iter
/// [`HashSet`]: /hashbrown/struct.HashSet.html
/// [`IntoParallelIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelIterator.html
pub struct IntoParIter<T, S> {
set: HashSet<T, S>,
}
impl<T: Send, S: Send> ParallelIterator for IntoParIter<T, S> {
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.set
.map
.into_par_iter()
.map(|(k, _)| k)
.drive_unindexed(consumer)
}
}
/// Parallel draining iterator over entries of a set.
///
/// This iterator is created by the [`par_drain`] method on [`HashSet`].
/// See its documentation for more.
///
/// [`par_drain`]: /hashbrown/struct.HashSet.html#method.par_drain
/// [`HashSet`]: /hashbrown/struct.HashSet.html
pub struct ParDrain<'a, T, S> {
set: &'a mut HashSet<T, S>,
}
impl<'a, T: Send, S: Send> ParallelIterator for ParDrain<'a, T, S> {
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.set
.map
.par_drain()
.map(|(k, _)| k)
.drive_unindexed(consumer)
}
}
/// Parallel iterator over shared references to elements in a set.
///
/// This iterator is created by the [`par_iter`] method on [`HashSet`]
/// (provided by the [`IntoParallelRefIterator`] trait).
/// See its documentation for more.
///
/// [`par_iter`]: /hashbrown/struct.HashSet.html#method.par_iter
/// [`HashSet`]: /hashbrown/struct.HashSet.html
/// [`IntoParallelRefIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelRefIterator.html
pub struct ParIter<'a, T: 'a, S: 'a> {
set: &'a HashSet<T, S>,
}
impl<'a, T: Sync, S: Sync> ParallelIterator for ParIter<'a, T, S> {
type Item = &'a T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.set.map.par_keys().drive_unindexed(consumer)
}
}
/// Parallel iterator over shared references to elements in the difference of
/// sets.
///
/// This iterator is created by the [`par_difference`] method on [`HashSet`].
/// See its documentation for more.
///
/// [`par_difference`]: /hashbrown/struct.HashSet.html#method.par_difference
/// [`HashSet`]: /hashbrown/struct.HashSet.html
pub struct ParDifference<'a, T: 'a, S: 'a> {
a: &'a HashSet<T, S>,
b: &'a HashSet<T, S>,
}
impl<'a, T, S> ParallelIterator for ParDifference<'a, T, S>
where
T: Eq + Hash + Sync,
S: BuildHasher + Sync,
{
type Item = &'a T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.a
.into_par_iter()
.filter(|&x| !self.b.contains(x))
.drive_unindexed(consumer)
}
}
/// Parallel iterator over shared references to elements in the symmetric
/// difference of sets.
///
/// This iterator is created by the [`par_symmetric_difference`] method on
/// [`HashSet`].
/// See its documentation for more.
///
/// [`par_symmetric_difference`]: /hashbrown/struct.HashSet.html#method.par_symmetric_difference
/// [`HashSet`]: /hashbrown/struct.HashSet.html
pub struct ParSymmetricDifference<'a, T: 'a, S: 'a> {
a: &'a HashSet<T, S>,
b: &'a HashSet<T, S>,
}
impl<'a, T, S> ParallelIterator for ParSymmetricDifference<'a, T, S>
where
T: Eq + Hash + Sync,
S: BuildHasher + Sync,
{
type Item = &'a T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.a
.par_difference(self.b)
.chain(self.b.par_difference(self.a))
.drive_unindexed(consumer)
}
}
/// Parallel iterator over shared references to elements in the intersection of
/// sets.
///
/// This iterator is created by the [`par_intersection`] method on [`HashSet`].
/// See its documentation for more.
///
/// [`par_intersection`]: /hashbrown/struct.HashSet.html#method.par_intersection
/// [`HashSet`]: /hashbrown/struct.HashSet.html
pub struct ParIntersection<'a, T: 'a, S: 'a> {
a: &'a HashSet<T, S>,
b: &'a HashSet<T, S>,
}
impl<'a, T, S> ParallelIterator for ParIntersection<'a, T, S>
where
T: Eq + Hash + Sync,
S: BuildHasher + Sync,
{
type Item = &'a T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.a
.into_par_iter()
.filter(|&x| self.b.contains(x))
.drive_unindexed(consumer)
}
}
/// Parallel iterator over shared references to elements in the union of sets.
///
/// This iterator is created by the [`par_union`] method on [`HashSet`].
/// See its documentation for more.
///
/// [`par_union`]: /hashbrown/struct.HashSet.html#method.par_union
/// [`HashSet`]: /hashbrown/struct.HashSet.html
pub struct ParUnion<'a, T: 'a, S: 'a> {
a: &'a HashSet<T, S>,
b: &'a HashSet<T, S>,
}
impl<'a, T, S> ParallelIterator for ParUnion<'a, T, S>
where
T: Eq + Hash + Sync,
S: BuildHasher + Sync,
{
type Item = &'a T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.a
.into_par_iter()
.chain(self.b.par_difference(self.a))
.drive_unindexed(consumer)
}
}
impl<T, S> HashSet<T, S>
where
T: Eq + Hash + Sync,
S: BuildHasher + Sync,
{
/// Visits (potentially in parallel) the values representing the difference,
/// i.e. the values that are in `self` but not in `other`.
#[inline]
pub fn par_difference<'a>(&'a self, other: &'a Self) -> ParDifference<'a, T, S> {
ParDifference { a: self, b: other }
}
/// Visits (potentially in parallel) the values representing the symmetric
/// difference, i.e. the values that are in `self` or in `other` but not in both.
#[inline]
pub fn par_symmetric_difference<'a>(
&'a self,
other: &'a Self,
) -> ParSymmetricDifference<'a, T, S> {
ParSymmetricDifference { a: self, b: other }
}
/// Visits (potentially in parallel) the values representing the
/// intersection, i.e. the values that are both in `self` and `other`.
#[inline]
pub fn par_intersection<'a>(&'a self, other: &'a Self) -> ParIntersection<'a, T, S> {
ParIntersection { a: self, b: other }
}
/// Visits (potentially in parallel) the values representing the union,
/// i.e. all the values in `self` or `other`, without duplicates.
#[inline]
pub fn par_union<'a>(&'a self, other: &'a Self) -> ParUnion<'a, T, S> {
ParUnion { a: self, b: other }
}
/// Returns `true` if `self` has no elements in common with `other`.
/// This is equivalent to checking for an empty intersection.
///
/// This method runs in a potentially parallel fashion.
pub fn par_is_disjoint(&self, other: &Self) -> bool {
self.into_par_iter().all(|x| !other.contains(x))
}
/// Returns `true` if the set is a subset of another,
/// i.e. `other` contains at least all the values in `self`.
///
/// This method runs in a potentially parallel fashion.
pub fn par_is_subset(&self, other: &Self) -> bool {
self.into_par_iter().all(|x| other.contains(x))
}
/// Returns `true` if the set is a superset of another,
/// i.e. `self` contains at least all the values in `other`.
///
/// This method runs in a potentially parallel fashion.
pub fn par_is_superset(&self, other: &Self) -> bool {
other.par_is_subset(self)
}
/// Returns `true` if the set is equal to another,
/// i.e. both sets contain the same values.
///
/// This method runs in a potentially parallel fashion.
pub fn par_eq(&self, other: &Self) -> bool {
self.len() == other.len() && self.par_is_subset(other)
}
}
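A hedged usage sketch of the combinators above, again assuming the `rayon` feature; note that `par_union` chains `a` with `b - a`, so each value is produced exactly once.

extern crate hashbrown;
extern crate rayon;

use hashbrown::HashSet;
use rayon::prelude::*;

fn main() {
    let a: HashSet<i32> = [1, 2, 3, 4].iter().cloned().collect();
    let b: HashSet<i32> = [3, 4, 5].iter().cloned().collect();
    // Difference: values in `a` but not in `b`.
    let d: HashSet<i32> = a.par_difference(&b).cloned().collect();
    assert_eq!(d, [1, 2].iter().cloned().collect::<HashSet<i32>>());
    // Union is `a` chained with `b - a`, so no value appears twice.
    let u: HashSet<i32> = a.par_union(&b).cloned().collect();
    assert_eq!(u.len(), 5);
    // The predicate helpers run in parallel as well.
    assert!(!a.par_is_disjoint(&b));
    assert!(d.par_is_subset(&a));
}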
impl<T, S> HashSet<T, S>
where
T: Eq + Hash + Send,
S: BuildHasher + Send,
{
/// Consumes (potentially in parallel) all values in an arbitrary order,
/// while preserving the set's allocated memory for reuse.
#[inline]
pub fn par_drain(&mut self) -> ParDrain<T, S> {
ParDrain { set: self }
}
}
impl<T: Send, S: Send> IntoParallelIterator for HashSet<T, S> {
type Item = T;
type Iter = IntoParIter<T, S>;
#[inline]
fn into_par_iter(self) -> Self::Iter {
IntoParIter { set: self }
}
}
impl<'a, T: Sync, S: Sync> IntoParallelIterator for &'a HashSet<T, S> {
type Item = &'a T;
type Iter = ParIter<'a, T, S>;
#[inline]
fn into_par_iter(self) -> Self::Iter {
ParIter { set: self }
}
}
/// Collect values from a parallel iterator into a hashset.
impl<T, S> FromParallelIterator<T> for HashSet<T, S>
where
T: Eq + Hash + Send,
S: BuildHasher + Default,
{
fn from_par_iter<P>(par_iter: P) -> Self
where
P: IntoParallelIterator<Item = T>,
{
let mut set = HashSet::default();
set.par_extend(par_iter);
set
}
}
/// Extend a hash set with items from a parallel iterator.
impl<T, S> ParallelExtend<T> for HashSet<T, S>
where
T: Eq + Hash + Send,
S: BuildHasher,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = T>,
{
extend(self, par_iter);
}
}
/// Extend a hash set with copied items from a parallel iterator.
impl<'a, T, S> ParallelExtend<&'a T> for HashSet<T, S>
where
T: 'a + Copy + Eq + Hash + Sync,
S: BuildHasher,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = &'a T>,
{
extend(self, par_iter);
}
}
// This is equal to the normal `HashSet` extend -- no custom parallel advantage.
fn extend<T, S, I>(set: &mut HashSet<T, S>, par_iter: I)
where
T: Eq + Hash,
S: BuildHasher,
I: IntoParallelIterator,
HashSet<T, S>: Extend<I::Item>,
{
let (list, len) = super::helpers::collect(par_iter);
// Values may already be present or may appear multiple times in the
// iterator. Reserve the entire length if the set is empty; otherwise
// reserve half the length (rounded up), so the set resizes at most
// twice in the worst case.
let reserve = if set.is_empty() { len } else { (len + 1) / 2 };
set.reserve(reserve);
for vec in list {
set.extend(vec);
}
}
#[cfg(test)]
mod test_par_set {
use alloc::vec::Vec;
use core::sync::atomic::{AtomicUsize, Ordering};
use rayon::prelude::*;
use hash_set::HashSet;
#[test]
fn test_disjoint() {
let mut xs = HashSet::new();
let mut ys = HashSet::new();
assert!(xs.par_is_disjoint(&ys));
assert!(ys.par_is_disjoint(&xs));
assert!(xs.insert(5));
assert!(ys.insert(11));
assert!(xs.par_is_disjoint(&ys));
assert!(ys.par_is_disjoint(&xs));
assert!(xs.insert(7));
assert!(xs.insert(19));
assert!(xs.insert(4));
assert!(ys.insert(2));
assert!(ys.insert(-11));
assert!(xs.par_is_disjoint(&ys));
assert!(ys.par_is_disjoint(&xs));
assert!(ys.insert(7));
assert!(!xs.par_is_disjoint(&ys));
assert!(!ys.par_is_disjoint(&xs));
}
#[test]
fn test_subset_and_superset() {
let mut a = HashSet::new();
assert!(a.insert(0));
assert!(a.insert(5));
assert!(a.insert(11));
assert!(a.insert(7));
let mut b = HashSet::new();
assert!(b.insert(0));
assert!(b.insert(7));
assert!(b.insert(19));
assert!(b.insert(250));
assert!(b.insert(11));
assert!(b.insert(200));
assert!(!a.par_is_subset(&b));
assert!(!a.par_is_superset(&b));
assert!(!b.par_is_subset(&a));
assert!(!b.par_is_superset(&a));
assert!(b.insert(5));
assert!(a.par_is_subset(&b));
assert!(!a.par_is_superset(&b));
assert!(!b.par_is_subset(&a));
assert!(b.par_is_superset(&a));
}
#[test]
fn test_iterate() {
let mut a = HashSet::new();
for i in 0..32 {
assert!(a.insert(i));
}
let observed = AtomicUsize::new(0);
a.par_iter().for_each(|k| {
observed.fetch_or(1 << *k, Ordering::Relaxed);
});
assert_eq!(observed.into_inner(), 0xFFFF_FFFF);
}
#[test]
fn test_intersection() {
let mut a = HashSet::new();
let mut b = HashSet::new();
assert!(a.insert(11));
assert!(a.insert(1));
assert!(a.insert(3));
assert!(a.insert(77));
assert!(a.insert(103));
assert!(a.insert(5));
assert!(a.insert(-5));
assert!(b.insert(2));
assert!(b.insert(11));
assert!(b.insert(77));
assert!(b.insert(-9));
assert!(b.insert(-42));
assert!(b.insert(5));
assert!(b.insert(3));
let expected = [3, 5, 11, 77];
let i = a
.par_intersection(&b)
.map(|x| {
assert!(expected.contains(x));
1
})
.sum::<usize>();
assert_eq!(i, expected.len());
}
#[test]
fn test_difference() {
let mut a = HashSet::new();
let mut b = HashSet::new();
assert!(a.insert(1));
assert!(a.insert(3));
assert!(a.insert(5));
assert!(a.insert(9));
assert!(a.insert(11));
assert!(b.insert(3));
assert!(b.insert(9));
let expected = [1, 5, 11];
let i = a
.par_difference(&b)
.map(|x| {
assert!(expected.contains(x));
1
})
.sum::<usize>();
assert_eq!(i, expected.len());
}
#[test]
fn test_symmetric_difference() {
let mut a = HashSet::new();
let mut b = HashSet::new();
assert!(a.insert(1));
assert!(a.insert(3));
assert!(a.insert(5));
assert!(a.insert(9));
assert!(a.insert(11));
assert!(b.insert(-2));
assert!(b.insert(3));
assert!(b.insert(9));
assert!(b.insert(14));
assert!(b.insert(22));
let expected = [-2, 1, 5, 11, 14, 22];
let i = a
.par_symmetric_difference(&b)
.map(|x| {
assert!(expected.contains(x));
1
})
.sum::<usize>();
assert_eq!(i, expected.len());
}
#[test]
fn test_union() {
let mut a = HashSet::new();
let mut b = HashSet::new();
assert!(a.insert(1));
assert!(a.insert(3));
assert!(a.insert(5));
assert!(a.insert(9));
assert!(a.insert(11));
assert!(a.insert(16));
assert!(a.insert(19));
assert!(a.insert(24));
assert!(b.insert(-2));
assert!(b.insert(1));
assert!(b.insert(5));
assert!(b.insert(9));
assert!(b.insert(13));
assert!(b.insert(19));
let expected = [-2, 1, 3, 5, 9, 11, 13, 16, 19, 24];
let i = a
.par_union(&b)
.map(|x| {
assert!(expected.contains(x));
1
})
.sum::<usize>();
assert_eq!(i, expected.len());
}
#[test]
fn test_from_iter() {
let xs = [1, 2, 3, 4, 5, 6, 7, 8, 9];
let set: HashSet<_> = xs.par_iter().cloned().collect();
for x in &xs {
assert!(set.contains(x));
}
}
#[test]
fn test_move_iter() {
let hs = {
let mut hs = HashSet::new();
hs.insert('a');
hs.insert('b');
hs
};
let v = hs.into_par_iter().collect::<Vec<char>>();
assert!(v == ['a', 'b'] || v == ['b', 'a']);
}
#[test]
fn test_eq() {
// These constants once happened to expose a bug in insert().
// I'm keeping them around to prevent a regression.
let mut s1 = HashSet::new();
s1.insert(1);
s1.insert(2);
s1.insert(3);
let mut s2 = HashSet::new();
s2.insert(1);
s2.insert(2);
assert!(!s1.par_eq(&s2));
s2.insert(3);
assert!(s1.par_eq(&s2));
}
#[test]
fn test_extend_ref() {
let mut a = HashSet::new();
a.insert(1);
a.par_extend(&[2, 3, 4][..]);
assert_eq!(a.len(), 4);
assert!(a.contains(&1));
assert!(a.contains(&2));
assert!(a.contains(&3));
assert!(a.contains(&4));
let mut b = HashSet::new();
b.insert(5);
b.insert(6);
a.par_extend(&b);
assert_eq!(a.len(), 6);
assert!(a.contains(&1));
assert!(a.contains(&2));
assert!(a.contains(&3));
assert!(a.contains(&4));
assert!(a.contains(&5));
assert!(a.contains(&6));
}
}

View File

@ -1,200 +0,0 @@
mod size_hint {
use core::cmp;
/// This presumably exists to prevent denial of service attacks.
///
/// Original discussion: https://github.com/serde-rs/serde/issues/1114.
#[inline]
pub(super) fn cautious(hint: Option<usize>) -> usize {
cmp::min(hint.unwrap_or(0), 4096)
}
}
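To see why the cap matters, consider a hand-written `Deserializer` that reports a huge `size_hint` for a tiny payload; without the clamp, `with_capacity_and_hasher` below would allocate for the claimed length before a single element is read. A minimal sketch of the helper's behavior (an illustrative copy, not the crate's export):

use std::cmp;

fn cautious(hint: Option<usize>) -> usize {
    cmp::min(hint.unwrap_or(0), 4096)
}

fn main() {
    assert_eq!(cautious(None), 0); // unknown length: start small
    assert_eq!(cautious(Some(12)), 12); // plausible hint: honored
    assert_eq!(cautious(Some(std::usize::MAX)), 4096); // hostile hint: clamped
}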
mod map {
use core::fmt;
use core::hash::{BuildHasher, Hash};
use core::marker::PhantomData;
use serde::de::{Deserialize, Deserializer, MapAccess, Visitor};
use serde::ser::{Serialize, Serializer};
use hash_map::HashMap;
use super::size_hint;
impl<K, V, H> Serialize for HashMap<K, V, H>
where
K: Serialize + Eq + Hash,
V: Serialize,
H: BuildHasher,
{
#[inline]
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.collect_map(self)
}
}
impl<'de, K, V, S> Deserialize<'de> for HashMap<K, V, S>
where
K: Deserialize<'de> + Eq + Hash,
V: Deserialize<'de>,
S: BuildHasher + Default,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct MapVisitor<K, V, S> {
marker: PhantomData<HashMap<K, V, S>>,
}
impl<'de, K, V, S> Visitor<'de> for MapVisitor<K, V, S>
where
K: Deserialize<'de> + Eq + Hash,
V: Deserialize<'de>,
S: BuildHasher + Default,
{
type Value = HashMap<K, V, S>;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("a map")
}
#[inline]
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
where
A: MapAccess<'de>,
{
let mut values = HashMap::with_capacity_and_hasher(
size_hint::cautious(map.size_hint()),
S::default(),
);
while let Some((key, value)) = map.next_entry()? {
values.insert(key, value);
}
Ok(values)
}
}
let visitor = MapVisitor {
marker: PhantomData,
};
deserializer.deserialize_map(visitor)
}
}
}
mod set {
use core::fmt;
use core::hash::{BuildHasher, Hash};
use core::marker::PhantomData;
use serde::de::{Deserialize, Deserializer, SeqAccess, Visitor};
use serde::ser::{Serialize, Serializer};
use hash_set::HashSet;
use super::size_hint;
impl<T, H> Serialize for HashSet<T, H>
where
T: Serialize + Eq + Hash,
H: BuildHasher,
{
#[inline]
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.collect_seq(self)
}
}
impl<'de, T, S> Deserialize<'de> for HashSet<T, S>
where
T: Deserialize<'de> + Eq + Hash,
S: BuildHasher + Default,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct SeqVisitor<T, S> {
marker: PhantomData<HashSet<T, S>>,
}
impl<'de, T, S> Visitor<'de> for SeqVisitor<T, S>
where
T: Deserialize<'de> + Eq + Hash,
S: BuildHasher + Default,
{
type Value = HashSet<T, S>;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("a sequence")
}
#[inline]
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: SeqAccess<'de>,
{
let mut values = HashSet::with_capacity_and_hasher(
size_hint::cautious(seq.size_hint()),
S::default(),
);
while let Some(value) = seq.next_element()? {
values.insert(value);
}
Ok(values)
}
}
let visitor = SeqVisitor {
marker: PhantomData,
};
deserializer.deserialize_seq(visitor)
}
fn deserialize_in_place<D>(deserializer: D, place: &mut Self) -> Result<(), D::Error>
where
D: Deserializer<'de>,
{
struct SeqInPlaceVisitor<'a, T: 'a, S: 'a>(&'a mut HashSet<T, S>);
impl<'a, 'de, T, S> Visitor<'de> for SeqInPlaceVisitor<'a, T, S>
where
T: Deserialize<'de> + Eq + Hash,
S: BuildHasher + Default,
{
type Value = ();
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("a sequence")
}
#[inline]
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: SeqAccess<'de>,
{
self.0.clear();
self.0.reserve(size_hint::cautious(seq.size_hint()));
while let Some(value) = seq.next_element()? {
self.0.insert(value);
}
Ok(())
}
}
deserializer.deserialize_seq(SeqInPlaceVisitor(place))
}
}
}
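A round-trip sketch of the impls above; `serde_json` is assumed here purely for illustration (any serde format works), and hashbrown must be built with its `serde` feature.

extern crate hashbrown;
extern crate serde_json;

use hashbrown::HashMap;

fn main() {
    let mut m = HashMap::new();
    m.insert("a".to_string(), 1u32);
    m.insert("b".to_string(), 2u32);
    // Serialization goes through Serializer::collect_map above.
    let json = serde_json::to_string(&m).unwrap();
    // Deserialization rebuilds the map via MapVisitor::visit_map,
    // sizing the initial allocation with size_hint::cautious.
    let back: HashMap<String, u32> = serde_json::from_str(&json).unwrap();
    assert_eq!(m, back);
}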

View File

@ -1,112 +0,0 @@
//! Fast, non-cryptographic hash used by rustc and Firefox.
use core::default::Default;
use core::hash::{BuildHasherDefault, Hasher};
use core::mem::size_of;
use core::ops::BitXor;
use byteorder::{ByteOrder, NativeEndian};
/// Type alias for a `BuildHasher` using the `fx` hash algorithm.
pub type FxHashBuilder = BuildHasherDefault<FxHasher>;
/// A speedy hash algorithm for use within rustc. The hashmap in liballoc
/// by default uses SipHash, which isn't quite as speedy as we want. In the
/// compiler we're not really worried about DoS attempts, so we use a fast
/// non-cryptographic hash.
///
/// This is the same as the algorithm used by Firefox -- which is a homespun
/// one not based on any widely-known algorithm -- though modified to produce
/// 64-bit hash values instead of 32-bit hash values. It consistently
/// outperforms an FNV-based hash within rustc itself -- the collision rate is
/// similar or slightly worse than FNV, but the speed of the hash function
/// itself is much higher because it works on up to 8 bytes at a time.
pub struct FxHasher {
hash: usize,
}
#[cfg(target_pointer_width = "32")]
const K: usize = 0x9e3779b9;
#[cfg(target_pointer_width = "64")]
const K: usize = 0x517cc1b727220a95;
impl Default for FxHasher {
#[inline]
fn default() -> FxHasher {
FxHasher { hash: 0 }
}
}
impl FxHasher {
#[inline]
fn add_to_hash(&mut self, i: usize) {
self.hash = self.hash.rotate_left(5).bitxor(i).wrapping_mul(K);
}
}
impl Hasher for FxHasher {
#[inline]
fn write(&mut self, mut bytes: &[u8]) {
#[cfg(target_pointer_width = "32")]
let read_usize = |bytes| NativeEndian::read_u32(bytes);
#[cfg(target_pointer_width = "64")]
let read_usize = |bytes| NativeEndian::read_u64(bytes);
let mut hash = FxHasher { hash: self.hash };
assert!(size_of::<usize>() <= 8);
while bytes.len() >= size_of::<usize>() {
hash.add_to_hash(read_usize(bytes) as usize);
bytes = &bytes[size_of::<usize>()..];
}
if (size_of::<usize>() > 4) && (bytes.len() >= 4) {
hash.add_to_hash(NativeEndian::read_u32(bytes) as usize);
bytes = &bytes[4..];
}
if (size_of::<usize>() > 2) && bytes.len() >= 2 {
hash.add_to_hash(NativeEndian::read_u16(bytes) as usize);
bytes = &bytes[2..];
}
if (size_of::<usize>() > 1) && bytes.len() >= 1 {
hash.add_to_hash(bytes[0] as usize);
}
self.hash = hash.hash;
}
#[inline]
fn write_u8(&mut self, i: u8) {
self.add_to_hash(i as usize);
}
#[inline]
fn write_u16(&mut self, i: u16) {
self.add_to_hash(i as usize);
}
#[inline]
fn write_u32(&mut self, i: u32) {
self.add_to_hash(i as usize);
}
#[cfg(target_pointer_width = "32")]
#[inline]
fn write_u64(&mut self, i: u64) {
self.add_to_hash(i as usize);
self.add_to_hash((i >> 32) as usize);
}
#[cfg(target_pointer_width = "64")]
#[inline]
fn write_u64(&mut self, i: u64) {
self.add_to_hash(i as usize);
}
#[inline]
fn write_usize(&mut self, i: usize) {
self.add_to_hash(i);
}
#[inline]
fn finish(&self) -> u64 {
self.hash as u64
}
}
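A minimal sketch of the hasher in use; the names are the ones defined above and are assumed to be in scope (the module is crate-private, so this is illustrative rather than consumer-facing). On a 64-bit target, a 7-byte write exercises the tail handling: no full 8-byte word is available, so `write` takes one `u32`, one `u16`, and one `u8` step.

use core::hash::{BuildHasher, Hasher};

fn main() {
    let builder = FxHashBuilder::default();
    let mut h1 = builder.build_hasher();
    h1.write(b"abcdefg"); // consumed as 4 + 2 + 1 bytes on a 64-bit target
    let mut h2 = builder.build_hasher();
    h2.write(b"abcdefh"); // differs only in the final 1-byte step
    // Each add_to_hash step is invertible (rotate, xor, multiply by an
    // odd constant), so a differing final byte guarantees differing hashes.
    assert_ne!(h1.finish(), h2.finish());
}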

View File

@ -1,92 +0,0 @@
//! This crate is a Rust port of Google's high-performance [SwissTable] hash
//! map, adapted to make it a drop-in replacement for Rust's standard `HashMap`
//! and `HashSet` types.
//!
//! The original C++ version of SwissTable can be found [here], and this
//! [CppCon talk] gives an overview of how the algorithm works.
//!
//! [SwissTable]: https://abseil.io/blog/20180927-swisstables
//! [here]: https://github.com/abseil/abseil-cpp/blob/master/absl/container/internal/raw_hash_set.h
//! [CppCon talk]: https://www.youtube.com/watch?v=ncHmEUmJZf4
#![no_std]
#![cfg_attr(
feature = "nightly",
feature(
alloc,
alloc_layout_extra,
allocator_api,
ptr_offset_from,
test,
core_intrinsics,
dropck_eyepatch
)
)]
#![warn(missing_docs)]
#[cfg(test)]
#[macro_use]
extern crate std;
#[cfg(test)]
extern crate rand;
#[cfg(feature = "nightly")]
#[cfg_attr(test, macro_use)]
extern crate alloc;
extern crate byteorder;
#[cfg(feature = "rayon")]
extern crate rayon;
extern crate scopeguard;
#[cfg(feature = "serde")]
extern crate serde;
#[cfg(not(feature = "nightly"))]
#[cfg_attr(test, macro_use)]
extern crate std as alloc;
mod external_trait_impls;
mod fx;
mod map;
mod raw;
mod set;
pub mod hash_map {
//! A hash map implemented with quadratic probing and SIMD lookup.
pub use map::*;
#[cfg(feature = "rayon")]
/// [rayon]-based parallel iterator types for hash maps.
/// You will rarely need to interact with it directly unless you have need
/// to name one of the iterator types.
///
/// [rayon]: https://docs.rs/rayon/1.0/rayon
pub mod rayon {
pub use external_trait_impls::rayon::map::*;
}
}
pub mod hash_set {
//! A hash set implemented as a `HashMap` where the value is `()`.
pub use set::*;
#[cfg(feature = "rayon")]
/// [rayon]-based parallel iterator types for hash sets.
/// You will rarely need to interact with it directly unless you have need
/// to name one of the iterator types.
///
/// [rayon]: https://docs.rs/rayon/1.0/rayon
pub mod rayon {
pub use external_trait_impls::rayon::set::*;
}
}
pub use map::HashMap;
pub use set::HashSet;
/// Augments `AllocErr` with a CapacityOverflow variant.
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum CollectionAllocErr {
/// Error due to the computed capacity exceeding the collection's maximum
/// (usually `isize::MAX` bytes).
CapacityOverflow,
/// Error due to the allocator (see the `AllocErr` type's docs).
AllocErr,
}
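Since the crate bills itself as a drop-in replacement, a small before/after sketch: swapping the import path is the only change required for common usage.

// Previously: use std::collections::HashMap;
extern crate hashbrown;
use hashbrown::HashMap;

fn main() {
    let mut counts: HashMap<&str, u32> = HashMap::new();
    for word in "the quick brown fox jumps over the lazy dog the".split_whitespace() {
        *counts.entry(word).or_insert(0) += 1;
    }
    // The familiar std API surface (entry, indexing, iteration) is intact.
    assert_eq!(counts["the"], 3);
}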

File diff suppressed because it is too large

View File

@ -1,106 +0,0 @@
use super::imp::{BitMaskWord, BITMASK_MASK, BITMASK_STRIDE};
#[cfg(feature = "nightly")]
use core::intrinsics;
/// A bit mask which contains the result of a `Match` operation on a `Group` and
/// allows iterating through them.
///
/// The bit mask is arranged so that low-order bits represent lower memory
/// addresses for group match results.
///
/// For implementation reasons, the bits in the set may be sparsely packed, so
/// that there is only one bit-per-byte used (the high bit, 7). If this is the
/// case, `BITMASK_STRIDE` will be 8 to indicate a divide-by-8 should be
/// performed on counts/indices to normalize this difference. `BITMASK_MASK` is
/// similarly a mask of all the actually-used bits.
#[derive(Copy, Clone)]
pub struct BitMask(pub BitMaskWord);
impl BitMask {
/// Returns a new `BitMask` with all bits inverted.
#[inline]
#[must_use]
pub fn invert(self) -> BitMask {
BitMask(self.0 ^ BITMASK_MASK)
}
/// Returns a new `BitMask` with the lowest bit removed.
#[inline]
#[must_use]
pub fn remove_lowest_bit(self) -> BitMask {
BitMask(self.0 & (self.0 - 1))
}
/// Returns whether the `BitMask` has at least one set bit.
#[inline]
pub fn any_bit_set(self) -> bool {
self.0 != 0
}
/// Returns the first set bit in the `BitMask`, if there is one.
#[inline]
pub fn lowest_set_bit(self) -> Option<usize> {
if self.0 == 0 {
None
} else {
Some(unsafe { self.lowest_set_bit_nonzero() })
}
}
/// Returns the first set bit in the `BitMask`, if there is one. The
/// bitmask must not be empty.
#[inline]
#[cfg(feature = "nightly")]
pub unsafe fn lowest_set_bit_nonzero(self) -> usize {
intrinsics::cttz_nonzero(self.0) as usize / BITMASK_STRIDE
}
#[cfg(not(feature = "nightly"))]
pub unsafe fn lowest_set_bit_nonzero(self) -> usize {
self.trailing_zeros()
}
/// Returns the number of trailing zeroes in the `BitMask`.
#[inline]
pub fn trailing_zeros(self) -> usize {
// ARM doesn't have a trailing_zeros instruction, and instead uses
// reverse_bits (RBIT) + leading_zeros (CLZ). However, older ARM
// versions (pre-ARMv7) don't have RBIT and need to emulate it.
// Since we only have 1 bit set in each byte on ARM, we can use
// swap_bytes (REV) + leading_zeros instead.
if cfg!(target_arch = "arm") && BITMASK_STRIDE % 8 == 0 {
self.0.swap_bytes().leading_zeros() as usize / BITMASK_STRIDE
} else {
self.0.trailing_zeros() as usize / BITMASK_STRIDE
}
}
/// Returns the number of leading zeroes in the `BitMask`.
#[inline]
pub fn leading_zeros(self) -> usize {
self.0.leading_zeros() as usize / BITMASK_STRIDE
}
}
impl IntoIterator for BitMask {
type Item = usize;
type IntoIter = BitMaskIter;
#[inline]
fn into_iter(self) -> BitMaskIter {
BitMaskIter(self)
}
}
/// Iterator over the contents of a `BitMask`, returning the indices of set
/// bits.
pub struct BitMaskIter(BitMask);
impl Iterator for BitMaskIter {
type Item = usize;
#[inline]
fn next(&mut self) -> Option<usize> {
let bit = self.0.lowest_set_bit()?;
self.0 = self.0.remove_lowest_bit();
Some(bit)
}
}
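A worked example of the stride normalization described above, using the portable layout where only each byte's high bit carries information. The constant is an illustrative stand-in for the `imp` module's (`BITMASK_STRIDE` is 8 on the word-based implementation, 1 on SSE2).

fn main() {
    const BITMASK_STRIDE: usize = 8; // word-based implementation
    // Match result with the bytes at indices 0 and 2 set (high bits only).
    let mask: u64 = 0x0000_0000_0080_0080;
    // The raw trailing-zero count lands on the high bit of byte 0 ...
    assert_eq!(mask.trailing_zeros(), 7);
    // ... and dividing by the stride recovers the byte index.
    assert_eq!(mask.trailing_zeros() as usize / BITMASK_STRIDE, 0);
    // remove_lowest_bit, then the next index:
    let rest = mask & (mask - 1);
    assert_eq!(rest.trailing_zeros() as usize / BITMASK_STRIDE, 2);
}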

View File

@ -1,141 +0,0 @@
use super::bitmask::BitMask;
use super::EMPTY;
use core::{mem, ptr};
// Use the native word size as the group size. Using a 64-bit group size on
// a 32-bit architecture will just end up being more expensive because
// shifts and multiplies will need to be emulated.
#[cfg(any(
target_pointer_width = "64",
target_arch = "aarch64",
target_arch = "x86_64",
))]
type GroupWord = u64;
#[cfg(all(
target_pointer_width = "32",
not(target_arch = "aarch64"),
not(target_arch = "x86_64"),
))]
type GroupWord = u32;
pub type BitMaskWord = GroupWord;
pub const BITMASK_STRIDE: usize = 8;
// We only care about the highest bit of each byte for the mask.
pub const BITMASK_MASK: BitMaskWord = 0x8080_8080_8080_8080u64 as GroupWord;
/// Helper function to replicate a byte across a `GroupWord`.
#[inline]
fn repeat(byte: u8) -> GroupWord {
let repeat = byte as GroupWord;
let repeat = repeat | repeat.wrapping_shl(8);
let repeat = repeat | repeat.wrapping_shl(16);
// This last line is a no-op with a 32-bit GroupWord
repeat | repeat.wrapping_shl(32)
}
/// Abstraction over a group of control bytes which can be scanned in
/// parallel.
///
/// This implementation uses a word-sized integer.
#[derive(Copy, Clone)]
pub struct Group(GroupWord);
// We perform all operations in the native endianness, and convert to
// little-endian just before creating a BitMask. This can potentially
// enable the compiler to eliminate unnecessary byte swaps if we are
// only checking whether a BitMask is empty.
impl Group {
/// Number of bytes in the group.
pub const WIDTH: usize = mem::size_of::<Self>();
/// Returns a full group of empty bytes, suitable for use as the initial
/// value for an empty hash table.
///
/// This is guaranteed to be aligned to the group size.
#[inline]
pub fn static_empty() -> &'static [u8] {
union AlignedBytes {
_align: Group,
bytes: [u8; Group::WIDTH],
};
const ALIGNED_BYTES: AlignedBytes = AlignedBytes {
bytes: [EMPTY; Group::WIDTH],
};
unsafe { &ALIGNED_BYTES.bytes }
}
/// Loads a group of bytes starting at the given address.
#[inline]
pub unsafe fn load(ptr: *const u8) -> Group {
Group(ptr::read_unaligned(ptr as *const _))
}
/// Loads a group of bytes starting at the given address, which must be
/// aligned to `mem::align_of::<Group>()`.
#[inline]
pub unsafe fn load_aligned(ptr: *const u8) -> Group {
debug_assert_eq!(ptr as usize & (mem::align_of::<Group>() - 1), 0);
Group(ptr::read(ptr as *const _))
}
/// Stores the group of bytes to the given address, which must be
/// aligned to `mem::align_of::<Group>()`.
#[inline]
pub unsafe fn store_aligned(&self, ptr: *mut u8) {
debug_assert_eq!(ptr as usize & (mem::align_of::<Group>() - 1), 0);
ptr::write(ptr as *mut _, self.0);
}
/// Returns a `BitMask` indicating all bytes in the group which *may*
/// have the given value.
///
/// This function may return a false positive in certain cases where
/// the byte in the group differs from the searched value only in its
/// lowest bit. This is fine because:
/// - This never happens for `EMPTY` and `DELETED`, only full entries.
/// - The check for key equality will catch these.
/// - This only happens if there is at least 1 true match.
/// - The chance of this happening is very low (< 1% chance per byte).
#[inline]
pub fn match_byte(&self, byte: u8) -> BitMask {
// This algorithm is derived from
// http://graphics.stanford.edu/~seander/bithacks.html#ValueInWord
let cmp = self.0 ^ repeat(byte);
BitMask((cmp.wrapping_sub(repeat(0x01)) & !cmp & repeat(0x80)).to_le())
}
/// Returns a `BitMask` indicating all bytes in the group which are
/// `EMPTY`.
#[inline]
pub fn match_empty(&self) -> BitMask {
// If the high bit is set, then the byte must be either:
// 1111_1111 (EMPTY) or 1000_0000 (DELETED).
// So we can just check if the top two bits are 1 by ANDing them.
BitMask((self.0 & (self.0 << 1) & repeat(0x80)).to_le())
}
/// Returns a `BitMask` indicating all bytes in the group which are
/// `EMPTY` or `DELETED`.
#[inline]
pub fn match_empty_or_deleted(&self) -> BitMask {
// A byte is EMPTY or DELETED iff the high bit is set
BitMask((self.0 & repeat(0x80)).to_le())
}
/// Performs the following transformation on all bytes in the group:
/// - `EMPTY => EMPTY`
/// - `DELETED => EMPTY`
/// - `FULL => DELETED`
#[inline]
pub fn convert_special_to_empty_and_full_to_deleted(&self) -> Group {
// Map high_bit = 1 (EMPTY or DELETED) to 1111_1111
// and high_bit = 0 (FULL) to 1000_0000
//
// Here's this logic expanded to concrete values:
// let full = 1000_0000 (true) or 0000_0000 (false)
// !1000_0000 + 1 = 0111_1111 + 1 = 1000_0000 (no carry)
// !0000_0000 + 0 = 1111_1111 + 0 = 1111_1111 (no carry)
let full = !self.0 & repeat(0x80);
Group(!full + (full >> 7))
}
}
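The zero-byte trick inside `match_byte` is easiest to follow with concrete values. This hedged sketch reproduces it on a 4-byte word so every intermediate fits on one line; the real code additionally converts the result to little-endian before wrapping it in a `BitMask`.

fn main() {
    // repeat(byte) for a 32-bit word, mirroring the helper above.
    fn repeat(b: u8) -> u32 {
        let r = b as u32;
        let r = r | r.wrapping_shl(8);
        r | r.wrapping_shl(16)
    }
    // Control bytes [0x23, 0xff, 0x23, 0x80], stored little-endian.
    let group: u32 = 0x8023_ff23;
    let cmp = group ^ repeat(0x23); // a byte of cmp is zero where it matched
    let mask = cmp.wrapping_sub(repeat(0x01)) & !cmp & repeat(0x80);
    // The high bits of bytes 0 and 2 survive: the two matching positions.
    assert_eq!(mask, 0x0080_0080);
}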

File diff suppressed because it is too large

View File

@ -1,107 +0,0 @@
use super::bitmask::BitMask;
use super::EMPTY;
use core::mem;
#[cfg(target_arch = "x86")]
use core::arch::x86;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64 as x86;
pub type BitMaskWord = u16;
pub const BITMASK_STRIDE: usize = 1;
pub const BITMASK_MASK: BitMaskWord = 0xffff;
/// Abstraction over a group of control bytes which can be scanned in
/// parallel.
///
/// This implementation uses a 128-bit SSE value.
#[derive(Copy, Clone)]
pub struct Group(x86::__m128i);
impl Group {
/// Number of bytes in the group.
pub const WIDTH: usize = mem::size_of::<Self>();
/// Returns a full group of empty bytes, suitable for use as the initial
/// value for an empty hash table.
///
/// This is guaranteed to be aligned to the group size.
#[inline]
pub fn static_empty() -> &'static [u8] {
union AlignedBytes {
_align: Group,
bytes: [u8; Group::WIDTH],
};
const ALIGNED_BYTES: AlignedBytes = AlignedBytes {
bytes: [EMPTY; Group::WIDTH],
};
unsafe { &ALIGNED_BYTES.bytes }
}
/// Loads a group of bytes starting at the given address.
#[inline]
pub unsafe fn load(ptr: *const u8) -> Group {
Group(x86::_mm_loadu_si128(ptr as *const _))
}
/// Loads a group of bytes starting at the given address, which must be
/// aligned to `mem::align_of::<Group>()`.
#[inline]
pub unsafe fn load_aligned(ptr: *const u8) -> Group {
debug_assert_eq!(ptr as usize & (mem::align_of::<Group>() - 1), 0);
Group(x86::_mm_load_si128(ptr as *const _))
}
/// Stores the group of bytes to the given address, which must be
/// aligned to `mem::align_of::<Group>()`.
#[inline]
pub unsafe fn store_aligned(&self, ptr: *mut u8) {
debug_assert_eq!(ptr as usize & (mem::align_of::<Group>() - 1), 0);
x86::_mm_store_si128(ptr as *mut _, self.0);
}
/// Returns a `BitMask` indicating all bytes in the group which have
/// the given value.
#[inline]
pub fn match_byte(&self, byte: u8) -> BitMask {
unsafe {
let cmp = x86::_mm_cmpeq_epi8(self.0, x86::_mm_set1_epi8(byte as i8));
BitMask(x86::_mm_movemask_epi8(cmp) as u16)
}
}
/// Returns a `BitMask` indicating all bytes in the group which are
/// `EMPTY`.
#[inline]
pub fn match_empty(&self) -> BitMask {
self.match_byte(EMPTY)
}
/// Returns a `BitMask` indicating all bytes in the group which are
/// `EMPTY` or `DELETED`.
#[inline]
pub fn match_empty_or_deleted(&self) -> BitMask {
// A byte is EMPTY or DELETED iff the high bit is set
unsafe { BitMask(x86::_mm_movemask_epi8(self.0) as u16) }
}
/// Performs the following transformation on all bytes in the group:
/// - `EMPTY => EMPTY`
/// - `DELETED => EMPTY`
/// - `FULL => DELETED`
#[inline]
pub fn convert_special_to_empty_and_full_to_deleted(&self) -> Group {
// Map high_bit = 1 (EMPTY or DELETED) to 1111_1111
// and high_bit = 0 (FULL) to 1000_0000
//
// Here's this logic expanded to concrete values:
// let special = 0 > byte = 1111_1111 (true) or 0000_0000 (false)
// 1111_1111 | 1000_0000 = 1111_1111
// 0000_0000 | 1000_0000 = 1000_0000
unsafe {
let zero = x86::_mm_setzero_si128();
let special = x86::_mm_cmpgt_epi8(zero, self.0);
Group(x86::_mm_or_si128(special, x86::_mm_set1_epi8(0x80u8 as i8)))
}
}
}
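On the SSE2 path `BITMASK_STRIDE` is 1 because `_mm_movemask_epi8` already compacts one bit per byte. A standalone sketch of the same match (guarded to x86_64, where SSE2 is always available, so the unsafe intrinsic calls are sound):

#[cfg(target_arch = "x86_64")]
fn main() {
    use std::arch::x86_64::*;
    unsafe {
        // Sixteen control bytes with positions 0 and 5 equal to 0x23.
        let mut ctrl = [0xffu8; 16];
        ctrl[0] = 0x23;
        ctrl[5] = 0x23;
        let group = _mm_loadu_si128(ctrl.as_ptr() as *const __m128i);
        let cmp = _mm_cmpeq_epi8(group, _mm_set1_epi8(0x23));
        // movemask packs every byte's high bit into one result bit.
        let mask = _mm_movemask_epi8(cmp) as u16;
        assert_eq!(mask, (1 << 0) | (1 << 5));
        // With a stride of 1, trailing_zeros is directly the byte index.
        assert_eq!(mask.trailing_zeros(), 0);
    }
}

#[cfg(not(target_arch = "x86_64"))]
fn main() {}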

File diff suppressed because it is too large

View File

@ -1,535 +0,0 @@
#![cfg(feature = "rayon")]
extern crate hashbrown;
#[macro_use]
extern crate lazy_static;
extern crate rayon;
use hashbrown::{HashMap, HashSet};
use rayon::iter::{
IntoParallelIterator, IntoParallelRefIterator, IntoParallelRefMutIterator, ParallelExtend,
ParallelIterator,
};
macro_rules! assert_eq3 {
($e1:expr, $e2:expr, $e3:expr) => {{
assert_eq!($e1, $e2);
assert_eq!($e1, $e3);
assert_eq!($e2, $e3);
}};
}
lazy_static! {
static ref MAP_EMPTY: HashMap<char, u32> = HashMap::new();
static ref MAP: HashMap<char, u32> = {
let mut m = HashMap::new();
m.insert('b', 20);
m.insert('a', 10);
m.insert('c', 30);
m.insert('e', 50);
m.insert('f', 60);
m.insert('d', 40);
m
};
}
#[test]
fn map_seq_par_equivalence_iter_empty() {
let vec_seq = MAP_EMPTY.iter().collect::<Vec<_>>();
let vec_par = MAP_EMPTY.par_iter().collect::<Vec<_>>();
assert_eq3!(vec_seq, vec_par, []);
}
#[test]
fn map_seq_par_equivalence_iter() {
let mut vec_seq = MAP.iter().collect::<Vec<_>>();
let mut vec_par = MAP.par_iter().collect::<Vec<_>>();
assert_eq!(vec_seq, vec_par);
// Do not depend on the exact order of values
let expected_sorted = [
(&'a', &10),
(&'b', &20),
(&'c', &30),
(&'d', &40),
(&'e', &50),
(&'f', &60),
];
vec_seq.sort_unstable();
vec_par.sort_unstable();
assert_eq3!(vec_seq, vec_par, expected_sorted);
}
#[test]
fn map_seq_par_equivalence_keys_empty() {
let vec_seq = MAP_EMPTY.keys().collect::<Vec<&char>>();
let vec_par = MAP_EMPTY.par_keys().collect::<Vec<&char>>();
let expected: [&char; 0] = [];
assert_eq3!(vec_seq, vec_par, expected);
}
#[test]
fn map_seq_par_equivalence_keys() {
let mut vec_seq = MAP.keys().collect::<Vec<_>>();
let mut vec_par = MAP.par_keys().collect::<Vec<_>>();
assert_eq!(vec_seq, vec_par);
// Do not depend on the exact order of values
let expected_sorted = [&'a', &'b', &'c', &'d', &'e', &'f'];
vec_seq.sort_unstable();
vec_par.sort_unstable();
assert_eq3!(vec_seq, vec_par, expected_sorted);
}
#[test]
fn map_seq_par_equivalence_values_empty() {
let vec_seq = MAP_EMPTY.values().collect::<Vec<_>>();
let vec_par = MAP_EMPTY.par_values().collect::<Vec<_>>();
let expected: [&u32; 0] = [];
assert_eq3!(vec_seq, vec_par, expected);
}
#[test]
fn map_seq_par_equivalence_values() {
let mut vec_seq = MAP.values().collect::<Vec<_>>();
let mut vec_par = MAP.par_values().collect::<Vec<_>>();
assert_eq!(vec_seq, vec_par);
// Do not depend on the exact order of values
let expected_sorted = [&10, &20, &30, &40, &50, &60];
vec_seq.sort_unstable();
vec_par.sort_unstable();
assert_eq3!(vec_seq, vec_par, expected_sorted);
}
#[test]
fn map_seq_par_equivalence_iter_mut_empty() {
let mut map1 = MAP_EMPTY.clone();
let mut map2 = MAP_EMPTY.clone();
let vec_seq = map1.iter_mut().collect::<Vec<_>>();
let vec_par = map2.par_iter_mut().collect::<Vec<_>>();
assert_eq3!(vec_seq, vec_par, []);
}
#[test]
fn map_seq_par_equivalence_iter_mut() {
let mut map1 = MAP.clone();
let mut map2 = MAP.clone();
let mut vec_seq = map1.iter_mut().collect::<Vec<_>>();
let mut vec_par = map2.par_iter_mut().collect::<Vec<_>>();
assert_eq!(vec_seq, vec_par);
// Do not depend on the exact order of values
let expected_sorted = [
(&'a', &mut 10),
(&'b', &mut 20),
(&'c', &mut 30),
(&'d', &mut 40),
(&'e', &mut 50),
(&'f', &mut 60),
];
vec_seq.sort_unstable();
vec_par.sort_unstable();
assert_eq3!(vec_seq, vec_par, expected_sorted);
}
#[test]
fn map_seq_par_equivalence_values_mut_empty() {
let mut map1 = MAP_EMPTY.clone();
let mut map2 = MAP_EMPTY.clone();
let vec_seq = map1.values_mut().collect::<Vec<_>>();
let vec_par = map2.par_values_mut().collect::<Vec<_>>();
let expected: [&u32; 0] = [];
assert_eq3!(vec_seq, vec_par, expected);
}
#[test]
fn map_seq_par_equivalence_values_mut() {
let mut map1 = MAP.clone();
let mut map2 = MAP.clone();
let mut vec_seq = map1.values_mut().collect::<Vec<_>>();
let mut vec_par = map2.par_values_mut().collect::<Vec<_>>();
assert_eq!(vec_seq, vec_par);
// Do not depend on the exact order of values
let expected_sorted = [&mut 10, &mut 20, &mut 30, &mut 40, &mut 50, &mut 60];
vec_seq.sort_unstable();
vec_par.sort_unstable();
assert_eq3!(vec_seq, vec_par, expected_sorted);
}
#[test]
fn map_seq_par_equivalence_into_iter_empty() {
let vec_seq = MAP_EMPTY.clone().into_iter().collect::<Vec<_>>();
let vec_par = MAP_EMPTY.clone().into_par_iter().collect::<Vec<_>>();
assert_eq3!(vec_seq, vec_par, []);
}
#[test]
fn map_seq_par_equivalence_into_iter() {
let mut vec_seq = MAP.clone().into_iter().collect::<Vec<_>>();
let mut vec_par = MAP.clone().into_par_iter().collect::<Vec<_>>();
assert_eq!(vec_seq, vec_par);
// Do not depend on the exact order of values
let expected_sorted = [
('a', 10),
('b', 20),
('c', 30),
('d', 40),
('e', 50),
('f', 60),
];
vec_seq.sort_unstable();
vec_par.sort_unstable();
assert_eq3!(vec_seq, vec_par, expected_sorted);
}
lazy_static! {
static ref MAP_VEC_EMPTY: Vec<(char, u32)> = vec![];
static ref MAP_VEC: Vec<(char, u32)> = vec![
('b', 20),
('a', 10),
('c', 30),
('e', 50),
('f', 60),
('d', 40),
];
}
#[test]
fn map_seq_par_equivalence_collect_empty() {
let map_expected = MAP_EMPTY.clone();
let map_seq = MAP_VEC_EMPTY.clone().into_iter().collect::<HashMap<_, _>>();
let map_par = MAP_VEC_EMPTY
.clone()
.into_par_iter()
.collect::<HashMap<_, _>>();
assert_eq!(map_seq, map_par);
assert_eq!(map_seq, map_expected);
assert_eq!(map_par, map_expected);
}
#[test]
fn map_seq_par_equivalence_collect() {
let map_expected = MAP.clone();
let map_seq = MAP_VEC.clone().into_iter().collect::<HashMap<_, _>>();
let map_par = MAP_VEC.clone().into_par_iter().collect::<HashMap<_, _>>();
assert_eq!(map_seq, map_par);
assert_eq!(map_seq, map_expected);
assert_eq!(map_par, map_expected);
}
lazy_static! {
static ref MAP_EXISTING_EMPTY: HashMap<char, u32> = HashMap::new();
static ref MAP_EXISTING: HashMap<char, u32> = {
let mut m = HashMap::new();
m.insert('b', 20);
m.insert('a', 10);
m
};
static ref MAP_EXTENSION_EMPTY: Vec<(char, u32)> = vec![];
static ref MAP_EXTENSION: Vec<(char, u32)> = vec![('c', 30), ('e', 50), ('f', 60), ('d', 40),];
}
#[test]
fn map_seq_par_equivalence_existing_empty_extend_empty() {
let expected = HashMap::new();
let mut map_seq = MAP_EXISTING_EMPTY.clone();
let mut map_par = MAP_EXISTING_EMPTY.clone();
map_seq.extend(MAP_EXTENSION_EMPTY.iter().cloned());
map_par.par_extend(MAP_EXTENSION_EMPTY.par_iter().cloned());
assert_eq3!(map_seq, map_par, expected);
}
#[test]
fn map_seq_par_equivalence_existing_empty_extend() {
let expected = MAP_EXTENSION.iter().cloned().collect::<HashMap<_, _>>();
let mut map_seq = MAP_EXISTING_EMPTY.clone();
let mut map_par = MAP_EXISTING_EMPTY.clone();
map_seq.extend(MAP_EXTENSION.iter().cloned());
map_par.par_extend(MAP_EXTENSION.par_iter().cloned());
assert_eq3!(map_seq, map_par, expected);
}
#[test]
fn map_seq_par_equivalence_existing_extend_empty() {
let expected = MAP_EXISTING.clone();
let mut map_seq = MAP_EXISTING.clone();
let mut map_par = MAP_EXISTING.clone();
map_seq.extend(MAP_EXTENSION_EMPTY.iter().cloned());
map_par.par_extend(MAP_EXTENSION_EMPTY.par_iter().cloned());
assert_eq3!(map_seq, map_par, expected);
}
#[test]
fn map_seq_par_equivalence_existing_extend() {
let expected = MAP.clone();
let mut map_seq = MAP_EXISTING.clone();
let mut map_par = MAP_EXISTING.clone();
map_seq.extend(MAP_EXTENSION.iter().cloned());
map_par.par_extend(MAP_EXTENSION.par_iter().cloned());
assert_eq3!(map_seq, map_par, expected);
}
lazy_static! {
static ref SET_EMPTY: HashSet<char> = HashSet::new();
static ref SET: HashSet<char> = {
let mut s = HashSet::new();
s.insert('b');
s.insert('a');
s.insert('c');
s.insert('e');
s.insert('f');
s.insert('d');
s
};
}
#[test]
fn set_seq_par_equivalence_iter_empty() {
let vec_seq = SET_EMPTY.iter().collect::<Vec<_>>();
let vec_par = SET_EMPTY.par_iter().collect::<Vec<_>>();
let expected: [&char; 0] = [];
assert_eq3!(vec_seq, vec_par, expected);
}
#[test]
fn set_seq_par_equivalence_iter() {
let mut vec_seq = SET.iter().collect::<Vec<_>>();
let mut vec_par = SET.par_iter().collect::<Vec<_>>();
assert_eq!(vec_seq, vec_par);
// Do not depend on the exact order of values
let expected_sorted = [&'a', &'b', &'c', &'d', &'e', &'f'];
vec_seq.sort_unstable();
vec_par.sort_unstable();
assert_eq3!(vec_seq, vec_par, expected_sorted);
}
#[test]
fn set_seq_par_equivalence_into_iter_empty() {
let vec_seq = SET_EMPTY.clone().into_iter().collect::<Vec<_>>();
let vec_par = SET_EMPTY.clone().into_par_iter().collect::<Vec<_>>();
assert_eq3!(vec_seq, vec_par, []);
}
#[test]
fn set_seq_par_equivalence_into_iter() {
let mut vec_seq = SET.clone().into_iter().collect::<Vec<_>>();
let mut vec_par = SET.clone().into_par_iter().collect::<Vec<_>>();
assert_eq!(vec_seq, vec_par);
// Do not depend on the exact order of values
let expected_sorted = ['a', 'b', 'c', 'd', 'e', 'f'];
vec_seq.sort_unstable();
vec_par.sort_unstable();
assert_eq3!(vec_seq, vec_par, expected_sorted);
}
lazy_static! {
static ref SET_VEC_EMPTY: Vec<char> = vec![];
static ref SET_VEC: Vec<char> = vec!['b', 'a', 'c', 'e', 'f', 'd',];
}
#[test]
fn set_seq_par_equivalence_collect_empty() {
let set_expected = SET_EMPTY.clone();
let set_seq = SET_VEC_EMPTY.clone().into_iter().collect::<HashSet<_>>();
let set_par = SET_VEC_EMPTY
.clone()
.into_par_iter()
.collect::<HashSet<_>>();
assert_eq!(set_seq, set_par);
assert_eq!(set_seq, set_expected);
assert_eq!(set_par, set_expected);
}
#[test]
fn set_seq_par_equivalence_collect() {
let set_expected = SET.clone();
let set_seq = SET_VEC.clone().into_iter().collect::<HashSet<_>>();
let set_par = SET_VEC.clone().into_par_iter().collect::<HashSet<_>>();
assert_eq!(set_seq, set_par);
assert_eq!(set_seq, set_expected);
assert_eq!(set_par, set_expected);
}
lazy_static! {
static ref SET_EXISTING_EMPTY: HashSet<char> = HashSet::new();
static ref SET_EXISTING: HashSet<char> = {
let mut s = HashSet::new();
s.insert('b');
s.insert('a');
s
};
static ref SET_EXTENSION_EMPTY: Vec<char> = vec![];
static ref SET_EXTENSION: Vec<char> = vec!['c', 'e', 'f', 'd',];
}
#[test]
fn set_seq_par_equivalence_existing_empty_extend_empty() {
let expected = HashSet::new();
let mut set_seq = SET_EXISTING_EMPTY.clone();
let mut set_par = SET_EXISTING_EMPTY.clone();
set_seq.extend(SET_EXTENSION_EMPTY.iter().cloned());
set_par.par_extend(SET_EXTENSION_EMPTY.par_iter().cloned());
assert_eq3!(set_seq, set_par, expected);
}
#[test]
fn set_seq_par_equivalence_existing_empty_extend() {
let expected = SET_EXTENSION.iter().cloned().collect::<HashSet<_>>();
let mut set_seq = SET_EXISTING_EMPTY.clone();
let mut set_par = SET_EXISTING_EMPTY.clone();
set_seq.extend(SET_EXTENSION.iter().cloned());
set_par.par_extend(SET_EXTENSION.par_iter().cloned());
assert_eq3!(set_seq, set_par, expected);
}
#[test]
fn set_seq_par_equivalence_existing_extend_empty() {
let expected = SET_EXISTING.clone();
let mut set_seq = SET_EXISTING.clone();
let mut set_par = SET_EXISTING.clone();
set_seq.extend(SET_EXTENSION_EMPTY.iter().cloned());
set_par.par_extend(SET_EXTENSION_EMPTY.par_iter().cloned());
assert_eq3!(set_seq, set_par, expected);
}
#[test]
fn set_seq_par_equivalence_existing_extend() {
let expected = SET.clone();
let mut set_seq = SET_EXISTING.clone();
let mut set_par = SET_EXISTING.clone();
set_seq.extend(SET_EXTENSION.iter().cloned());
set_par.par_extend(SET_EXTENSION.par_iter().cloned());
assert_eq3!(set_seq, set_par, expected);
}
lazy_static! {
static ref SET_A: HashSet<char> = ['a', 'b', 'c', 'd'].iter().cloned().collect();
static ref SET_B: HashSet<char> = ['a', 'b', 'e', 'f'].iter().cloned().collect();
static ref SET_DIFF_AB: HashSet<char> = ['c', 'd'].iter().cloned().collect();
static ref SET_DIFF_BA: HashSet<char> = ['e', 'f'].iter().cloned().collect();
static ref SET_SYMM_DIFF_AB: HashSet<char> = ['c', 'd', 'e', 'f'].iter().cloned().collect();
static ref SET_INTERSECTION_AB: HashSet<char> = ['a', 'b'].iter().cloned().collect();
static ref SET_UNION_AB: HashSet<char> =
['a', 'b', 'c', 'd', 'e', 'f'].iter().cloned().collect();
}
#[test]
fn set_seq_par_equivalence_difference() {
let diff_ab_seq = SET_A.difference(&*SET_B).cloned().collect::<HashSet<_>>();
let diff_ab_par = SET_A
.par_difference(&*SET_B)
.cloned()
.collect::<HashSet<_>>();
assert_eq3!(diff_ab_seq, diff_ab_par, *SET_DIFF_AB);
let diff_ba_seq = SET_B.difference(&*SET_A).cloned().collect::<HashSet<_>>();
let diff_ba_par = SET_B
.par_difference(&*SET_A)
.cloned()
.collect::<HashSet<_>>();
assert_eq3!(diff_ba_seq, diff_ba_par, *SET_DIFF_BA);
}
#[test]
fn set_seq_par_equivalence_symmetric_difference() {
let symm_diff_ab_seq = SET_A
.symmetric_difference(&*SET_B)
.cloned()
.collect::<HashSet<_>>();
let symm_diff_ab_par = SET_A
.par_symmetric_difference(&*SET_B)
.cloned()
.collect::<HashSet<_>>();
assert_eq3!(symm_diff_ab_seq, symm_diff_ab_par, *SET_SYMM_DIFF_AB);
}
#[test]
fn set_seq_par_equivalence_intersection() {
let intersection_ab_seq = SET_A.intersection(&*SET_B).cloned().collect::<HashSet<_>>();
let intersection_ab_par = SET_A
.par_intersection(&*SET_B)
.cloned()
.collect::<HashSet<_>>();
assert_eq3!(
intersection_ab_seq,
intersection_ab_par,
*SET_INTERSECTION_AB
);
}
#[test]
fn set_seq_par_equivalence_union() {
let union_ab_seq = SET_A.union(&*SET_B).cloned().collect::<HashSet<_>>();
let union_ab_par = SET_A.par_union(&*SET_B).cloned().collect::<HashSet<_>>();
assert_eq3!(union_ab_seq, union_ab_par, *SET_UNION_AB);
}

View File

@ -1,62 +0,0 @@
#![cfg(feature = "serde")]
extern crate hashbrown;
extern crate serde_test;
use hashbrown::{HashMap, HashSet};
use serde_test::{assert_tokens, Token};
#[test]
fn map_serde_tokens_empty() {
let map = HashMap::<char, u32>::new();
assert_tokens(&map, &[Token::Map { len: Some(0) }, Token::MapEnd]);
}
#[test]
fn map_serde_tokens() {
let mut map = HashMap::new();
map.insert('b', 20);
map.insert('a', 10);
map.insert('c', 30);
assert_tokens(
&map,
&[
Token::Map { len: Some(3) },
Token::Char('a'),
Token::I32(10),
Token::Char('b'),
Token::I32(20),
Token::Char('c'),
Token::I32(30),
Token::MapEnd,
],
);
}
#[test]
fn set_serde_tokens_empty() {
let set = HashSet::<u32>::new();
assert_tokens(&set, &[Token::Seq { len: Some(0) }, Token::SeqEnd]);
}
#[test]
fn set_serde_tokens() {
let mut set = HashSet::new();
set.insert(20);
set.insert(10);
set.insert(30);
assert_tokens(
&set,
&[
Token::Seq { len: Some(3) },
Token::I32(20),
Token::I32(10),
Token::I32(30),
Token::SeqEnd,
],
);
}

View File

@ -1,32 +0,0 @@
extern crate hashbrown;
extern crate rand;
use hashbrown::HashSet;
use rand::{distributions::Alphanumeric, Rng, SeedableRng, XorShiftRng};
#[test]
fn test_hashset_insert_remove() {
let mut m: HashSet<Vec<char>> = HashSet::new();
//let num: u32 = 4096;
//let tx: Vec<Vec<u8>> = (0..num).map(|i| (i..(16 + i)).collect()).collect();
let seed: [u8; 16] = [
130, 220, 246, 217, 111, 124, 221, 189, 190, 234, 121, 93, 67, 95, 100, 43,
];
let mut rng: XorShiftRng = SeedableRng::from_seed(seed);
//let mut rng: XorShiftRng = XorShiftRng::new_unseeded();
let tx: Vec<Vec<char>> = (0..4096)
.map(|_| (rng.sample_iter(&Alphanumeric).take(32).collect()))
.collect();
for _ in 0..32 {
for i in 0..4096 {
assert_eq!(m.contains(&tx[i]), false);
assert_eq!(m.insert(tx[i].clone()), true);
}
for i in 0..4096 {
println!("removing {} {:?}", i, tx[i]);
assert_eq!(m.remove(&tx[i]), true);
}
}
}
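The fixed 16-byte seed above is what makes this stress test reproducible: re-seeding the generator replays the identical "random" workload, so a failure can be debugged deterministically. A small sketch of that property, assuming the same rand version as above (where XorShiftRng is re-exported at the crate root):

extern crate rand;

use rand::{distributions::Alphanumeric, Rng, SeedableRng, XorShiftRng};

fn main() {
    let seed = [42u8; 16];
    let mut rng_a: XorShiftRng = SeedableRng::from_seed(seed);
    let mut rng_b: XorShiftRng = SeedableRng::from_seed(seed);
    let a: String = rng_a.sample_iter(&Alphanumeric).take(8).collect();
    let b: String = rng_b.sample_iter(&Alphanumeric).take(8).collect();
    // Identical seeds yield identical streams.
    assert_eq!(a, b);
}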

View File

@@ -1 +1 @@
{"files":{"Cargo.toml":"62733232b8d4a9f167d51d248d9907c303cb806455a36a442917dc9774d254d3","LICENSE.txt":"0041560f5d419c30e1594567f3b7ac2bc078ff6a68f437e0348ba85d9cf99112","README.md":"2d1ccfee41e041a9d2f39c5337086593cb0da100ee2b316afcc3d51dbff060b1","appveyor.yml":"e4869d4badf63843e322477ccdd117e3e3b29f28dd76f2c5733156f6c30780ef","src/checker.rs":"3187d4f53a03430c548c213df5271f33f5238c29408be10e0d6cb6256c2bffb4","src/error.rs":"d579a2e37c3a1a39f229d882bf1975271100a379df98c991607815056c384610","src/finder.rs":"de85f10fa1e9df8c682c988e334dc1fb9e752741c89e7af35d76c08c8230e7bd","src/helper.rs":"1ea08b0c4675ac46ee71ef2225bdef8a6d055a9644d5d42ba94f0c8e9d2d3279","src/lib.rs":"9c336275288f113826d909ada1a8a83bd040499213bec1d34c526b1bfc368b60"},"package":"b57acb10231b9493c8472b20cb57317d0679a49e0bdbee44b3b803a6473af164"}
{"files":{"Cargo.toml":"72267e6b8f7e153fc8adb396837ffd1fd46bf3c5ec51908f830ac92327d812e5","LICENSE.txt":"0041560f5d419c30e1594567f3b7ac2bc078ff6a68f437e0348ba85d9cf99112","README.md":"b300f303f88ca776a8f5f38050ca7c25ce9cc866dcb4a69e414aa1d45c6bed14","src/lib.rs":"4ead66ddef1b6824a656ff1be692b74dc099aae944fbb677352ad2a0c78782d3"},"package":"4be6cfa54dab45266e98b5d7be2f8ce959ddd49abd141a05d52dce4b07f803bb"}

View File

@@ -12,20 +12,14 @@
[package]
name = "which"
version = "2.0.1"
authors = ["Harry Fei <tiziyuanfang@gmail.com>"]
version = "1.0.3"
authors = ["fangyuanziti <tiziyuanfang@gmail.com>"]
description = "A Rust equivalent of Unix command \"which\". Locate installed execuable in cross platforms."
documentation = "https://docs.rs/which/"
readme = "README.md"
keywords = ["which", "which-rs", "unix", "command"]
categories = ["os", "filesystem"]
license = "MIT"
repository = "https://github.com/harryfei/which-rs.git"
[dependencies.failure]
version = "0.1.1"
features = ["std"]
default-features = false
repository = "https://github.com/fangyuanziti/which-rs.git"
[dependencies.libc]
version = "0.2.10"
[dev-dependencies.tempdir]

View File

@@ -1,5 +1,4 @@
[![Travis Build Status](https://travis-ci.org/harryfei/which-rs.svg?branch=master)](https://travis-ci.org/harryfei/which-rs)
[![Appveyor Build status](https://ci.appveyor.com/api/projects/status/1y40b135iaixs9x6?svg=true)](https://ci.appveyor.com/project/HarryFei/which-rs)
[![Build Status](https://travis-ci.org/fangyuanziti/which-rs.svg?branch=master)](https://travis-ci.org/fangyuanziti/which-rs)
# which

View File

@@ -1,23 +0,0 @@
os: Visual Studio 2015
environment:
matrix:
- channel: stable
target: x86_64-pc-windows-msvc
- channel: stable
target: i686-pc-windows-msvc
- channel: stable
target: x86_64-pc-windows-gnu
- channel: stable
target: i686-pc-windows-gnu
install:
- appveyor DownloadFile https://win.rustup.rs/ -FileName rustup-init.exe
- rustup-init -yv --default-toolchain %channel% --default-host %target%
- set PATH=%PATH%;%USERPROFILE%\.cargo\bin
- rustc -vV
- cargo -vV
build: false
test_script:
- cargo test

View File

@@ -1,70 +0,0 @@
use std::fs;
#[cfg(unix)]
use std::ffi::CString;
#[cfg(unix)]
use std::os::unix::ffi::OsStrExt;
use std::path::Path;
#[cfg(unix)]
use libc;
use finder::Checker;
pub struct ExecutableChecker;
impl ExecutableChecker {
pub fn new() -> ExecutableChecker {
ExecutableChecker
}
}
impl Checker for ExecutableChecker {
#[cfg(unix)]
fn is_valid(&self, path: &Path) -> bool {
CString::new(path.as_os_str().as_bytes())
.and_then(|c| Ok(unsafe { libc::access(c.as_ptr(), libc::X_OK) == 0 }))
.unwrap_or(false)
}
#[cfg(windows)]
fn is_valid(&self, _path: &Path) -> bool {
true
}
}
pub struct ExistedChecker;
impl ExistedChecker {
pub fn new() -> ExistedChecker {
ExistedChecker
}
}
impl Checker for ExistedChecker {
fn is_valid(&self, path: &Path) -> bool {
fs::metadata(path)
.map(|metadata| metadata.is_file())
.unwrap_or(false)
}
}
pub struct CompositeChecker {
checkers: Vec<Box<Checker>>,
}
impl CompositeChecker {
pub fn new() -> CompositeChecker {
CompositeChecker {
checkers: Vec::new(),
}
}
pub fn add_checker(mut self, checker: Box<Checker>) -> CompositeChecker {
self.checkers.push(checker);
self
}
}
impl Checker for CompositeChecker {
fn is_valid(&self, path: &Path) -> bool {
self.checkers.iter().all(|checker| checker.is_valid(path))
}
}
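A short usage sketch against the types above, mirroring how the library entry points below compose them: a candidate path must both exist as a regular file and pass the access(2) executability test before it is accepted.

fn make_checker() -> CompositeChecker {
    CompositeChecker::new()
        .add_checker(Box::new(ExistedChecker::new()))
        .add_checker(Box::new(ExecutableChecker::new()))
}

fn demo() {
    // Hypothetical path, for illustration only.
    let ok = make_checker().is_valid(Path::new("/usr/bin/rustc"));
    println!("executable: {}", ok);
}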

View File

@@ -1,70 +0,0 @@
use failure::{Backtrace, Context, Fail};
use std;
use std::fmt::{self, Display};
#[derive(Debug)]
pub struct Error {
inner: Context<ErrorKind>,
}
// To suppress false positives from cargo-clippy
#[cfg_attr(feature = "cargo-clippy", allow(empty_line_after_outer_attr))]
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
pub enum ErrorKind {
BadAbsolutePath,
BadRelativePath,
CannotFindBinaryPath,
CannotGetCurrentDir,
}
impl Fail for ErrorKind {}
impl Display for ErrorKind {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let display = match *self {
ErrorKind::BadAbsolutePath => "Bad absolute path",
ErrorKind::BadRelativePath => "Bad relative path",
ErrorKind::CannotFindBinaryPath => "Cannot find binary path",
ErrorKind::CannotGetCurrentDir => "Cannot get current directory",
};
f.write_str(display)
}
}
impl Fail for Error {
fn cause(&self) -> Option<&Fail> {
self.inner.cause()
}
fn backtrace(&self) -> Option<&Backtrace> {
self.inner.backtrace()
}
}
impl Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
Display::fmt(&self.inner, f)
}
}
impl Error {
pub fn kind(&self) -> ErrorKind {
*self.inner.get_context()
}
}
impl From<ErrorKind> for Error {
fn from(kind: ErrorKind) -> Error {
Error {
inner: Context::new(kind),
}
}
}
impl From<Context<ErrorKind>> for Error {
fn from(inner: Context<ErrorKind>) -> Error {
Error { inner }
}
}
pub type Result<T> = std::result::Result<T, Error>;
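The shape here is the usual failure-crate Context pattern: ErrorKind is the cheap, matchable payload, Error wraps it with an optional backtrace, and From<ErrorKind> bridges the two. An in-crate sketch of how callers interact with it:

fn demo() {
    // From<ErrorKind> (defined above) does the wrapping.
    let err: Error = ErrorKind::CannotFindBinaryPath.into();
    match err.kind() {
        ErrorKind::CannotFindBinaryPath => println!("not found on any PATH entry"),
        other => println!("unexpected: {}", other),
    }
}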

View File

@@ -1,155 +0,0 @@
use error::*;
#[cfg(windows)]
use helper::has_executable_extension;
use std::env;
use std::ffi::OsStr;
#[cfg(windows)]
use std::ffi::OsString;
use std::iter;
use std::path::{Path, PathBuf};
pub trait Checker {
fn is_valid(&self, path: &Path) -> bool;
}
trait PathExt {
fn has_separator(&self) -> bool;
fn to_absolute<P>(self, cwd: P) -> PathBuf
where
P: AsRef<Path>;
}
impl PathExt for PathBuf {
fn has_separator(&self) -> bool {
self.components().count() > 1
}
fn to_absolute<P>(self, cwd: P) -> PathBuf
where
P: AsRef<Path>,
{
if self.is_absolute() {
self
} else {
let mut new_path = PathBuf::from(cwd.as_ref());
new_path.push(self);
new_path
}
}
}
pub struct Finder;
impl Finder {
pub fn new() -> Finder {
Finder
}
pub fn find<T, U, V>(
&self,
binary_name: T,
paths: Option<U>,
cwd: V,
binary_checker: &Checker,
) -> Result<PathBuf>
where
T: AsRef<OsStr>,
U: AsRef<OsStr>,
V: AsRef<Path>,
{
let path = PathBuf::from(&binary_name);
let binary_path_candidates: Box<dyn Iterator<Item = _>> = if path.has_separator() {
// Search the binary in cwd if the path has a path separator.
let candidates = Self::cwd_search_candidates(path, cwd).into_iter();
Box::new(candidates)
} else {
// Search the binary in PATHs (defined in the environment variable).
let p = paths.ok_or(ErrorKind::CannotFindBinaryPath)?;
let paths: Vec<_> = env::split_paths(&p).collect();
let candidates = Self::path_search_candidates(path, paths).into_iter();
Box::new(candidates)
};
for p in binary_path_candidates {
// find a valid binary
if binary_checker.is_valid(&p) {
return Ok(p);
}
}
// can't find any binary
return Err(ErrorKind::CannotFindBinaryPath.into());
}
fn cwd_search_candidates<C>(binary_name: PathBuf, cwd: C) -> impl IntoIterator<Item = PathBuf>
where
C: AsRef<Path>,
{
let path = binary_name.to_absolute(cwd);
Self::append_extension(iter::once(path))
}
fn path_search_candidates<P>(
binary_name: PathBuf,
paths: P,
) -> impl IntoIterator<Item = PathBuf>
where
P: IntoIterator<Item = PathBuf>,
{
let new_paths = paths.into_iter().map(move |p| p.join(binary_name.clone()));
Self::append_extension(new_paths)
}
#[cfg(unix)]
fn append_extension<P>(paths: P) -> impl IntoIterator<Item = PathBuf>
where
P: IntoIterator<Item = PathBuf>,
{
paths
}
#[cfg(windows)]
fn append_extension<P>(paths: P) -> impl IntoIterator<Item = PathBuf>
where
P: IntoIterator<Item = PathBuf>,
{
// Read the PATHEXT env variable and split it into a vector of Strings
let path_exts =
env::var_os("PATHEXT").unwrap_or(OsString::from(env::consts::EXE_EXTENSION));
let exe_extension_vec = env::split_paths(&path_exts)
.filter_map(|e| e.to_str().map(|e| e.to_owned()))
.collect::<Vec<_>>();
paths
.into_iter()
.flat_map(move |p| -> Box<dyn Iterator<Item = _>> {
// Check if the path already has an executable extension
if has_executable_extension(&p, &exe_extension_vec) {
Box::new(iter::once(p))
} else {
// Append the Windows executable extensions to the path.
// e.g. the path `c:/windows/bin` will expand to:
// c:/windows/bin.COM
// c:/windows/bin.EXE
// c:/windows/bin.CMD
// ...
let ps = exe_extension_vec.clone().into_iter().map(move |e| {
// Append the extension.
let mut p = p.clone().to_path_buf().into_os_string();
p.push(e);
PathBuf::from(p)
});
Box::new(ps)
}
})
}
}
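Driving the finder directly looks like the which()/which_in() entry points further below: hand it the binary name, the PATH variable, a working directory for relative lookups, and a checker. A hedged in-crate sketch (find_rustc is a hypothetical helper, not part of the crate):

fn find_rustc(checker: &Checker) -> Result<PathBuf> {
    // Map the io::Error onto the crate's ErrorKind via the From impl above.
    let cwd = env::current_dir().map_err(|_| Error::from(ErrorKind::CannotGetCurrentDir))?;
    Finder::new().find("rustc", env::var_os("PATH"), cwd, checker)
}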

View File

@@ -1,40 +0,0 @@
use std::path::Path;
/// Check if the given path has an extension that is in the given vector.
pub fn has_executable_extension<T: AsRef<Path>, S: AsRef<str>>(path: T, exts_vec: &Vec<S>) -> bool {
let ext = path.as_ref().extension().and_then(|e| e.to_str());
match ext {
Some(ext) => exts_vec
.iter()
.any(|e| ext.eq_ignore_ascii_case(&e.as_ref()[1..])),
_ => false,
}
}
#[cfg(test)]
mod test {
use super::*;
use std::path::PathBuf;
#[test]
fn test_extension_in_extension_vector() {
// Case insensitive
assert!(has_executable_extension(
PathBuf::from("foo.exe"),
&vec![".COM", ".EXE", ".CMD"]
));
assert!(has_executable_extension(
PathBuf::from("foo.CMD"),
&vec![".COM", ".EXE", ".CMD"]
));
}
#[test]
fn test_extension_not_in_extension_vector() {
assert!(!has_executable_extension(
PathBuf::from("foo.bar"),
&vec![".COM", ".EXE", ".CMD"]
));
}
}
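Callers pass this helper the PATHEXT entries verbatim, leading dot included, which is why the comparison strips the first byte of each entry (&e.as_ref()[1..]) before the case-insensitive match. A tiny usage sketch:

fn demo() {
    let exts = vec![".COM".to_string(), ".EXE".to_string(), ".CMD".to_string()];
    assert!(has_executable_extension("foo.exe", &exts));
    assert!(!has_executable_extension("foo.txt", &exts));
}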

View File

@@ -13,33 +13,39 @@
//!
//! ```
extern crate failure;
extern crate libc;
#[cfg(test)]
extern crate tempdir;
use failure::ResultExt;
mod checker;
mod error;
mod finder;
#[cfg(windows)]
mod helper;
use std::env;
use std::path::{Path, PathBuf};
// Removing `AsciiExt` would make `which-rs` fail to build on older versions of Rust.
// Please keep it here even though we don't need it on newer Rust versions (>=1.23).
#[allow(unused_imports)]
use std::ascii::AsciiExt;
use std::path::{Path,PathBuf};
use std::{env, fs};
#[cfg(unix)]
use std::ffi::CString;
use std::ffi::OsStr;
#[cfg(unix)]
use std::os::unix::ffi::OsStrExt;
/// Like `Path::with_extension`, but doesn't replace an existing extension.
fn ensure_exe_extension<T: AsRef<Path>>(path: T) -> PathBuf {
if env::consts::EXE_EXTENSION.is_empty() {
// Nothing to do.
path.as_ref().to_path_buf()
} else {
match path.as_ref().extension().and_then(|e| e.to_str()).map(|e| e.eq_ignore_ascii_case(env::consts::EXE_EXTENSION)) {
// Already has the right extension.
Some(true) => path.as_ref().to_path_buf(),
_ => {
// Append the extension.
let mut s = path.as_ref().to_path_buf().into_os_string();
s.push(".");
s.push(env::consts::EXE_EXTENSION);
PathBuf::from(s)
}
}
}
}
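Behavior sketch for this helper: on Unix, EXE_EXTENSION is empty and paths pass through untouched; on Windows ("exe"), the extension is appended unless a case-insensitive .exe is already present (so foo becomes foo.exe, foo.bar becomes foo.bar.exe, and foo.EXE stays as-is). Expressed as a platform-agnostic check:

fn demo() {
    let p = ensure_exe_extension("foo");
    if env::consts::EXE_EXTENSION.is_empty() {
        assert_eq!(p, PathBuf::from("foo"));     // Unix: unchanged
    } else {
        assert_eq!(p, PathBuf::from("foo.exe")); // Windows: extension added
    }
}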
use checker::CompositeChecker;
use checker::ExecutableChecker;
use checker::ExistedChecker;
pub use error::*;
use finder::Finder;
/// Find an executable binary's path by name.
///
@@ -62,18 +68,19 @@ use finder::Finder;
/// assert_eq!(result, PathBuf::from("/usr/bin/rustc"));
///
/// ```
pub fn which<T: AsRef<OsStr>>(binary_name: T) -> Result<PathBuf> {
let cwd = env::current_dir().context(ErrorKind::CannotGetCurrentDir)?;
which_in(binary_name, env::var_os("PATH"), &cwd)
pub fn which<T: AsRef<OsStr>>(binary_name: T)
-> Result<PathBuf, &'static str> {
env::current_dir()
.or_else(|_| Err("Couldn't get current directory"))
.and_then(|cwd| which_in(binary_name, env::var_os("PATH"), &cwd))
}
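A minimal caller sketch against this older API, where the error channel is a plain &'static str rather than the failure-based Error used in the newer version above:

fn main() {
    match which("rustc") {
        Ok(path) => println!("rustc is at {}", path.display()),
        Err(msg) => println!("lookup failed: {}", msg),
    }
}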
/// Find `binary_name` in the path list `paths`, using `cwd` to resolve relative paths.
pub fn which_in<T, U, V>(binary_name: T, paths: Option<U>, cwd: V) -> Result<PathBuf>
where
T: AsRef<OsStr>,
U: AsRef<OsStr>,
V: AsRef<Path>,
{
pub fn which_in<T, U, V>(binary_name: T, paths: Option<U>, cwd: V)
-> Result<PathBuf, &'static str>
where T: AsRef<OsStr>,
U: AsRef<OsStr>,
V: AsRef<Path> {
let binary_checker = CompositeChecker::new()
.add_checker(Box::new(ExistedChecker::new()))
.add_checker(Box::new(ExecutableChecker::new()));
@@ -83,15 +90,155 @@ where
finder.find(binary_name, paths, cwd, &binary_checker)
}
struct Finder;
impl Finder {
fn new() -> Finder {
Finder
}
fn find<T, U, V>(&self, binary_name: T, paths: Option<U>, cwd: V,
binary_checker: &Checker)
-> Result<PathBuf, &'static str>
where T: AsRef<OsStr>,
U: AsRef<OsStr>,
V: AsRef<Path> {
let path = ensure_exe_extension(binary_name.as_ref());
// Does it have a path separator?
if path.components().count() > 1 {
if path.is_absolute() {
if binary_checker.is_valid(&path) {
// Already fine.
Ok(path)
} else {
// Absolute path but it's not usable.
Err("Bad absolute path")
}
} else {
// Try to make it absolute.
let mut new_path = PathBuf::from(cwd.as_ref());
new_path.push(path);
let new_path = ensure_exe_extension(new_path);
if binary_checker.is_valid(&new_path) {
Ok(new_path)
} else {
// File doesn't exist or isn't executable.
Err("Bad relative path")
}
}
} else {
// No separator, look it up in `paths`.
paths.and_then(
|paths|
env::split_paths(paths.as_ref())
.map(|p| ensure_exe_extension(p.join(binary_name.as_ref())))
.skip_while(|p| !(binary_checker.is_valid(&p)))
.next())
.ok_or("Cannot find binary path")
}
}
}
trait Checker {
fn is_valid(&self, path: &Path) -> bool;
}
struct ExecutableChecker;
impl ExecutableChecker {
fn new() -> ExecutableChecker {
ExecutableChecker
}
}
impl Checker for ExecutableChecker {
#[cfg(unix)]
fn is_valid(&self, path: &Path) -> bool {
CString::new(path.as_os_str().as_bytes())
.and_then(|c| {
Ok(unsafe { libc::access(c.as_ptr(), libc::X_OK) == 0 })
})
.unwrap_or(false)
}
#[cfg(not(unix))]
fn is_valid(&self, _path: &Path) -> bool { true }
}
struct ExistedChecker;
impl ExistedChecker {
fn new() -> ExistedChecker {
ExistedChecker
}
}
impl Checker for ExistedChecker {
fn is_valid(&self, path: &Path) -> bool {
fs::metadata(path).map(|metadata|{
metadata.is_file()
}).unwrap_or(false)
}
}
struct CompositeChecker {
checkers: Vec<Box<Checker>>
}
impl CompositeChecker {
fn new() -> CompositeChecker {
CompositeChecker {
checkers: Vec::new()
}
}
fn add_checker(mut self, checker: Box<Checker>) -> CompositeChecker {
self.checkers.push(checker);
self
}
}
impl Checker for CompositeChecker {
fn is_valid(&self, path: &Path) -> bool {
self.checkers.iter()
.all(|checker| checker.is_valid(path))
}
}
#[test]
fn test_exe_extension() {
let expected = PathBuf::from("foo").with_extension(env::consts::EXE_EXTENSION);
assert_eq!(expected, ensure_exe_extension(PathBuf::from("foo")));
let p = expected.clone();
assert_eq!(expected, ensure_exe_extension(p));
}
#[test]
#[cfg(windows)]
fn test_exe_extension_existing_extension() {
assert_eq!(PathBuf::from("foo.bar.exe"),
ensure_exe_extension("foo.bar"));
}
#[test]
#[cfg(windows)]
fn test_exe_extension_existing_extension_uppercase() {
assert_eq!(PathBuf::from("foo.EXE"),
ensure_exe_extension("foo.EXE"));
}
#[cfg(test)]
mod test {
use super::*;
use std::env;
use std::ffi::{OsStr, OsString};
use std::ffi::{OsStr,OsString};
use std::fs;
use std::io;
use std::path::{Path, PathBuf};
use std::path::{Path,PathBuf};
use tempdir::TempDir;
struct TestFixture {
@@ -107,10 +254,10 @@ mod test {
const BIN_NAME: &'static str = "bin";
#[cfg(unix)]
fn mk_bin(dir: &Path, path: &str, extension: &str) -> io::Result<PathBuf> {
fn mk_bin(dir: &Path, path: &str) -> io::Result<PathBuf> {
use libc;
use std::os::unix::fs::OpenOptionsExt;
let bin = dir.join(path).with_extension(extension);
let bin = dir.join(path).with_extension(env::consts::EXE_EXTENSION);
fs::OpenOptions::new()
.write(true)
.create(true)
@@ -119,38 +266,28 @@ mod test {
.and_then(|_f| bin.canonicalize())
}
fn touch(dir: &Path, path: &str, extension: &str) -> io::Result<PathBuf> {
let b = dir.join(path).with_extension(extension);
fs::File::create(&b).and_then(|_f| b.canonicalize())
fn touch(dir: &Path, path: &str) -> io::Result<PathBuf> {
let b = dir.join(path).with_extension(env::consts::EXE_EXTENSION);
fs::File::create(&b)
.and_then(|_f| b.canonicalize())
}
#[cfg(windows)]
fn mk_bin(dir: &Path, path: &str, extension: &str) -> io::Result<PathBuf> {
touch(dir, path, extension)
#[cfg(not(unix))]
fn mk_bin(dir: &Path, path: &str) -> io::Result<PathBuf> {
touch(dir, path)
}
impl TestFixture {
// tmp/a/bin
// tmp/a/bin.exe
// tmp/a/bin.cmd
// tmp/b/bin
// tmp/b/bin.exe
// tmp/b/bin.cmd
// tmp/c/bin
// tmp/c/bin.exe
// tmp/c/bin.cmd
pub fn new() -> TestFixture {
let tempdir = TempDir::new("which_tests").unwrap();
let mut builder = fs::DirBuilder::new();
builder.recursive(true);
let mut paths = vec![];
let mut bins = vec![];
let mut paths = vec!();
let mut bins = vec!();
for d in SUBDIRS.iter() {
let p = tempdir.path().join(d);
builder.create(&p).unwrap();
bins.push(mk_bin(&p, &BIN_NAME, "").unwrap());
bins.push(mk_bin(&p, &BIN_NAME, "exe").unwrap());
bins.push(mk_bin(&p, &BIN_NAME, "cmd").unwrap());
bins.push(mk_bin(&p, &BIN_NAME).unwrap());
paths.push(p);
}
TestFixture {
@@ -161,16 +298,16 @@
}
#[allow(dead_code)]
pub fn touch(&self, path: &str, extension: &str) -> io::Result<PathBuf> {
touch(self.tempdir.path(), &path, &extension)
pub fn touch(&self, path: &str) -> io::Result<PathBuf> {
touch(self.tempdir.path(), &path)
}
pub fn mk_bin(&self, path: &str, extension: &str) -> io::Result<PathBuf> {
mk_bin(self.tempdir.path(), &path, &extension)
pub fn mk_bin(&self, path: &str) -> io::Result<PathBuf> {
mk_bin(self.tempdir.path(), &path)
}
}
fn _which<T: AsRef<OsStr>>(f: &TestFixture, path: T) -> Result<PathBuf> {
fn _which<T: AsRef<OsStr>>(f: &TestFixture, path: T) -> Result<PathBuf, &'static str> {
which_in(path, Some(f.paths.clone()), f.tempdir.path())
}
@@ -181,50 +318,27 @@
let result = which("rustc");
assert!(result.is_ok());
let which_result = Command::new("which").arg("rustc").output();
let which_result = Command::new("which")
.arg("rustc")
.output();
assert_eq!(
String::from(result.unwrap().to_str().unwrap()),
String::from_utf8(which_result.unwrap().stdout)
.unwrap()
.trim()
);
assert_eq!(String::from(result.unwrap().to_str().unwrap()),
String::from_utf8(which_result.unwrap().stdout).unwrap().trim());
}
#[test]
#[cfg(unix)]
fn test_which() {
let f = TestFixture::new();
assert_eq!(
_which(&f, &BIN_NAME).unwrap().canonicalize().unwrap(),
f.bins[0]
)
assert_eq!(_which(&f, &BIN_NAME).unwrap().canonicalize().unwrap(),
f.bins[0])
}
#[test]
#[cfg(windows)]
fn test_which() {
let f = TestFixture::new();
assert_eq!(
_which(&f, &BIN_NAME).unwrap().canonicalize().unwrap(),
f.bins[1]
)
}
#[test]
#[cfg(unix)]
fn test_which_extension() {
let f = TestFixture::new();
let b = Path::new(&BIN_NAME).with_extension("");
assert_eq!(_which(&f, &b).unwrap().canonicalize().unwrap(), f.bins[0])
}
#[test]
#[cfg(windows)]
fn test_which_extension() {
let f = TestFixture::new();
let b = Path::new(&BIN_NAME).with_extension("cmd");
assert_eq!(_which(&f, &b).unwrap().canonicalize().unwrap(), f.bins[2])
let b = Path::new(&BIN_NAME).with_extension(env::consts::EXE_EXTENSION);
assert_eq!(_which(&f, &b).unwrap().canonicalize().unwrap(),
f.bins[0])
}
#[test]
@@ -236,28 +350,15 @@
#[test]
fn test_which_second() {
let f = TestFixture::new();
let b = f.mk_bin("b/another", env::consts::EXE_EXTENSION).unwrap();
let b = f.mk_bin("b/another").unwrap();
assert_eq!(_which(&f, "another").unwrap().canonicalize().unwrap(), b);
}
#[test]
#[cfg(unix)]
fn test_which_absolute() {
let f = TestFixture::new();
assert_eq!(
_which(&f, &f.bins[3]).unwrap().canonicalize().unwrap(),
f.bins[3].canonicalize().unwrap()
);
}
#[test]
#[cfg(windows)]
fn test_which_absolute() {
let f = TestFixture::new();
assert_eq!(
_which(&f, &f.bins[4]).unwrap().canonicalize().unwrap(),
f.bins[4].canonicalize().unwrap()
);
assert_eq!(_which(&f, &f.bins[1]).unwrap().canonicalize().unwrap(),
f.bins[1].canonicalize().unwrap());
}
#[test]
@@ -266,81 +367,35 @@
// Test that an absolute path with an uppercase extension
// is accepted.
let f = TestFixture::new();
let p = &f.bins[4];
assert_eq!(
_which(&f, &p).unwrap().canonicalize().unwrap(),
f.bins[4].canonicalize().unwrap()
);
let p = f.bins[1].with_extension("EXE");
assert_eq!(_which(&f, &p).unwrap().canonicalize().unwrap(),
f.bins[1].canonicalize().unwrap());
}
#[test]
#[cfg(unix)]
fn test_which_absolute_extension() {
let f = TestFixture::new();
// Don't append EXE_EXTENSION here.
let b = f.bins[3].parent().unwrap().join(&BIN_NAME);
assert_eq!(
_which(&f, &b).unwrap().canonicalize().unwrap(),
f.bins[3].canonicalize().unwrap()
);
let b = f.bins[1].parent().unwrap().join(&BIN_NAME);
assert_eq!(_which(&f, &b).unwrap().canonicalize().unwrap(),
f.bins[1].canonicalize().unwrap());
}
#[test]
#[cfg(windows)]
fn test_which_absolute_extension() {
let f = TestFixture::new();
// Don't append EXE_EXTENSION here.
let b = f.bins[4].parent().unwrap().join(&BIN_NAME);
assert_eq!(
_which(&f, &b).unwrap().canonicalize().unwrap(),
f.bins[4].canonicalize().unwrap()
);
}
#[test]
#[cfg(unix)]
fn test_which_relative() {
let f = TestFixture::new();
assert_eq!(
_which(&f, "b/bin").unwrap().canonicalize().unwrap(),
f.bins[3].canonicalize().unwrap()
);
assert_eq!(_which(&f, "b/bin").unwrap().canonicalize().unwrap(),
f.bins[1].canonicalize().unwrap());
}
#[test]
#[cfg(windows)]
fn test_which_relative() {
let f = TestFixture::new();
assert_eq!(
_which(&f, "b/bin").unwrap().canonicalize().unwrap(),
f.bins[4].canonicalize().unwrap()
);
}
#[test]
#[cfg(unix)]
fn test_which_relative_extension() {
// test_which_relative tests a relative path without an extension,
// so test a relative path with an extension here.
let f = TestFixture::new();
let b = Path::new("b/bin").with_extension(env::consts::EXE_EXTENSION);
assert_eq!(
_which(&f, &b).unwrap().canonicalize().unwrap(),
f.bins[3].canonicalize().unwrap()
);
}
#[test]
#[cfg(windows)]
fn test_which_relative_extension() {
// test_which_relative tests a relative path without an extension,
// so test a relative path with an extension here.
let f = TestFixture::new();
let b = Path::new("b/bin").with_extension("cmd");
assert_eq!(
_which(&f, &b).unwrap().canonicalize().unwrap(),
f.bins[5].canonicalize().unwrap()
);
assert_eq!(_which(&f, &b).unwrap().canonicalize().unwrap(),
f.bins[1].canonicalize().unwrap());
}
#[test]
@@ -350,30 +405,15 @@
// is accepted.
let f = TestFixture::new();
let b = Path::new("b/bin").with_extension("EXE");
assert_eq!(
_which(&f, &b).unwrap().canonicalize().unwrap(),
f.bins[4].canonicalize().unwrap()
);
assert_eq!(_which(&f, &b).unwrap().canonicalize().unwrap(),
f.bins[1].canonicalize().unwrap());
}
#[test]
#[cfg(unix)]
fn test_which_relative_leading_dot() {
let f = TestFixture::new();
assert_eq!(
_which(&f, "./b/bin").unwrap().canonicalize().unwrap(),
f.bins[3].canonicalize().unwrap()
);
}
#[test]
#[cfg(windows)]
fn test_which_relative_leading_dot() {
let f = TestFixture::new();
assert_eq!(
_which(&f, "./b/bin").unwrap().canonicalize().unwrap(),
f.bins[4].canonicalize().unwrap()
);
assert_eq!(_which(&f, "./b/bin").unwrap().canonicalize().unwrap(),
f.bins[1].canonicalize().unwrap());
}
#[test]
@@ -381,7 +421,7 @@
fn test_which_non_executable() {
// Shouldn't return non-executable files.
let f = TestFixture::new();
f.touch("b/another", "").unwrap();
f.touch("b/another").unwrap();
assert!(_which(&f, "another").is_err());
}
@@ -390,7 +430,7 @@
fn test_which_absolute_non_executable() {
// Shouldn't return non-executable files, even if given an absolute path.
let f = TestFixture::new();
let b = f.touch("b/another", "").unwrap();
let b = f.touch("b/another").unwrap();
assert!(_which(&f, &b).is_err());
}
@@ -399,20 +439,7 @@
fn test_which_relative_non_executable() {
// Shouldn't return non-executable files.
let f = TestFixture::new();
f.touch("b/another", "").unwrap();
f.touch("b/another").unwrap();
assert!(_which(&f, "b/another").is_err());
}
#[test]
fn test_failure() {
let f = TestFixture::new();
let run = || -> std::result::Result<PathBuf, failure::Error> {
// Test the conversion to failure
let p = _which(&f, "./b/bin")?;
Ok(p)
};
let _ = run();
}
}