Bug 1648405 - Vendor crossbeam dependency. r=gw

Differential Revision: https://phabricator.services.mozilla.com/D81041
Nicolas Silva 2020-07-28 09:45:33 +00:00
parent 255da40c5d
commit 643ec22ebc
43 changed files with 19576 additions and 0 deletions

Cargo.lock generated

@@ -826,6 +826,16 @@ dependencies = [
"cfg-if",
]
[[package]]
name = "crossbeam-channel"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09ee0cc8804d5393478d743b035099520087a5186f3b93fa58cec08fa62407b6"
dependencies = [
"cfg-if",
"crossbeam-utils 0.7.0",
]
[[package]]
name = "crossbeam-deque"
version = "0.7.2"
@@ -5550,6 +5560,7 @@ dependencies = [
"byteorder",
"core-foundation",
"core-graphics",
"crossbeam-channel",
"derive_more",
"euclid",
"malloc_size_of_derive",
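
For context, `crossbeam-channel` provides the multi-producer multi-consumer message passing that this new dependency entry pulls into webrender. Below is a generic, illustrative sketch of that primitive; it is not taken from the webrender sources, and the thread count and message contents are invented:

```rust
use std::thread;
use crossbeam_channel::unbounded;

fn main() {
    let (tx, rx) = unbounded();

    // Senders are Clone + Send, so several threads can produce messages.
    let producers: Vec<_> = (0..4)
        .map(|id| {
            let tx = tx.clone();
            thread::spawn(move || tx.send(format!("hello from thread {}", id)).unwrap())
        })
        .collect();
    drop(tx); // Drop the original sender so the channel closes once producers finish.

    for handle in producers {
        handle.join().unwrap();
    }
    // Iteration ends when every sender has been dropped and the queue is drained.
    for msg in rx.iter() {
        println!("{}", msg);
    }
}
```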

third_party/rust/crossbeam-channel/.cargo-checksum.json vendored Normal file

@@ -0,0 +1 @@
{"files":{"CHANGELOG.md":"c41ddf971d55da04567cdb2e574c14acb5cfe905e8c41b5fa161af0c50c0dd7f","Cargo.lock":"14fcb36b266d560ed631a7c608994811f74df44d17f036b5cb66766c1ed54679","Cargo.toml":"d25a14eafef76836a3f458e963c4472ae43a3ef3d1778aad99a2943bdad9b931","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"5734ed989dfca1f625b40281ee9f4530f91b2411ec01cb748223e7eb87e201ab","LICENSE-THIRD-PARTY":"924a49392dc8304def57586be4ebd69aaf51e16fd245b55b4b69ad2cce6b715a","README.md":"d2c041f6fa5da30a6f87377421a8de4010c0c002dbff777e18ad2f8b36a65378","benches/crossbeam.rs":"f5720508d3458f2451271b9887f7557823304bd38288c928b0d6aa1f459865e5","examples/fibonacci.rs":"6a26ecd74c7493d2c93f4280c0804afc19adc612b77b3d9fea433119ff472a44","examples/matching.rs":"63c250e164607a7a9f643d46f107bb5da846d49e89cf9069909562d20e530f71","examples/stopwatch.rs":"87e4613e24083e877d97aaefdc6c78ee0308c3a2994974b2cbc7df042a82d488","src/channel.rs":"4fd8e2d7c889f39447e0039ee1441e81362137dbc2a3549cf9f3f730cde52c25","src/context.rs":"ad24cabfc50dd5e6ae84aa46a0246da12da1f1a6fa19043244ad25136075c6ca","src/counter.rs":"8b6d4d69db59bc992ddc7ed33f709788c3fab482521bb1adf985f77499506c09","src/err.rs":"c4c7602c5faa3123f063bf5b52382a63c3eab5c79c3d1d8f67baeb762c442fb9","src/flavors/after.rs":"f50a0032e44962b454db826c13192a99197bbf1acc971435deac5722a3242f49","src/flavors/array.rs":"da229912febc4088c04c4767483ac099810ff2348c66ac55e7904c42de280433","src/flavors/list.rs":"fa6667a33e30fad94a8432b78dd70d143e0809ebf80fa881bb6221aca55b4229","src/flavors/mod.rs":"a5af9b6105207e293c0d64928b4486fb1da9bfe0318354c66c8b5069e41ec31f","src/flavors/never.rs":"59ac4a46b5e32780b2b2f338064544a8b4ec1bc8b78a8a9cd0a683bcd01d5d36","src/flavors/tick.rs":"667fcaacb07d7556350a7e59b5906956224c57b1f43b7b0018a5e2dad0d91ff8","src/flavors/zero.rs":"97c13c89bce0ffd4f761a5ecd73e3221bdd320817061ef7bc7d76010da021b77","src/lib.rs":"24e16d1855e89f5749c564533daca544a424d0dfa6bdadc0a95e47649734bddd","src/select.rs":"70c54434f033e999dce48655b524b6f5ed990b5d424b2d7fb122f3c9ac7d41be","src/select_macro.rs":"511d75151a5bc2ccc294098e79c0494a5dfa06cb0e473f5703dbc239ead4f235","src/utils.rs":"4ec0d30835f42dffc7614a18c8f005abb40dd33f2b127809b297959c6aa43004","src/waker.rs":"27eb84dcd0eb58f7d0d79d412bb293becc2740f6166d2fc64acb0ad94cfb523e","tests/after.rs":"324c7d773f72bef62d150171f74ba7b7ac1b06f6030b3d4d2b1a35d211956b21","tests/array.rs":"62290dfd66740a1d1017daea8048df09a11ee8ff8eb614a3a0aa5f4d456a7c39","tests/golang.rs":"0d41155fde12de50876dbbe775648598b63f6254169c44a781a30050cad1c890","tests/iter.rs":"7563dc7fdf4c63e31dd74ee3fedecdd3aed490f7ef599b98f6f75f929cf79edb","tests/list.rs":"6a9645c00aed88c1ad07d2e416b4f137ccef2e50f389c71589c6ac7d57373e5a","tests/mpsc.rs":"0c4c6b056f5cec77ca19eca45f99b083632700a4b67133e88071a1d22a61d6fe","tests/never.rs":"665441a9fb004f7cd44047619637ebe6766cf2faf58e68e6481397bbfc682e11","tests/ready.rs":"3848ee8bb16cc83269a462d830659ff29f91834eaab0749bca7be02f43d7db51","tests/same_channel.rs":"2bab761443671e841e1b2476bd8082d75533a2f6be7946f5dbcee67cdc82dccb","tests/select.rs":"58aa5421475bd98b3adc02e4241fc77669ca910cf9cac7a8b0a212b2e92cb7c7","tests/select_macro.rs":"00dd7963f79b96abf30851fdab29e86c8424b502a8a7d34abf4bc1714f493ecf","tests/thread_locals.rs":"3611db5502e6af0a8d15187d09fd195381819795544208b946e9f99b04579a81","tests/tick.rs":"06f205ace5fc44daaf1b6900a2e05bb5bda1c6071c1a07524f45769d8855968b","tests/zero.rs":"368eac99c6d9fb679f8dfbe93cdb96b01d77d1b6f840aa4e156b06f1786bb882"},"package":"09ee0cc8804d5393478d743b0350
99520087a5186f3b93fa58cec08fa62407b6"}

third_party/rust/crossbeam-channel/CHANGELOG.md vendored Normal file

@@ -0,0 +1,162 @@
# Version 0.4.3
- Change license to "MIT OR Apache-2.0".
# Version 0.4.2
- Fix bug in release (yanking 0.4.1)
# Version 0.4.1
- Avoid time drift in `channel::tick`. (#456)
- Fix unsoundness issues by adopting `MaybeUninit`. (#458)
# Version 0.4.0
- Bump the minimum required version to 1.28.
- Bump `crossbeam-utils` to `0.7`.
# Version 0.3.9
- Fix a bug in reference counting.
- Optimize `recv_timeout()`.
- Add `Select::remove()`.
- Various small improvements, code cleanup, more tests.
# Version 0.3.8
- Bump the minimum required version of `crossbeam-utils`.
# Version 0.3.7
- Remove `parking_lot` and `rand` dependencies.
- Expand documentation.
- Implement `Default` for `Select`.
- Make `size_of::<Receiver<T>>()` smaller.
- Several minor optimizations.
- Add more tests.
# Version 0.3.6
- Fix a bug in initialization of unbounded channels.
# Version 0.3.5
- New implementation for unbounded channels.
- A number of small performance improvements.
- Remove `crossbeam-epoch` dependency.
# Version 0.3.4
- Bump `crossbeam-epoch` to `0.7`.
- Improve documentation.
# Version 0.3.3
- Relax the lifetime in `SelectedOperation<'_>`.
- Add `Select::try_ready()`, `Select::ready()`, and `Select::ready_timeout()`.
- Update licensing notices.
- Improve documentation.
- Add methods `is_disconnected()`, `is_timeout()`, `is_empty()`, and `is_full()` on error types.
# Version 0.3.2
- More elaborate licensing notices.
# Version 0.3.1
- Update `crossbeam-utils` to `0.6`.
# Version 0.3.0
- Add a special `never` channel type.
- Dropping all receivers now closes the channel.
- The interface of sending and receiving methods is now very similar to those in v0.1.
- The syntax for `send` in `select!` is now `send(sender, msg) -> res => body`.
- The syntax for `recv` in `select!` is now `recv(receiver) -> res => body`.
- New, more efficient interface for `Select` without callbacks.
- Timeouts can be specified in `select!`.
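
To make the syntax described above concrete, here is a minimal illustrative sketch of a `select!` invocation in this style (the channel, message values, and timeout are invented for this example):

```rust
use std::time::Duration;
use crossbeam_channel::{select, unbounded};

fn main() {
    let (s, r) = unbounded();

    select! {
        // `send(sender, msg) -> res => body`
        send(s, "ping") -> res => res.expect("no receivers left"),
        // `recv(receiver) -> res => body`
        recv(r) -> msg => println!("received {:?}", msg),
        // A timeout expressed as a `default` case with a duration.
        default(Duration::from_millis(100)) => println!("timed out"),
    }
}
```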
# Version 0.2.6
- `Select` struct that can add cases dynamically.
- More documentation (in particular, the FAQ section).
- Optimize contended sends/receives in unbounded channels.
# Version 0.2.5
- Use `LocalKey::try_with` instead of `LocalKey::with`.
- Remove helper macros `__crossbeam_channel*`.
# Version 0.2.4
- Make `select!` linearizable with other channel operations.
- Update `crossbeam-utils` to `0.5.0`.
- Update `parking_lot` to `0.6.3`.
- Remove Mac OS X tests.
# Version 0.2.3
- Add Mac OS X tests.
- Lower some memory orderings.
- Eliminate calls to `mem::uninitialized`, which caused bugs with ZSTs.
# Version 0.2.2
- Add more tests.
- Update `crossbeam-epoch` to 0.5.0
- Initialize the RNG seed to a random value.
- Replace `libc::abort` with `std::process::abort`.
- Ignore clippy warnings in `select!`.
- Better interaction of `select!` with the NLL borrow checker.
# Version 0.2.1
- Fix compilation errors when using `select!` with `#[deny(unsafe_code)]`.
# Version 0.2.0
- Implement `IntoIterator<Item = T>` for `Receiver<T>`.
- Add a new `select!` macro.
- Add special channels `after` and `tick`.
- Dropping receivers doesn't close the channel anymore.
- Change the signature of `recv`, `send`, and `try_recv`.
- Remove `Sender::is_closed` and `Receiver::is_closed`.
- Remove `Sender::close` and `Receiver::close`.
- Remove `Sender::send_timeout` and `Receiver::recv_timeout`.
- Remove `Sender::try_send`.
- Remove `Select` and `select_loop!`.
- Remove all error types.
- Remove `Iter`, `TryIter`, and `IntoIter`.
- Remove the `nightly` feature.
- Remove ordering operators for `Sender` and `Receiver`.
# Version 0.1.3
- Add `Sender::disconnect` and `Receiver::disconnect`.
- Implement comparison operators for `Sender` and `Receiver`.
- Allow arbitrary patterns in place of `msg` in `recv(r, msg)`.
- Add a few conversion impls between error types.
- Add benchmarks for `atomicring` and `mpmc`.
- Add benchmarks for different message sizes.
- Documentation improvements.
- Update `crossbeam-epoch` to 0.4.0
- Update `crossbeam-utils` to 0.3.0
- Update `parking_lot` to 0.5
- Update `rand` to 0.4
# Version 0.1.2
- Allow conditional cases in `select_loop!` macro.
- Fix typos in documentation.
- Fix deadlock in selection when all channels are disconnected and a timeout is specified.
# Version 0.1.1
- Implement `Debug` for `Sender`, `Receiver`, `Iter`, `TryIter`, `IntoIter`, and `Select`.
- Implement `Default` for `Select`.
# Version 0.1.0
- First implementation of the channels.
- Add `select_loop!` macro by @TimNN.

third_party/rust/crossbeam-channel/Cargo.lock generated vendored Normal file

@@ -0,0 +1,156 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
[[package]]
name = "arc-swap"
version = "0.4.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4d25d88fd6b8041580a654f9d0c581a047baee2b3efee13275f2fc392fc75034"
[[package]]
name = "autocfg"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d"
[[package]]
name = "cfg-if"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
[[package]]
name = "crossbeam-channel"
version = "0.4.3"
dependencies = [
"cfg-if",
"crossbeam-utils",
"num_cpus",
"rand",
"signal-hook",
]
[[package]]
name = "crossbeam-utils"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8"
dependencies = [
"autocfg",
"cfg-if",
"lazy_static",
]
[[package]]
name = "getrandom"
version = "0.1.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb"
dependencies = [
"cfg-if",
"libc",
"wasi",
]
[[package]]
name = "hermit-abi"
version = "0.1.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3deed196b6e7f9e44a2ae8d94225d80302d81208b1bb673fd21fe634645c85a9"
dependencies = [
"libc",
]
[[package]]
name = "lazy_static"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
version = "0.2.73"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bd7d4bd64732af4bf3a67f367c27df8520ad7e230c5817b8ff485864d80242b9"
[[package]]
name = "num_cpus"
version = "1.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3"
dependencies = [
"hermit-abi",
"libc",
]
[[package]]
name = "ppv-lite86"
version = "0.2.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea"
[[package]]
name = "rand"
version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
dependencies = [
"getrandom",
"libc",
"rand_chacha",
"rand_core",
"rand_hc",
]
[[package]]
name = "rand_chacha"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402"
dependencies = [
"ppv-lite86",
"rand_core",
]
[[package]]
name = "rand_core"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
dependencies = [
"getrandom",
]
[[package]]
name = "rand_hc"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c"
dependencies = [
"rand_core",
]
[[package]]
name = "signal-hook"
version = "0.1.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "604508c1418b99dfe1925ca9224829bb2a8a9a04dda655cc01fcad46f4ab05ed"
dependencies = [
"libc",
"signal-hook-registry",
]
[[package]]
name = "signal-hook-registry"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94f478ede9f64724c5d173d7bb56099ec3e2d9fc2774aac65d34b8b890405f41"
dependencies = [
"arc-swap",
"libc",
]
[[package]]
name = "wasi"
version = "0.9.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"

third_party/rust/crossbeam-channel/Cargo.toml vendored Normal file

@@ -0,0 +1,44 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
edition = "2018"
name = "crossbeam-channel"
version = "0.4.3"
authors = ["The Crossbeam Project Developers"]
description = "Multi-producer multi-consumer channels for message passing"
homepage = "https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-channel"
documentation = "https://docs.rs/crossbeam-channel"
readme = "README.md"
keywords = ["channel", "mpmc", "select", "golang", "message"]
categories = ["algorithms", "concurrency", "data-structures"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/crossbeam-rs/crossbeam"
[dependencies.cfg-if]
version = "0.1.10"
[dependencies.crossbeam-utils]
version = "0.7"
optional = true
default-features = false
[dev-dependencies.num_cpus]
version = "1.13.0"
[dev-dependencies.rand]
version = "0.7.3"
[dev-dependencies.signal-hook]
version = "0.1.15"
[features]
default = ["std"]
std = ["crossbeam-utils/std"]

third_party/rust/crossbeam-channel/LICENSE-APACHE vendored Normal file

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

third_party/rust/crossbeam-channel/LICENSE-MIT vendored Normal file

@@ -0,0 +1,27 @@
The MIT License (MIT)
Copyright (c) 2019 The Crossbeam Project Developers
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

third_party/rust/crossbeam-channel/LICENSE-THIRD-PARTY vendored Normal file

@@ -0,0 +1,625 @@
===============================================================================
Bounded MPMC queue
http://www.1024cores.net/home/code-license
Copyright (c) 2010-2011 Dmitry Vyukov.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of Dmitry Vyukov.
===============================================================================
matching.go
https://creativecommons.org/licenses/by/3.0/legalcode
Creative Commons Legal Code
Attribution 3.0 Unported
CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE
LEGAL SERVICES. DISTRIBUTION OF THIS LICENSE DOES NOT CREATE AN
ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS
INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES
REGARDING THE INFORMATION PROVIDED, AND DISCLAIMS LIABILITY FOR
DAMAGES RESULTING FROM ITS USE.
License
THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE
COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY
COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS
AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE
TO BE BOUND BY THE TERMS OF THIS LICENSE. TO THE EXTENT THIS LICENSE MAY
BE CONSIDERED TO BE A CONTRACT, THE LICENSOR GRANTS YOU THE RIGHTS
CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND
CONDITIONS.
1. Definitions
a. "Adaptation" means a work based upon the Work, or upon the Work and
other pre-existing works, such as a translation, adaptation,
derivative work, arrangement of music or other alterations of a
literary or artistic work, or phonogram or performance and includes
cinematographic adaptations or any other form in which the Work may be
recast, transformed, or adapted including in any form recognizably
derived from the original, except that a work that constitutes a
Collection will not be considered an Adaptation for the purpose of
this License. For the avoidance of doubt, where the Work is a musical
work, performance or phonogram, the synchronization of the Work in
timed-relation with a moving image ("synching") will be considered an
Adaptation for the purpose of this License.
b. "Collection" means a collection of literary or artistic works, such as
encyclopedias and anthologies, or performances, phonograms or
broadcasts, or other works or subject matter other than works listed
in Section 1(f) below, which, by reason of the selection and
arrangement of their contents, constitute intellectual creations, in
which the Work is included in its entirety in unmodified form along
with one or more other contributions, each constituting separate and
independent works in themselves, which together are assembled into a
collective whole. A work that constitutes a Collection will not be
considered an Adaptation (as defined above) for the purposes of this
License.
c. "Distribute" means to make available to the public the original and
copies of the Work or Adaptation, as appropriate, through sale or
other transfer of ownership.
d. "Licensor" means the individual, individuals, entity or entities that
offer(s) the Work under the terms of this License.
e. "Original Author" means, in the case of a literary or artistic work,
the individual, individuals, entity or entities who created the Work
or if no individual or entity can be identified, the publisher; and in
addition (i) in the case of a performance the actors, singers,
musicians, dancers, and other persons who act, sing, deliver, declaim,
play in, interpret or otherwise perform literary or artistic works or
expressions of folklore; (ii) in the case of a phonogram the producer
being the person or legal entity who first fixes the sounds of a
performance or other sounds; and, (iii) in the case of broadcasts, the
organization that transmits the broadcast.
f. "Work" means the literary and/or artistic work offered under the terms
of this License including without limitation any production in the
literary, scientific and artistic domain, whatever may be the mode or
form of its expression including digital form, such as a book,
pamphlet and other writing; a lecture, address, sermon or other work
of the same nature; a dramatic or dramatico-musical work; a
choreographic work or entertainment in dumb show; a musical
composition with or without words; a cinematographic work to which are
assimilated works expressed by a process analogous to cinematography;
a work of drawing, painting, architecture, sculpture, engraving or
lithography; a photographic work to which are assimilated works
expressed by a process analogous to photography; a work of applied
art; an illustration, map, plan, sketch or three-dimensional work
relative to geography, topography, architecture or science; a
performance; a broadcast; a phonogram; a compilation of data to the
extent it is protected as a copyrightable work; or a work performed by
a variety or circus performer to the extent it is not otherwise
considered a literary or artistic work.
g. "You" means an individual or entity exercising rights under this
License who has not previously violated the terms of this License with
respect to the Work, or who has received express permission from the
Licensor to exercise rights under this License despite a previous
violation.
h. "Publicly Perform" means to perform public recitations of the Work and
to communicate to the public those public recitations, by any means or
process, including by wire or wireless means or public digital
performances; to make available to the public Works in such a way that
members of the public may access these Works from a place and at a
place individually chosen by them; to perform the Work to the public
by any means or process and the communication to the public of the
performances of the Work, including by public digital performance; to
broadcast and rebroadcast the Work by any means including signs,
sounds or images.
i. "Reproduce" means to make copies of the Work by any means including
without limitation by sound or visual recordings and the right of
fixation and reproducing fixations of the Work, including storage of a
protected performance or phonogram in digital form or other electronic
medium.
2. Fair Dealing Rights. Nothing in this License is intended to reduce,
limit, or restrict any uses free from copyright or rights arising from
limitations or exceptions that are provided for in connection with the
copyright protection under copyright law or other applicable laws.
3. License Grant. Subject to the terms and conditions of this License,
Licensor hereby grants You a worldwide, royalty-free, non-exclusive,
perpetual (for the duration of the applicable copyright) license to
exercise the rights in the Work as stated below:
a. to Reproduce the Work, to incorporate the Work into one or more
Collections, and to Reproduce the Work as incorporated in the
Collections;
b. to create and Reproduce Adaptations provided that any such Adaptation,
including any translation in any medium, takes reasonable steps to
clearly label, demarcate or otherwise identify that changes were made
to the original Work. For example, a translation could be marked "The
original work was translated from English to Spanish," or a
modification could indicate "The original work has been modified.";
c. to Distribute and Publicly Perform the Work including as incorporated
in Collections; and,
d. to Distribute and Publicly Perform Adaptations.
e. For the avoidance of doubt:
i. Non-waivable Compulsory License Schemes. In those jurisdictions in
which the right to collect royalties through any statutory or
compulsory licensing scheme cannot be waived, the Licensor
reserves the exclusive right to collect such royalties for any
exercise by You of the rights granted under this License;
ii. Waivable Compulsory License Schemes. In those jurisdictions in
which the right to collect royalties through any statutory or
compulsory licensing scheme can be waived, the Licensor waives the
exclusive right to collect such royalties for any exercise by You
of the rights granted under this License; and,
iii. Voluntary License Schemes. The Licensor waives the right to
collect royalties, whether individually or, in the event that the
Licensor is a member of a collecting society that administers
voluntary licensing schemes, via that society, from any exercise
by You of the rights granted under this License.
The above rights may be exercised in all media and formats whether now
known or hereafter devised. The above rights include the right to make
such modifications as are technically necessary to exercise the rights in
other media and formats. Subject to Section 8(f), all rights not expressly
granted by Licensor are hereby reserved.
4. Restrictions. The license granted in Section 3 above is expressly made
subject to and limited by the following restrictions:
a. You may Distribute or Publicly Perform the Work only under the terms
of this License. You must include a copy of, or the Uniform Resource
Identifier (URI) for, this License with every copy of the Work You
Distribute or Publicly Perform. You may not offer or impose any terms
on the Work that restrict the terms of this License or the ability of
the recipient of the Work to exercise the rights granted to that
recipient under the terms of the License. You may not sublicense the
Work. You must keep intact all notices that refer to this License and
to the disclaimer of warranties with every copy of the Work You
Distribute or Publicly Perform. When You Distribute or Publicly
Perform the Work, You may not impose any effective technological
measures on the Work that restrict the ability of a recipient of the
Work from You to exercise the rights granted to that recipient under
the terms of the License. This Section 4(a) applies to the Work as
incorporated in a Collection, but this does not require the Collection
apart from the Work itself to be made subject to the terms of this
License. If You create a Collection, upon notice from any Licensor You
must, to the extent practicable, remove from the Collection any credit
as required by Section 4(b), as requested. If You create an
Adaptation, upon notice from any Licensor You must, to the extent
practicable, remove from the Adaptation any credit as required by
Section 4(b), as requested.
b. If You Distribute, or Publicly Perform the Work or any Adaptations or
Collections, You must, unless a request has been made pursuant to
Section 4(a), keep intact all copyright notices for the Work and
provide, reasonable to the medium or means You are utilizing: (i) the
name of the Original Author (or pseudonym, if applicable) if supplied,
and/or if the Original Author and/or Licensor designate another party
or parties (e.g., a sponsor institute, publishing entity, journal) for
attribution ("Attribution Parties") in Licensor's copyright notice,
terms of service or by other reasonable means, the name of such party
or parties; (ii) the title of the Work if supplied; (iii) to the
extent reasonably practicable, the URI, if any, that Licensor
specifies to be associated with the Work, unless such URI does not
refer to the copyright notice or licensing information for the Work;
and (iv) , consistent with Section 3(b), in the case of an Adaptation,
a credit identifying the use of the Work in the Adaptation (e.g.,
"French translation of the Work by Original Author," or "Screenplay
based on original Work by Original Author"). The credit required by
this Section 4 (b) may be implemented in any reasonable manner;
provided, however, that in the case of a Adaptation or Collection, at
a minimum such credit will appear, if a credit for all contributing
authors of the Adaptation or Collection appears, then as part of these
credits and in a manner at least as prominent as the credits for the
other contributing authors. For the avoidance of doubt, You may only
use the credit required by this Section for the purpose of attribution
in the manner set out above and, by exercising Your rights under this
License, You may not implicitly or explicitly assert or imply any
connection with, sponsorship or endorsement by the Original Author,
Licensor and/or Attribution Parties, as appropriate, of You or Your
use of the Work, without the separate, express prior written
permission of the Original Author, Licensor and/or Attribution
Parties.
c. Except as otherwise agreed in writing by the Licensor or as may be
otherwise permitted by applicable law, if You Reproduce, Distribute or
Publicly Perform the Work either by itself or as part of any
Adaptations or Collections, You must not distort, mutilate, modify or
take other derogatory action in relation to the Work which would be
prejudicial to the Original Author's honor or reputation. Licensor
agrees that in those jurisdictions (e.g. Japan), in which any exercise
of the right granted in Section 3(b) of this License (the right to
make Adaptations) would be deemed to be a distortion, mutilation,
modification or other derogatory action prejudicial to the Original
Author's honor and reputation, the Licensor will waive or not assert,
as appropriate, this Section, to the fullest extent permitted by the
applicable national law, to enable You to reasonably exercise Your
right under Section 3(b) of this License (right to make Adaptations)
but not otherwise.
5. Representations, Warranties and Disclaimer
UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR
OFFERS THE WORK AS-IS AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY
KIND CONCERNING THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE,
INCLUDING, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTIBILITY,
FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF
LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS,
WHETHER OR NOT DISCOVERABLE. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION
OF IMPLIED WARRANTIES, SO SUCH EXCLUSION MAY NOT APPLY TO YOU.
6. Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE
LAW, IN NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR
ANY SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES
ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS
BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
7. Termination
a. This License and the rights granted hereunder will terminate
automatically upon any breach by You of the terms of this License.
Individuals or entities who have received Adaptations or Collections
from You under this License, however, will not have their licenses
terminated provided such individuals or entities remain in full
compliance with those licenses. Sections 1, 2, 5, 6, 7, and 8 will
survive any termination of this License.
b. Subject to the above terms and conditions, the license granted here is
perpetual (for the duration of the applicable copyright in the Work).
Notwithstanding the above, Licensor reserves the right to release the
Work under different license terms or to stop distributing the Work at
any time; provided, however that any such election will not serve to
withdraw this License (or any other license that has been, or is
required to be, granted under the terms of this License), and this
License will continue in full force and effect unless terminated as
stated above.
8. Miscellaneous
a. Each time You Distribute or Publicly Perform the Work or a Collection,
the Licensor offers to the recipient a license to the Work on the same
terms and conditions as the license granted to You under this License.
b. Each time You Distribute or Publicly Perform an Adaptation, Licensor
offers to the recipient a license to the original Work on the same
terms and conditions as the license granted to You under this License.
c. If any provision of this License is invalid or unenforceable under
applicable law, it shall not affect the validity or enforceability of
the remainder of the terms of this License, and without further action
by the parties to this agreement, such provision shall be reformed to
the minimum extent necessary to make such provision valid and
enforceable.
d. No term or provision of this License shall be deemed waived and no
breach consented to unless such waiver or consent shall be in writing
and signed by the party to be charged with such waiver or consent.
e. This License constitutes the entire agreement between the parties with
respect to the Work licensed here. There are no understandings,
agreements or representations with respect to the Work not specified
here. Licensor shall not be bound by any additional provisions that
may appear in any communication from You. This License may not be
modified without the mutual written agreement of the Licensor and You.
f. The rights granted under, and the subject matter referenced, in this
License were drafted utilizing the terminology of the Berne Convention
for the Protection of Literary and Artistic Works (as amended on
September 28, 1979), the Rome Convention of 1961, the WIPO Copyright
Treaty of 1996, the WIPO Performances and Phonograms Treaty of 1996
and the Universal Copyright Convention (as revised on July 24, 1971).
These rights and subject matter take effect in the relevant
jurisdiction in which the License terms are sought to be enforced
according to the corresponding provisions of the implementation of
those treaty provisions in the applicable national law. If the
standard suite of rights granted under applicable copyright law
includes additional rights not granted under this License, such
additional rights are deemed to be included in the License; this
License is not intended to restrict the license of any rights under
applicable law.
Creative Commons Notice
Creative Commons is not a party to this License, and makes no warranty
whatsoever in connection with the Work. Creative Commons will not be
liable to You or any party on any legal theory for any damages
whatsoever, including without limitation any general, special,
incidental or consequential damages arising in connection to this
license. Notwithstanding the foregoing two (2) sentences, if Creative
Commons has expressly identified itself as the Licensor hereunder, it
shall have all rights and obligations of Licensor.
Except for the limited purpose of indicating to the public that the
Work is licensed under the CCPL, Creative Commons does not authorize
the use by either party of the trademark "Creative Commons" or any
related trademark or logo of Creative Commons without the prior
written consent of Creative Commons. Any permitted use will be in
compliance with Creative Commons' then-current trademark usage
guidelines, as may be published on its website or otherwise made
available upon request from time to time. For the avoidance of doubt,
this trademark restriction does not form part of this License.
Creative Commons may be contacted at https://creativecommons.org/.
===============================================================================
The Go Programming Language
https://golang.org/LICENSE
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
===============================================================================
The Rust Programming Language
https://github.com/rust-lang/rust/blob/master/LICENSE-MIT
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
===============================================================================
The Rust Programming Language
https://github.com/rust-lang/rust/blob/master/LICENSE-APACHE
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -0,0 +1,88 @@
# Crossbeam Channel
[![Build Status](https://github.com/crossbeam-rs/crossbeam/workflows/CI/badge.svg)](
https://github.com/crossbeam-rs/crossbeam/actions)
[![License](https://img.shields.io/badge/license-MIT%20OR%20Apache--2.0-blue.svg)](
https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-channel#license)
[![Cargo](https://img.shields.io/crates/v/crossbeam-channel.svg)](
https://crates.io/crates/crossbeam-channel)
[![Documentation](https://docs.rs/crossbeam-channel/badge.svg)](
https://docs.rs/crossbeam-channel)
[![Rust 1.36+](https://img.shields.io/badge/rust-1.36+-lightgray.svg)](
https://www.rust-lang.org)
[![chat](https://img.shields.io/discord/569610676205781012.svg?logo=discord)](https://discord.gg/BBYwKq)
This crate provides multi-producer multi-consumer channels for message passing.
It is an alternative to [`std::sync::mpsc`] with more features and better performance.
Some highlights:
* [`Sender`]s and [`Receiver`]s can be cloned and shared among threads.
* Two main kinds of channels are [`bounded`] and [`unbounded`].
* Convenient extra channels like [`after`], [`never`], and [`tick`].
* The [`select!`] macro can block on multiple channel operations.
* [`Select`] can select over a dynamically built list of channel operations.
* Channels use locks very sparingly for maximum [performance](benchmarks).
[`std::sync::mpsc`]: https://doc.rust-lang.org/std/sync/mpsc/index.html
[`Sender`]: https://docs.rs/crossbeam-channel/*/crossbeam_channel/struct.Sender.html
[`Receiver`]: https://docs.rs/crossbeam-channel/*/crossbeam_channel/struct.Receiver.html
[`bounded`]: https://docs.rs/crossbeam-channel/*/crossbeam_channel/fn.bounded.html
[`unbounded`]: https://docs.rs/crossbeam-channel/*/crossbeam_channel/fn.unbounded.html
[`after`]: https://docs.rs/crossbeam-channel/*/crossbeam_channel/fn.after.html
[`never`]: https://docs.rs/crossbeam-channel/*/crossbeam_channel/fn.never.html
[`tick`]: https://docs.rs/crossbeam-channel/*/crossbeam_channel/fn.tick.html
[`select!`]: https://docs.rs/crossbeam-channel/*/crossbeam_channel/macro.select.html
[`Select`]: https://docs.rs/crossbeam-channel/*/crossbeam_channel/struct.Select.html
## Usage
Add this to your `Cargo.toml`:
```toml
[dependencies]
crossbeam-channel = "0.4"
```
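For orientation, a minimal sketch of how the pieces described above fit together: channels are created with [`bounded`] or [`unbounded`], the handles can be cloned and moved across threads, and [`select!`] waits on several operations at once (the crate documentation has the complete, authoritative examples):
```rust
use std::thread;

use crossbeam_channel::{bounded, select, unbounded};

fn main() {
    // An unbounded channel: sends never block.
    let (s1, r1) = unbounded();
    // A zero-capacity channel: a send blocks until a receiver is ready.
    let (s2, r2) = bounded(0);

    // Senders (and receivers) can be cloned and moved into other threads.
    let s1_clone = s1.clone();
    thread::spawn(move || s1_clone.send("unbounded").unwrap());
    thread::spawn(move || s2.send("bounded").unwrap());

    // Block until one of the two receive operations becomes ready.
    select! {
        recv(r1) -> msg => println!("{}", msg.unwrap()),
        recv(r2) -> msg => println!("{}", msg.unwrap()),
    }
}
```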
## Compatibility
Crossbeam Channel supports stable Rust releases going back at least six months,
and every time the minimum supported Rust version is increased, a new minor
version is released. Currently, the minimum supported Rust version is 1.36.
## License
Licensed under either of
* Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
#### Contribution
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
dual licensed as above, without any additional terms or conditions.
#### Third party software
This product includes copies and modifications of software developed by third parties:
* [examples/matching.rs](examples/matching.rs) includes
[matching.go](http://www.nada.kth.se/~snilsson/concurrency/src/matching.go) by Stefan Nilsson,
licensed under Creative Commons Attribution 3.0 Unported License.
* [src/flavors/array.rs](src/flavors/array.rs) is based on
[Bounded MPMC queue](http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue)
by Dmitry Vyukov, licensed under the Simplified BSD License and the Apache License, Version 2.0.
* [tests/mpsc.rs](tests/mpsc.rs) includes modifications of code from The Rust Programming Language,
licensed under the MIT License and the Apache License, Version 2.0.
* [tests/golang.rs](tests/golang.rs) is based on code from The Go Programming Language, licensed
under the 3-Clause BSD License.
See the source code files for more details.
Copies of third party licenses can be found in [LICENSE-THIRD-PARTY](LICENSE-THIRD-PARTY).


@@ -0,0 +1,712 @@
#![feature(test)]
extern crate test;
use crossbeam_channel::{bounded, unbounded};
use crossbeam_utils::thread::scope;
use test::Bencher;
const TOTAL_STEPS: usize = 40_000;
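// Each multi-threaded benchmark below uses the same coordination pattern:
// worker threads block on the zero-capacity channel `r1` waiting for a "go"
// signal, perform their share of sends/receives on the channel under test,
// and then report completion on `s2`. `b.iter` therefore measures only the
// message-passing work, and dropping `s1` at the end disconnects the workers
// so the scoped threads can exit.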
mod unbounded {
use super::*;
#[bench]
fn create(b: &mut Bencher) {
b.iter(|| unbounded::<i32>());
}
#[bench]
fn oneshot(b: &mut Bencher) {
b.iter(|| {
let (s, r) = unbounded::<i32>();
s.send(0).unwrap();
r.recv().unwrap();
});
}
#[bench]
fn inout(b: &mut Bencher) {
let (s, r) = unbounded::<i32>();
b.iter(|| {
s.send(0).unwrap();
r.recv().unwrap();
});
}
#[bench]
fn par_inout(b: &mut Bencher) {
let threads = num_cpus::get();
let steps = TOTAL_STEPS / threads;
let (s, r) = unbounded::<i32>();
let (s1, r1) = bounded(0);
let (s2, r2) = bounded(0);
scope(|scope| {
for _ in 0..threads {
scope.spawn(|_| {
while r1.recv().is_ok() {
for i in 0..steps {
s.send(i as i32).unwrap();
r.recv().unwrap();
}
s2.send(()).unwrap();
}
});
}
b.iter(|| {
for _ in 0..threads {
s1.send(()).unwrap();
}
for _ in 0..threads {
r2.recv().unwrap();
}
});
drop(s1);
})
.unwrap();
}
#[bench]
fn spsc(b: &mut Bencher) {
let steps = TOTAL_STEPS;
let (s, r) = unbounded::<i32>();
let (s1, r1) = bounded(0);
let (s2, r2) = bounded(0);
scope(|scope| {
scope.spawn(|_| {
while r1.recv().is_ok() {
for i in 0..steps {
s.send(i as i32).unwrap();
}
s2.send(()).unwrap();
}
});
b.iter(|| {
s1.send(()).unwrap();
for _ in 0..steps {
r.recv().unwrap();
}
r2.recv().unwrap();
});
drop(s1);
})
.unwrap();
}
#[bench]
fn spmc(b: &mut Bencher) {
let threads = num_cpus::get() - 1;
let steps = TOTAL_STEPS / threads;
let (s, r) = unbounded::<i32>();
let (s1, r1) = bounded(0);
let (s2, r2) = bounded(0);
scope(|scope| {
for _ in 0..threads {
scope.spawn(|_| {
while r1.recv().is_ok() {
for _ in 0..steps {
r.recv().unwrap();
}
s2.send(()).unwrap();
}
});
}
b.iter(|| {
for _ in 0..threads {
s1.send(()).unwrap();
}
for i in 0..steps * threads {
s.send(i as i32).unwrap();
}
for _ in 0..threads {
r2.recv().unwrap();
}
});
drop(s1);
})
.unwrap();
}
#[bench]
fn mpsc(b: &mut Bencher) {
let threads = num_cpus::get() - 1;
let steps = TOTAL_STEPS / threads;
let (s, r) = unbounded::<i32>();
let (s1, r1) = bounded(0);
let (s2, r2) = bounded(0);
scope(|scope| {
for _ in 0..threads {
scope.spawn(|_| {
while r1.recv().is_ok() {
for i in 0..steps {
s.send(i as i32).unwrap();
}
s2.send(()).unwrap();
}
});
}
b.iter(|| {
for _ in 0..threads {
s1.send(()).unwrap();
}
for _ in 0..steps * threads {
r.recv().unwrap();
}
for _ in 0..threads {
r2.recv().unwrap();
}
});
drop(s1);
})
.unwrap();
}
#[bench]
fn mpmc(b: &mut Bencher) {
let threads = num_cpus::get();
let steps = TOTAL_STEPS / threads;
let (s, r) = unbounded::<i32>();
let (s1, r1) = bounded(0);
let (s2, r2) = bounded(0);
scope(|scope| {
for _ in 0..threads / 2 {
scope.spawn(|_| {
while r1.recv().is_ok() {
for i in 0..steps {
s.send(i as i32).unwrap();
}
s2.send(()).unwrap();
}
});
}
for _ in 0..threads / 2 {
scope.spawn(|_| {
while r1.recv().is_ok() {
for _ in 0..steps {
r.recv().unwrap();
}
s2.send(()).unwrap();
}
});
}
b.iter(|| {
for _ in 0..threads {
s1.send(()).unwrap();
}
for _ in 0..threads {
r2.recv().unwrap();
}
});
drop(s1);
})
.unwrap();
}
}
mod bounded_n {
use super::*;
#[bench]
fn spsc(b: &mut Bencher) {
let steps = TOTAL_STEPS;
let (s, r) = bounded::<i32>(steps);
let (s1, r1) = bounded(0);
let (s2, r2) = bounded(0);
scope(|scope| {
scope.spawn(|_| {
while r1.recv().is_ok() {
for i in 0..steps {
s.send(i as i32).unwrap();
}
s2.send(()).unwrap();
}
});
b.iter(|| {
s1.send(()).unwrap();
for _ in 0..steps {
r.recv().unwrap();
}
r2.recv().unwrap();
});
drop(s1);
})
.unwrap();
}
#[bench]
fn spmc(b: &mut Bencher) {
let threads = num_cpus::get() - 1;
let steps = TOTAL_STEPS / threads;
let (s, r) = bounded::<i32>(steps * threads);
let (s1, r1) = bounded(0);
let (s2, r2) = bounded(0);
scope(|scope| {
for _ in 0..threads {
scope.spawn(|_| {
while r1.recv().is_ok() {
for _ in 0..steps {
r.recv().unwrap();
}
s2.send(()).unwrap();
}
});
}
b.iter(|| {
for _ in 0..threads {
s1.send(()).unwrap();
}
for i in 0..steps * threads {
s.send(i as i32).unwrap();
}
for _ in 0..threads {
r2.recv().unwrap();
}
});
drop(s1);
})
.unwrap();
}
#[bench]
fn mpsc(b: &mut Bencher) {
let threads = num_cpus::get() - 1;
let steps = TOTAL_STEPS / threads;
let (s, r) = bounded::<i32>(steps * threads);
let (s1, r1) = bounded(0);
let (s2, r2) = bounded(0);
scope(|scope| {
for _ in 0..threads {
scope.spawn(|_| {
while r1.recv().is_ok() {
for i in 0..steps {
s.send(i as i32).unwrap();
}
s2.send(()).unwrap();
}
});
}
b.iter(|| {
for _ in 0..threads {
s1.send(()).unwrap();
}
for _ in 0..steps * threads {
r.recv().unwrap();
}
for _ in 0..threads {
r2.recv().unwrap();
}
});
drop(s1);
})
.unwrap();
}
#[bench]
fn par_inout(b: &mut Bencher) {
let threads = num_cpus::get();
let steps = TOTAL_STEPS / threads;
let (s, r) = bounded::<i32>(threads);
let (s1, r1) = bounded(0);
let (s2, r2) = bounded(0);
scope(|scope| {
for _ in 0..threads {
scope.spawn(|_| {
while r1.recv().is_ok() {
for i in 0..steps {
s.send(i as i32).unwrap();
r.recv().unwrap();
}
s2.send(()).unwrap();
}
});
}
b.iter(|| {
for _ in 0..threads {
s1.send(()).unwrap();
}
for _ in 0..threads {
r2.recv().unwrap();
}
});
drop(s1);
})
.unwrap();
}
#[bench]
fn mpmc(b: &mut Bencher) {
let threads = num_cpus::get();
assert_eq!(threads % 2, 0);
let steps = TOTAL_STEPS / threads;
let (s, r) = bounded::<i32>(steps * threads);
let (s1, r1) = bounded(0);
let (s2, r2) = bounded(0);
scope(|scope| {
for _ in 0..threads / 2 {
scope.spawn(|_| {
while r1.recv().is_ok() {
for i in 0..steps {
s.send(i as i32).unwrap();
}
s2.send(()).unwrap();
}
});
}
for _ in 0..threads / 2 {
scope.spawn(|_| {
while r1.recv().is_ok() {
for _ in 0..steps {
r.recv().unwrap();
}
s2.send(()).unwrap();
}
});
}
b.iter(|| {
for _ in 0..threads {
s1.send(()).unwrap();
}
for _ in 0..threads {
r2.recv().unwrap();
}
});
drop(s1);
})
.unwrap();
}
}
mod bounded_1 {
use super::*;
#[bench]
fn create(b: &mut Bencher) {
b.iter(|| bounded::<i32>(1));
}
#[bench]
fn oneshot(b: &mut Bencher) {
b.iter(|| {
let (s, r) = bounded::<i32>(1);
s.send(0).unwrap();
r.recv().unwrap();
});
}
#[bench]
fn spsc(b: &mut Bencher) {
let steps = TOTAL_STEPS;
let (s, r) = bounded::<i32>(1);
let (s1, r1) = bounded(0);
let (s2, r2) = bounded(0);
scope(|scope| {
scope.spawn(|_| {
while r1.recv().is_ok() {
for i in 0..steps {
s.send(i as i32).unwrap();
}
s2.send(()).unwrap();
}
});
b.iter(|| {
s1.send(()).unwrap();
for _ in 0..steps {
r.recv().unwrap();
}
r2.recv().unwrap();
});
drop(s1);
})
.unwrap();
}
#[bench]
fn spmc(b: &mut Bencher) {
let threads = num_cpus::get() - 1;
let steps = TOTAL_STEPS / threads;
let (s, r) = bounded::<i32>(1);
let (s1, r1) = bounded(0);
let (s2, r2) = bounded(0);
scope(|scope| {
for _ in 0..threads {
scope.spawn(|_| {
while r1.recv().is_ok() {
for _ in 0..steps {
r.recv().unwrap();
}
s2.send(()).unwrap();
}
});
}
b.iter(|| {
for _ in 0..threads {
s1.send(()).unwrap();
}
for i in 0..steps * threads {
s.send(i as i32).unwrap();
}
for _ in 0..threads {
r2.recv().unwrap();
}
});
drop(s1);
})
.unwrap();
}
#[bench]
fn mpsc(b: &mut Bencher) {
let threads = num_cpus::get() - 1;
let steps = TOTAL_STEPS / threads;
let (s, r) = bounded::<i32>(1);
let (s1, r1) = bounded(0);
let (s2, r2) = bounded(0);
scope(|scope| {
for _ in 0..threads {
scope.spawn(|_| {
while r1.recv().is_ok() {
for i in 0..steps {
s.send(i as i32).unwrap();
}
s2.send(()).unwrap();
}
});
}
b.iter(|| {
for _ in 0..threads {
s1.send(()).unwrap();
}
for _ in 0..steps * threads {
r.recv().unwrap();
}
for _ in 0..threads {
r2.recv().unwrap();
}
});
drop(s1);
})
.unwrap();
}
#[bench]
fn mpmc(b: &mut Bencher) {
let threads = num_cpus::get();
let steps = TOTAL_STEPS / threads;
let (s, r) = bounded::<i32>(1);
let (s1, r1) = bounded(0);
let (s2, r2) = bounded(0);
scope(|scope| {
for _ in 0..threads / 2 {
scope.spawn(|_| {
while r1.recv().is_ok() {
for i in 0..steps {
s.send(i as i32).unwrap();
}
s2.send(()).unwrap();
}
});
}
for _ in 0..threads / 2 {
scope.spawn(|_| {
while r1.recv().is_ok() {
for _ in 0..steps {
r.recv().unwrap();
}
s2.send(()).unwrap();
}
});
}
b.iter(|| {
for _ in 0..threads {
s1.send(()).unwrap();
}
for _ in 0..threads {
r2.recv().unwrap();
}
});
drop(s1);
})
.unwrap();
}
}
mod bounded_0 {
use super::*;
#[bench]
fn create(b: &mut Bencher) {
b.iter(|| bounded::<i32>(0));
}
#[bench]
fn spsc(b: &mut Bencher) {
let steps = TOTAL_STEPS;
let (s, r) = bounded::<i32>(0);
let (s1, r1) = bounded(0);
let (s2, r2) = bounded(0);
scope(|scope| {
scope.spawn(|_| {
while r1.recv().is_ok() {
for i in 0..steps {
s.send(i as i32).unwrap();
}
s2.send(()).unwrap();
}
});
b.iter(|| {
s1.send(()).unwrap();
for _ in 0..steps {
r.recv().unwrap();
}
r2.recv().unwrap();
});
drop(s1);
})
.unwrap();
}
#[bench]
fn spmc(b: &mut Bencher) {
let threads = num_cpus::get() - 1;
let steps = TOTAL_STEPS / threads;
let (s, r) = bounded::<i32>(0);
let (s1, r1) = bounded(0);
let (s2, r2) = bounded(0);
scope(|scope| {
for _ in 0..threads {
scope.spawn(|_| {
while r1.recv().is_ok() {
for _ in 0..steps {
r.recv().unwrap();
}
s2.send(()).unwrap();
}
});
}
b.iter(|| {
for _ in 0..threads {
s1.send(()).unwrap();
}
for i in 0..steps * threads {
s.send(i as i32).unwrap();
}
for _ in 0..threads {
r2.recv().unwrap();
}
});
drop(s1);
})
.unwrap();
}
#[bench]
fn mpsc(b: &mut Bencher) {
let threads = num_cpus::get() - 1;
let steps = TOTAL_STEPS / threads;
let (s, r) = bounded::<i32>(0);
let (s1, r1) = bounded(0);
let (s2, r2) = bounded(0);
scope(|scope| {
for _ in 0..threads {
scope.spawn(|_| {
while r1.recv().is_ok() {
for i in 0..steps {
s.send(i as i32).unwrap();
}
s2.send(()).unwrap();
}
});
}
b.iter(|| {
for _ in 0..threads {
s1.send(()).unwrap();
}
for _ in 0..steps * threads {
r.recv().unwrap();
}
for _ in 0..threads {
r2.recv().unwrap();
}
});
drop(s1);
})
.unwrap();
}
#[bench]
fn mpmc(b: &mut Bencher) {
let threads = num_cpus::get();
let steps = TOTAL_STEPS / threads;
let (s, r) = bounded::<i32>(0);
let (s1, r1) = bounded(0);
let (s2, r2) = bounded(0);
scope(|scope| {
for _ in 0..threads / 2 {
scope.spawn(|_| {
while r1.recv().is_ok() {
for i in 0..steps {
s.send(i as i32).unwrap();
}
s2.send(()).unwrap();
}
});
}
for _ in 0..threads / 2 {
scope.spawn(|_| {
while r1.recv().is_ok() {
for _ in 0..steps {
r.recv().unwrap();
}
s2.send(()).unwrap();
}
});
}
b.iter(|| {
for _ in 0..threads {
s1.send(()).unwrap();
}
for _ in 0..threads {
r2.recv().unwrap();
}
});
drop(s1);
})
.unwrap();
}
}


@@ -0,0 +1,25 @@
//! An asynchronous fibonacci sequence generator.
use std::thread;
use crossbeam_channel::{bounded, Sender};
// Sends the Fibonacci sequence into the channel until it becomes disconnected.
fn fibonacci(sender: Sender<u64>) {
let (mut x, mut y) = (0, 1);
while sender.send(x).is_ok() {
let tmp = x;
x = y;
y = tmp + y;
}
}
fn main() {
let (s, r) = bounded(0);
thread::spawn(|| fibonacci(s));
// Print the first 20 Fibonacci numbers.
for num in r.iter().take(20) {
println!("{}", num);
}
}


@@ -0,0 +1,72 @@
//! Using `select!` to send and receive on the same channel at the same time.
//!
//! This example is based on the following program in Go.
//!
//! Source:
//! - https://web.archive.org/web/20171209034309/https://www.nada.kth.se/~snilsson/concurrency
//! - http://www.nada.kth.se/~snilsson/concurrency/src/matching.go
//!
//! Copyright & License:
//! - Stefan Nilsson
//! - Creative Commons Attribution 3.0 Unported License
//! - https://creativecommons.org/licenses/by/3.0/
//!
//! ```go
//! func main() {
//! people := []string{"Anna", "Bob", "Cody", "Dave", "Eva"}
//! match := make(chan string, 1) // Make room for one unmatched send.
//! wg := new(sync.WaitGroup)
//! for _, name := range people {
//! wg.Add(1)
//! go Seek(name, match, wg)
//! }
//! wg.Wait()
//! select {
//! case name := <-match:
//! fmt.Printf("No one received %ss message.\n", name)
//! default:
//! // There was no pending send operation.
//! }
//! }
//!
//! // Seek either sends or receives, whichever possible, a name on the match
//! // channel and notifies the wait group when done.
//! func Seek(name string, match chan string, wg *sync.WaitGroup) {
//! select {
//! case peer := <-match:
//! fmt.Printf("%s received a message from %s.\n", name, peer)
//! case match <- name:
//! // Wait for someone to receive my message.
//! }
//! wg.Done()
//! }
//! ```
use crossbeam_channel::{bounded, select};
use crossbeam_utils::thread;
fn main() {
let people = vec!["Anna", "Bob", "Cody", "Dave", "Eva"];
let (s, r) = bounded(1); // Make room for one unmatched send.
// Either send my name into the channel or receive someone else's, whichever happens first.
let seek = |name, s, r| {
select! {
recv(r) -> peer => println!("{} received a message from {}.", name, peer.unwrap()),
send(s, name) -> _ => {}, // Wait for someone to receive my message.
}
};
thread::scope(|scope| {
for name in people {
let (s, r) = (s.clone(), r.clone());
scope.spawn(move |_| seek(name, s, r));
}
})
.unwrap();
// Check if there is a pending send operation.
if let Ok(name) = r.try_recv() {
println!("No one received {}s message.", name);
}
}


@@ -0,0 +1,60 @@
//! Prints the elapsed time every 1 second and quits on Ctrl+C.
#[cfg(windows)] // signal_hook::iterator does not work on windows
fn main() {
println!("This example does not work on Windows");
}
#[cfg(not(windows))]
fn main() {
use std::io;
use std::thread;
use std::time::{Duration, Instant};
use crossbeam_channel::{bounded, select, tick, Receiver};
use signal_hook::iterator::Signals;
use signal_hook::SIGINT;
// Creates a channel that gets a message every time `SIGINT` is signalled.
fn sigint_notifier() -> io::Result<Receiver<()>> {
let (s, r) = bounded(100);
let signals = Signals::new(&[SIGINT])?;
thread::spawn(move || {
for _ in signals.forever() {
if s.send(()).is_err() {
break;
}
}
});
Ok(r)
}
// Prints the elapsed time.
fn show(dur: Duration) {
println!(
"Elapsed: {}.{:03} sec",
dur.as_secs(),
dur.subsec_nanos() / 1_000_000
);
}
let start = Instant::now();
let update = tick(Duration::from_secs(1));
let ctrl_c = sigint_notifier().unwrap();
loop {
select! {
recv(update) -> _ => {
show(start.elapsed());
}
recv(ctrl_c) -> _ => {
println!();
println!("Goodbye!");
show(start.elapsed());
break;
}
}
}
}

File diff suppressed because it is too large


@@ -0,0 +1,191 @@
//! Thread-local context used in select.
use std::cell::Cell;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread::{self, Thread, ThreadId};
use std::time::Instant;
use crossbeam_utils::Backoff;
use crate::select::Selected;
/// Thread-local context used in select.
#[derive(Debug, Clone)]
pub struct Context {
inner: Arc<Inner>,
}
/// Inner representation of `Context`.
#[derive(Debug)]
struct Inner {
/// Selected operation.
select: AtomicUsize,
/// A slot into which another thread may store a pointer to its `Packet`.
packet: AtomicUsize,
/// Thread handle.
thread: Thread,
/// Thread id.
thread_id: ThreadId,
}
impl Context {
/// Creates a new context for the duration of the closure.
#[inline]
pub fn with<F, R>(f: F) -> R
where
F: FnOnce(&Context) -> R,
{
thread_local! {
/// Cached thread-local context.
static CONTEXT: Cell<Option<Context>> = Cell::new(Some(Context::new()));
}
let mut f = Some(f);
let mut f = move |cx: &Context| -> R {
let f = f.take().unwrap();
f(cx)
};
CONTEXT
.try_with(|cell| match cell.take() {
None => f(&Context::new()),
Some(cx) => {
cx.reset();
let res = f(&cx);
cell.set(Some(cx));
res
}
})
.unwrap_or_else(|_| f(&Context::new()))
}
/// Creates a new `Context`.
#[cold]
fn new() -> Context {
Context {
inner: Arc::new(Inner {
select: AtomicUsize::new(Selected::Waiting.into()),
packet: AtomicUsize::new(0),
thread: thread::current(),
thread_id: thread::current().id(),
}),
}
}
/// Resets `select` and `packet`.
#[inline]
fn reset(&self) {
self.inner
.select
.store(Selected::Waiting.into(), Ordering::Release);
self.inner.packet.store(0, Ordering::Release);
}
/// Attempts to select an operation.
///
/// On failure, the previously selected operation is returned.
#[inline]
pub fn try_select(&self, select: Selected) -> Result<(), Selected> {
self.inner
.select
.compare_exchange(
Selected::Waiting.into(),
select.into(),
Ordering::AcqRel,
Ordering::Acquire,
)
.map(|_| ())
.map_err(|e| e.into())
}
/// Returns the selected operation.
#[inline]
pub fn selected(&self) -> Selected {
Selected::from(self.inner.select.load(Ordering::Acquire))
}
/// Stores a packet.
///
/// This method must be called after `try_select` succeeds and there is a packet to provide.
#[inline]
pub fn store_packet(&self, packet: usize) {
if packet != 0 {
self.inner.packet.store(packet, Ordering::Release);
}
}
/// Waits until a packet is provided and returns it.
#[inline]
pub fn wait_packet(&self) -> usize {
let backoff = Backoff::new();
loop {
let packet = self.inner.packet.load(Ordering::Acquire);
if packet != 0 {
return packet;
}
backoff.snooze();
}
}
/// Waits until an operation is selected and returns it.
///
/// If the deadline is reached, `Selected::Aborted` will be selected.
#[inline]
pub fn wait_until(&self, deadline: Option<Instant>) -> Selected {
// Spin for a short time, waiting until an operation is selected.
let backoff = Backoff::new();
loop {
let sel = Selected::from(self.inner.select.load(Ordering::Acquire));
if sel != Selected::Waiting {
return sel;
}
if backoff.is_completed() {
break;
} else {
backoff.snooze();
}
}
loop {
// Check whether an operation has been selected.
let sel = Selected::from(self.inner.select.load(Ordering::Acquire));
if sel != Selected::Waiting {
return sel;
}
// If there's a deadline, park the current thread until the deadline is reached.
if let Some(end) = deadline {
let now = Instant::now();
if now < end {
thread::park_timeout(end - now);
} else {
// The deadline has been reached. Try aborting select.
return match self.try_select(Selected::Aborted) {
Ok(()) => Selected::Aborted,
Err(s) => s,
};
}
} else {
thread::park();
}
}
}
/// Unparks the thread this context belongs to.
#[inline]
pub fn unpark(&self) {
self.inner.thread.unpark();
}
/// Returns the id of the thread this context belongs to.
#[inline]
pub fn thread_id(&self) -> ThreadId {
self.inner.thread_id
}
}


@@ -0,0 +1,144 @@
//! Reference counter for channels.
use std::isize;
use std::ops;
use std::process;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
/// Reference counter internals.
struct Counter<C> {
/// The number of senders associated with the channel.
senders: AtomicUsize,
/// The number of receivers associated with the channel.
receivers: AtomicUsize,
/// Set to `true` by whichever side (senders or receivers) releases its last
/// reference first; the side that releases its last reference second then
/// deallocates the channel.
destroy: AtomicBool,
/// The internal channel.
chan: C,
}
/// Wraps a channel into the reference counter.
pub fn new<C>(chan: C) -> (Sender<C>, Receiver<C>) {
let counter = Box::into_raw(Box::new(Counter {
senders: AtomicUsize::new(1),
receivers: AtomicUsize::new(1),
destroy: AtomicBool::new(false),
chan,
}));
let s = Sender { counter };
let r = Receiver { counter };
(s, r)
}
/// The sending side.
pub struct Sender<C> {
counter: *mut Counter<C>,
}
impl<C> Sender<C> {
/// Returns the internal `Counter`.
fn counter(&self) -> &Counter<C> {
unsafe { &*self.counter }
}
/// Acquires another sender reference.
pub fn acquire(&self) -> Sender<C> {
let count = self.counter().senders.fetch_add(1, Ordering::Relaxed);
// Cloning senders and calling `mem::forget` on the clones could potentially overflow the
// counter. It's very difficult to recover sensibly from such degenerate scenarios so we
// just abort when the count becomes very large.
if count > isize::MAX as usize {
process::abort();
}
Sender {
counter: self.counter,
}
}
/// Releases the sender reference.
///
/// Function `disconnect` will be called if this is the last sender reference.
pub unsafe fn release<F: FnOnce(&C) -> bool>(&self, disconnect: F) {
if self.counter().senders.fetch_sub(1, Ordering::AcqRel) == 1 {
disconnect(&self.counter().chan);
if self.counter().destroy.swap(true, Ordering::AcqRel) {
drop(Box::from_raw(self.counter));
}
}
}
}
impl<C> ops::Deref for Sender<C> {
type Target = C;
fn deref(&self) -> &C {
&self.counter().chan
}
}
impl<C> PartialEq for Sender<C> {
fn eq(&self, other: &Sender<C>) -> bool {
self.counter == other.counter
}
}
/// The receiving side.
pub struct Receiver<C> {
counter: *mut Counter<C>,
}
impl<C> Receiver<C> {
/// Returns the internal `Counter`.
fn counter(&self) -> &Counter<C> {
unsafe { &*self.counter }
}
/// Acquires another receiver reference.
pub fn acquire(&self) -> Receiver<C> {
let count = self.counter().receivers.fetch_add(1, Ordering::Relaxed);
// Cloning receivers and calling `mem::forget` on the clones could potentially overflow the
// counter. It's very difficult to recover sensibly from such degenerate scenarios so we
// just abort when the count becomes very large.
if count > isize::MAX as usize {
process::abort();
}
Receiver {
counter: self.counter,
}
}
/// Releases the receiver reference.
///
/// Function `disconnect` will be called if this is the last receiver reference.
pub unsafe fn release<F: FnOnce(&C) -> bool>(&self, disconnect: F) {
if self.counter().receivers.fetch_sub(1, Ordering::AcqRel) == 1 {
disconnect(&self.counter().chan);
if self.counter().destroy.swap(true, Ordering::AcqRel) {
drop(Box::from_raw(self.counter));
}
}
}
}
impl<C> ops::Deref for Receiver<C> {
type Target = C;
fn deref(&self) -> &C {
&self.counter().chan
}
}
impl<C> PartialEq for Receiver<C> {
fn eq(&self, other: &Receiver<C>) -> bool {
self.counter == other.counter
}
}


@@ -0,0 +1,382 @@
use std::error;
use std::fmt;
/// An error returned from the [`send`] method.
///
/// The message could not be sent because the channel is disconnected.
///
/// The error contains the message so it can be recovered.
///
/// [`send`]: struct.Sender.html#method.send
#[derive(PartialEq, Eq, Clone, Copy)]
pub struct SendError<T>(pub T);
/// An error returned from the [`try_send`] method.
///
/// The error contains the message being sent so it can be recovered.
///
/// [`try_send`]: struct.Sender.html#method.try_send
#[derive(PartialEq, Eq, Clone, Copy)]
pub enum TrySendError<T> {
/// The message could not be sent because the channel is full.
///
/// If this is a zero-capacity channel, then the error indicates that there was no receiver
/// available to receive the message at the time.
Full(T),
/// The message could not be sent because the channel is disconnected.
Disconnected(T),
}
/// An error returned from the [`send_timeout`] method.
///
/// The error contains the message being sent so it can be recovered.
///
/// [`send_timeout`]: struct.Sender.html#method.send_timeout
#[derive(PartialEq, Eq, Clone, Copy)]
pub enum SendTimeoutError<T> {
/// The message could not be sent because the channel is full and the operation timed out.
///
/// If this is a zero-capacity channel, then the error indicates that there was no receiver
/// available to receive the message and the operation timed out.
Timeout(T),
/// The message could not be sent because the channel is disconnected.
Disconnected(T),
}
/// An error returned from the [`recv`] method.
///
/// A message could not be received because the channel is empty and disconnected.
///
/// [`recv`]: struct.Receiver.html#method.recv
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub struct RecvError;
/// An error returned from the [`try_recv`] method.
///
/// [`try_recv`]: struct.Receiver.html#method.try_recv
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub enum TryRecvError {
/// A message could not be received because the channel is empty.
///
/// If this is a zero-capacity channel, then the error indicates that there was no sender
/// available to send a message at the time.
Empty,
/// The message could not be received because the channel is empty and disconnected.
Disconnected,
}
/// An error returned from the [`recv_timeout`] method.
///
/// [`recv_timeout`]: struct.Receiver.html#method.recv_timeout
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub enum RecvTimeoutError {
/// A message could not be received because the channel is empty and the operation timed out.
///
/// If this is a zero-capacity channel, then the error indicates that there was no sender
/// available to send a message and the operation timed out.
Timeout,
/// The message could not be received because the channel is empty and disconnected.
Disconnected,
}
/// An error returned from the [`try_select`] method.
///
/// Failed because none of the channel operations were ready.
///
/// [`try_select`]: struct.Select.html#method.try_select
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub struct TrySelectError;
/// An error returned from the [`select_timeout`] method.
///
/// Failed because none of the channel operations became ready before the timeout.
///
/// [`select_timeout`]: struct.Select.html#method.select_timeout
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub struct SelectTimeoutError;
/// An error returned from the [`try_ready`] method.
///
/// Failed because none of the channel operations were ready.
///
/// [`try_ready`]: struct.Select.html#method.try_ready
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub struct TryReadyError;
/// An error returned from the [`ready_timeout`] method.
///
/// Failed because none of the channel operations became ready before the timeout.
///
/// [`ready_timeout`]: struct.Select.html#method.ready_timeout
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub struct ReadyTimeoutError;
impl<T> fmt::Debug for SendError<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
"SendError(..)".fmt(f)
}
}
impl<T> fmt::Display for SendError<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
"sending on a disconnected channel".fmt(f)
}
}
impl<T: Send> error::Error for SendError<T> {}
impl<T> SendError<T> {
/// Unwraps the message.
///
/// # Examples
///
/// ```
/// use crossbeam_channel::unbounded;
///
/// let (s, r) = unbounded();
/// drop(r);
///
/// if let Err(err) = s.send("foo") {
/// assert_eq!(err.into_inner(), "foo");
/// }
/// ```
pub fn into_inner(self) -> T {
self.0
}
}
impl<T> fmt::Debug for TrySendError<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
TrySendError::Full(..) => "Full(..)".fmt(f),
TrySendError::Disconnected(..) => "Disconnected(..)".fmt(f),
}
}
}
impl<T> fmt::Display for TrySendError<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
TrySendError::Full(..) => "sending on a full channel".fmt(f),
TrySendError::Disconnected(..) => "sending on a disconnected channel".fmt(f),
}
}
}
impl<T: Send> error::Error for TrySendError<T> {}
impl<T> From<SendError<T>> for TrySendError<T> {
fn from(err: SendError<T>) -> TrySendError<T> {
match err {
SendError(t) => TrySendError::Disconnected(t),
}
}
}
impl<T> TrySendError<T> {
/// Unwraps the message.
///
/// # Examples
///
/// ```
/// use crossbeam_channel::bounded;
///
/// let (s, r) = bounded(0);
///
/// if let Err(err) = s.try_send("foo") {
/// assert_eq!(err.into_inner(), "foo");
/// }
/// ```
pub fn into_inner(self) -> T {
match self {
TrySendError::Full(v) => v,
TrySendError::Disconnected(v) => v,
}
}
/// Returns `true` if the send operation failed because the channel is full.
pub fn is_full(&self) -> bool {
match self {
TrySendError::Full(_) => true,
_ => false,
}
}
/// Returns `true` if the send operation failed because the channel is disconnected.
pub fn is_disconnected(&self) -> bool {
match self {
TrySendError::Disconnected(_) => true,
_ => false,
}
}
}
impl<T> fmt::Debug for SendTimeoutError<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
"SendTimeoutError(..)".fmt(f)
}
}
impl<T> fmt::Display for SendTimeoutError<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
SendTimeoutError::Timeout(..) => "timed out waiting on send operation".fmt(f),
SendTimeoutError::Disconnected(..) => "sending on a disconnected channel".fmt(f),
}
}
}
impl<T: Send> error::Error for SendTimeoutError<T> {}
impl<T> From<SendError<T>> for SendTimeoutError<T> {
fn from(err: SendError<T>) -> SendTimeoutError<T> {
match err {
SendError(e) => SendTimeoutError::Disconnected(e),
}
}
}
impl<T> SendTimeoutError<T> {
/// Unwraps the message.
///
/// # Examples
///
/// ```
/// use std::time::Duration;
/// use crossbeam_channel::unbounded;
///
/// let (s, r) = unbounded();
///
/// if let Err(err) = s.send_timeout("foo", Duration::from_secs(1)) {
/// assert_eq!(err.into_inner(), "foo");
/// }
/// ```
pub fn into_inner(self) -> T {
match self {
SendTimeoutError::Timeout(v) => v,
SendTimeoutError::Disconnected(v) => v,
}
}
/// Returns `true` if the send operation timed out.
pub fn is_timeout(&self) -> bool {
match self {
SendTimeoutError::Timeout(_) => true,
_ => false,
}
}
/// Returns `true` if the send operation failed because the channel is disconnected.
pub fn is_disconnected(&self) -> bool {
match self {
SendTimeoutError::Disconnected(_) => true,
_ => false,
}
}
}
impl fmt::Display for RecvError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
"receiving on an empty and disconnected channel".fmt(f)
}
}
impl error::Error for RecvError {}
impl fmt::Display for TryRecvError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
TryRecvError::Empty => "receiving on an empty channel".fmt(f),
TryRecvError::Disconnected => "receiving on an empty and disconnected channel".fmt(f),
}
}
}
impl error::Error for TryRecvError {}
impl From<RecvError> for TryRecvError {
fn from(err: RecvError) -> TryRecvError {
match err {
RecvError => TryRecvError::Disconnected,
}
}
}
impl TryRecvError {
/// Returns `true` if the receive operation failed because the channel is empty.
#[allow(clippy::trivially_copy_pass_by_ref)]
pub fn is_empty(&self) -> bool {
match self {
TryRecvError::Empty => true,
_ => false,
}
}
/// Returns `true` if the receive operation failed because the channel is disconnected.
#[allow(clippy::trivially_copy_pass_by_ref)]
pub fn is_disconnected(&self) -> bool {
match self {
TryRecvError::Disconnected => true,
_ => false,
}
}
}
impl fmt::Display for RecvTimeoutError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
RecvTimeoutError::Timeout => "timed out waiting on receive operation".fmt(f),
RecvTimeoutError::Disconnected => "channel is empty and disconnected".fmt(f),
}
}
}
impl error::Error for RecvTimeoutError {}
impl From<RecvError> for RecvTimeoutError {
fn from(err: RecvError) -> RecvTimeoutError {
match err {
RecvError => RecvTimeoutError::Disconnected,
}
}
}
impl RecvTimeoutError {
/// Returns `true` if the receive operation timed out.
#[allow(clippy::trivially_copy_pass_by_ref)]
pub fn is_timeout(&self) -> bool {
match self {
RecvTimeoutError::Timeout => true,
_ => false,
}
}
/// Returns `true` if the receive operation failed because the channel is disconnected.
#[allow(clippy::trivially_copy_pass_by_ref)]
pub fn is_disconnected(&self) -> bool {
match self {
RecvTimeoutError::Disconnected => true,
_ => false,
}
}
}
impl fmt::Display for TrySelectError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
"all operations in select would block".fmt(f)
}
}
impl error::Error for TrySelectError {}
impl fmt::Display for SelectTimeoutError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
"timed out waiting on select".fmt(f)
}
}
impl error::Error for SelectTimeoutError {}


@@ -0,0 +1,200 @@
//! Channel that delivers a message after a certain amount of time.
//!
//! Messages cannot be sent into this kind of channel; they are materialized on demand.
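//!
//! As a rough sketch of how this flavor surfaces in the public API: the
//! crate's `after` function returns a `Receiver<Instant>` backed by this
//! channel, so code along these lines (assuming the documented `after`
//! constructor) receives exactly one message once the duration has elapsed:
//!
//! ```ignore
//! use std::time::Duration;
//! use crossbeam_channel::after;
//!
//! let timeout = after(Duration::from_millis(100));
//! // Blocks for about 100 ms, then yields the delivery `Instant`.
//! let delivered_at = timeout.recv().unwrap();
//! ```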
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;
use std::time::{Duration, Instant};
use crate::context::Context;
use crate::err::{RecvTimeoutError, TryRecvError};
use crate::select::{Operation, SelectHandle, Token};
use crate::utils;
/// Result of a receive operation.
pub type AfterToken = Option<Instant>;
/// Channel that delivers a message after a certain amount of time.
pub struct Channel {
/// The instant at which the message will be delivered.
delivery_time: Instant,
/// `true` if the message has been received.
received: AtomicBool,
}
impl Channel {
/// Creates a channel that delivers a message after a certain duration of time.
#[inline]
pub fn new(dur: Duration) -> Self {
Channel {
delivery_time: Instant::now() + dur,
received: AtomicBool::new(false),
}
}
/// Attempts to receive a message without blocking.
#[inline]
pub fn try_recv(&self) -> Result<Instant, TryRecvError> {
// We use relaxed ordering because this is just an optional optimistic check.
if self.received.load(Ordering::Relaxed) {
// The message has already been received.
return Err(TryRecvError::Empty);
}
if Instant::now() < self.delivery_time {
// The message was not delivered yet.
return Err(TryRecvError::Empty);
}
// Try receiving the message if it is still available.
if !self.received.swap(true, Ordering::SeqCst) {
// Success! Return delivery time as the message.
Ok(self.delivery_time)
} else {
// The message was already received.
Err(TryRecvError::Empty)
}
}
/// Receives a message from the channel.
#[inline]
pub fn recv(&self, deadline: Option<Instant>) -> Result<Instant, RecvTimeoutError> {
// We use relaxed ordering because this is just an optional optimistic check.
if self.received.load(Ordering::Relaxed) {
// The message has already been received.
utils::sleep_until(deadline);
return Err(RecvTimeoutError::Timeout);
}
// Wait until the message is received or the deadline is reached.
loop {
let now = Instant::now();
// Check if we can receive the next message.
if now >= self.delivery_time {
break;
}
// Check if the deadline has been reached.
if let Some(d) = deadline {
if now >= d {
return Err(RecvTimeoutError::Timeout);
}
thread::sleep(self.delivery_time.min(d) - now);
} else {
thread::sleep(self.delivery_time - now);
}
}
// Try receiving the message if it is still available.
if !self.received.swap(true, Ordering::SeqCst) {
// Success! Return the message, which is the instant at which it was delivered.
Ok(self.delivery_time)
} else {
// The message was already received. Block forever.
utils::sleep_until(None);
unreachable!()
}
}
/// Reads a message from the channel.
#[inline]
pub unsafe fn read(&self, token: &mut Token) -> Result<Instant, ()> {
token.after.ok_or(())
}
/// Returns `true` if the channel is empty.
#[inline]
pub fn is_empty(&self) -> bool {
// We use relaxed ordering because this is just an optional optimistic check.
if self.received.load(Ordering::Relaxed) {
return true;
}
// If the delivery time hasn't been reached yet, the channel is empty.
if Instant::now() < self.delivery_time {
return true;
}
// The delivery time has been reached. The channel is empty only if the message has already
// been received.
self.received.load(Ordering::SeqCst)
}
/// Returns `true` if the channel is full.
#[inline]
pub fn is_full(&self) -> bool {
!self.is_empty()
}
/// Returns the number of messages in the channel.
#[inline]
pub fn len(&self) -> usize {
if self.is_empty() {
0
} else {
1
}
}
/// Returns the capacity of the channel.
#[inline]
pub fn capacity(&self) -> Option<usize> {
Some(1)
}
}
impl SelectHandle for Channel {
#[inline]
fn try_select(&self, token: &mut Token) -> bool {
match self.try_recv() {
Ok(msg) => {
token.after = Some(msg);
true
}
Err(TryRecvError::Disconnected) => {
token.after = None;
true
}
Err(TryRecvError::Empty) => false,
}
}
#[inline]
fn deadline(&self) -> Option<Instant> {
// We use relaxed ordering because this is just an optional optimistic check.
if self.received.load(Ordering::Relaxed) {
None
} else {
Some(self.delivery_time)
}
}
#[inline]
fn register(&self, _oper: Operation, _cx: &Context) -> bool {
self.is_ready()
}
#[inline]
fn unregister(&self, _oper: Operation) {}
#[inline]
fn accept(&self, token: &mut Token, _cx: &Context) -> bool {
self.try_select(token)
}
#[inline]
fn is_ready(&self) -> bool {
!self.is_empty()
}
#[inline]
fn watch(&self, _oper: Operation, _cx: &Context) -> bool {
self.is_ready()
}
#[inline]
fn unwatch(&self, _oper: Operation) {}
}


@@ -0,0 +1,637 @@
//! Bounded channel based on a preallocated array.
//!
//! This flavor has a fixed, positive capacity.
//!
//! The implementation is based on Dmitry Vyukov's bounded MPMC queue.
//!
//! Source:
//! - http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue
//! - https://docs.google.com/document/d/1yIAYmbvL3JxOKOjuCyon7JhW4cSv1wy5hC0ApeGMV9s/pub
//!
//! Copyright & License:
//! - Copyright (c) 2010-2011 Dmitry Vyukov
//! - Simplified BSD License and Apache License, Version 2.0
//! - http://www.1024cores.net/home/code-license
use std::cell::UnsafeCell;
use std::marker::PhantomData;
use std::mem::{self, MaybeUninit};
use std::ptr;
use std::sync::atomic::{self, AtomicUsize, Ordering};
use std::time::Instant;
use crossbeam_utils::{Backoff, CachePadded};
use crate::context::Context;
use crate::err::{RecvTimeoutError, SendTimeoutError, TryRecvError, TrySendError};
use crate::select::{Operation, SelectHandle, Selected, Token};
use crate::waker::SyncWaker;
/// A slot in a channel.
struct Slot<T> {
/// The current stamp.
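///
/// When the stamp equals the channel's tail, the slot is vacant and ready
/// for a write; when it equals the channel's head plus one, the slot holds
/// a message ready for a read (see `start_send` and `start_recv` below).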
stamp: AtomicUsize,
/// The message in this slot.
msg: UnsafeCell<MaybeUninit<T>>,
}
/// The token type for the array flavor.
#[derive(Debug)]
pub struct ArrayToken {
/// Slot to read from or write to.
slot: *const u8,
/// Stamp to store into the slot after reading or writing.
stamp: usize,
}
impl Default for ArrayToken {
#[inline]
fn default() -> Self {
ArrayToken {
slot: ptr::null(),
stamp: 0,
}
}
}
/// Bounded channel based on a preallocated array.
pub struct Channel<T> {
/// The head of the channel.
///
/// This value is a "stamp" consisting of an index into the buffer, a mark bit, and a lap, but
/// packed into a single `usize`. The lower bits represent the index, while the upper bits
/// represent the lap. The mark bit in the head is always zero.
///
/// Messages are popped from the head of the channel.
head: CachePadded<AtomicUsize>,
/// The tail of the channel.
///
/// This value is a "stamp" consisting of an index into the buffer, a mark bit, and a lap, but
/// packed into a single `usize`. The lower bits represent the index, while the upper bits
/// represent the lap. The mark bit indicates that the channel is disconnected.
///
/// Messages are pushed into the tail of the channel.
tail: CachePadded<AtomicUsize>,
/// The buffer holding slots.
buffer: *mut Slot<T>,
/// The channel capacity.
cap: usize,
/// A stamp with the value of `{ lap: 1, mark: 0, index: 0 }`.
one_lap: usize,
/// If this bit is set in the tail, that means the channel is disconnected.
mark_bit: usize,
/// Senders waiting while the channel is full.
senders: SyncWaker,
/// Receivers waiting while the channel is empty and not disconnected.
receivers: SyncWaker,
/// Indicates that dropping a `Channel<T>` may drop values of type `T`.
_marker: PhantomData<T>,
}
impl<T> Channel<T> {
/// Creates a bounded channel of capacity `cap`.
pub fn with_capacity(cap: usize) -> Self {
assert!(cap > 0, "capacity must be positive");
// Compute constants `mark_bit` and `one_lap`.
let mark_bit = (cap + 1).next_power_of_two();
let one_lap = mark_bit * 2;
// Head is initialized to `{ lap: 0, mark: 0, index: 0 }`.
let head = 0;
// Tail is initialized to `{ lap: 0, mark: 0, index: 0 }`.
let tail = 0;
// Allocate a buffer of `cap` slots initialized
// with stamps.
let buffer = {
let mut v: Vec<Slot<T>> = (0..cap)
.map(|i| {
// Set the stamp to `{ lap: 0, mark: 0, index: i }`.
Slot {
stamp: AtomicUsize::new(i),
msg: UnsafeCell::new(MaybeUninit::uninit()),
}
})
.collect();
let ptr = v.as_mut_ptr();
mem::forget(v);
ptr
};
Channel {
buffer,
cap,
one_lap,
mark_bit,
head: CachePadded::new(AtomicUsize::new(head)),
tail: CachePadded::new(AtomicUsize::new(tail)),
senders: SyncWaker::new(),
receivers: SyncWaker::new(),
_marker: PhantomData,
}
}
/// Returns a receiver handle to the channel.
pub fn receiver(&self) -> Receiver<'_, T> {
Receiver(self)
}
/// Returns a sender handle to the channel.
pub fn sender(&self) -> Sender<'_, T> {
Sender(self)
}
/// Attempts to reserve a slot for sending a message.
fn start_send(&self, token: &mut Token) -> bool {
let backoff = Backoff::new();
let mut tail = self.tail.load(Ordering::Relaxed);
loop {
// Check if the channel is disconnected.
if tail & self.mark_bit != 0 {
token.array.slot = ptr::null();
token.array.stamp = 0;
return true;
}
// Deconstruct the tail.
let index = tail & (self.mark_bit - 1);
let lap = tail & !(self.one_lap - 1);
// Inspect the corresponding slot.
let slot = unsafe { &*self.buffer.add(index) };
let stamp = slot.stamp.load(Ordering::Acquire);
// If the tail and the stamp match, we may attempt to push.
if tail == stamp {
let new_tail = if index + 1 < self.cap {
// Same lap, incremented index.
// Set to `{ lap: lap, mark: 0, index: index + 1 }`.
tail + 1
} else {
// One lap forward, index wraps around to zero.
// Set to `{ lap: lap.wrapping_add(1), mark: 0, index: 0 }`.
lap.wrapping_add(self.one_lap)
};
// Try moving the tail.
match self.tail.compare_exchange_weak(
tail,
new_tail,
Ordering::SeqCst,
Ordering::Relaxed,
) {
Ok(_) => {
// Prepare the token for the follow-up call to `write`.
token.array.slot = slot as *const Slot<T> as *const u8;
token.array.stamp = tail + 1;
return true;
}
Err(t) => {
tail = t;
backoff.spin();
}
}
} else if stamp.wrapping_add(self.one_lap) == tail + 1 {
atomic::fence(Ordering::SeqCst);
let head = self.head.load(Ordering::Relaxed);
// If the head lags one lap behind the tail as well...
if head.wrapping_add(self.one_lap) == tail {
// ...then the channel is full.
return false;
}
backoff.spin();
tail = self.tail.load(Ordering::Relaxed);
} else {
// Snooze because we need to wait for the stamp to get updated.
backoff.snooze();
tail = self.tail.load(Ordering::Relaxed);
}
}
}
/// Writes a message into the channel.
pub unsafe fn write(&self, token: &mut Token, msg: T) -> Result<(), T> {
// If there is no slot, the channel is disconnected.
if token.array.slot.is_null() {
return Err(msg);
}
let slot: &Slot<T> = &*(token.array.slot as *const Slot<T>);
// Write the message into the slot and update the stamp.
slot.msg.get().write(MaybeUninit::new(msg));
slot.stamp.store(token.array.stamp, Ordering::Release);
// Wake a sleeping receiver.
self.receivers.notify();
Ok(())
}
/// Attempts to reserve a slot for receiving a message.
fn start_recv(&self, token: &mut Token) -> bool {
let backoff = Backoff::new();
let mut head = self.head.load(Ordering::Relaxed);
loop {
// Deconstruct the head.
let index = head & (self.mark_bit - 1);
let lap = head & !(self.one_lap - 1);
// Inspect the corresponding slot.
let slot = unsafe { &*self.buffer.add(index) };
let stamp = slot.stamp.load(Ordering::Acquire);
// If the stamp is ahead of the head by 1, we may attempt to pop.
if head + 1 == stamp {
let new = if index + 1 < self.cap {
// Same lap, incremented index.
// Set to `{ lap: lap, mark: 0, index: index + 1 }`.
head + 1
} else {
// One lap forward, index wraps around to zero.
// Set to `{ lap: lap.wrapping_add(1), mark: 0, index: 0 }`.
lap.wrapping_add(self.one_lap)
};
// Try moving the head.
match self.head.compare_exchange_weak(
head,
new,
Ordering::SeqCst,
Ordering::Relaxed,
) {
Ok(_) => {
// Prepare the token for the follow-up call to `read`.
token.array.slot = slot as *const Slot<T> as *const u8;
token.array.stamp = head.wrapping_add(self.one_lap);
return true;
}
Err(h) => {
head = h;
backoff.spin();
}
}
} else if stamp == head {
atomic::fence(Ordering::SeqCst);
let tail = self.tail.load(Ordering::Relaxed);
// If the tail equals the head, that means the channel is empty.
if (tail & !self.mark_bit) == head {
// If the channel is disconnected...
if tail & self.mark_bit != 0 {
// ...then receive an error.
token.array.slot = ptr::null();
token.array.stamp = 0;
return true;
} else {
// Otherwise, the receive operation is not ready.
return false;
}
}
backoff.spin();
head = self.head.load(Ordering::Relaxed);
} else {
// Snooze because we need to wait for the stamp to get updated.
backoff.snooze();
head = self.head.load(Ordering::Relaxed);
}
}
}
/// Reads a message from the channel.
pub unsafe fn read(&self, token: &mut Token) -> Result<T, ()> {
if token.array.slot.is_null() {
// The channel is disconnected.
return Err(());
}
let slot: &Slot<T> = &*(token.array.slot as *const Slot<T>);
// Read the message from the slot and update the stamp.
let msg = slot.msg.get().read().assume_init();
slot.stamp.store(token.array.stamp, Ordering::Release);
// Wake a sleeping sender.
self.senders.notify();
Ok(msg)
}
/// Attempts to send a message into the channel.
pub fn try_send(&self, msg: T) -> Result<(), TrySendError<T>> {
let token = &mut Token::default();
if self.start_send(token) {
unsafe { self.write(token, msg).map_err(TrySendError::Disconnected) }
} else {
Err(TrySendError::Full(msg))
}
}
/// Sends a message into the channel.
pub fn send(&self, msg: T, deadline: Option<Instant>) -> Result<(), SendTimeoutError<T>> {
let token = &mut Token::default();
loop {
// Try sending a message several times.
let backoff = Backoff::new();
loop {
if self.start_send(token) {
let res = unsafe { self.write(token, msg) };
return res.map_err(SendTimeoutError::Disconnected);
}
if backoff.is_completed() {
break;
} else {
backoff.snooze();
}
}
if let Some(d) = deadline {
if Instant::now() >= d {
return Err(SendTimeoutError::Timeout(msg));
}
}
Context::with(|cx| {
// Prepare for blocking until a receiver wakes us up.
let oper = Operation::hook(token);
self.senders.register(oper, cx);
// Has the channel become ready just now?
if !self.is_full() || self.is_disconnected() {
let _ = cx.try_select(Selected::Aborted);
}
// Block the current thread.
let sel = cx.wait_until(deadline);
match sel {
Selected::Waiting => unreachable!(),
Selected::Aborted | Selected::Disconnected => {
self.senders.unregister(oper).unwrap();
}
Selected::Operation(_) => {}
}
});
}
}
/// Attempts to receive a message without blocking.
pub fn try_recv(&self) -> Result<T, TryRecvError> {
let token = &mut Token::default();
if self.start_recv(token) {
unsafe { self.read(token).map_err(|_| TryRecvError::Disconnected) }
} else {
Err(TryRecvError::Empty)
}
}
/// Receives a message from the channel.
pub fn recv(&self, deadline: Option<Instant>) -> Result<T, RecvTimeoutError> {
let token = &mut Token::default();
loop {
// Try receiving a message several times.
let backoff = Backoff::new();
loop {
if self.start_recv(token) {
let res = unsafe { self.read(token) };
return res.map_err(|_| RecvTimeoutError::Disconnected);
}
if backoff.is_completed() {
break;
} else {
backoff.snooze();
}
}
if let Some(d) = deadline {
if Instant::now() >= d {
return Err(RecvTimeoutError::Timeout);
}
}
Context::with(|cx| {
// Prepare for blocking until a sender wakes us up.
let oper = Operation::hook(token);
self.receivers.register(oper, cx);
// Has the channel become ready just now?
if !self.is_empty() || self.is_disconnected() {
let _ = cx.try_select(Selected::Aborted);
}
// Block the current thread.
let sel = cx.wait_until(deadline);
match sel {
Selected::Waiting => unreachable!(),
Selected::Aborted | Selected::Disconnected => {
self.receivers.unregister(oper).unwrap();
// If the channel was disconnected, we still have to check for remaining
// messages.
}
Selected::Operation(_) => {}
}
});
}
}
/// Returns the current number of messages inside the channel.
pub fn len(&self) -> usize {
loop {
// Load the tail, then load the head.
let tail = self.tail.load(Ordering::SeqCst);
let head = self.head.load(Ordering::SeqCst);
// If the tail didn't change, we've got consistent values to work with.
if self.tail.load(Ordering::SeqCst) == tail {
let hix = head & (self.mark_bit - 1);
let tix = tail & (self.mark_bit - 1);
return if hix < tix {
tix - hix
} else if hix > tix {
self.cap - hix + tix
} else if (tail & !self.mark_bit) == head {
0
} else {
self.cap
};
}
}
}
/// Returns the capacity of the channel.
pub fn capacity(&self) -> Option<usize> {
Some(self.cap)
}
/// Disconnects the channel and wakes up all blocked senders and receivers.
///
/// Returns `true` if this call disconnected the channel.
pub fn disconnect(&self) -> bool {
let tail = self.tail.fetch_or(self.mark_bit, Ordering::SeqCst);
if tail & self.mark_bit == 0 {
self.senders.disconnect();
self.receivers.disconnect();
true
} else {
false
}
}
/// Returns `true` if the channel is disconnected.
pub fn is_disconnected(&self) -> bool {
self.tail.load(Ordering::SeqCst) & self.mark_bit != 0
}
/// Returns `true` if the channel is empty.
pub fn is_empty(&self) -> bool {
let head = self.head.load(Ordering::SeqCst);
let tail = self.tail.load(Ordering::SeqCst);
// Is the tail equal to the head?
//
// Note: If the head changes just before we load the tail, that means there was a moment
// when the channel was not empty, so it is safe to just return `false`.
(tail & !self.mark_bit) == head
}
/// Returns `true` if the channel is full.
pub fn is_full(&self) -> bool {
let tail = self.tail.load(Ordering::SeqCst);
let head = self.head.load(Ordering::SeqCst);
// Is the head lagging one lap behind tail?
//
// Note: If the tail changes just before we load the head, that means there was a moment
// when the channel was not full, so it is safe to just return `false`.
head.wrapping_add(self.one_lap) == tail & !self.mark_bit
}
}
impl<T> Drop for Channel<T> {
fn drop(&mut self) {
// Get the index of the head.
let hix = self.head.load(Ordering::Relaxed) & (self.mark_bit - 1);
// Loop over all slots that hold a message and drop them.
for i in 0..self.len() {
// Compute the index of the next slot holding a message.
let index = if hix + i < self.cap {
hix + i
} else {
hix + i - self.cap
};
unsafe {
let p = {
let slot = &mut *self.buffer.add(index);
let msg = &mut *slot.msg.get();
msg.as_mut_ptr()
};
p.drop_in_place();
}
}
// Finally, deallocate the buffer, but don't run any destructors.
unsafe {
Vec::from_raw_parts(self.buffer, 0, self.cap);
}
}
}
/// Receiver handle to a channel.
pub struct Receiver<'a, T>(&'a Channel<T>);
/// Sender handle to a channel.
pub struct Sender<'a, T>(&'a Channel<T>);
impl<T> SelectHandle for Receiver<'_, T> {
fn try_select(&self, token: &mut Token) -> bool {
self.0.start_recv(token)
}
fn deadline(&self) -> Option<Instant> {
None
}
fn register(&self, oper: Operation, cx: &Context) -> bool {
self.0.receivers.register(oper, cx);
self.is_ready()
}
fn unregister(&self, oper: Operation) {
self.0.receivers.unregister(oper);
}
fn accept(&self, token: &mut Token, _cx: &Context) -> bool {
self.try_select(token)
}
fn is_ready(&self) -> bool {
!self.0.is_empty() || self.0.is_disconnected()
}
fn watch(&self, oper: Operation, cx: &Context) -> bool {
self.0.receivers.watch(oper, cx);
self.is_ready()
}
fn unwatch(&self, oper: Operation) {
self.0.receivers.unwatch(oper);
}
}
impl<T> SelectHandle for Sender<'_, T> {
fn try_select(&self, token: &mut Token) -> bool {
self.0.start_send(token)
}
fn deadline(&self) -> Option<Instant> {
None
}
fn register(&self, oper: Operation, cx: &Context) -> bool {
self.0.senders.register(oper, cx);
self.is_ready()
}
fn unregister(&self, oper: Operation) {
self.0.senders.unregister(oper);
}
fn accept(&self, token: &mut Token, _cx: &Context) -> bool {
self.try_select(token)
}
fn is_ready(&self) -> bool {
!self.0.is_full() || self.0.is_disconnected()
}
fn watch(&self, oper: Operation, cx: &Context) -> bool {
self.0.senders.watch(oper, cx);
self.is_ready()
}
fn unwatch(&self, oper: Operation) {
self.0.senders.unwatch(oper);
}
}


@ -0,0 +1,669 @@
//! Unbounded channel implemented as a linked list.
use std::cell::UnsafeCell;
use std::marker::PhantomData;
use std::mem::MaybeUninit;
use std::ptr;
use std::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering};
use std::time::Instant;
use crossbeam_utils::{Backoff, CachePadded};
use crate::context::Context;
use crate::err::{RecvTimeoutError, SendTimeoutError, TryRecvError, TrySendError};
use crate::select::{Operation, SelectHandle, Selected, Token};
use crate::waker::SyncWaker;
// TODO(stjepang): Once we bump the minimum required Rust version to 1.28 or newer, re-apply the
// following changes by @kleimkuhler:
//
// 1. https://github.com/crossbeam-rs/crossbeam-channel/pull/100
// 2. https://github.com/crossbeam-rs/crossbeam-channel/pull/101
// Bits indicating the state of a slot:
// * If a message has been written into the slot, `WRITE` is set.
// * If a message has been read from the slot, `READ` is set.
// * If the block is being destroyed, `DESTROY` is set.
const WRITE: usize = 1;
const READ: usize = 2;
const DESTROY: usize = 4;
// Each block covers one "lap" of indices.
const LAP: usize = 32;
// The maximum number of messages a block can hold.
const BLOCK_CAP: usize = LAP - 1;
// How many lower bits are reserved for metadata.
const SHIFT: usize = 1;
// Has two different purposes:
// * If set in head, indicates that the block is not the last one.
// * If set in tail, indicates that the channel is disconnected.
const MARK_BIT: usize = 1;
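// How an index is decoded: the lowest `SHIFT` bit is reserved for `MARK_BIT`, the rest is a
// sequence number, and a message's offset within its block is `(index >> SHIFT) % LAP`. Offset
// `BLOCK_CAP` (= LAP - 1) never holds a message: the sender that claims the last usable slot
// installs the next block and advances the index by an extra `1 << SHIFT`, while any thread that
// observes offset `BLOCK_CAP` simply waits for that installation to complete.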
/// A slot in a block.
struct Slot<T> {
/// The message.
msg: UnsafeCell<MaybeUninit<T>>,
/// The state of the slot.
state: AtomicUsize,
}
impl<T> Slot<T> {
/// Waits until a message is written into the slot.
fn wait_write(&self) {
let backoff = Backoff::new();
while self.state.load(Ordering::Acquire) & WRITE == 0 {
backoff.snooze();
}
}
}
/// A block in a linked list.
///
/// Each block in the list can hold up to `BLOCK_CAP` messages.
struct Block<T> {
/// The next block in the linked list.
next: AtomicPtr<Block<T>>,
/// Slots for messages.
slots: [Slot<T>; BLOCK_CAP],
}
impl<T> Block<T> {
/// Creates an empty block.
fn new() -> Block<T> {
// SAFETY: This is safe because:
// [1] `Block::next` (AtomicPtr) may be safely zero initialized.
// [2] `Block::slots` (Array) may be safely zero initialized because of [3, 4].
// [3] `Slot::msg` (UnsafeCell) may be safely zero initialized because it
// holds a MaybeUninit.
// [4] `Slot::state` (AtomicUsize) may be safely zero initialized.
unsafe { MaybeUninit::zeroed().assume_init() }
}
/// Waits until the next pointer is set.
fn wait_next(&self) -> *mut Block<T> {
let backoff = Backoff::new();
loop {
let next = self.next.load(Ordering::Acquire);
if !next.is_null() {
return next;
}
backoff.snooze();
}
}
/// Sets the `DESTROY` bit in slots starting from `start` and destroys the block.
unsafe fn destroy(this: *mut Block<T>, start: usize) {
// It is not necessary to set the `DESTROY` bit in the last slot because that slot has
// begun destruction of the block.
for i in start..BLOCK_CAP - 1 {
let slot = (*this).slots.get_unchecked(i);
// Mark the `DESTROY` bit if a thread is still using the slot.
if slot.state.load(Ordering::Acquire) & READ == 0
&& slot.state.fetch_or(DESTROY, Ordering::AcqRel) & READ == 0
{
// If a thread is still using the slot, it will continue destruction of the block.
return;
}
}
// No thread is using the block, now it is safe to destroy it.
drop(Box::from_raw(this));
}
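// Destruction hand-off: `destroy` walks the slots from `start`, and if it finds a slot whose
// reader has not finished yet it marks that slot with `DESTROY` and returns, leaving the block
// alive. That reader later observes `DESTROY` in `read` and resumes destruction from the next
// offset, so the block is freed exactly once and only after all readers are done with it.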
}
/// A position in a channel.
#[derive(Debug)]
struct Position<T> {
/// The index in the channel.
index: AtomicUsize,
/// The block in the linked list.
block: AtomicPtr<Block<T>>,
}
/// The token type for the list flavor.
#[derive(Debug)]
pub struct ListToken {
/// The block of slots.
block: *const u8,
/// The offset into the block.
offset: usize,
}
impl Default for ListToken {
#[inline]
fn default() -> Self {
ListToken {
block: ptr::null(),
offset: 0,
}
}
}
/// Unbounded channel implemented as a linked list.
///
/// Each message sent into the channel is assigned a sequence number, i.e. an index. Indices are
/// represented as numbers of type `usize` and wrap on overflow.
///
/// Consecutive messages are grouped into blocks in order to put less pressure on the allocator and
/// improve cache efficiency.
pub struct Channel<T> {
/// The head of the channel.
head: CachePadded<Position<T>>,
/// The tail of the channel.
tail: CachePadded<Position<T>>,
/// Receivers waiting while the channel is empty and not disconnected.
receivers: SyncWaker,
/// Indicates that dropping a `Channel<T>` may drop messages of type `T`.
_marker: PhantomData<T>,
}
impl<T> Channel<T> {
/// Creates a new unbounded channel.
pub fn new() -> Self {
Channel {
head: CachePadded::new(Position {
block: AtomicPtr::new(ptr::null_mut()),
index: AtomicUsize::new(0),
}),
tail: CachePadded::new(Position {
block: AtomicPtr::new(ptr::null_mut()),
index: AtomicUsize::new(0),
}),
receivers: SyncWaker::new(),
_marker: PhantomData,
}
}
/// Returns a receiver handle to the channel.
pub fn receiver(&self) -> Receiver<'_, T> {
Receiver(self)
}
/// Returns a sender handle to the channel.
pub fn sender(&self) -> Sender<'_, T> {
Sender(self)
}
/// Attempts to reserve a slot for sending a message.
fn start_send(&self, token: &mut Token) -> bool {
let backoff = Backoff::new();
let mut tail = self.tail.index.load(Ordering::Acquire);
let mut block = self.tail.block.load(Ordering::Acquire);
let mut next_block = None;
loop {
// Check if the channel is disconnected.
if tail & MARK_BIT != 0 {
token.list.block = ptr::null();
return true;
}
// Calculate the offset of the index into the block.
let offset = (tail >> SHIFT) % LAP;
// If we reached the end of the block, wait until the next one is installed.
if offset == BLOCK_CAP {
backoff.snooze();
tail = self.tail.index.load(Ordering::Acquire);
block = self.tail.block.load(Ordering::Acquire);
continue;
}
// If we're going to have to install the next block, allocate it in advance in order to
// make the wait for other threads as short as possible.
if offset + 1 == BLOCK_CAP && next_block.is_none() {
next_block = Some(Box::new(Block::<T>::new()));
}
// If this is the first message to be sent into the channel, we need to allocate the
// first block and install it.
if block.is_null() {
let new = Box::into_raw(Box::new(Block::<T>::new()));
if self
.tail
.block
.compare_and_swap(block, new, Ordering::Release)
== block
{
self.head.block.store(new, Ordering::Release);
block = new;
} else {
next_block = unsafe { Some(Box::from_raw(new)) };
tail = self.tail.index.load(Ordering::Acquire);
block = self.tail.block.load(Ordering::Acquire);
continue;
}
}
let new_tail = tail + (1 << SHIFT);
// Try advancing the tail forward.
match self.tail.index.compare_exchange_weak(
tail,
new_tail,
Ordering::SeqCst,
Ordering::Acquire,
) {
Ok(_) => unsafe {
// If we've reached the end of the block, install the next one.
if offset + 1 == BLOCK_CAP {
let next_block = Box::into_raw(next_block.unwrap());
self.tail.block.store(next_block, Ordering::Release);
self.tail.index.fetch_add(1 << SHIFT, Ordering::Release);
(*block).next.store(next_block, Ordering::Release);
}
token.list.block = block as *const u8;
token.list.offset = offset;
return true;
},
Err(t) => {
tail = t;
block = self.tail.block.load(Ordering::Acquire);
backoff.spin();
}
}
}
}
/// Writes a message into the channel.
pub unsafe fn write(&self, token: &mut Token, msg: T) -> Result<(), T> {
// If there is no slot, the channel is disconnected.
if token.list.block.is_null() {
return Err(msg);
}
// Write the message into the slot.
let block = token.list.block as *mut Block<T>;
let offset = token.list.offset;
let slot = (*block).slots.get_unchecked(offset);
slot.msg.get().write(MaybeUninit::new(msg));
slot.state.fetch_or(WRITE, Ordering::Release);
// Wake a sleeping receiver.
self.receivers.notify();
Ok(())
}
/// Attempts to reserve a slot for receiving a message.
fn start_recv(&self, token: &mut Token) -> bool {
let backoff = Backoff::new();
let mut head = self.head.index.load(Ordering::Acquire);
let mut block = self.head.block.load(Ordering::Acquire);
loop {
// Calculate the offset of the index into the block.
let offset = (head >> SHIFT) % LAP;
// If we reached the end of the block, wait until the next one is installed.
if offset == BLOCK_CAP {
backoff.snooze();
head = self.head.index.load(Ordering::Acquire);
block = self.head.block.load(Ordering::Acquire);
continue;
}
let mut new_head = head + (1 << SHIFT);
if new_head & MARK_BIT == 0 {
atomic::fence(Ordering::SeqCst);
let tail = self.tail.index.load(Ordering::Relaxed);
// If the tail equals the head, that means the channel is empty.
if head >> SHIFT == tail >> SHIFT {
// If the channel is disconnected...
if tail & MARK_BIT != 0 {
// ...then receive an error.
token.list.block = ptr::null();
return true;
} else {
// Otherwise, the receive operation is not ready.
return false;
}
}
// If head and tail are not in the same block, set `MARK_BIT` in head.
if (head >> SHIFT) / LAP != (tail >> SHIFT) / LAP {
new_head |= MARK_BIT;
}
}
// The block can be null here only if the first message is being sent into the channel.
// In that case, just wait until it gets initialized.
if block.is_null() {
backoff.snooze();
head = self.head.index.load(Ordering::Acquire);
block = self.head.block.load(Ordering::Acquire);
continue;
}
// Try moving the head index forward.
match self.head.index.compare_exchange_weak(
head,
new_head,
Ordering::SeqCst,
Ordering::Acquire,
) {
Ok(_) => unsafe {
// If we've reached the end of the block, move to the next one.
if offset + 1 == BLOCK_CAP {
let next = (*block).wait_next();
let mut next_index = (new_head & !MARK_BIT).wrapping_add(1 << SHIFT);
if !(*next).next.load(Ordering::Relaxed).is_null() {
next_index |= MARK_BIT;
}
self.head.block.store(next, Ordering::Release);
self.head.index.store(next_index, Ordering::Release);
}
token.list.block = block as *const u8;
token.list.offset = offset;
return true;
},
Err(h) => {
head = h;
block = self.head.block.load(Ordering::Acquire);
backoff.spin();
}
}
}
}
/// Reads a message from the channel.
pub unsafe fn read(&self, token: &mut Token) -> Result<T, ()> {
if token.list.block.is_null() {
// The channel is disconnected.
return Err(());
}
// Read the message.
let block = token.list.block as *mut Block<T>;
let offset = token.list.offset;
let slot = (*block).slots.get_unchecked(offset);
slot.wait_write();
let msg = slot.msg.get().read().assume_init();
// Destroy the block if we've reached the end, or if another thread wanted to destroy but
// couldn't because we were busy reading from the slot.
if offset + 1 == BLOCK_CAP {
Block::destroy(block, 0);
} else if slot.state.fetch_or(READ, Ordering::AcqRel) & DESTROY != 0 {
Block::destroy(block, offset + 1);
}
Ok(msg)
}
/// Attempts to send a message into the channel.
pub fn try_send(&self, msg: T) -> Result<(), TrySendError<T>> {
self.send(msg, None).map_err(|err| match err {
SendTimeoutError::Disconnected(msg) => TrySendError::Disconnected(msg),
SendTimeoutError::Timeout(_) => unreachable!(),
})
}
/// Sends a message into the channel.
pub fn send(&self, msg: T, _deadline: Option<Instant>) -> Result<(), SendTimeoutError<T>> {
let token = &mut Token::default();
assert!(self.start_send(token));
unsafe {
self.write(token, msg)
.map_err(SendTimeoutError::Disconnected)
}
}
/// Attempts to receive a message without blocking.
pub fn try_recv(&self) -> Result<T, TryRecvError> {
let token = &mut Token::default();
if self.start_recv(token) {
unsafe { self.read(token).map_err(|_| TryRecvError::Disconnected) }
} else {
Err(TryRecvError::Empty)
}
}
/// Receives a message from the channel.
pub fn recv(&self, deadline: Option<Instant>) -> Result<T, RecvTimeoutError> {
let token = &mut Token::default();
loop {
// Try receiving a message several times.
let backoff = Backoff::new();
loop {
if self.start_recv(token) {
unsafe {
return self.read(token).map_err(|_| RecvTimeoutError::Disconnected);
}
}
if backoff.is_completed() {
break;
} else {
backoff.snooze();
}
}
if let Some(d) = deadline {
if Instant::now() >= d {
return Err(RecvTimeoutError::Timeout);
}
}
// Prepare for blocking until a sender wakes us up.
Context::with(|cx| {
let oper = Operation::hook(token);
self.receivers.register(oper, cx);
// Has the channel become ready just now?
if !self.is_empty() || self.is_disconnected() {
let _ = cx.try_select(Selected::Aborted);
}
// Block the current thread.
let sel = cx.wait_until(deadline);
match sel {
Selected::Waiting => unreachable!(),
Selected::Aborted | Selected::Disconnected => {
self.receivers.unregister(oper).unwrap();
// If the channel was disconnected, we still have to check for remaining
// messages.
}
Selected::Operation(_) => {}
}
});
}
}
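// The blocking protocol used by `recv` above: the thread registers itself with `receivers`,
// immediately aborts the registration if the channel became ready (or disconnected) in the
// meantime, and otherwise parks until a sender selects its operation or the deadline expires.
// In every case control returns to the outer loop, which retries `start_recv`, so a wake-up
// never consumes a message by itself.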
/// Returns the current number of messages inside the channel.
pub fn len(&self) -> usize {
loop {
// Load the tail index, then load the head index.
let mut tail = self.tail.index.load(Ordering::SeqCst);
let mut head = self.head.index.load(Ordering::SeqCst);
// If the tail index didn't change, we've got consistent indices to work with.
if self.tail.index.load(Ordering::SeqCst) == tail {
// Erase the lower bits.
tail &= !((1 << SHIFT) - 1);
head &= !((1 << SHIFT) - 1);
// Fix up indices if they fall onto block ends.
if (tail >> SHIFT) & (LAP - 1) == LAP - 1 {
tail = tail.wrapping_add(1 << SHIFT);
}
if (head >> SHIFT) & (LAP - 1) == LAP - 1 {
head = head.wrapping_add(1 << SHIFT);
}
// Rotate indices so that head falls into the first block.
let lap = (head >> SHIFT) / LAP;
tail = tail.wrapping_sub((lap * LAP) << SHIFT);
head = head.wrapping_sub((lap * LAP) << SHIFT);
// Remove the lower bits.
tail >>= SHIFT;
head >>= SHIFT;
// Return the difference minus the number of blocks between tail and head.
return tail - head - tail / LAP;
}
}
}
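// Worked example for `len` above, with LAP = 32 (31 usable slots per block): after 40 sends and
// 5 receives, the tail sequence number is 41 (the extra 1 comes from skipping the reserved
// end-of-block offset) and the head sequence number is 5. The fix-up and rotation steps leave
// them unchanged here, and the result is 41 - 5 - 41 / 32 = 35, which is exactly the
// 40 - 5 = 35 messages still in the channel.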
/// Returns the capacity of the channel.
pub fn capacity(&self) -> Option<usize> {
None
}
/// Disconnects the channel and wakes up all blocked receivers.
///
/// Returns `true` if this call disconnected the channel.
pub fn disconnect(&self) -> bool {
let tail = self.tail.index.fetch_or(MARK_BIT, Ordering::SeqCst);
if tail & MARK_BIT == 0 {
self.receivers.disconnect();
true
} else {
false
}
}
/// Returns `true` if the channel is disconnected.
pub fn is_disconnected(&self) -> bool {
self.tail.index.load(Ordering::SeqCst) & MARK_BIT != 0
}
/// Returns `true` if the channel is empty.
pub fn is_empty(&self) -> bool {
let head = self.head.index.load(Ordering::SeqCst);
let tail = self.tail.index.load(Ordering::SeqCst);
head >> SHIFT == tail >> SHIFT
}
/// Returns `true` if the channel is full.
pub fn is_full(&self) -> bool {
false
}
}
impl<T> Drop for Channel<T> {
fn drop(&mut self) {
let mut head = self.head.index.load(Ordering::Relaxed);
let mut tail = self.tail.index.load(Ordering::Relaxed);
let mut block = self.head.block.load(Ordering::Relaxed);
// Erase the lower bits.
head &= !((1 << SHIFT) - 1);
tail &= !((1 << SHIFT) - 1);
unsafe {
// Drop all messages between head and tail and deallocate the heap-allocated blocks.
while head != tail {
let offset = (head >> SHIFT) % LAP;
if offset < BLOCK_CAP {
// Drop the message in the slot.
let slot = (*block).slots.get_unchecked(offset);
let p = &mut *slot.msg.get();
p.as_mut_ptr().drop_in_place();
} else {
// Deallocate the block and move to the next one.
let next = (*block).next.load(Ordering::Relaxed);
drop(Box::from_raw(block));
block = next;
}
head = head.wrapping_add(1 << SHIFT);
}
// Deallocate the last remaining block.
if !block.is_null() {
drop(Box::from_raw(block));
}
}
}
}
/// Receiver handle to a channel.
pub struct Receiver<'a, T>(&'a Channel<T>);
/// Sender handle to a channel.
pub struct Sender<'a, T>(&'a Channel<T>);
impl<T> SelectHandle for Receiver<'_, T> {
fn try_select(&self, token: &mut Token) -> bool {
self.0.start_recv(token)
}
fn deadline(&self) -> Option<Instant> {
None
}
fn register(&self, oper: Operation, cx: &Context) -> bool {
self.0.receivers.register(oper, cx);
self.is_ready()
}
fn unregister(&self, oper: Operation) {
self.0.receivers.unregister(oper);
}
fn accept(&self, token: &mut Token, _cx: &Context) -> bool {
self.try_select(token)
}
fn is_ready(&self) -> bool {
!self.0.is_empty() || self.0.is_disconnected()
}
fn watch(&self, oper: Operation, cx: &Context) -> bool {
self.0.receivers.watch(oper, cx);
self.is_ready()
}
fn unwatch(&self, oper: Operation) {
self.0.receivers.unwatch(oper);
}
}
impl<T> SelectHandle for Sender<'_, T> {
fn try_select(&self, token: &mut Token) -> bool {
self.0.start_send(token)
}
fn deadline(&self) -> Option<Instant> {
None
}
fn register(&self, _oper: Operation, _cx: &Context) -> bool {
self.is_ready()
}
fn unregister(&self, _oper: Operation) {}
fn accept(&self, token: &mut Token, _cx: &Context) -> bool {
self.try_select(token)
}
fn is_ready(&self) -> bool {
true
}
fn watch(&self, _oper: Operation, _cx: &Context) -> bool {
self.is_ready()
}
fn unwatch(&self, _oper: Operation) {}
}


@ -0,0 +1,17 @@
//! Channel flavors.
//!
//! There are six flavors:
//!
//! 1. `after` - Channel that delivers a message after a certain amount of time.
//! 2. `array` - Bounded channel based on a preallocated array.
//! 3. `list` - Unbounded channel implemented as a linked list.
//! 4. `never` - Channel that never delivers messages.
//! 5. `tick` - Channel that delivers messages periodically.
//! 6. `zero` - Zero-capacity channel.
pub mod after;
pub mod array;
pub mod list;
pub mod never;
pub mod tick;
pub mod zero;


@ -0,0 +1,110 @@
//! Channel that never delivers messages.
//!
//! Messages cannot be sent into this kind of channel.
use std::marker::PhantomData;
use std::time::Instant;
use crate::context::Context;
use crate::err::{RecvTimeoutError, TryRecvError};
use crate::select::{Operation, SelectHandle, Token};
use crate::utils;
/// This flavor doesn't need a token.
pub type NeverToken = ();
/// Channel that never delivers messages.
pub struct Channel<T> {
_marker: PhantomData<T>,
}
impl<T> Channel<T> {
/// Creates a channel that never delivers messages.
#[inline]
pub fn new() -> Self {
Channel {
_marker: PhantomData,
}
}
/// Attempts to receive a message without blocking.
#[inline]
pub fn try_recv(&self) -> Result<T, TryRecvError> {
Err(TryRecvError::Empty)
}
/// Receives a message from the channel.
#[inline]
pub fn recv(&self, deadline: Option<Instant>) -> Result<T, RecvTimeoutError> {
utils::sleep_until(deadline);
Err(RecvTimeoutError::Timeout)
}
/// Reads a message from the channel.
#[inline]
pub unsafe fn read(&self, _token: &mut Token) -> Result<T, ()> {
Err(())
}
/// Returns `true` if the channel is empty.
#[inline]
pub fn is_empty(&self) -> bool {
true
}
/// Returns `true` if the channel is full.
#[inline]
pub fn is_full(&self) -> bool {
true
}
/// Returns the number of messages in the channel.
#[inline]
pub fn len(&self) -> usize {
0
}
/// Returns the capacity of the channel.
#[inline]
pub fn capacity(&self) -> Option<usize> {
Some(0)
}
}
impl<T> SelectHandle for Channel<T> {
#[inline]
fn try_select(&self, _token: &mut Token) -> bool {
false
}
#[inline]
fn deadline(&self) -> Option<Instant> {
None
}
#[inline]
fn register(&self, _oper: Operation, _cx: &Context) -> bool {
self.is_ready()
}
#[inline]
fn unregister(&self, _oper: Operation) {}
#[inline]
fn accept(&self, token: &mut Token, _cx: &Context) -> bool {
self.try_select(token)
}
#[inline]
fn is_ready(&self) -> bool {
false
}
#[inline]
fn watch(&self, _oper: Operation, _cx: &Context) -> bool {
self.is_ready()
}
#[inline]
fn unwatch(&self, _oper: Operation) {}
}


@ -0,0 +1,167 @@
//! Channel that delivers messages periodically.
//!
//! Messages cannot be sent into this kind of channel; they are materialized on demand.
use std::thread;
use std::time::{Duration, Instant};
use crossbeam_utils::atomic::AtomicCell;
use crate::context::Context;
use crate::err::{RecvTimeoutError, TryRecvError};
use crate::select::{Operation, SelectHandle, Token};
/// Result of a receive operation.
pub type TickToken = Option<Instant>;
/// Channel that delivers messages periodically.
pub struct Channel {
/// The instant at which the next message will be delivered.
delivery_time: AtomicCell<Instant>,
/// The time interval in which messages get delivered.
duration: Duration,
}
impl Channel {
/// Creates a channel that delivers messages periodically.
#[inline]
pub fn new(dur: Duration) -> Self {
Channel {
delivery_time: AtomicCell::new(Instant::now() + dur),
duration: dur,
}
}
/// Attempts to receive a message without blocking.
#[inline]
pub fn try_recv(&self) -> Result<Instant, TryRecvError> {
loop {
let now = Instant::now();
let delivery_time = self.delivery_time.load();
if now < delivery_time {
return Err(TryRecvError::Empty);
}
if self
.delivery_time
.compare_exchange(delivery_time, now + self.duration)
.is_ok()
{
return Ok(delivery_time);
}
}
}
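// A tick "message" is simply the previously scheduled delivery time. `try_recv` succeeds only
// once that instant has passed, and the compare_exchange atomically reschedules the next tick to
// `now + self.duration`, so when several threads race for the same tick exactly one of them
// receives it and the others see the channel as empty again.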
/// Receives a message from the channel.
#[inline]
pub fn recv(&self, deadline: Option<Instant>) -> Result<Instant, RecvTimeoutError> {
loop {
let delivery_time = self.delivery_time.load();
let now = Instant::now();
if let Some(d) = deadline {
if d < delivery_time {
if now < d {
thread::sleep(d - now);
}
return Err(RecvTimeoutError::Timeout);
}
}
if self
.delivery_time
.compare_exchange(delivery_time, delivery_time.max(now) + self.duration)
.is_ok()
{
if now < delivery_time {
thread::sleep(delivery_time - now);
}
return Ok(delivery_time);
}
}
}
/// Reads a message from the channel.
#[inline]
pub unsafe fn read(&self, token: &mut Token) -> Result<Instant, ()> {
token.tick.ok_or(())
}
/// Returns `true` if the channel is empty.
#[inline]
pub fn is_empty(&self) -> bool {
Instant::now() < self.delivery_time.load()
}
/// Returns `true` if the channel is full.
#[inline]
pub fn is_full(&self) -> bool {
!self.is_empty()
}
/// Returns the number of messages in the channel.
#[inline]
pub fn len(&self) -> usize {
if self.is_empty() {
0
} else {
1
}
}
/// Returns the capacity of the channel.
#[inline]
pub fn capacity(&self) -> Option<usize> {
Some(1)
}
}
impl SelectHandle for Channel {
#[inline]
fn try_select(&self, token: &mut Token) -> bool {
match self.try_recv() {
Ok(msg) => {
token.tick = Some(msg);
true
}
Err(TryRecvError::Disconnected) => {
token.tick = None;
true
}
Err(TryRecvError::Empty) => false,
}
}
#[inline]
fn deadline(&self) -> Option<Instant> {
Some(self.delivery_time.load())
}
#[inline]
fn register(&self, _oper: Operation, _cx: &Context) -> bool {
self.is_ready()
}
#[inline]
fn unregister(&self, _oper: Operation) {}
#[inline]
fn accept(&self, token: &mut Token, _cx: &Context) -> bool {
self.try_select(token)
}
#[inline]
fn is_ready(&self) -> bool {
!self.is_empty()
}
#[inline]
fn watch(&self, _oper: Operation, _cx: &Context) -> bool {
self.is_ready()
}
#[inline]
fn unwatch(&self, _oper: Operation) {}
}


@ -0,0 +1,466 @@
//! Zero-capacity channel.
//!
//! This kind of channel is also known as a *rendezvous* channel.
use std::cell::UnsafeCell;
use std::marker::PhantomData;
use std::sync::atomic::{AtomicBool, Ordering};
use std::time::Instant;
use crossbeam_utils::Backoff;
use crate::context::Context;
use crate::err::{RecvTimeoutError, SendTimeoutError, TryRecvError, TrySendError};
use crate::select::{Operation, SelectHandle, Selected, Token};
use crate::utils::Spinlock;
use crate::waker::Waker;
/// A pointer to a packet.
pub type ZeroToken = usize;
/// A slot for passing one message from a sender to a receiver.
struct Packet<T> {
/// Equals `true` if the packet is allocated on the stack.
on_stack: bool,
/// Equals `true` once the packet is ready for reading or writing.
ready: AtomicBool,
/// The message.
msg: UnsafeCell<Option<T>>,
}
impl<T> Packet<T> {
/// Creates an empty packet on the stack.
fn empty_on_stack() -> Packet<T> {
Packet {
on_stack: true,
ready: AtomicBool::new(false),
msg: UnsafeCell::new(None),
}
}
/// Creates an empty packet on the heap.
fn empty_on_heap() -> Box<Packet<T>> {
Box::new(Packet {
on_stack: false,
ready: AtomicBool::new(false),
msg: UnsafeCell::new(None),
})
}
/// Creates a packet on the stack, containing a message.
fn message_on_stack(msg: T) -> Packet<T> {
Packet {
on_stack: true,
ready: AtomicBool::new(false),
msg: UnsafeCell::new(Some(msg)),
}
}
/// Waits until the packet becomes ready for reading or writing.
fn wait_ready(&self) {
let backoff = Backoff::new();
while !self.ready.load(Ordering::Acquire) {
backoff.snooze();
}
}
}
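// How the packet is used: a blocking `send` or `recv` places a `Packet` on its own stack and
// registers a pointer to it with the waker, while `SelectHandle::register` allocates one on the
// heap because it may outlive the call. The `ready` flag is the handshake in both directions: a
// sender sets it after writing the message, and a receiver reading from an on-stack packet sets
// it after taking the message so the owning thread knows the packet may be dropped.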
/// Inner representation of a zero-capacity channel.
struct Inner {
/// Senders waiting to pair up with a receive operation.
senders: Waker,
/// Receivers waiting to pair up with a send operation.
receivers: Waker,
/// Equals `true` when the channel is disconnected.
is_disconnected: bool,
}
/// Zero-capacity channel.
pub struct Channel<T> {
/// Inner representation of the channel.
inner: Spinlock<Inner>,
/// Indicates that dropping a `Channel<T>` may drop values of type `T`.
_marker: PhantomData<T>,
}
impl<T> Channel<T> {
/// Constructs a new zero-capacity channel.
pub fn new() -> Self {
Channel {
inner: Spinlock::new(Inner {
senders: Waker::new(),
receivers: Waker::new(),
is_disconnected: false,
}),
_marker: PhantomData,
}
}
/// Returns a receiver handle to the channel.
pub fn receiver(&self) -> Receiver<'_, T> {
Receiver(self)
}
/// Returns a sender handle to the channel.
pub fn sender(&self) -> Sender<'_, T> {
Sender(self)
}
/// Attempts to reserve a slot for sending a message.
fn start_send(&self, token: &mut Token) -> bool {
let mut inner = self.inner.lock();
// If there's a waiting receiver, pair up with it.
if let Some(operation) = inner.receivers.try_select() {
token.zero = operation.packet;
true
} else if inner.is_disconnected {
token.zero = 0;
true
} else {
false
}
}
/// Writes a message into the packet.
pub unsafe fn write(&self, token: &mut Token, msg: T) -> Result<(), T> {
// If there is no packet, the channel is disconnected.
if token.zero == 0 {
return Err(msg);
}
let packet = &*(token.zero as *const Packet<T>);
packet.msg.get().write(Some(msg));
packet.ready.store(true, Ordering::Release);
Ok(())
}
/// Attempts to pair up with a sender.
fn start_recv(&self, token: &mut Token) -> bool {
let mut inner = self.inner.lock();
// If there's a waiting sender, pair up with it.
if let Some(operation) = inner.senders.try_select() {
token.zero = operation.packet;
true
} else if inner.is_disconnected {
token.zero = 0;
true
} else {
false
}
}
/// Reads a message from the packet.
pub unsafe fn read(&self, token: &mut Token) -> Result<T, ()> {
// If there is no packet, the channel is disconnected.
if token.zero == 0 {
return Err(());
}
let packet = &*(token.zero as *const Packet<T>);
if packet.on_stack {
// The message has been in the packet from the beginning, so there is no need to wait
// for it. However, after reading the message, we need to set `ready` to `true` in
// order to signal that the packet can be destroyed.
let msg = packet.msg.get().replace(None).unwrap();
packet.ready.store(true, Ordering::Release);
Ok(msg)
} else {
// Wait until the message becomes available, then read it and destroy the
// heap-allocated packet.
packet.wait_ready();
let msg = packet.msg.get().replace(None).unwrap();
drop(Box::from_raw(packet as *const Packet<T> as *mut Packet<T>));
Ok(msg)
}
}
/// Attempts to send a message into the channel.
pub fn try_send(&self, msg: T) -> Result<(), TrySendError<T>> {
let token = &mut Token::default();
let mut inner = self.inner.lock();
// If there's a waiting receiver, pair up with it.
if let Some(operation) = inner.receivers.try_select() {
token.zero = operation.packet;
drop(inner);
unsafe {
self.write(token, msg).ok().unwrap();
}
Ok(())
} else if inner.is_disconnected {
Err(TrySendError::Disconnected(msg))
} else {
Err(TrySendError::Full(msg))
}
}
/// Sends a message into the channel.
pub fn send(&self, msg: T, deadline: Option<Instant>) -> Result<(), SendTimeoutError<T>> {
let token = &mut Token::default();
let mut inner = self.inner.lock();
// If there's a waiting receiver, pair up with it.
if let Some(operation) = inner.receivers.try_select() {
token.zero = operation.packet;
drop(inner);
unsafe {
self.write(token, msg).ok().unwrap();
}
return Ok(());
}
if inner.is_disconnected {
return Err(SendTimeoutError::Disconnected(msg));
}
Context::with(|cx| {
// Prepare for blocking until a receiver wakes us up.
let oper = Operation::hook(token);
let packet = Packet::<T>::message_on_stack(msg);
inner
.senders
.register_with_packet(oper, &packet as *const Packet<T> as usize, cx);
inner.receivers.notify();
drop(inner);
// Block the current thread.
let sel = cx.wait_until(deadline);
match sel {
Selected::Waiting => unreachable!(),
Selected::Aborted => {
self.inner.lock().senders.unregister(oper).unwrap();
let msg = unsafe { packet.msg.get().replace(None).unwrap() };
Err(SendTimeoutError::Timeout(msg))
}
Selected::Disconnected => {
self.inner.lock().senders.unregister(oper).unwrap();
let msg = unsafe { packet.msg.get().replace(None).unwrap() };
Err(SendTimeoutError::Disconnected(msg))
}
Selected::Operation(_) => {
// Wait until the message is read, then drop the packet.
packet.wait_ready();
Ok(())
}
}
})
}
/// Attempts to receive a message without blocking.
pub fn try_recv(&self) -> Result<T, TryRecvError> {
let token = &mut Token::default();
let mut inner = self.inner.lock();
// If there's a waiting sender, pair up with it.
if let Some(operation) = inner.senders.try_select() {
token.zero = operation.packet;
drop(inner);
unsafe { self.read(token).map_err(|_| TryRecvError::Disconnected) }
} else if inner.is_disconnected {
Err(TryRecvError::Disconnected)
} else {
Err(TryRecvError::Empty)
}
}
/// Receives a message from the channel.
pub fn recv(&self, deadline: Option<Instant>) -> Result<T, RecvTimeoutError> {
let token = &mut Token::default();
let mut inner = self.inner.lock();
// If there's a waiting sender, pair up with it.
if let Some(operation) = inner.senders.try_select() {
token.zero = operation.packet;
drop(inner);
unsafe {
return self.read(token).map_err(|_| RecvTimeoutError::Disconnected);
}
}
if inner.is_disconnected {
return Err(RecvTimeoutError::Disconnected);
}
Context::with(|cx| {
// Prepare for blocking until a sender wakes us up.
let oper = Operation::hook(token);
let packet = Packet::<T>::empty_on_stack();
inner
.receivers
.register_with_packet(oper, &packet as *const Packet<T> as usize, cx);
inner.senders.notify();
drop(inner);
// Block the current thread.
let sel = cx.wait_until(deadline);
match sel {
Selected::Waiting => unreachable!(),
Selected::Aborted => {
self.inner.lock().receivers.unregister(oper).unwrap();
Err(RecvTimeoutError::Timeout)
}
Selected::Disconnected => {
self.inner.lock().receivers.unregister(oper).unwrap();
Err(RecvTimeoutError::Disconnected)
}
Selected::Operation(_) => {
// Wait until the message is provided, then read it.
packet.wait_ready();
unsafe { Ok(packet.msg.get().replace(None).unwrap()) }
}
}
})
}
/// Disconnects the channel and wakes up all blocked senders and receivers.
///
/// Returns `true` if this call disconnected the channel.
pub fn disconnect(&self) -> bool {
let mut inner = self.inner.lock();
if !inner.is_disconnected {
inner.is_disconnected = true;
inner.senders.disconnect();
inner.receivers.disconnect();
true
} else {
false
}
}
/// Returns the current number of messages inside the channel.
pub fn len(&self) -> usize {
0
}
/// Returns the capacity of the channel.
pub fn capacity(&self) -> Option<usize> {
Some(0)
}
/// Returns `true` if the channel is empty.
pub fn is_empty(&self) -> bool {
true
}
/// Returns `true` if the channel is full.
pub fn is_full(&self) -> bool {
true
}
}
/// Receiver handle to a channel.
pub struct Receiver<'a, T>(&'a Channel<T>);
/// Sender handle to a channel.
pub struct Sender<'a, T>(&'a Channel<T>);
impl<T> SelectHandle for Receiver<'_, T> {
fn try_select(&self, token: &mut Token) -> bool {
self.0.start_recv(token)
}
fn deadline(&self) -> Option<Instant> {
None
}
fn register(&self, oper: Operation, cx: &Context) -> bool {
let packet = Box::into_raw(Packet::<T>::empty_on_heap());
let mut inner = self.0.inner.lock();
inner
.receivers
.register_with_packet(oper, packet as usize, cx);
inner.senders.notify();
inner.senders.can_select() || inner.is_disconnected
}
fn unregister(&self, oper: Operation) {
if let Some(operation) = self.0.inner.lock().receivers.unregister(oper) {
unsafe {
drop(Box::from_raw(operation.packet as *mut Packet<T>));
}
}
}
fn accept(&self, token: &mut Token, cx: &Context) -> bool {
token.zero = cx.wait_packet();
true
}
fn is_ready(&self) -> bool {
let inner = self.0.inner.lock();
inner.senders.can_select() || inner.is_disconnected
}
fn watch(&self, oper: Operation, cx: &Context) -> bool {
let mut inner = self.0.inner.lock();
inner.receivers.watch(oper, cx);
inner.senders.can_select() || inner.is_disconnected
}
fn unwatch(&self, oper: Operation) {
let mut inner = self.0.inner.lock();
inner.receivers.unwatch(oper);
}
}
impl<T> SelectHandle for Sender<'_, T> {
fn try_select(&self, token: &mut Token) -> bool {
self.0.start_send(token)
}
fn deadline(&self) -> Option<Instant> {
None
}
fn register(&self, oper: Operation, cx: &Context) -> bool {
let packet = Box::into_raw(Packet::<T>::empty_on_heap());
let mut inner = self.0.inner.lock();
inner
.senders
.register_with_packet(oper, packet as usize, cx);
inner.receivers.notify();
inner.receivers.can_select() || inner.is_disconnected
}
fn unregister(&self, oper: Operation) {
if let Some(operation) = self.0.inner.lock().senders.unregister(oper) {
unsafe {
drop(Box::from_raw(operation.packet as *mut Packet<T>));
}
}
}
fn accept(&self, token: &mut Token, cx: &Context) -> bool {
token.zero = cx.wait_packet();
true
}
fn is_ready(&self) -> bool {
let inner = self.0.inner.lock();
inner.receivers.can_select() || inner.is_disconnected
}
fn watch(&self, oper: Operation, cx: &Context) -> bool {
let mut inner = self.0.inner.lock();
inner.senders.watch(oper, cx);
inner.receivers.can_select() || inner.is_disconnected
}
fn unwatch(&self, oper: Operation) {
let mut inner = self.0.inner.lock();
inner.senders.unwatch(oper);
}
}


@ -0,0 +1,376 @@
//! Multi-producer multi-consumer channels for message passing.
//!
//! This crate is an alternative to [`std::sync::mpsc`] with more features and better performance.
//!
//! # Hello, world!
//!
//! ```
//! use crossbeam_channel::unbounded;
//!
//! // Create a channel of unbounded capacity.
//! let (s, r) = unbounded();
//!
//! // Send a message into the channel.
//! s.send("Hello, world!").unwrap();
//!
//! // Receive the message from the channel.
//! assert_eq!(r.recv(), Ok("Hello, world!"));
//! ```
//!
//! # Channel types
//!
//! Channels can be created using two functions:
//!
//! * [`bounded`] creates a channel of bounded capacity, i.e. there is a limit to how many messages
//! it can hold at a time.
//!
//! * [`unbounded`] creates a channel of unbounded capacity, i.e. it can hold any number of
//! messages at a time.
//!
//! Both functions return a [`Sender`] and a [`Receiver`], which represent the two opposite sides
//! of a channel.
//!
//! Creating a bounded channel:
//!
//! ```
//! use crossbeam_channel::bounded;
//!
//! // Create a channel that can hold at most 5 messages at a time.
//! let (s, r) = bounded(5);
//!
//! // Can send only 5 messages without blocking.
//! for i in 0..5 {
//! s.send(i).unwrap();
//! }
//!
//! // Another call to `send` would block because the channel is full.
//! // s.send(5).unwrap();
//! ```
//!
//! Creating an unbounded channel:
//!
//! ```
//! use crossbeam_channel::unbounded;
//!
//! // Create an unbounded channel.
//! let (s, r) = unbounded();
//!
//! // Can send any number of messages into the channel without blocking.
//! for i in 0..1000 {
//! s.send(i).unwrap();
//! }
//! ```
//!
//! A special case is the zero-capacity channel, which cannot hold any messages. Instead, send and
//! receive operations must appear at the same time in order to pair up and pass the message over:
//!
//! ```
//! use std::thread;
//! use crossbeam_channel::bounded;
//!
//! // Create a zero-capacity channel.
//! let (s, r) = bounded(0);
//!
//! // Sending blocks until a receive operation appears on the other side.
//! thread::spawn(move || s.send("Hi!").unwrap());
//!
//! // Receiving blocks until a send operation appears on the other side.
//! assert_eq!(r.recv(), Ok("Hi!"));
//! ```
//!
//! # Sharing channels
//!
//! Senders and receivers can be cloned and sent to other threads:
//!
//! ```
//! use std::thread;
//! use crossbeam_channel::bounded;
//!
//! let (s1, r1) = bounded(0);
//! let (s2, r2) = (s1.clone(), r1.clone());
//!
//! // Spawn a thread that receives a message and then sends one.
//! thread::spawn(move || {
//! r2.recv().unwrap();
//! s2.send(2).unwrap();
//! });
//!
//! // Send a message and then receive one.
//! s1.send(1).unwrap();
//! r1.recv().unwrap();
//! ```
//!
//! Note that cloning only creates a new handle to the same sending or receiving side. It does not
//! create a separate stream of messages in any way:
//!
//! ```
//! use crossbeam_channel::unbounded;
//!
//! let (s1, r1) = unbounded();
//! let (s2, r2) = (s1.clone(), r1.clone());
//! let (s3, r3) = (s2.clone(), r2.clone());
//!
//! s1.send(10).unwrap();
//! s2.send(20).unwrap();
//! s3.send(30).unwrap();
//!
//! assert_eq!(r3.recv(), Ok(10));
//! assert_eq!(r1.recv(), Ok(20));
//! assert_eq!(r2.recv(), Ok(30));
//! ```
//!
//! It's also possible to share senders and receivers by reference:
//!
//! ```
//! use crossbeam_channel::bounded;
//! use crossbeam_utils::thread::scope;
//!
//! let (s, r) = bounded(0);
//!
//! scope(|scope| {
//! // Spawn a thread that receives a message and then sends one.
//! scope.spawn(|_| {
//! r.recv().unwrap();
//! s.send(2).unwrap();
//! });
//!
//! // Send a message and then receive one.
//! s.send(1).unwrap();
//! r.recv().unwrap();
//! }).unwrap();
//! ```
//!
//! # Disconnection
//!
//! When all senders or all receivers associated with a channel get dropped, the channel becomes
//! disconnected. No more messages can be sent, but any remaining messages can still be received.
//! Send and receive operations on a disconnected channel never block.
//!
//! ```
//! use crossbeam_channel::{unbounded, RecvError};
//!
//! let (s, r) = unbounded();
//! s.send(1).unwrap();
//! s.send(2).unwrap();
//! s.send(3).unwrap();
//!
//! // The only sender is dropped, disconnecting the channel.
//! drop(s);
//!
//! // The remaining messages can be received.
//! assert_eq!(r.recv(), Ok(1));
//! assert_eq!(r.recv(), Ok(2));
//! assert_eq!(r.recv(), Ok(3));
//!
//! // There are no more messages in the channel.
//! assert!(r.is_empty());
//!
//! // Note that calling `r.recv()` does not block.
//! // Instead, `Err(RecvError)` is returned immediately.
//! assert_eq!(r.recv(), Err(RecvError));
//! ```
//!
//! # Blocking operations
//!
//! Send and receive operations come in three flavors:
//!
//! * Non-blocking (returns immediately with success or failure).
//! * Blocking (waits until the operation succeeds or the channel becomes disconnected).
//! * Blocking with a timeout (blocks only for a certain duration of time).
//!
//! A simple example showing the difference between non-blocking and blocking operations:
//!
//! ```
//! use crossbeam_channel::{bounded, RecvError, TryRecvError};
//!
//! let (s, r) = bounded(1);
//!
//! // Send a message into the channel.
//! s.send("foo").unwrap();
//!
//! // This call would block because the channel is full.
//! // s.send("bar").unwrap();
//!
//! // Receive the message.
//! assert_eq!(r.recv(), Ok("foo"));
//!
//! // This call would block because the channel is empty.
//! // r.recv();
//!
//! // Try receiving a message without blocking.
//! assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
//!
//! // Disconnect the channel.
//! drop(s);
//!
//! // This call doesn't block because the channel is now disconnected.
//! assert_eq!(r.recv(), Err(RecvError));
//! ```
//!
//! # Iteration
//!
//! Receivers can be used as iterators. For example, method [`iter`] creates an iterator that
//! receives messages until the channel becomes empty and disconnected. Note that iteration may
//! block waiting for the next message to arrive.
//!
//! ```
//! use std::thread;
//! use crossbeam_channel::unbounded;
//!
//! let (s, r) = unbounded();
//!
//! thread::spawn(move || {
//! s.send(1).unwrap();
//! s.send(2).unwrap();
//! s.send(3).unwrap();
//! drop(s); // Disconnect the channel.
//! });
//!
//! // Collect all messages from the channel.
//! // Note that the call to `collect` blocks until the sender is dropped.
//! let v: Vec<_> = r.iter().collect();
//!
//! assert_eq!(v, [1, 2, 3]);
//! ```
//!
//! A non-blocking iterator can be created using [`try_iter`], which receives all available
//! messages without blocking:
//!
//! ```
//! use crossbeam_channel::unbounded;
//!
//! let (s, r) = unbounded();
//! s.send(1).unwrap();
//! s.send(2).unwrap();
//! s.send(3).unwrap();
//! // No need to drop the sender.
//!
//! // Receive all messages currently in the channel.
//! let v: Vec<_> = r.try_iter().collect();
//!
//! assert_eq!(v, [1, 2, 3]);
//! ```
//!
//! # Selection
//!
//! The [`select!`] macro allows you to define a set of channel operations, wait until any one of
//! them becomes ready, and finally execute it. If multiple operations are ready at the same time,
//! a random one among them is selected.
//!
//! It is also possible to define a `default` case that gets executed if none of the operations are
//! ready, either right away or for a certain duration of time.
//!
//! An operation is considered to be ready if it doesn't have to block. Note that it is ready even
//! when it will simply return an error because the channel is disconnected.
//!
//! An example of receiving a message from two channels:
//!
//! ```
//! use std::thread;
//! use std::time::Duration;
//! use crossbeam_channel::{select, unbounded};
//!
//! let (s1, r1) = unbounded();
//! let (s2, r2) = unbounded();
//!
//! thread::spawn(move || s1.send(10).unwrap());
//! thread::spawn(move || s2.send(20).unwrap());
//!
//! // At most one of these two receive operations will be executed.
//! select! {
//! recv(r1) -> msg => assert_eq!(msg, Ok(10)),
//! recv(r2) -> msg => assert_eq!(msg, Ok(20)),
//! default(Duration::from_secs(1)) => println!("timed out"),
//! }
//! ```
//!
//! If you need to select over a dynamically created list of channel operations, use [`Select`]
//! instead. The [`select!`] macro is just a convenience wrapper around [`Select`].
//!
//! # Extra channels
//!
//! Three functions can create special kinds of channels, all of which return just a [`Receiver`]
//! handle:
//!
//! * [`after`] creates a channel that delivers a single message after a certain duration of time.
//! * [`tick`] creates a channel that delivers messages periodically.
//! * [`never`] creates a channel that never delivers messages.
//!
//! These channels are very efficient because messages get lazily generated on receive operations.
//!
//! An example that prints elapsed time every 50 milliseconds for the duration of 1 second:
//!
//! ```
//! use std::time::{Duration, Instant};
//! use crossbeam_channel::{after, select, tick};
//!
//! let start = Instant::now();
//! let ticker = tick(Duration::from_millis(50));
//! let timeout = after(Duration::from_secs(1));
//!
//! loop {
//! select! {
//! recv(ticker) -> _ => println!("elapsed: {:?}", start.elapsed()),
//! recv(timeout) -> _ => break,
//! }
//! }
//! ```
//!
//! [`std::sync::mpsc`]: https://doc.rust-lang.org/std/sync/mpsc/index.html
//! [`unbounded`]: fn.unbounded.html
//! [`bounded`]: fn.bounded.html
//! [`after`]: fn.after.html
//! [`tick`]: fn.tick.html
//! [`never`]: fn.never.html
//! [`send`]: struct.Sender.html#method.send
//! [`recv`]: struct.Receiver.html#method.recv
//! [`iter`]: struct.Receiver.html#method.iter
//! [`try_iter`]: struct.Receiver.html#method.try_iter
//! [`select!`]: macro.select.html
//! [`Select`]: struct.Select.html
//! [`Sender`]: struct.Sender.html
//! [`Receiver`]: struct.Receiver.html
#![doc(test(
no_crate_inject,
attr(
deny(warnings, rust_2018_idioms),
allow(dead_code, unused_assignments, unused_variables)
)
))]
#![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)]
#![cfg_attr(not(feature = "std"), no_std)]
use cfg_if::cfg_if;
cfg_if! {
if #[cfg(feature = "std")] {
mod channel;
mod context;
mod counter;
mod err;
mod flavors;
mod select;
mod select_macro;
mod utils;
mod waker;
/// Crate internals used by the `select!` macro.
#[doc(hidden)]
pub mod internal {
pub use crate::select::SelectHandle;
pub use crate::select::{select, select_timeout, try_select};
}
pub use crate::channel::{after, never, tick};
pub use crate::channel::{bounded, unbounded};
pub use crate::channel::{IntoIter, Iter, TryIter};
pub use crate::channel::{Receiver, Sender};
pub use crate::select::{Select, SelectedOperation};
pub use crate::err::{ReadyTimeoutError, SelectTimeoutError, TryReadyError, TrySelectError};
pub use crate::err::{RecvError, RecvTimeoutError, TryRecvError};
pub use crate::err::{SendError, SendTimeoutError, TrySendError};
}
}

File diff suppressed because it is too large
 
File diff suppressed because it is too large


@ -0,0 +1,112 @@
//! Miscellaneous utilities.
use std::cell::{Cell, UnsafeCell};
use std::num::Wrapping;
use std::ops::{Deref, DerefMut};
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;
use std::time::{Duration, Instant};
use crossbeam_utils::Backoff;
/// Randomly shuffles a slice.
pub fn shuffle<T>(v: &mut [T]) {
let len = v.len();
if len <= 1 {
return;
}
thread_local! {
static RNG: Cell<Wrapping<u32>> = Cell::new(Wrapping(1_406_868_647));
}
let _ = RNG.try_with(|rng| {
for i in 1..len {
// This is the 32-bit variant of Xorshift.
//
// Source: https://en.wikipedia.org/wiki/Xorshift
let mut x = rng.get();
x ^= x << 13;
x ^= x >> 17;
x ^= x << 5;
rng.set(x);
let x = x.0;
let n = i + 1;
// This is a fast alternative to `let j = x % n`.
//
// Author: Daniel Lemire
// Source: https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
let j = ((x as u64).wrapping_mul(n as u64) >> 32) as u32 as usize;
v.swap(i, j);
}
});
}
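// The multiply-shift used in `shuffle` is Lemire's alternative to `x % n`: for a uniformly
// random 32-bit `x`, `(x as u64 * n as u64) >> 32` is an (almost) uniformly distributed value in
// `0..n`, computed without a division. For example, with x = 0x8000_0000 and n = 6 the result is
// (2147483648 * 6) >> 32 = 3.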
/// Sleeps until the deadline, or forever if the deadline isn't specified.
pub fn sleep_until(deadline: Option<Instant>) {
loop {
match deadline {
None => thread::sleep(Duration::from_secs(1000)),
Some(d) => {
let now = Instant::now();
if now >= d {
break;
}
thread::sleep(d - now);
}
}
}
}
/// A simple spinlock.
pub struct Spinlock<T> {
flag: AtomicBool,
value: UnsafeCell<T>,
}
impl<T> Spinlock<T> {
/// Returns a new spinlock initialized with `value`.
pub fn new(value: T) -> Spinlock<T> {
Spinlock {
flag: AtomicBool::new(false),
value: UnsafeCell::new(value),
}
}
/// Locks the spinlock.
pub fn lock(&self) -> SpinlockGuard<'_, T> {
let backoff = Backoff::new();
while self.flag.swap(true, Ordering::Acquire) {
backoff.snooze();
}
SpinlockGuard { parent: self }
}
}
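// The lock is a single `AtomicBool`: `swap(true, Ordering::Acquire)` spins until it observes
// `false`, acquiring the lock and synchronizing with the previous holder, and
// `SpinlockGuard::drop` releases it with a `Release` store so the protected data is visible to
// the next locker.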
/// A guard holding a spinlock locked.
pub struct SpinlockGuard<'a, T> {
parent: &'a Spinlock<T>,
}
impl<T> Drop for SpinlockGuard<'_, T> {
fn drop(&mut self) {
self.parent.flag.store(false, Ordering::Release);
}
}
impl<T> Deref for SpinlockGuard<'_, T> {
type Target = T;
fn deref(&self) -> &T {
unsafe { &*self.parent.value.get() }
}
}
impl<T> DerefMut for SpinlockGuard<'_, T> {
fn deref_mut(&mut self) -> &mut T {
unsafe { &mut *self.parent.value.get() }
}
}


@ -0,0 +1,287 @@
//! Waking mechanism for threads blocked on channel operations.
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread::{self, ThreadId};
use crate::context::Context;
use crate::select::{Operation, Selected};
use crate::utils::Spinlock;
/// Represents a thread blocked on a specific channel operation.
pub struct Entry {
/// The operation.
pub oper: Operation,
/// Optional packet.
pub packet: usize,
/// Context associated with the thread owning this operation.
pub cx: Context,
}
/// A queue of threads blocked on channel operations.
///
/// This data structure is used by threads to register blocking operations and get woken up once
/// an operation becomes ready.
pub struct Waker {
/// A list of select operations.
selectors: Vec<Entry>,
/// A list of operations waiting to be ready.
observers: Vec<Entry>,
}
impl Waker {
/// Creates a new `Waker`.
#[inline]
pub fn new() -> Self {
Waker {
selectors: Vec::new(),
observers: Vec::new(),
}
}
/// Registers a select operation.
#[inline]
pub fn register(&mut self, oper: Operation, cx: &Context) {
self.register_with_packet(oper, 0, cx);
}
/// Registers a select operation and a packet.
#[inline]
pub fn register_with_packet(&mut self, oper: Operation, packet: usize, cx: &Context) {
self.selectors.push(Entry {
oper,
packet,
cx: cx.clone(),
});
}
/// Unregisters a select operation.
#[inline]
pub fn unregister(&mut self, oper: Operation) -> Option<Entry> {
if let Some((i, _)) = self
.selectors
.iter()
.enumerate()
.find(|&(_, entry)| entry.oper == oper)
{
let entry = self.selectors.remove(i);
Some(entry)
} else {
None
}
}
/// Attempts to find another thread's entry, select the operation, and wake it up.
#[inline]
pub fn try_select(&mut self) -> Option<Entry> {
let mut entry = None;
if !self.selectors.is_empty() {
let thread_id = current_thread_id();
for i in 0..self.selectors.len() {
// Does the entry belong to a different thread?
if self.selectors[i].cx.thread_id() != thread_id {
// Try selecting this operation.
let sel = Selected::Operation(self.selectors[i].oper);
let res = self.selectors[i].cx.try_select(sel);
if res.is_ok() {
// Provide the packet.
self.selectors[i].cx.store_packet(self.selectors[i].packet);
// Wake the thread up.
self.selectors[i].cx.unpark();
// Remove the entry from the queue to keep it clean and improve
// performance.
entry = Some(self.selectors.remove(i));
break;
}
}
}
}
entry
}
/// Returns `true` if there is an entry which can be selected by the current thread.
#[inline]
pub fn can_select(&self) -> bool {
if self.selectors.is_empty() {
false
} else {
let thread_id = current_thread_id();
self.selectors.iter().any(|entry| {
entry.cx.thread_id() != thread_id && entry.cx.selected() == Selected::Waiting
})
}
}
/// Registers an operation waiting to be ready.
#[inline]
pub fn watch(&mut self, oper: Operation, cx: &Context) {
self.observers.push(Entry {
oper,
packet: 0,
cx: cx.clone(),
});
}
/// Unregisters an operation waiting to be ready.
#[inline]
pub fn unwatch(&mut self, oper: Operation) {
self.observers.retain(|e| e.oper != oper);
}
/// Notifies all operations waiting to be ready.
#[inline]
pub fn notify(&mut self) {
for entry in self.observers.drain(..) {
if entry.cx.try_select(Selected::Operation(entry.oper)).is_ok() {
entry.cx.unpark();
}
}
}
/// Notifies all registered operations that the channel is disconnected.
#[inline]
pub fn disconnect(&mut self) {
for entry in self.selectors.iter() {
if entry.cx.try_select(Selected::Disconnected).is_ok() {
// Wake the thread up.
//
// Here we don't remove the entry from the queue. Registered threads must
// unregister from the waker by themselves. They might also want to recover the
// packet value and destroy it, if necessary.
entry.cx.unpark();
}
}
self.notify();
}
}
impl Drop for Waker {
#[inline]
fn drop(&mut self) {
debug_assert_eq!(self.selectors.len(), 0);
debug_assert_eq!(self.observers.len(), 0);
}
}
/// A waker that can be shared among threads without external locking.
///
/// This is a simple wrapper around `Waker` that internally uses a spinlock for synchronization.
pub struct SyncWaker {
/// The inner `Waker`.
inner: Spinlock<Waker>,
/// `true` if the waker is empty.
is_empty: AtomicBool,
}
impl SyncWaker {
/// Creates a new `SyncWaker`.
#[inline]
pub fn new() -> Self {
SyncWaker {
inner: Spinlock::new(Waker::new()),
is_empty: AtomicBool::new(true),
}
}
/// Registers the current thread with an operation.
#[inline]
pub fn register(&self, oper: Operation, cx: &Context) {
let mut inner = self.inner.lock();
inner.register(oper, cx);
self.is_empty.store(
inner.selectors.is_empty() && inner.observers.is_empty(),
Ordering::SeqCst,
);
}
/// Unregisters an operation previously registered by the current thread.
#[inline]
pub fn unregister(&self, oper: Operation) -> Option<Entry> {
let mut inner = self.inner.lock();
let entry = inner.unregister(oper);
self.is_empty.store(
inner.selectors.is_empty() && inner.observers.is_empty(),
Ordering::SeqCst,
);
entry
}
/// Attempts to find one thread (not the current one), select its operation, and wake it up.
#[inline]
pub fn notify(&self) {
if !self.is_empty.load(Ordering::SeqCst) {
let mut inner = self.inner.lock();
if !self.is_empty.load(Ordering::SeqCst) {
inner.try_select();
inner.notify();
self.is_empty.store(
inner.selectors.is_empty() && inner.observers.is_empty(),
Ordering::SeqCst,
);
}
}
}
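// `notify` checks `is_empty` before taking the spinlock so the common case, where no thread is
// registered, avoids the lock entirely; the flag is re-checked under the lock because another
// thread may have registered between the unsynchronized check and acquiring the lock.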
/// Registers an operation waiting to be ready.
#[inline]
pub fn watch(&self, oper: Operation, cx: &Context) {
let mut inner = self.inner.lock();
inner.watch(oper, cx);
self.is_empty.store(
inner.selectors.is_empty() && inner.observers.is_empty(),
Ordering::SeqCst,
);
}
/// Unregisters an operation waiting to be ready.
#[inline]
pub fn unwatch(&self, oper: Operation) {
let mut inner = self.inner.lock();
inner.unwatch(oper);
self.is_empty.store(
inner.selectors.is_empty() && inner.observers.is_empty(),
Ordering::SeqCst,
);
}
/// Notifies all threads that the channel is disconnected.
#[inline]
pub fn disconnect(&self) {
let mut inner = self.inner.lock();
inner.disconnect();
self.is_empty.store(
inner.selectors.is_empty() && inner.observers.is_empty(),
Ordering::SeqCst,
);
}
}
impl Drop for SyncWaker {
#[inline]
fn drop(&mut self) {
debug_assert_eq!(self.is_empty.load(Ordering::SeqCst), true);
}
}
/// Returns the id of the current thread.
#[inline]
fn current_thread_id() -> ThreadId {
thread_local! {
/// Cached thread-local id.
static THREAD_ID: ThreadId = thread::current().id();
}
THREAD_ID
.try_with(|id| *id)
.unwrap_or_else(|_| thread::current().id())
}


@ -0,0 +1,334 @@
//! Tests for the after channel flavor.
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::thread;
use std::time::{Duration, Instant};
use crossbeam_channel::{after, select, Select, TryRecvError};
use crossbeam_utils::thread::scope;
fn ms(ms: u64) -> Duration {
Duration::from_millis(ms)
}
#[test]
fn fire() {
let start = Instant::now();
let r = after(ms(50));
assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
thread::sleep(ms(100));
let fired = r.try_recv().unwrap();
assert!(start < fired);
assert!(fired - start >= ms(50));
let now = Instant::now();
assert!(fired < now);
assert!(now - fired >= ms(50));
assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
select! {
recv(r) -> _ => panic!(),
default => {}
}
select! {
recv(r) -> _ => panic!(),
recv(after(ms(200))) -> _ => {}
}
}
#[test]
fn capacity() {
const COUNT: usize = 10;
for i in 0..COUNT {
let r = after(ms(i as u64));
assert_eq!(r.capacity(), Some(1));
}
}
#[test]
fn len_empty_full() {
let r = after(ms(50));
assert_eq!(r.len(), 0);
assert_eq!(r.is_empty(), true);
assert_eq!(r.is_full(), false);
thread::sleep(ms(100));
assert_eq!(r.len(), 1);
assert_eq!(r.is_empty(), false);
assert_eq!(r.is_full(), true);
r.try_recv().unwrap();
assert_eq!(r.len(), 0);
assert_eq!(r.is_empty(), true);
assert_eq!(r.is_full(), false);
}
#[test]
fn try_recv() {
let r = after(ms(200));
assert!(r.try_recv().is_err());
thread::sleep(ms(100));
assert!(r.try_recv().is_err());
thread::sleep(ms(200));
assert!(r.try_recv().is_ok());
assert!(r.try_recv().is_err());
thread::sleep(ms(200));
assert!(r.try_recv().is_err());
}
#[test]
fn recv() {
let start = Instant::now();
let r = after(ms(50));
let fired = r.recv().unwrap();
assert!(start < fired);
assert!(fired - start >= ms(50));
let now = Instant::now();
assert!(fired < now);
assert!(now - fired < fired - start);
assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
}
#[test]
fn recv_timeout() {
let start = Instant::now();
let r = after(ms(200));
assert!(r.recv_timeout(ms(100)).is_err());
let now = Instant::now();
assert!(now - start >= ms(100));
assert!(now - start <= ms(150));
let fired = r.recv_timeout(ms(200)).unwrap();
assert!(fired - start >= ms(200));
assert!(fired - start <= ms(250));
assert!(r.recv_timeout(ms(200)).is_err());
let now = Instant::now();
assert!(now - start >= ms(400));
assert!(now - start <= ms(450));
assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
}
#[test]
fn recv_two() {
let r1 = after(ms(50));
let r2 = after(ms(50));
scope(|scope| {
scope.spawn(|_| {
select! {
recv(r1) -> _ => {}
recv(r2) -> _ => {}
}
});
scope.spawn(|_| {
select! {
recv(r1) -> _ => {}
recv(r2) -> _ => {}
}
});
})
.unwrap();
}
#[test]
fn recv_race() {
select! {
recv(after(ms(50))) -> _ => {}
recv(after(ms(100))) -> _ => panic!(),
}
select! {
recv(after(ms(100))) -> _ => panic!(),
recv(after(ms(50))) -> _ => {}
}
}
#[test]
fn stress_default() {
const COUNT: usize = 10;
for _ in 0..COUNT {
select! {
recv(after(ms(0))) -> _ => {}
default => panic!(),
}
}
for _ in 0..COUNT {
select! {
recv(after(ms(100))) -> _ => panic!(),
default => {}
}
}
}
#[test]
fn select() {
const THREADS: usize = 4;
const COUNT: usize = 1000;
const TIMEOUT_MS: u64 = 100;
let v = (0..COUNT)
.map(|i| after(ms(i as u64 / TIMEOUT_MS / 2)))
.collect::<Vec<_>>();
let hits = AtomicUsize::new(0);
scope(|scope| {
for _ in 0..THREADS {
scope.spawn(|_| {
let v: Vec<&_> = v.iter().collect();
loop {
let timeout = after(ms(TIMEOUT_MS));
let mut sel = Select::new();
for r in &v {
sel.recv(r);
}
let oper_timeout = sel.recv(&timeout);
let oper = sel.select();
match oper.index() {
i if i == oper_timeout => {
oper.recv(&timeout).unwrap();
break;
}
i => {
oper.recv(&v[i]).unwrap();
hits.fetch_add(1, Ordering::SeqCst);
}
}
}
});
}
})
.unwrap();
assert_eq!(hits.load(Ordering::SeqCst), COUNT);
}
#[test]
fn ready() {
const THREADS: usize = 4;
const COUNT: usize = 1000;
const TIMEOUT_MS: u64 = 100;
let v = (0..COUNT)
.map(|i| after(ms(i as u64 / TIMEOUT_MS / 2)))
.collect::<Vec<_>>();
let hits = AtomicUsize::new(0);
scope(|scope| {
for _ in 0..THREADS {
scope.spawn(|_| {
let v: Vec<&_> = v.iter().collect();
loop {
let timeout = after(ms(TIMEOUT_MS));
let mut sel = Select::new();
for r in &v {
sel.recv(r);
}
let oper_timeout = sel.recv(&timeout);
loop {
let i = sel.ready();
if i == oper_timeout {
timeout.try_recv().unwrap();
return;
} else if v[i].try_recv().is_ok() {
hits.fetch_add(1, Ordering::SeqCst);
break;
}
}
}
});
}
})
.unwrap();
assert_eq!(hits.load(Ordering::SeqCst), COUNT);
}
#[test]
fn stress_clone() {
const RUNS: usize = 1000;
const THREADS: usize = 10;
const COUNT: usize = 50;
for i in 0..RUNS {
let r = after(ms(i as u64));
scope(|scope| {
for _ in 0..THREADS {
scope.spawn(|_| {
let r = r.clone();
let _ = r.try_recv();
for _ in 0..COUNT {
drop(r.clone());
thread::yield_now();
}
});
}
})
.unwrap();
}
}
#[test]
fn fairness() {
const COUNT: usize = 1000;
for &dur in &[0, 1] {
let mut hits = [0usize; 2];
for _ in 0..COUNT {
select! {
recv(after(ms(dur))) -> _ => hits[0] += 1,
recv(after(ms(dur))) -> _ => hits[1] += 1,
}
}
assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2));
}
}
#[test]
fn fairness_duplicates() {
const COUNT: usize = 1000;
for &dur in &[0, 1] {
let mut hits = [0usize; 5];
for _ in 0..COUNT {
let r = after(ms(dur));
select! {
recv(r) -> _ => hits[0] += 1,
recv(r) -> _ => hits[1] += 1,
recv(r) -> _ => hits[2] += 1,
recv(r) -> _ => hits[3] += 1,
recv(r) -> _ => hits[4] += 1,
}
}
assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2));
}
}


@@ -0,0 +1,654 @@
//! Tests for the array channel flavor.
use std::any::Any;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::thread;
use std::time::Duration;
use crossbeam_channel::{bounded, select, Receiver};
use crossbeam_channel::{RecvError, RecvTimeoutError, TryRecvError};
use crossbeam_channel::{SendError, SendTimeoutError, TrySendError};
use crossbeam_utils::thread::scope;
use rand::{thread_rng, Rng};
fn ms(ms: u64) -> Duration {
Duration::from_millis(ms)
}
#[test]
fn smoke() {
let (s, r) = bounded(1);
s.send(7).unwrap();
assert_eq!(r.try_recv(), Ok(7));
s.send(8).unwrap();
assert_eq!(r.recv(), Ok(8));
assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
assert_eq!(r.recv_timeout(ms(1000)), Err(RecvTimeoutError::Timeout));
}
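// Illustrative sketch (assumed helper, not an upstream test): a bounded channel
// holds at most `cap` messages, so `try_send` reports `Full` once the buffer is
// occupied and succeeds again only after a receiver drains a slot.
#[allow(dead_code)]
fn bounded_backpressure_sketch() {
    let (s, r) = bounded(1);
    assert_eq!(s.try_send(1), Ok(()));
    assert_eq!(s.try_send(2), Err(TrySendError::Full(2)));
    assert_eq!(r.try_recv(), Ok(1));
    assert_eq!(s.try_send(2), Ok(()));
}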
#[test]
fn capacity() {
for i in 1..10 {
let (s, r) = bounded::<()>(i);
assert_eq!(s.capacity(), Some(i));
assert_eq!(r.capacity(), Some(i));
}
}
#[test]
fn len_empty_full() {
let (s, r) = bounded(2);
assert_eq!(s.len(), 0);
assert_eq!(s.is_empty(), true);
assert_eq!(s.is_full(), false);
assert_eq!(r.len(), 0);
assert_eq!(r.is_empty(), true);
assert_eq!(r.is_full(), false);
s.send(()).unwrap();
assert_eq!(s.len(), 1);
assert_eq!(s.is_empty(), false);
assert_eq!(s.is_full(), false);
assert_eq!(r.len(), 1);
assert_eq!(r.is_empty(), false);
assert_eq!(r.is_full(), false);
s.send(()).unwrap();
assert_eq!(s.len(), 2);
assert_eq!(s.is_empty(), false);
assert_eq!(s.is_full(), true);
assert_eq!(r.len(), 2);
assert_eq!(r.is_empty(), false);
assert_eq!(r.is_full(), true);
r.recv().unwrap();
assert_eq!(s.len(), 1);
assert_eq!(s.is_empty(), false);
assert_eq!(s.is_full(), false);
assert_eq!(r.len(), 1);
assert_eq!(r.is_empty(), false);
assert_eq!(r.is_full(), false);
}
#[test]
fn try_recv() {
let (s, r) = bounded(100);
scope(|scope| {
scope.spawn(move |_| {
assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
thread::sleep(ms(1500));
assert_eq!(r.try_recv(), Ok(7));
thread::sleep(ms(500));
assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected));
});
scope.spawn(move |_| {
thread::sleep(ms(1000));
s.send(7).unwrap();
});
})
.unwrap();
}
#[test]
fn recv() {
let (s, r) = bounded(100);
scope(|scope| {
scope.spawn(move |_| {
assert_eq!(r.recv(), Ok(7));
thread::sleep(ms(1000));
assert_eq!(r.recv(), Ok(8));
thread::sleep(ms(1000));
assert_eq!(r.recv(), Ok(9));
assert_eq!(r.recv(), Err(RecvError));
});
scope.spawn(move |_| {
thread::sleep(ms(1500));
s.send(7).unwrap();
s.send(8).unwrap();
s.send(9).unwrap();
});
})
.unwrap();
}
#[test]
fn recv_timeout() {
let (s, r) = bounded::<i32>(100);
scope(|scope| {
scope.spawn(move |_| {
assert_eq!(r.recv_timeout(ms(1000)), Err(RecvTimeoutError::Timeout));
assert_eq!(r.recv_timeout(ms(1000)), Ok(7));
assert_eq!(
r.recv_timeout(ms(1000)),
Err(RecvTimeoutError::Disconnected)
);
});
scope.spawn(move |_| {
thread::sleep(ms(1500));
s.send(7).unwrap();
});
})
.unwrap();
}
#[test]
fn try_send() {
let (s, r) = bounded(1);
scope(|scope| {
scope.spawn(move |_| {
assert_eq!(s.try_send(1), Ok(()));
assert_eq!(s.try_send(2), Err(TrySendError::Full(2)));
thread::sleep(ms(1500));
assert_eq!(s.try_send(3), Ok(()));
thread::sleep(ms(500));
assert_eq!(s.try_send(4), Err(TrySendError::Disconnected(4)));
});
scope.spawn(move |_| {
thread::sleep(ms(1000));
assert_eq!(r.try_recv(), Ok(1));
assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
assert_eq!(r.recv(), Ok(3));
});
})
.unwrap();
}
#[test]
fn send() {
let (s, r) = bounded(1);
scope(|scope| {
scope.spawn(|_| {
s.send(7).unwrap();
thread::sleep(ms(1000));
s.send(8).unwrap();
thread::sleep(ms(1000));
s.send(9).unwrap();
thread::sleep(ms(1000));
s.send(10).unwrap();
});
scope.spawn(|_| {
thread::sleep(ms(1500));
assert_eq!(r.recv(), Ok(7));
assert_eq!(r.recv(), Ok(8));
assert_eq!(r.recv(), Ok(9));
});
})
.unwrap();
}
#[test]
fn send_timeout() {
let (s, r) = bounded(2);
scope(|scope| {
scope.spawn(move |_| {
assert_eq!(s.send_timeout(1, ms(1000)), Ok(()));
assert_eq!(s.send_timeout(2, ms(1000)), Ok(()));
assert_eq!(
s.send_timeout(3, ms(500)),
Err(SendTimeoutError::Timeout(3))
);
thread::sleep(ms(1000));
assert_eq!(s.send_timeout(4, ms(1000)), Ok(()));
thread::sleep(ms(1000));
assert_eq!(s.send(5), Err(SendError(5)));
});
scope.spawn(move |_| {
thread::sleep(ms(1000));
assert_eq!(r.recv(), Ok(1));
thread::sleep(ms(1000));
assert_eq!(r.recv(), Ok(2));
assert_eq!(r.recv(), Ok(4));
});
})
.unwrap();
}
#[test]
fn send_after_disconnect() {
let (s, r) = bounded(100);
s.send(1).unwrap();
s.send(2).unwrap();
s.send(3).unwrap();
drop(r);
assert_eq!(s.send(4), Err(SendError(4)));
assert_eq!(s.try_send(5), Err(TrySendError::Disconnected(5)));
assert_eq!(
s.send_timeout(6, ms(500)),
Err(SendTimeoutError::Disconnected(6))
);
}
#[test]
fn recv_after_disconnect() {
let (s, r) = bounded(100);
s.send(1).unwrap();
s.send(2).unwrap();
s.send(3).unwrap();
drop(s);
assert_eq!(r.recv(), Ok(1));
assert_eq!(r.recv(), Ok(2));
assert_eq!(r.recv(), Ok(3));
assert_eq!(r.recv(), Err(RecvError));
}
#[test]
fn len() {
const COUNT: usize = 25_000;
const CAP: usize = 1000;
let (s, r) = bounded(CAP);
assert_eq!(s.len(), 0);
assert_eq!(r.len(), 0);
for _ in 0..CAP / 10 {
for i in 0..50 {
s.send(i).unwrap();
assert_eq!(s.len(), i + 1);
}
for i in 0..50 {
r.recv().unwrap();
assert_eq!(r.len(), 50 - i - 1);
}
}
assert_eq!(s.len(), 0);
assert_eq!(r.len(), 0);
for i in 0..CAP {
s.send(i).unwrap();
assert_eq!(s.len(), i + 1);
}
for _ in 0..CAP {
r.recv().unwrap();
}
assert_eq!(s.len(), 0);
assert_eq!(r.len(), 0);
scope(|scope| {
scope.spawn(|_| {
for i in 0..COUNT {
assert_eq!(r.recv(), Ok(i));
let len = r.len();
assert!(len <= CAP);
}
});
scope.spawn(|_| {
for i in 0..COUNT {
s.send(i).unwrap();
let len = s.len();
assert!(len <= CAP);
}
});
})
.unwrap();
assert_eq!(s.len(), 0);
assert_eq!(r.len(), 0);
}
#[test]
fn disconnect_wakes_sender() {
let (s, r) = bounded(1);
scope(|scope| {
scope.spawn(move |_| {
assert_eq!(s.send(()), Ok(()));
assert_eq!(s.send(()), Err(SendError(())));
});
scope.spawn(move |_| {
thread::sleep(ms(1000));
drop(r);
});
})
.unwrap();
}
#[test]
fn disconnect_wakes_receiver() {
let (s, r) = bounded::<()>(1);
scope(|scope| {
scope.spawn(move |_| {
assert_eq!(r.recv(), Err(RecvError));
});
scope.spawn(move |_| {
thread::sleep(ms(1000));
drop(s);
});
})
.unwrap();
}
#[test]
fn spsc() {
const COUNT: usize = 100_000;
let (s, r) = bounded(3);
scope(|scope| {
scope.spawn(move |_| {
for i in 0..COUNT {
assert_eq!(r.recv(), Ok(i));
}
assert_eq!(r.recv(), Err(RecvError));
});
scope.spawn(move |_| {
for i in 0..COUNT {
s.send(i).unwrap();
}
});
})
.unwrap();
}
#[test]
fn mpmc() {
const COUNT: usize = 25_000;
const THREADS: usize = 4;
let (s, r) = bounded::<usize>(3);
let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::<Vec<_>>();
scope(|scope| {
for _ in 0..THREADS {
scope.spawn(|_| {
for _ in 0..COUNT {
let n = r.recv().unwrap();
v[n].fetch_add(1, Ordering::SeqCst);
}
});
}
for _ in 0..THREADS {
scope.spawn(|_| {
for i in 0..COUNT {
s.send(i).unwrap();
}
});
}
})
.unwrap();
for c in v {
assert_eq!(c.load(Ordering::SeqCst), THREADS);
}
}
#[test]
fn stress_oneshot() {
const COUNT: usize = 10_000;
for _ in 0..COUNT {
let (s, r) = bounded(1);
scope(|scope| {
scope.spawn(|_| r.recv().unwrap());
scope.spawn(|_| s.send(0).unwrap());
})
.unwrap();
}
}
#[test]
fn stress_iter() {
const COUNT: usize = 100_000;
let (request_s, request_r) = bounded(1);
let (response_s, response_r) = bounded(1);
scope(|scope| {
scope.spawn(move |_| {
let mut count = 0;
loop {
for x in response_r.try_iter() {
count += x;
if count == COUNT {
return;
}
}
request_s.send(()).unwrap();
}
});
for _ in request_r.iter() {
if response_s.send(1).is_err() {
break;
}
}
})
.unwrap();
}
#[test]
fn stress_timeout_two_threads() {
const COUNT: usize = 100;
let (s, r) = bounded(2);
scope(|scope| {
scope.spawn(|_| {
for i in 0..COUNT {
if i % 2 == 0 {
thread::sleep(ms(50));
}
loop {
if let Ok(()) = s.send_timeout(i, ms(10)) {
break;
}
}
}
});
scope.spawn(|_| {
for i in 0..COUNT {
if i % 2 == 0 {
thread::sleep(ms(50));
}
loop {
if let Ok(x) = r.recv_timeout(ms(10)) {
assert_eq!(x, i);
break;
}
}
}
});
})
.unwrap();
}
#[test]
fn drops() {
const RUNS: usize = 100;
static DROPS: AtomicUsize = AtomicUsize::new(0);
#[derive(Debug, PartialEq)]
struct DropCounter;
impl Drop for DropCounter {
fn drop(&mut self) {
DROPS.fetch_add(1, Ordering::SeqCst);
}
}
let mut rng = thread_rng();
for _ in 0..RUNS {
let steps = rng.gen_range(0, 10_000);
let additional = rng.gen_range(0, 50);
DROPS.store(0, Ordering::SeqCst);
let (s, r) = bounded::<DropCounter>(50);
scope(|scope| {
scope.spawn(|_| {
for _ in 0..steps {
r.recv().unwrap();
}
});
scope.spawn(|_| {
for _ in 0..steps {
s.send(DropCounter).unwrap();
}
});
})
.unwrap();
for _ in 0..additional {
s.send(DropCounter).unwrap();
}
assert_eq!(DROPS.load(Ordering::SeqCst), steps);
drop(s);
drop(r);
assert_eq!(DROPS.load(Ordering::SeqCst), steps + additional);
}
}
#[test]
fn linearizable() {
const COUNT: usize = 25_000;
const THREADS: usize = 4;
let (s, r) = bounded(THREADS);
scope(|scope| {
for _ in 0..THREADS {
scope.spawn(|_| {
for _ in 0..COUNT {
s.send(0).unwrap();
r.try_recv().unwrap();
}
});
}
})
.unwrap();
}
#[test]
fn fairness() {
const COUNT: usize = 10_000;
let (s1, r1) = bounded::<()>(COUNT);
let (s2, r2) = bounded::<()>(COUNT);
for _ in 0..COUNT {
s1.send(()).unwrap();
s2.send(()).unwrap();
}
let mut hits = [0usize; 2];
for _ in 0..COUNT {
select! {
recv(r1) -> _ => hits[0] += 1,
recv(r2) -> _ => hits[1] += 1,
}
}
assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2));
}
#[test]
fn fairness_duplicates() {
const COUNT: usize = 10_000;
let (s, r) = bounded::<()>(COUNT);
for _ in 0..COUNT {
s.send(()).unwrap();
}
let mut hits = [0usize; 5];
for _ in 0..COUNT {
select! {
recv(r) -> _ => hits[0] += 1,
recv(r) -> _ => hits[1] += 1,
recv(r) -> _ => hits[2] += 1,
recv(r) -> _ => hits[3] += 1,
recv(r) -> _ => hits[4] += 1,
}
}
assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2));
}
#[test]
fn recv_in_send() {
let (s, _r) = bounded(1);
s.send(()).unwrap();
#[allow(unreachable_code)]
{
select! {
send(s, panic!()) -> _ => panic!(),
default => {}
}
}
let (s, r) = bounded(2);
s.send(()).unwrap();
select! {
send(s, assert_eq!(r.recv(), Ok(()))) -> _ => {}
}
}
#[test]
fn channel_through_channel() {
const COUNT: usize = 1000;
type T = Box<dyn Any + Send>;
let (s, r) = bounded::<T>(1);
scope(|scope| {
scope.spawn(move |_| {
let mut s = s;
for _ in 0..COUNT {
let (new_s, new_r) = bounded(1);
let new_r: T = Box::new(Some(new_r));
s.send(new_r).unwrap();
s = new_s;
}
});
scope.spawn(move |_| {
let mut r = r;
for _ in 0..COUNT {
r = r
.recv()
.unwrap()
.downcast_mut::<Option<Receiver<T>>>()
.unwrap()
.take()
.unwrap()
}
});
})
.unwrap();
}

File diff suppressed because it is too large.


@@ -0,0 +1,110 @@
//! Tests for iteration over receivers.
use crossbeam_channel::unbounded;
use crossbeam_utils::thread::scope;
#[test]
fn nested_recv_iter() {
let (s, r) = unbounded::<i32>();
let (total_s, total_r) = unbounded::<i32>();
scope(|scope| {
scope.spawn(move |_| {
let mut acc = 0;
for x in r.iter() {
acc += x;
}
total_s.send(acc).unwrap();
});
s.send(3).unwrap();
s.send(1).unwrap();
s.send(2).unwrap();
drop(s);
assert_eq!(total_r.recv().unwrap(), 6);
})
.unwrap();
}
#[test]
fn recv_iter_break() {
let (s, r) = unbounded::<i32>();
let (count_s, count_r) = unbounded();
scope(|scope| {
scope.spawn(move |_| {
let mut count = 0;
for x in r.iter() {
if count >= 3 {
break;
} else {
count += x;
}
}
count_s.send(count).unwrap();
});
s.send(2).unwrap();
s.send(2).unwrap();
s.send(2).unwrap();
let _ = s.send(2);
drop(s);
assert_eq!(count_r.recv().unwrap(), 4);
})
.unwrap();
}
#[test]
fn recv_try_iter() {
let (request_s, request_r) = unbounded();
let (response_s, response_r) = unbounded();
scope(|scope| {
scope.spawn(move |_| {
let mut count = 0;
loop {
for x in response_r.try_iter() {
count += x;
if count == 6 {
return;
}
}
request_s.send(()).unwrap();
}
});
for _ in request_r.iter() {
if response_s.send(2).is_err() {
break;
}
}
})
.unwrap();
}
#[test]
fn recv_into_iter_owned() {
let mut iter = {
let (s, r) = unbounded::<i32>();
s.send(1).unwrap();
s.send(2).unwrap();
r.into_iter()
};
assert_eq!(iter.next().unwrap(), 1);
assert_eq!(iter.next().unwrap(), 2);
assert_eq!(iter.next().is_none(), true);
}
#[test]
fn recv_into_iter_borrowed() {
let (s, r) = unbounded::<i32>();
s.send(1).unwrap();
s.send(2).unwrap();
drop(s);
let mut iter = (&r).into_iter();
assert_eq!(iter.next().unwrap(), 1);
assert_eq!(iter.next().unwrap(), 2);
assert_eq!(iter.next().is_none(), true);
}


@@ -0,0 +1,533 @@
//! Tests for the list channel flavor.
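//!
//! An unbounded (list-flavored) channel never blocks the sender: `send` only
//! fails once every receiver has been dropped, which several tests below rely on.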
use std::any::Any;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::thread;
use std::time::Duration;
use crossbeam_channel::{select, unbounded, Receiver};
use crossbeam_channel::{RecvError, RecvTimeoutError, TryRecvError};
use crossbeam_channel::{SendError, SendTimeoutError, TrySendError};
use crossbeam_utils::thread::scope;
use rand::{thread_rng, Rng};
fn ms(ms: u64) -> Duration {
Duration::from_millis(ms)
}
#[test]
fn smoke() {
let (s, r) = unbounded();
s.try_send(7).unwrap();
assert_eq!(r.try_recv(), Ok(7));
s.send(8).unwrap();
assert_eq!(r.recv(), Ok(8));
assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
assert_eq!(r.recv_timeout(ms(1000)), Err(RecvTimeoutError::Timeout));
}
#[test]
fn capacity() {
let (s, r) = unbounded::<()>();
assert_eq!(s.capacity(), None);
assert_eq!(r.capacity(), None);
}
#[test]
fn len_empty_full() {
let (s, r) = unbounded();
assert_eq!(s.len(), 0);
assert_eq!(s.is_empty(), true);
assert_eq!(s.is_full(), false);
assert_eq!(r.len(), 0);
assert_eq!(r.is_empty(), true);
assert_eq!(r.is_full(), false);
s.send(()).unwrap();
assert_eq!(s.len(), 1);
assert_eq!(s.is_empty(), false);
assert_eq!(s.is_full(), false);
assert_eq!(r.len(), 1);
assert_eq!(r.is_empty(), false);
assert_eq!(r.is_full(), false);
r.recv().unwrap();
assert_eq!(s.len(), 0);
assert_eq!(s.is_empty(), true);
assert_eq!(s.is_full(), false);
assert_eq!(r.len(), 0);
assert_eq!(r.is_empty(), true);
assert_eq!(r.is_full(), false);
}
#[test]
fn try_recv() {
let (s, r) = unbounded();
scope(|scope| {
scope.spawn(move |_| {
assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
thread::sleep(ms(1500));
assert_eq!(r.try_recv(), Ok(7));
thread::sleep(ms(500));
assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected));
});
scope.spawn(move |_| {
thread::sleep(ms(1000));
s.send(7).unwrap();
});
})
.unwrap();
}
#[test]
fn recv() {
let (s, r) = unbounded();
scope(|scope| {
scope.spawn(move |_| {
assert_eq!(r.recv(), Ok(7));
thread::sleep(ms(1000));
assert_eq!(r.recv(), Ok(8));
thread::sleep(ms(1000));
assert_eq!(r.recv(), Ok(9));
assert_eq!(r.recv(), Err(RecvError));
});
scope.spawn(move |_| {
thread::sleep(ms(1500));
s.send(7).unwrap();
s.send(8).unwrap();
s.send(9).unwrap();
});
})
.unwrap();
}
#[test]
fn recv_timeout() {
let (s, r) = unbounded::<i32>();
scope(|scope| {
scope.spawn(move |_| {
assert_eq!(r.recv_timeout(ms(1000)), Err(RecvTimeoutError::Timeout));
assert_eq!(r.recv_timeout(ms(1000)), Ok(7));
assert_eq!(
r.recv_timeout(ms(1000)),
Err(RecvTimeoutError::Disconnected)
);
});
scope.spawn(move |_| {
thread::sleep(ms(1500));
s.send(7).unwrap();
});
})
.unwrap();
}
#[test]
fn try_send() {
let (s, r) = unbounded();
for i in 0..1000 {
assert_eq!(s.try_send(i), Ok(()));
}
drop(r);
assert_eq!(s.try_send(777), Err(TrySendError::Disconnected(777)));
}
#[test]
fn send() {
let (s, r) = unbounded();
for i in 0..1000 {
assert_eq!(s.send(i), Ok(()));
}
drop(r);
assert_eq!(s.send(777), Err(SendError(777)));
}
#[test]
fn send_timeout() {
let (s, r) = unbounded();
for i in 0..1000 {
assert_eq!(s.send_timeout(i, ms(i as u64)), Ok(()));
}
drop(r);
assert_eq!(
s.send_timeout(777, ms(0)),
Err(SendTimeoutError::Disconnected(777))
);
}
#[test]
fn send_after_disconnect() {
let (s, r) = unbounded();
s.send(1).unwrap();
s.send(2).unwrap();
s.send(3).unwrap();
drop(r);
assert_eq!(s.send(4), Err(SendError(4)));
assert_eq!(s.try_send(5), Err(TrySendError::Disconnected(5)));
assert_eq!(
s.send_timeout(6, ms(0)),
Err(SendTimeoutError::Disconnected(6))
);
}
#[test]
fn recv_after_disconnect() {
let (s, r) = unbounded();
s.send(1).unwrap();
s.send(2).unwrap();
s.send(3).unwrap();
drop(s);
assert_eq!(r.recv(), Ok(1));
assert_eq!(r.recv(), Ok(2));
assert_eq!(r.recv(), Ok(3));
assert_eq!(r.recv(), Err(RecvError));
}
#[test]
fn len() {
let (s, r) = unbounded();
assert_eq!(s.len(), 0);
assert_eq!(r.len(), 0);
for i in 0..50 {
s.send(i).unwrap();
assert_eq!(s.len(), i + 1);
}
for i in 0..50 {
r.recv().unwrap();
assert_eq!(r.len(), 50 - i - 1);
}
assert_eq!(s.len(), 0);
assert_eq!(r.len(), 0);
}
#[test]
fn disconnect_wakes_receiver() {
let (s, r) = unbounded::<()>();
scope(|scope| {
scope.spawn(move |_| {
assert_eq!(r.recv(), Err(RecvError));
});
scope.spawn(move |_| {
thread::sleep(ms(1000));
drop(s);
});
})
.unwrap();
}
#[test]
fn spsc() {
const COUNT: usize = 100_000;
let (s, r) = unbounded();
scope(|scope| {
scope.spawn(move |_| {
for i in 0..COUNT {
assert_eq!(r.recv(), Ok(i));
}
assert_eq!(r.recv(), Err(RecvError));
});
scope.spawn(move |_| {
for i in 0..COUNT {
s.send(i).unwrap();
}
});
})
.unwrap();
}
#[test]
fn mpmc() {
const COUNT: usize = 25_000;
const THREADS: usize = 4;
let (s, r) = unbounded::<usize>();
let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::<Vec<_>>();
scope(|scope| {
for _ in 0..THREADS {
scope.spawn(|_| {
for _ in 0..COUNT {
let n = r.recv().unwrap();
v[n].fetch_add(1, Ordering::SeqCst);
}
});
}
for _ in 0..THREADS {
scope.spawn(|_| {
for i in 0..COUNT {
s.send(i).unwrap();
}
});
}
})
.unwrap();
assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
for c in v {
assert_eq!(c.load(Ordering::SeqCst), THREADS);
}
}
#[test]
fn stress_oneshot() {
const COUNT: usize = 10_000;
for _ in 0..COUNT {
let (s, r) = unbounded();
scope(|scope| {
scope.spawn(|_| r.recv().unwrap());
scope.spawn(|_| s.send(0).unwrap());
})
.unwrap();
}
}
#[test]
fn stress_iter() {
const COUNT: usize = 100_000;
let (request_s, request_r) = unbounded();
let (response_s, response_r) = unbounded();
scope(|scope| {
scope.spawn(move |_| {
let mut count = 0;
loop {
for x in response_r.try_iter() {
count += x;
if count == COUNT {
return;
}
}
request_s.send(()).unwrap();
}
});
for _ in request_r.iter() {
if response_s.send(1).is_err() {
break;
}
}
})
.unwrap();
}
#[test]
fn stress_timeout_two_threads() {
const COUNT: usize = 100;
let (s, r) = unbounded();
scope(|scope| {
scope.spawn(|_| {
for i in 0..COUNT {
if i % 2 == 0 {
thread::sleep(ms(50));
}
s.send(i).unwrap();
}
});
scope.spawn(|_| {
for i in 0..COUNT {
if i % 2 == 0 {
thread::sleep(ms(50));
}
loop {
if let Ok(x) = r.recv_timeout(ms(10)) {
assert_eq!(x, i);
break;
}
}
}
});
})
.unwrap();
}
#[test]
fn drops() {
static DROPS: AtomicUsize = AtomicUsize::new(0);
#[derive(Debug, PartialEq)]
struct DropCounter;
impl Drop for DropCounter {
fn drop(&mut self) {
DROPS.fetch_add(1, Ordering::SeqCst);
}
}
let mut rng = thread_rng();
for _ in 0..100 {
let steps = rng.gen_range(0, 10_000);
let additional = rng.gen_range(0, 1000);
DROPS.store(0, Ordering::SeqCst);
let (s, r) = unbounded::<DropCounter>();
scope(|scope| {
scope.spawn(|_| {
for _ in 0..steps {
r.recv().unwrap();
}
});
scope.spawn(|_| {
for _ in 0..steps {
s.send(DropCounter).unwrap();
}
});
})
.unwrap();
for _ in 0..additional {
s.try_send(DropCounter).unwrap();
}
assert_eq!(DROPS.load(Ordering::SeqCst), steps);
drop(s);
drop(r);
assert_eq!(DROPS.load(Ordering::SeqCst), steps + additional);
}
}
#[test]
fn linearizable() {
const COUNT: usize = 25_000;
const THREADS: usize = 4;
let (s, r) = unbounded();
scope(|scope| {
for _ in 0..THREADS {
scope.spawn(|_| {
for _ in 0..COUNT {
s.send(0).unwrap();
r.try_recv().unwrap();
}
});
}
})
.unwrap();
}
#[test]
fn fairness() {
const COUNT: usize = 10_000;
let (s1, r1) = unbounded::<()>();
let (s2, r2) = unbounded::<()>();
for _ in 0..COUNT {
s1.send(()).unwrap();
s2.send(()).unwrap();
}
let mut hits = [0usize; 2];
for _ in 0..COUNT {
select! {
recv(r1) -> _ => hits[0] += 1,
recv(r2) -> _ => hits[1] += 1,
}
}
assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2));
}
#[test]
fn fairness_duplicates() {
const COUNT: usize = 10_000;
let (s, r) = unbounded();
for _ in 0..COUNT {
s.send(()).unwrap();
}
let mut hits = [0usize; 5];
for _ in 0..COUNT {
select! {
recv(r) -> _ => hits[0] += 1,
recv(r) -> _ => hits[1] += 1,
recv(r) -> _ => hits[2] += 1,
recv(r) -> _ => hits[3] += 1,
recv(r) -> _ => hits[4] += 1,
}
}
assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2));
}
#[test]
fn recv_in_send() {
let (s, r) = unbounded();
s.send(()).unwrap();
select! {
send(s, assert_eq!(r.recv(), Ok(()))) -> _ => {}
}
}
#[test]
fn channel_through_channel() {
const COUNT: usize = 1000;
type T = Box<dyn Any + Send>;
let (s, r) = unbounded::<T>();
scope(|scope| {
scope.spawn(move |_| {
let mut s = s;
for _ in 0..COUNT {
let (new_s, new_r) = unbounded();
let new_r: T = Box::new(Some(new_r));
s.send(new_r).unwrap();
s = new_s;
}
});
scope.spawn(move |_| {
let mut r = r;
for _ in 0..COUNT {
r = r
.recv()
.unwrap()
.downcast_mut::<Option<Receiver<T>>>()
.unwrap()
.take()
.unwrap()
}
});
})
.unwrap();
}

File diff suppressed because it is too large.


@@ -0,0 +1,95 @@
//! Tests for the never channel flavor.
use std::thread;
use std::time::{Duration, Instant};
use crossbeam_channel::{never, select, tick, unbounded};
fn ms(ms: u64) -> Duration {
Duration::from_millis(ms)
}
#[test]
fn smoke() {
select! {
recv(never::<i32>()) -> _ => panic!(),
default => {}
}
}
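// `never()` is handy for optionally disabling a `select!` arm: substituting a
// never-channel for a real receiver makes that arm permanently non-ready, as the
// two tests below exercise.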
#[test]
fn optional() {
let (s, r) = unbounded::<i32>();
s.send(1).unwrap();
s.send(2).unwrap();
let mut r = Some(&r);
select! {
recv(r.unwrap_or(&never())) -> _ => {}
default => panic!(),
}
r = None;
select! {
recv(r.unwrap_or(&never())) -> _ => panic!(),
default => {}
}
}
#[test]
fn tick_n() {
let mut r = tick(ms(100));
let mut step = 0;
loop {
select! {
recv(r) -> _ => step += 1,
default(ms(500)) => break,
}
if step == 10 {
r = never();
}
}
assert_eq!(step, 10);
}
#[test]
fn capacity() {
let r = never::<i32>();
assert_eq!(r.capacity(), Some(0));
}
#[test]
fn len_empty_full() {
let r = never::<i32>();
assert_eq!(r.len(), 0);
assert_eq!(r.is_empty(), true);
assert_eq!(r.is_full(), true);
}
#[test]
fn try_recv() {
let r = never::<i32>();
assert!(r.try_recv().is_err());
thread::sleep(ms(100));
assert!(r.try_recv().is_err());
}
#[test]
fn recv_timeout() {
let start = Instant::now();
let r = never::<i32>();
assert!(r.recv_timeout(ms(100)).is_err());
let now = Instant::now();
assert!(now - start >= ms(100));
assert!(now - start <= ms(150));
assert!(r.recv_timeout(ms(100)).is_err());
let now = Instant::now();
assert!(now - start >= ms(200));
assert!(now - start <= ms(250));
}


@@ -0,0 +1,834 @@
//! Tests for channel readiness using the `Select` struct.
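//!
//! `Select::ready()` only reports which registered operation appears ready; the
//! caller still completes it with `try_recv`/`try_send`, which may fail if another
//! thread wins the race, hence the retry loops in several tests below.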
use std::any::Any;
use std::cell::Cell;
use std::thread;
use std::time::{Duration, Instant};
use crossbeam_channel::{after, bounded, tick, unbounded};
use crossbeam_channel::{Receiver, Select, TryRecvError, TrySendError};
use crossbeam_utils::thread::scope;
fn ms(ms: u64) -> Duration {
Duration::from_millis(ms)
}
#[test]
fn smoke1() {
let (s1, r1) = unbounded::<usize>();
let (s2, r2) = unbounded::<usize>();
s1.send(1).unwrap();
let mut sel = Select::new();
sel.recv(&r1);
sel.recv(&r2);
assert_eq!(sel.ready(), 0);
assert_eq!(r1.try_recv(), Ok(1));
s2.send(2).unwrap();
let mut sel = Select::new();
sel.recv(&r1);
sel.recv(&r2);
assert_eq!(sel.ready(), 1);
assert_eq!(r2.try_recv(), Ok(2));
}
#[test]
fn smoke2() {
let (_s1, r1) = unbounded::<i32>();
let (_s2, r2) = unbounded::<i32>();
let (_s3, r3) = unbounded::<i32>();
let (_s4, r4) = unbounded::<i32>();
let (s5, r5) = unbounded::<i32>();
s5.send(5).unwrap();
let mut sel = Select::new();
sel.recv(&r1);
sel.recv(&r2);
sel.recv(&r3);
sel.recv(&r4);
sel.recv(&r5);
assert_eq!(sel.ready(), 4);
assert_eq!(r5.try_recv(), Ok(5));
}
#[test]
fn disconnected() {
let (s1, r1) = unbounded::<i32>();
let (s2, r2) = unbounded::<i32>();
scope(|scope| {
scope.spawn(|_| {
drop(s1);
thread::sleep(ms(500));
s2.send(5).unwrap();
});
let mut sel = Select::new();
sel.recv(&r1);
sel.recv(&r2);
match sel.ready_timeout(ms(1000)) {
Ok(0) => assert_eq!(r1.try_recv(), Err(TryRecvError::Disconnected)),
_ => panic!(),
}
r2.recv().unwrap();
})
.unwrap();
let mut sel = Select::new();
sel.recv(&r1);
sel.recv(&r2);
match sel.ready_timeout(ms(1000)) {
Ok(0) => assert_eq!(r1.try_recv(), Err(TryRecvError::Disconnected)),
_ => panic!(),
}
scope(|scope| {
scope.spawn(|_| {
thread::sleep(ms(500));
drop(s2);
});
let mut sel = Select::new();
sel.recv(&r2);
match sel.ready_timeout(ms(1000)) {
Ok(0) => assert_eq!(r2.try_recv(), Err(TryRecvError::Disconnected)),
_ => panic!(),
}
})
.unwrap();
}
#[test]
fn default() {
let (s1, r1) = unbounded::<i32>();
let (s2, r2) = unbounded::<i32>();
let mut sel = Select::new();
sel.recv(&r1);
sel.recv(&r2);
assert!(sel.try_ready().is_err());
drop(s1);
let mut sel = Select::new();
sel.recv(&r1);
sel.recv(&r2);
match sel.try_ready() {
Ok(0) => assert!(r1.try_recv().is_err()),
_ => panic!(),
}
s2.send(2).unwrap();
let mut sel = Select::new();
sel.recv(&r2);
match sel.try_ready() {
Ok(0) => assert_eq!(r2.try_recv(), Ok(2)),
_ => panic!(),
}
let mut sel = Select::new();
sel.recv(&r2);
assert!(sel.try_ready().is_err());
let mut sel = Select::new();
assert!(sel.try_ready().is_err());
}
#[test]
fn timeout() {
let (_s1, r1) = unbounded::<i32>();
let (s2, r2) = unbounded::<i32>();
scope(|scope| {
scope.spawn(|_| {
thread::sleep(ms(1500));
s2.send(2).unwrap();
});
let mut sel = Select::new();
sel.recv(&r1);
sel.recv(&r2);
assert!(sel.ready_timeout(ms(1000)).is_err());
let mut sel = Select::new();
sel.recv(&r1);
sel.recv(&r2);
match sel.ready_timeout(ms(1000)) {
Ok(1) => assert_eq!(r2.try_recv(), Ok(2)),
_ => panic!(),
}
})
.unwrap();
scope(|scope| {
let (s, r) = unbounded::<i32>();
scope.spawn(move |_| {
thread::sleep(ms(500));
drop(s);
});
let mut sel = Select::new();
assert!(sel.ready_timeout(ms(1000)).is_err());
let mut sel = Select::new();
sel.recv(&r);
match sel.try_ready() {
Ok(0) => assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected)),
_ => panic!(),
}
})
.unwrap();
}
#[test]
fn default_when_disconnected() {
let (_, r) = unbounded::<i32>();
let mut sel = Select::new();
sel.recv(&r);
match sel.try_ready() {
Ok(0) => assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected)),
_ => panic!(),
}
let (_, r) = unbounded::<i32>();
let mut sel = Select::new();
sel.recv(&r);
match sel.ready_timeout(ms(1000)) {
Ok(0) => assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected)),
_ => panic!(),
}
let (s, _) = bounded::<i32>(0);
let mut sel = Select::new();
sel.send(&s);
match sel.try_ready() {
Ok(0) => assert_eq!(s.try_send(0), Err(TrySendError::Disconnected(0))),
_ => panic!(),
}
let (s, _) = bounded::<i32>(0);
let mut sel = Select::new();
sel.send(&s);
match sel.ready_timeout(ms(1000)) {
Ok(0) => assert_eq!(s.try_send(0), Err(TrySendError::Disconnected(0))),
_ => panic!(),
}
}
#[test]
fn default_only() {
let start = Instant::now();
let mut sel = Select::new();
assert!(sel.try_ready().is_err());
let now = Instant::now();
assert!(now - start <= ms(50));
let start = Instant::now();
let mut sel = Select::new();
assert!(sel.ready_timeout(ms(500)).is_err());
let now = Instant::now();
assert!(now - start >= ms(450));
assert!(now - start <= ms(550));
}
#[test]
fn unblocks() {
let (s1, r1) = bounded::<i32>(0);
let (s2, r2) = bounded::<i32>(0);
scope(|scope| {
scope.spawn(|_| {
thread::sleep(ms(500));
s2.send(2).unwrap();
});
let mut sel = Select::new();
sel.recv(&r1);
sel.recv(&r2);
match sel.ready_timeout(ms(1000)) {
Ok(1) => assert_eq!(r2.try_recv(), Ok(2)),
_ => panic!(),
}
})
.unwrap();
scope(|scope| {
scope.spawn(|_| {
thread::sleep(ms(500));
assert_eq!(r1.recv().unwrap(), 1);
});
let mut sel = Select::new();
let oper1 = sel.send(&s1);
let oper2 = sel.send(&s2);
let oper = sel.select_timeout(ms(1000));
match oper {
Err(_) => panic!(),
Ok(oper) => match oper.index() {
i if i == oper1 => oper.send(&s1, 1).unwrap(),
i if i == oper2 => panic!(),
_ => unreachable!(),
},
}
})
.unwrap();
}
#[test]
fn both_ready() {
let (s1, r1) = bounded(0);
let (s2, r2) = bounded(0);
scope(|scope| {
scope.spawn(|_| {
thread::sleep(ms(500));
s1.send(1).unwrap();
assert_eq!(r2.recv().unwrap(), 2);
});
for _ in 0..2 {
let mut sel = Select::new();
sel.recv(&r1);
sel.send(&s2);
match sel.ready() {
0 => assert_eq!(r1.try_recv(), Ok(1)),
1 => s2.try_send(2).unwrap(),
_ => panic!(),
}
}
})
.unwrap();
}
#[test]
fn cloning1() {
scope(|scope| {
let (s1, r1) = unbounded::<i32>();
let (_s2, r2) = unbounded::<i32>();
let (s3, r3) = unbounded::<()>();
scope.spawn(move |_| {
r3.recv().unwrap();
drop(s1.clone());
assert!(r3.try_recv().is_err());
s1.send(1).unwrap();
r3.recv().unwrap();
});
s3.send(()).unwrap();
let mut sel = Select::new();
sel.recv(&r1);
sel.recv(&r2);
match sel.ready() {
0 => drop(r1.try_recv()),
1 => drop(r2.try_recv()),
_ => panic!(),
}
s3.send(()).unwrap();
})
.unwrap();
}
#[test]
fn cloning2() {
let (s1, r1) = unbounded::<()>();
let (s2, r2) = unbounded::<()>();
let (_s3, _r3) = unbounded::<()>();
scope(|scope| {
scope.spawn(move |_| {
let mut sel = Select::new();
sel.recv(&r1);
sel.recv(&r2);
match sel.ready() {
0 => panic!(),
1 => drop(r2.try_recv()),
_ => panic!(),
}
});
thread::sleep(ms(500));
drop(s1.clone());
s2.send(()).unwrap();
})
.unwrap();
}
#[test]
fn preflight1() {
let (s, r) = unbounded();
s.send(()).unwrap();
let mut sel = Select::new();
sel.recv(&r);
match sel.ready() {
0 => drop(r.try_recv()),
_ => panic!(),
}
}
#[test]
fn preflight2() {
let (s, r) = unbounded();
drop(s.clone());
s.send(()).unwrap();
drop(s);
let mut sel = Select::new();
sel.recv(&r);
match sel.ready() {
0 => assert_eq!(r.try_recv(), Ok(())),
_ => panic!(),
}
assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected));
}
#[test]
fn preflight3() {
let (s, r) = unbounded();
drop(s.clone());
s.send(()).unwrap();
drop(s);
r.recv().unwrap();
let mut sel = Select::new();
sel.recv(&r);
match sel.ready() {
0 => assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected)),
_ => panic!(),
}
}
#[test]
fn duplicate_operations() {
let (s, r) = unbounded::<i32>();
let hit = vec![Cell::new(false); 4];
while hit.iter().map(|h| h.get()).any(|hit| !hit) {
let mut sel = Select::new();
sel.recv(&r);
sel.recv(&r);
sel.send(&s);
sel.send(&s);
match sel.ready() {
0 => {
assert!(r.try_recv().is_ok());
hit[0].set(true);
}
1 => {
assert!(r.try_recv().is_ok());
hit[1].set(true);
}
2 => {
assert!(s.try_send(0).is_ok());
hit[2].set(true);
}
3 => {
assert!(s.try_send(0).is_ok());
hit[3].set(true);
}
_ => panic!(),
}
}
}
#[test]
fn nesting() {
let (s, r) = unbounded::<i32>();
let mut sel = Select::new();
sel.send(&s);
match sel.ready() {
0 => {
assert!(s.try_send(0).is_ok());
let mut sel = Select::new();
sel.recv(&r);
match sel.ready() {
0 => {
assert_eq!(r.try_recv(), Ok(0));
let mut sel = Select::new();
sel.send(&s);
match sel.ready() {
0 => {
assert!(s.try_send(1).is_ok());
let mut sel = Select::new();
sel.recv(&r);
match sel.ready() {
0 => {
assert_eq!(r.try_recv(), Ok(1));
}
_ => panic!(),
}
}
_ => panic!(),
}
}
_ => panic!(),
}
}
_ => panic!(),
}
}
#[test]
fn stress_recv() {
const COUNT: usize = 10_000;
let (s1, r1) = unbounded();
let (s2, r2) = bounded(5);
let (s3, r3) = bounded(0);
scope(|scope| {
scope.spawn(|_| {
for i in 0..COUNT {
s1.send(i).unwrap();
r3.recv().unwrap();
s2.send(i).unwrap();
r3.recv().unwrap();
}
});
for i in 0..COUNT {
for _ in 0..2 {
let mut sel = Select::new();
sel.recv(&r1);
sel.recv(&r2);
match sel.ready() {
0 => assert_eq!(r1.try_recv(), Ok(i)),
1 => assert_eq!(r2.try_recv(), Ok(i)),
_ => panic!(),
}
s3.send(()).unwrap();
}
}
})
.unwrap();
}
#[test]
fn stress_send() {
const COUNT: usize = 10_000;
let (s1, r1) = bounded(0);
let (s2, r2) = bounded(0);
let (s3, r3) = bounded(100);
scope(|scope| {
scope.spawn(|_| {
for i in 0..COUNT {
assert_eq!(r1.recv().unwrap(), i);
assert_eq!(r2.recv().unwrap(), i);
r3.recv().unwrap();
}
});
for i in 0..COUNT {
for _ in 0..2 {
let mut sel = Select::new();
sel.send(&s1);
sel.send(&s2);
match sel.ready() {
0 => assert!(s1.try_send(i).is_ok()),
1 => assert!(s2.try_send(i).is_ok()),
_ => panic!(),
}
}
s3.send(()).unwrap();
}
})
.unwrap();
}
#[test]
fn stress_mixed() {
const COUNT: usize = 10_000;
let (s1, r1) = bounded(0);
let (s2, r2) = bounded(0);
let (s3, r3) = bounded(100);
scope(|scope| {
scope.spawn(|_| {
for i in 0..COUNT {
s1.send(i).unwrap();
assert_eq!(r2.recv().unwrap(), i);
r3.recv().unwrap();
}
});
for i in 0..COUNT {
for _ in 0..2 {
let mut sel = Select::new();
sel.recv(&r1);
sel.send(&s2);
match sel.ready() {
0 => assert_eq!(r1.try_recv(), Ok(i)),
1 => assert!(s2.try_send(i).is_ok()),
_ => panic!(),
}
}
s3.send(()).unwrap();
}
})
.unwrap();
}
#[test]
fn stress_timeout_two_threads() {
const COUNT: usize = 20;
let (s, r) = bounded(2);
scope(|scope| {
scope.spawn(|_| {
for i in 0..COUNT {
if i % 2 == 0 {
thread::sleep(ms(500));
}
let done = false;
while !done {
let mut sel = Select::new();
sel.send(&s);
match sel.ready_timeout(ms(100)) {
Err(_) => {}
Ok(0) => {
assert!(s.try_send(i).is_ok());
break;
}
Ok(_) => panic!(),
}
}
}
});
scope.spawn(|_| {
for i in 0..COUNT {
if i % 2 == 0 {
thread::sleep(ms(500));
}
let mut done = false;
while !done {
let mut sel = Select::new();
sel.recv(&r);
match sel.ready_timeout(ms(100)) {
Err(_) => {}
Ok(0) => {
assert_eq!(r.try_recv(), Ok(i));
done = true;
}
Ok(_) => panic!(),
}
}
}
});
})
.unwrap();
}
#[test]
fn send_recv_same_channel() {
let (s, r) = bounded::<i32>(0);
let mut sel = Select::new();
sel.send(&s);
sel.recv(&r);
assert!(sel.ready_timeout(ms(100)).is_err());
let (s, r) = unbounded::<i32>();
let mut sel = Select::new();
sel.send(&s);
sel.recv(&r);
match sel.ready_timeout(ms(100)) {
Err(_) => panic!(),
Ok(0) => assert!(s.try_send(0).is_ok()),
Ok(_) => panic!(),
}
}
#[test]
fn channel_through_channel() {
const COUNT: usize = 1000;
type T = Box<dyn Any + Send>;
for cap in 1..4 {
let (s, r) = bounded::<T>(cap);
scope(|scope| {
scope.spawn(move |_| {
let mut s = s;
for _ in 0..COUNT {
let (new_s, new_r) = bounded(cap);
let new_r: T = Box::new(Some(new_r));
{
let mut sel = Select::new();
sel.send(&s);
match sel.ready() {
0 => assert!(s.try_send(new_r).is_ok()),
_ => panic!(),
}
}
s = new_s;
}
});
scope.spawn(move |_| {
let mut r = r;
for _ in 0..COUNT {
let new = {
let mut sel = Select::new();
sel.recv(&r);
match sel.ready() {
0 => r
.try_recv()
.unwrap()
.downcast_mut::<Option<Receiver<T>>>()
.unwrap()
.take()
.unwrap(),
_ => panic!(),
}
};
r = new;
}
});
})
.unwrap();
}
}
#[test]
fn fairness1() {
const COUNT: usize = 10_000;
let (s1, r1) = bounded::<()>(COUNT);
let (s2, r2) = unbounded::<()>();
for _ in 0..COUNT {
s1.send(()).unwrap();
s2.send(()).unwrap();
}
let hits = vec![Cell::new(0usize); 4];
for _ in 0..COUNT {
let after = after(ms(0));
let tick = tick(ms(0));
let mut sel = Select::new();
sel.recv(&r1);
sel.recv(&r2);
sel.recv(&after);
sel.recv(&tick);
match sel.ready() {
0 => {
r1.try_recv().unwrap();
hits[0].set(hits[0].get() + 1);
}
1 => {
r2.try_recv().unwrap();
hits[1].set(hits[1].get() + 1);
}
2 => {
after.try_recv().unwrap();
hits[2].set(hits[2].get() + 1);
}
3 => {
tick.try_recv().unwrap();
hits[3].set(hits[3].get() + 1);
}
_ => panic!(),
}
}
assert!(hits.iter().all(|x| x.get() >= COUNT / hits.len() / 2));
}
#[test]
fn fairness2() {
const COUNT: usize = 100_000;
let (s1, r1) = unbounded::<()>();
let (s2, r2) = bounded::<()>(1);
let (s3, r3) = bounded::<()>(0);
scope(|scope| {
scope.spawn(|_| {
for _ in 0..COUNT {
let mut sel = Select::new();
let mut oper1 = None;
let mut oper2 = None;
if s1.is_empty() {
oper1 = Some(sel.send(&s1));
}
if s2.is_empty() {
oper2 = Some(sel.send(&s2));
}
let oper3 = sel.send(&s3);
let oper = sel.select();
match oper.index() {
i if Some(i) == oper1 => assert!(oper.send(&s1, ()).is_ok()),
i if Some(i) == oper2 => assert!(oper.send(&s2, ()).is_ok()),
i if i == oper3 => assert!(oper.send(&s3, ()).is_ok()),
_ => unreachable!(),
}
}
});
let hits = vec![Cell::new(0usize); 3];
for _ in 0..COUNT {
let mut sel = Select::new();
sel.recv(&r1);
sel.recv(&r2);
sel.recv(&r3);
loop {
match sel.ready() {
0 => {
if r1.try_recv().is_ok() {
hits[0].set(hits[0].get() + 1);
break;
}
}
1 => {
if r2.try_recv().is_ok() {
hits[1].set(hits[1].get() + 1);
break;
}
}
2 => {
if r3.try_recv().is_ok() {
hits[2].set(hits[2].get() + 1);
break;
}
}
_ => unreachable!(),
}
}
}
assert!(hits.iter().all(|x| x.get() > 0));
})
.unwrap();
}


@@ -0,0 +1,112 @@
use std::time::Duration;
use crossbeam_channel::{after, bounded, never, tick, unbounded};
fn ms(ms: u64) -> Duration {
Duration::from_millis(ms)
}
#[test]
fn after_same_channel() {
let r = after(ms(50));
let r2 = r.clone();
assert!(r.same_channel(&r2));
let r3 = after(ms(50));
assert!(!r.same_channel(&r3));
assert!(!r2.same_channel(&r3));
let r4 = after(ms(100));
assert!(!r.same_channel(&r4));
assert!(!r2.same_channel(&r4));
}
#[test]
fn array_same_channel() {
let (s, r) = bounded::<usize>(1);
let s2 = s.clone();
assert!(s.same_channel(&s2));
let r2 = r.clone();
assert!(r.same_channel(&r2));
let (s3, r3) = bounded::<usize>(1);
assert!(!s.same_channel(&s3));
assert!(!s2.same_channel(&s3));
assert!(!r.same_channel(&r3));
assert!(!r2.same_channel(&r3));
}
#[test]
fn list_same_channel() {
let (s, r) = unbounded::<usize>();
let s2 = s.clone();
assert!(s.same_channel(&s2));
let r2 = r.clone();
assert!(r.same_channel(&r2));
let (s3, r3) = unbounded::<usize>();
assert!(!s.same_channel(&s3));
assert!(!s2.same_channel(&s3));
assert!(!r.same_channel(&r3));
assert!(!r2.same_channel(&r3));
}
#[test]
fn never_same_channel() {
let r = never::<usize>();
let r2 = r.clone();
assert!(r.same_channel(&r2));
// Never channels are always equal to one another.
let r3 = never::<usize>();
assert!(r.same_channel(&r3));
assert!(r2.same_channel(&r3));
}
#[test]
fn tick_same_channel() {
let r = tick(ms(50));
let r2 = r.clone();
assert!(r.same_channel(&r2));
let r3 = tick(ms(50));
assert!(!r.same_channel(&r3));
assert!(!r2.same_channel(&r3));
let r4 = tick(ms(100));
assert!(!r.same_channel(&r4));
assert!(!r2.same_channel(&r4));
}
#[test]
fn zero_same_channel() {
let (s, r) = bounded::<usize>(0);
let s2 = s.clone();
assert!(s.same_channel(&s2));
let r2 = r.clone();
assert!(r.same_channel(&r2));
let (s3, r3) = bounded::<usize>(0);
assert!(!s.same_channel(&s3));
assert!(!s2.same_channel(&s3));
assert!(!r.same_channel(&r3));
assert!(!r2.same_channel(&r3));
}
#[test]
fn different_flavors_same_channel() {
let (s1, r1) = bounded::<usize>(0);
let (s2, r2) = unbounded::<usize>();
assert!(!s1.same_channel(&s2));
assert!(!r1.same_channel(&r2));
}

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -0,0 +1,51 @@
//! Tests that make sure accessing thread-locals while exiting the thread doesn't cause panics.
use std::thread;
use std::time::Duration;
use crossbeam_channel::{select, unbounded};
use crossbeam_utils::thread::scope;
fn ms(ms: u64) -> Duration {
Duration::from_millis(ms)
}
#[test]
#[cfg_attr(target_os = "macos", ignore = "TLS is destroyed too early on macOS")]
fn use_while_exiting() {
struct Foo;
impl Drop for Foo {
fn drop(&mut self) {
// A blocking operation after the thread-locals have been dropped. This will attempt to
// use the thread-locals and must not panic.
let (_s, r) = unbounded::<()>();
select! {
recv(r) -> _ => {}
default(ms(100)) => {}
}
}
}
thread_local! {
static FOO: Foo = Foo;
}
let (s, r) = unbounded::<()>();
scope(|scope| {
scope.spawn(|_| {
// First initialize `FOO`, then the thread-locals related to crossbeam-channel.
FOO.with(|_| ());
r.recv().unwrap();
// At thread exit, thread-locals related to crossbeam-channel get dropped first and
// `FOO` is dropped last.
});
scope.spawn(|_| {
thread::sleep(ms(100));
s.send(()).unwrap();
});
})
.unwrap();
}


@@ -0,0 +1,348 @@
//! Tests for the tick channel flavor.
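//!
//! A tick channel buffers at most one message: if the receiver falls behind,
//! missed ticks are skipped rather than queued, as `intervals` below demonstrates.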
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::thread;
use std::time::{Duration, Instant};
use crossbeam_channel::{after, select, tick, Select, TryRecvError};
use crossbeam_utils::thread::scope;
fn ms(ms: u64) -> Duration {
Duration::from_millis(ms)
}
#[test]
fn fire() {
let start = Instant::now();
let r = tick(ms(50));
assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
thread::sleep(ms(100));
let fired = r.try_recv().unwrap();
assert!(start < fired);
assert!(fired - start >= ms(50));
let now = Instant::now();
assert!(fired < now);
assert!(now - fired >= ms(50));
assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
select! {
recv(r) -> _ => panic!(),
default => {}
}
select! {
recv(r) -> _ => {}
recv(tick(ms(200))) -> _ => panic!(),
}
}
#[test]
fn intervals() {
let start = Instant::now();
let r = tick(ms(50));
let t1 = r.recv().unwrap();
assert!(start + ms(50) <= t1);
assert!(start + ms(100) > t1);
thread::sleep(ms(300));
let t2 = r.try_recv().unwrap();
assert!(start + ms(100) <= t2);
assert!(start + ms(150) > t2);
assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
let t3 = r.recv().unwrap();
assert!(start + ms(400) <= t3);
assert!(start + ms(450) > t3);
assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
}
#[test]
fn capacity() {
const COUNT: usize = 10;
for i in 0..COUNT {
let r = tick(ms(i as u64));
assert_eq!(r.capacity(), Some(1));
}
}
#[test]
fn len_empty_full() {
let r = tick(ms(50));
assert_eq!(r.len(), 0);
assert_eq!(r.is_empty(), true);
assert_eq!(r.is_full(), false);
thread::sleep(ms(100));
assert_eq!(r.len(), 1);
assert_eq!(r.is_empty(), false);
assert_eq!(r.is_full(), true);
r.try_recv().unwrap();
assert_eq!(r.len(), 0);
assert_eq!(r.is_empty(), true);
assert_eq!(r.is_full(), false);
}
#[test]
fn try_recv() {
let r = tick(ms(200));
assert!(r.try_recv().is_err());
thread::sleep(ms(100));
assert!(r.try_recv().is_err());
thread::sleep(ms(200));
assert!(r.try_recv().is_ok());
assert!(r.try_recv().is_err());
thread::sleep(ms(200));
assert!(r.try_recv().is_ok());
assert!(r.try_recv().is_err());
}
#[test]
fn recv() {
let start = Instant::now();
let r = tick(ms(50));
let fired = r.recv().unwrap();
assert!(start < fired);
assert!(fired - start >= ms(50));
let now = Instant::now();
assert!(fired < now);
assert!(now - fired < fired - start);
assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
}
#[test]
fn recv_timeout() {
let start = Instant::now();
let r = tick(ms(200));
assert!(r.recv_timeout(ms(100)).is_err());
let now = Instant::now();
assert!(now - start >= ms(100));
assert!(now - start <= ms(150));
let fired = r.recv_timeout(ms(200)).unwrap();
assert!(fired - start >= ms(200));
assert!(fired - start <= ms(250));
assert!(r.recv_timeout(ms(100)).is_err());
let now = Instant::now();
assert!(now - start >= ms(300));
assert!(now - start <= ms(350));
let fired = r.recv_timeout(ms(200)).unwrap();
assert!(fired - start >= ms(400));
assert!(fired - start <= ms(450));
}
#[test]
fn recv_two() {
let r1 = tick(ms(50));
let r2 = tick(ms(50));
scope(|scope| {
scope.spawn(|_| {
for _ in 0..10 {
select! {
recv(r1) -> _ => {}
recv(r2) -> _ => {}
}
}
});
scope.spawn(|_| {
for _ in 0..10 {
select! {
recv(r1) -> _ => {}
recv(r2) -> _ => {}
}
}
});
})
.unwrap();
}
#[test]
fn recv_race() {
select! {
recv(tick(ms(50))) -> _ => {}
recv(tick(ms(100))) -> _ => panic!(),
}
select! {
recv(tick(ms(100))) -> _ => panic!(),
recv(tick(ms(50))) -> _ => {}
}
}
#[test]
fn stress_default() {
const COUNT: usize = 10;
for _ in 0..COUNT {
select! {
recv(tick(ms(0))) -> _ => {}
default => panic!(),
}
}
for _ in 0..COUNT {
select! {
recv(tick(ms(100))) -> _ => panic!(),
default => {}
}
}
}
#[test]
fn select() {
const THREADS: usize = 4;
let hits = AtomicUsize::new(0);
let r1 = tick(ms(200));
let r2 = tick(ms(300));
scope(|scope| {
for _ in 0..THREADS {
scope.spawn(|_| {
let timeout = after(ms(1100));
loop {
let mut sel = Select::new();
let oper1 = sel.recv(&r1);
let oper2 = sel.recv(&r2);
let oper3 = sel.recv(&timeout);
let oper = sel.select();
match oper.index() {
i if i == oper1 => {
oper.recv(&r1).unwrap();
hits.fetch_add(1, Ordering::SeqCst);
}
i if i == oper2 => {
oper.recv(&r2).unwrap();
hits.fetch_add(1, Ordering::SeqCst);
}
i if i == oper3 => {
oper.recv(&timeout).unwrap();
break;
}
_ => unreachable!(),
}
}
});
}
})
.unwrap();
assert_eq!(hits.load(Ordering::SeqCst), 8);
}
#[test]
fn ready() {
const THREADS: usize = 4;
let hits = AtomicUsize::new(0);
let r1 = tick(ms(200));
let r2 = tick(ms(300));
scope(|scope| {
for _ in 0..THREADS {
scope.spawn(|_| {
let timeout = after(ms(1100));
'outer: loop {
let mut sel = Select::new();
sel.recv(&r1);
sel.recv(&r2);
sel.recv(&timeout);
loop {
match sel.ready() {
0 => {
if r1.try_recv().is_ok() {
hits.fetch_add(1, Ordering::SeqCst);
break;
}
}
1 => {
if r2.try_recv().is_ok() {
hits.fetch_add(1, Ordering::SeqCst);
break;
}
}
2 => {
if timeout.try_recv().is_ok() {
break 'outer;
}
}
_ => unreachable!(),
}
}
}
});
}
})
.unwrap();
assert_eq!(hits.load(Ordering::SeqCst), 8);
}
#[test]
fn fairness() {
const COUNT: usize = 30;
for &dur in &[0, 1] {
let mut hits = [0usize; 2];
for _ in 0..COUNT {
let r1 = tick(ms(dur));
let r2 = tick(ms(dur));
for _ in 0..COUNT {
select! {
recv(r1) -> _ => hits[0] += 1,
recv(r2) -> _ => hits[1] += 1,
}
}
}
assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2));
}
}
#[test]
fn fairness_duplicates() {
const COUNT: usize = 30;
for &dur in &[0, 1] {
let mut hits = [0usize; 5];
for _ in 0..COUNT {
let r = tick(ms(dur));
for _ in 0..COUNT {
select! {
recv(r) -> _ => hits[0] += 1,
recv(r) -> _ => hits[1] += 1,
recv(r) -> _ => hits[2] += 1,
recv(r) -> _ => hits[3] += 1,
recv(r) -> _ => hits[4] += 1,
}
}
}
assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2));
}
}


@@ -0,0 +1,554 @@
//! Tests for the zero channel flavor.
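//!
//! A zero-capacity channel has no buffer: every send must rendezvous with a
//! matching receive, which is why the channel reports itself as both empty and
//! full, and why `try_send`/`try_recv` fail unless a peer is already waiting.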
use std::any::Any;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::thread;
use std::time::Duration;
use crossbeam_channel::{bounded, select, Receiver};
use crossbeam_channel::{RecvError, RecvTimeoutError, TryRecvError};
use crossbeam_channel::{SendError, SendTimeoutError, TrySendError};
use crossbeam_utils::thread::scope;
use rand::{thread_rng, Rng};
fn ms(ms: u64) -> Duration {
Duration::from_millis(ms)
}
#[test]
fn smoke() {
let (s, r) = bounded(0);
assert_eq!(s.try_send(7), Err(TrySendError::Full(7)));
assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
}
#[test]
fn capacity() {
let (s, r) = bounded::<()>(0);
assert_eq!(s.capacity(), Some(0));
assert_eq!(r.capacity(), Some(0));
}
#[test]
fn len_empty_full() {
let (s, r) = bounded(0);
assert_eq!(s.len(), 0);
assert_eq!(s.is_empty(), true);
assert_eq!(s.is_full(), true);
assert_eq!(r.len(), 0);
assert_eq!(r.is_empty(), true);
assert_eq!(r.is_full(), true);
scope(|scope| {
scope.spawn(|_| s.send(0).unwrap());
scope.spawn(|_| r.recv().unwrap());
})
.unwrap();
assert_eq!(s.len(), 0);
assert_eq!(s.is_empty(), true);
assert_eq!(s.is_full(), true);
assert_eq!(r.len(), 0);
assert_eq!(r.is_empty(), true);
assert_eq!(r.is_full(), true);
}
#[test]
fn try_recv() {
let (s, r) = bounded(0);
scope(|scope| {
scope.spawn(move |_| {
assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
thread::sleep(ms(1500));
assert_eq!(r.try_recv(), Ok(7));
thread::sleep(ms(500));
assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected));
});
scope.spawn(move |_| {
thread::sleep(ms(1000));
s.send(7).unwrap();
});
})
.unwrap();
}
#[test]
fn recv() {
let (s, r) = bounded(0);
scope(|scope| {
scope.spawn(move |_| {
assert_eq!(r.recv(), Ok(7));
thread::sleep(ms(1000));
assert_eq!(r.recv(), Ok(8));
thread::sleep(ms(1000));
assert_eq!(r.recv(), Ok(9));
assert_eq!(r.recv(), Err(RecvError));
});
scope.spawn(move |_| {
thread::sleep(ms(1500));
s.send(7).unwrap();
s.send(8).unwrap();
s.send(9).unwrap();
});
})
.unwrap();
}
#[test]
fn recv_timeout() {
let (s, r) = bounded::<i32>(0);
scope(|scope| {
scope.spawn(move |_| {
assert_eq!(r.recv_timeout(ms(1000)), Err(RecvTimeoutError::Timeout));
assert_eq!(r.recv_timeout(ms(1000)), Ok(7));
assert_eq!(
r.recv_timeout(ms(1000)),
Err(RecvTimeoutError::Disconnected)
);
});
scope.spawn(move |_| {
thread::sleep(ms(1500));
s.send(7).unwrap();
});
})
.unwrap();
}
#[test]
fn try_send() {
let (s, r) = bounded(0);
scope(|scope| {
scope.spawn(move |_| {
assert_eq!(s.try_send(7), Err(TrySendError::Full(7)));
thread::sleep(ms(1500));
assert_eq!(s.try_send(8), Ok(()));
thread::sleep(ms(500));
assert_eq!(s.try_send(9), Err(TrySendError::Disconnected(9)));
});
scope.spawn(move |_| {
thread::sleep(ms(1000));
assert_eq!(r.recv(), Ok(8));
});
})
.unwrap();
}
#[test]
fn send() {
let (s, r) = bounded(0);
scope(|scope| {
scope.spawn(move |_| {
s.send(7).unwrap();
thread::sleep(ms(1000));
s.send(8).unwrap();
thread::sleep(ms(1000));
s.send(9).unwrap();
});
scope.spawn(move |_| {
thread::sleep(ms(1500));
assert_eq!(r.recv(), Ok(7));
assert_eq!(r.recv(), Ok(8));
assert_eq!(r.recv(), Ok(9));
});
})
.unwrap();
}
#[test]
fn send_timeout() {
let (s, r) = bounded(0);
scope(|scope| {
scope.spawn(move |_| {
assert_eq!(
s.send_timeout(7, ms(1000)),
Err(SendTimeoutError::Timeout(7))
);
assert_eq!(s.send_timeout(8, ms(1000)), Ok(()));
assert_eq!(
s.send_timeout(9, ms(1000)),
Err(SendTimeoutError::Disconnected(9))
);
});
scope.spawn(move |_| {
thread::sleep(ms(1500));
assert_eq!(r.recv(), Ok(8));
});
})
.unwrap();
}
#[test]
fn len() {
const COUNT: usize = 25_000;
let (s, r) = bounded(0);
assert_eq!(s.len(), 0);
assert_eq!(r.len(), 0);
scope(|scope| {
scope.spawn(|_| {
for i in 0..COUNT {
assert_eq!(r.recv(), Ok(i));
assert_eq!(r.len(), 0);
}
});
scope.spawn(|_| {
for i in 0..COUNT {
s.send(i).unwrap();
assert_eq!(s.len(), 0);
}
});
})
.unwrap();
assert_eq!(s.len(), 0);
assert_eq!(r.len(), 0);
}
#[test]
fn disconnect_wakes_sender() {
let (s, r) = bounded(0);
scope(|scope| {
scope.spawn(move |_| {
assert_eq!(s.send(()), Err(SendError(())));
});
scope.spawn(move |_| {
thread::sleep(ms(1000));
drop(r);
});
})
.unwrap();
}
#[test]
fn disconnect_wakes_receiver() {
let (s, r) = bounded::<()>(0);
scope(|scope| {
scope.spawn(move |_| {
assert_eq!(r.recv(), Err(RecvError));
});
scope.spawn(move |_| {
thread::sleep(ms(1000));
drop(s);
});
})
.unwrap();
}
#[test]
fn spsc() {
const COUNT: usize = 100_000;
let (s, r) = bounded(0);
scope(|scope| {
scope.spawn(move |_| {
for i in 0..COUNT {
assert_eq!(r.recv(), Ok(i));
}
assert_eq!(r.recv(), Err(RecvError));
});
scope.spawn(move |_| {
for i in 0..COUNT {
s.send(i).unwrap();
}
});
})
.unwrap();
}
#[test]
fn mpmc() {
const COUNT: usize = 25_000;
const THREADS: usize = 4;
let (s, r) = bounded::<usize>(0);
let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::<Vec<_>>();
scope(|scope| {
for _ in 0..THREADS {
scope.spawn(|_| {
for _ in 0..COUNT {
let n = r.recv().unwrap();
v[n].fetch_add(1, Ordering::SeqCst);
}
});
}
for _ in 0..THREADS {
scope.spawn(|_| {
for i in 0..COUNT {
s.send(i).unwrap();
}
});
}
})
.unwrap();
for c in v {
assert_eq!(c.load(Ordering::SeqCst), THREADS);
}
}
#[test]
fn stress_oneshot() {
const COUNT: usize = 10_000;
for _ in 0..COUNT {
let (s, r) = bounded(1);
scope(|scope| {
scope.spawn(|_| r.recv().unwrap());
scope.spawn(|_| s.send(0).unwrap());
})
.unwrap();
}
}
#[test]
fn stress_iter() {
const COUNT: usize = 1000;
let (request_s, request_r) = bounded(0);
let (response_s, response_r) = bounded(0);
scope(|scope| {
scope.spawn(move |_| {
let mut count = 0;
loop {
for x in response_r.try_iter() {
count += x;
if count == COUNT {
return;
}
}
let _ = request_s.try_send(());
}
});
for _ in request_r.iter() {
if response_s.send(1).is_err() {
break;
}
}
})
.unwrap();
}
#[test]
fn stress_timeout_two_threads() {
const COUNT: usize = 100;
let (s, r) = bounded(0);
scope(|scope| {
scope.spawn(|_| {
for i in 0..COUNT {
if i % 2 == 0 {
thread::sleep(ms(50));
}
loop {
if let Ok(()) = s.send_timeout(i, ms(10)) {
break;
}
}
}
});
scope.spawn(|_| {
for i in 0..COUNT {
if i % 2 == 0 {
thread::sleep(ms(50));
}
loop {
if let Ok(x) = r.recv_timeout(ms(10)) {
assert_eq!(x, i);
break;
}
}
}
});
})
.unwrap();
}
#[test]
fn drops() {
static DROPS: AtomicUsize = AtomicUsize::new(0);
#[derive(Debug, PartialEq)]
struct DropCounter;
impl Drop for DropCounter {
fn drop(&mut self) {
DROPS.fetch_add(1, Ordering::SeqCst);
}
}
let mut rng = thread_rng();
for _ in 0..100 {
let steps = rng.gen_range(0, 3_000);
DROPS.store(0, Ordering::SeqCst);
let (s, r) = bounded::<DropCounter>(0);
scope(|scope| {
scope.spawn(|_| {
for _ in 0..steps {
r.recv().unwrap();
}
});
scope.spawn(|_| {
for _ in 0..steps {
s.send(DropCounter).unwrap();
}
});
})
.unwrap();
assert_eq!(DROPS.load(Ordering::SeqCst), steps);
drop(s);
drop(r);
assert_eq!(DROPS.load(Ordering::SeqCst), steps);
}
}
#[test]
fn fairness() {
const COUNT: usize = 10_000;
let (s1, r1) = bounded::<()>(0);
let (s2, r2) = bounded::<()>(0);
scope(|scope| {
scope.spawn(|_| {
let mut hits = [0usize; 2];
for _ in 0..COUNT {
select! {
recv(r1) -> _ => hits[0] += 1,
recv(r2) -> _ => hits[1] += 1,
}
}
assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2));
});
let mut hits = [0usize; 2];
for _ in 0..COUNT {
select! {
send(s1, ()) -> _ => hits[0] += 1,
send(s2, ()) -> _ => hits[1] += 1,
}
}
assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2));
})
.unwrap();
}
#[test]
fn fairness_duplicates() {
const COUNT: usize = 10_000;
let (s, r) = bounded::<()>(0);
scope(|scope| {
scope.spawn(|_| {
let mut hits = [0usize; 5];
for _ in 0..COUNT {
select! {
recv(r) -> _ => hits[0] += 1,
recv(r) -> _ => hits[1] += 1,
recv(r) -> _ => hits[2] += 1,
recv(r) -> _ => hits[3] += 1,
recv(r) -> _ => hits[4] += 1,
}
}
assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2));
});
let mut hits = [0usize; 5];
for _ in 0..COUNT {
select! {
send(s, ()) -> _ => hits[0] += 1,
send(s, ()) -> _ => hits[1] += 1,
send(s, ()) -> _ => hits[2] += 1,
send(s, ()) -> _ => hits[3] += 1,
send(s, ()) -> _ => hits[4] += 1,
}
}
assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2));
})
.unwrap();
}
#[test]
fn recv_in_send() {
let (s, r) = bounded(0);
scope(|scope| {
scope.spawn(|_| {
thread::sleep(ms(100));
r.recv()
});
scope.spawn(|_| {
thread::sleep(ms(500));
s.send(()).unwrap();
});
select! {
send(s, r.recv().unwrap()) -> _ => {}
}
})
.unwrap();
}
#[test]
fn channel_through_channel() {
const COUNT: usize = 1000;
type T = Box<dyn Any + Send>;
let (s, r) = bounded::<T>(0);
scope(|scope| {
scope.spawn(move |_| {
let mut s = s;
for _ in 0..COUNT {
let (new_s, new_r) = bounded(0);
let new_r: T = Box::new(Some(new_r));
s.send(new_r).unwrap();
s = new_s;
}
});
scope.spawn(move |_| {
let mut r = r;
for _ in 0..COUNT {
r = r
.recv()
.unwrap()
.downcast_mut::<Option<Receiver<T>>>()
.unwrap()
.take()
.unwrap()
}
});
})
.unwrap();
}