Bug 1670261 - Cargo update & mach vendor. r=chutten

Differential Revision: https://phabricator.services.mozilla.com/D121085
Jan-Erik Rediger 2021-08-10 07:49:07 +00:00
parent 1dc3abde95
commit a8c5939ae3
114 changed files with 10002 additions and 126 deletions

Cargo.lock

@@ -29,6 +29,24 @@ dependencies = [
"memchr",
]
[[package]]
name = "android_log-sys"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85965b6739a430150bdd138e2374a98af0c3ee0d030b3bb7fc3bddff58d0102e"
[[package]]
name = "android_logger"
version = "0.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9ed09b18365ed295d722d0b5ed59c01b79a826ff2d2a8f73d5ecca8e6fb2f66"
dependencies = [
"android_log-sys",
"env_logger",
"lazy_static",
"log",
]
[[package]]
name = "anyhow"
version = "1.0.41"
@@ -1091,6 +1109,16 @@ dependencies = [
"syn",
]
[[package]]
name = "dashmap"
version = "4.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e77a43b28d0668df09411cb0bc9a8c2adc40f9a048afe863e05fd43251e8e39c"
dependencies = [
"cfg-if 1.0.0",
"num_cpus",
]
[[package]]
name = "data-encoding"
version = "2.3.2"
@@ -1563,6 +1591,7 @@ dependencies = [
"cstr",
"fog",
"glean",
"glean-ffi",
"log",
"nserror",
"nsstring",
@@ -2074,9 +2103,9 @@ dependencies = [
[[package]]
name = "glean"
version = "39.0.0"
version = "40.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c8e24825d3123194a212e3daf8ddac150713cec4f1e126dc3c12ba207f0b5d77"
checksum = "4467acdfed9d396d5c8c1f47c658c6781476cfbb2e73a3c985e40204a9f4d350"
dependencies = [
"chrono",
"crossbeam-channel 0.5.1",
@@ -2094,9 +2123,9 @@ dependencies = [
[[package]]
name = "glean-core"
version = "39.0.0"
version = "40.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d1a31d189a28ffde9bfcc7ab26586ee2b81705f0b95bf7a090b67fab1f378782"
checksum = "a78e1fedc285fbf042f8f4f48281799c87f7715ad1ad9dabbfb57b09094cbaf5"
dependencies = [
"bincode",
"chrono",
@@ -2112,6 +2141,25 @@ dependencies = [
"zeitstempel",
]
[[package]]
name = "glean-ffi"
version = "40.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0da7ce4e6115c834a8da23c9978e007c047284eb136689d76538ecb0ef6ff69c"
dependencies = [
"android_logger",
"env_logger",
"ffi-support",
"glean-core",
"libc",
"log",
"once_cell",
"oslog",
"serde",
"serde_json",
"uuid",
]
[[package]]
name = "glob"
version = "0.3.0"
@@ -3621,6 +3669,17 @@ dependencies = [
"winapi",
]
[[package]]
name = "oslog"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8343ce955f18e7e68c0207dd0ea776ec453035685395ababd2ea651c569728b3"
dependencies = [
"cc",
"dashmap",
"log",
]
[[package]]
name = "ouroboros"
version = "0.9.2"

third_party/rust/android_log-sys/.cargo-checksum.json

@@ -0,0 +1 @@
{"files":{"Cargo.toml":"0e375933b97d6192182aeb688009d3bd6cebef415d150e8219c0561202367b48","LICENSE-APACHE":"4d4c32b31308f5a992434c2cf948205852bb2c7bb85cea4c1ab051f41a3eefb3","LICENSE-MIT":"bb3c0c388d2e5efc777ee1a7bc4671188447d5fbbad130aecac9fd52e0010b76","README.md":"56808f9f272c6fad922f23033591464c1403bb5d1f716ee224b6933b90d62e86","src/lib.rs":"f851176600c207aece048273a283398143c50a08eb732772468080683cbd39f1"},"package":"85965b6739a430150bdd138e2374a98af0c3ee0d030b3bb7fc3bddff58d0102e"}

third_party/rust/android_log-sys/Cargo.toml

@@ -0,0 +1,26 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
name = "android_log-sys"
version = "0.2.0"
authors = ["Nerijus Arlauskas <nercury@gmail.com>"]
description = "FFI bindings to Android log Library.\n"
documentation = "https://docs.rs/android_log-sys"
readme = "README.md"
keywords = ["ffi", "android", "log"]
categories = ["external-ffi-bindings"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/nercury/android_log-sys-rs"
[lib]
name = "android_log_sys"

third_party/rust/android_log-sys/LICENSE-APACHE

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2016 The android_log_sys Developers
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

third_party/rust/android_log-sys/LICENSE-MIT

@@ -0,0 +1,19 @@
Copyright (c) 2016 The android_log_sys Developers
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

third_party/rust/android_log-sys/README.md

@@ -0,0 +1,17 @@
# Bindings to Android log Library
## License
Licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
### Contribution
Unless you explicitly state otherwise, any contribution intentionally
submitted for inclusion in the work by you, as defined in the Apache-2.0
license, shall be dual licensed as above, without any additional terms or
conditions.

third_party/rust/android_log-sys/src/lib.rs

@@ -0,0 +1,88 @@
// Copyright 2016 The android_log_sys Developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use std::os::raw;
#[allow(non_camel_case_types)]
pub type c_va_list = raw::c_void;
#[allow(non_camel_case_types)]
pub type c_int = raw::c_int;
#[allow(non_camel_case_types)]
pub type c_char = raw::c_char;
// automatically generated by rust-bindgen
#[derive(Clone, Copy)]
#[repr(isize)]
pub enum LogPriority {
UNKNOWN = 0,
DEFAULT = 1,
VERBOSE = 2,
DEBUG = 3,
INFO = 4,
WARN = 5,
ERROR = 6,
FATAL = 7,
SILENT = 8,
}
#[allow(non_camel_case_types)]
#[derive(Clone, Copy)]
#[non_exhaustive]
#[repr(i32)]
pub enum log_id_t {
MAIN = 0,
RADIO = 1,
EVENTS = 2,
SYSTEM = 3,
CRASH = 4,
STATS = 5,
SECURITY = 6,
KERNEL = 7,
MAX = 8,
DEFAULT = 0x7FFFFFFF,
}
#[allow(non_camel_case_types)]
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct __android_log_message {
pub struct_size: usize,
pub buffer_id: i32,
pub priority: i32,
pub tag: *const c_char,
pub file: *const c_char,
pub line: u32,
pub message: *const c_char,
}
#[link(name = "log")]
extern "C" {
pub fn __android_log_write(prio: c_int,
tag: *const c_char,
text: *const c_char)
-> c_int;
pub fn __android_log_print(prio: c_int,
tag: *const c_char,
fmt: *const c_char,
...)
-> c_int;
pub fn __android_log_vprint(prio: c_int,
tag: *const c_char,
fmt: *const c_char,
ap: *mut c_va_list)
-> c_int;
pub fn __android_log_assert(cond: *const c_char,
tag: *const c_char,
fmt: *const c_char,
...);
pub fn __android_log_is_loggable(prio: c_int,
tag: *const c_char,
default_prio: c_int)
-> c_int;
pub fn __android_log_write_log_message(log_message: *mut __android_log_message);
}
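The declarations above are raw `extern "C"` bindings; nothing in this patch calls them directly (the vendored `android_logger` further down wraps them). Purely as a hedged sketch, not part of the vendored file, direct use on an Android target could look roughly like this (the tag and message are made up):

```rust
// Sketch only: calling the raw binding directly; real code normally goes
// through `android_logger` instead.
#[cfg(target_os = "android")]
fn log_hello_via_ffi() {
    use std::ffi::CString;

    use android_log_sys::{LogPriority, __android_log_write};

    let tag = CString::new("mytag").expect("tag contains no NUL bytes");
    let msg = CString::new("hello from Rust").expect("message contains no NUL bytes");

    // Safety: both pointers are valid, NUL-terminated C strings for the
    // duration of the call.
    unsafe {
        __android_log_write(LogPriority::INFO as i32, tag.as_ptr(), msg.as_ptr());
    }
}
```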

third_party/rust/android_logger/.cargo-checksum.json

@@ -0,0 +1 @@
{"files":{"Cargo.toml":"632df8824223ca0e23352f6b0d103ac994fab57dc51f33c2e6d41c91256e1c4d","LICENSE-APACHE":"99938c5864dd33decb62ab20fd883a9b00181d768ae887a4f19b2d0015c41dc9","LICENSE-MIT":"35043211d1b7be8f7e3f9cad27d981f2189ba9a39d9527b275b3c9740298dfe2","README.md":"7a4f75e61fc014f4dbb907fa947e1983f45993dc2a85104cdb619c0808433f65","src/lib.rs":"bdbd60c12117123c2554b1984949dfbc403d890318a0de637829592f4359de6d","tests/config_log_level.rs":"8aae2c7decbcf12a2a454486c9d4dd4a82a20e01d327c4abf4e9cfded973159d","tests/default_init.rs":"ef18c9ea38687a178623c11acfa3d34d16b9030eaad337ab9ed6a609a2c42ca2","tests/multiple_init.rs":"a6ed4986a758b7b2322c6ad0a18ec99fd06521a6c8767a6622eab2cbf9be601e"},"package":"d9ed09b18365ed295d722d0b5ed59c01b79a826ff2d2a8f73d5ecca8e6fb2f66"}

third_party/rust/android_logger/Cargo.toml

@@ -0,0 +1,40 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
name = "android_logger"
version = "0.10.1"
authors = ["The android_logger Developers"]
description = "A logging implementation for `log` which hooks to android log output.\n"
readme = "README.md"
keywords = ["android", "bindings", "log", "logger"]
categories = ["api-bindings"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/Nercury/android_logger-rs"
[dependencies.android_log-sys]
version = "0.2"
[dependencies.env_logger]
version = "0.8"
default-features = false
[dependencies.lazy_static]
version = "1.4"
[dependencies.log]
version = "0.4"
[features]
default = ["regex"]
regex = ["env_logger/regex"]
[badges.travis-ci]
repository = "Nercury/android_logger-rs"

third_party/rust/android_logger/LICENSE-APACHE

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2016 The android_logger Developers
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

third_party/rust/android_logger/LICENSE-MIT

@@ -0,0 +1,19 @@
Copyright (c) 2016 The android_logger Developers
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

third_party/rust/android_logger/README.md

@@ -0,0 +1,81 @@
## Send Rust logs to Logcat
[![Version](https://img.shields.io/crates/v/android_logger.svg)](https://crates.io/crates/android_logger)
[![CI status](https://github.com/Nercury/android_logger-rs/actions/workflows/ci.yml/badge.svg)](https://github.com/Nercury/android_logger-rs/actions/workflows/ci.yml/)
This library is a drop-in replacement for `env_logger` that sends its output to
Android's logcat instead.
It only works on Android and requires linking to the Android `log` library, which
is only available there. With Cargo, it is possible to require this library
conditionally:
```toml
[target.'cfg(target_os = "android")'.dependencies]
android_logger = "0.10"
```
Example of initialization on activity creation, with log configuration:
```rust
#[macro_use] extern crate log;
extern crate android_logger;
use log::Level;
use android_logger::{Config,FilterBuilder};
fn native_activity_create() {
android_logger::init_once(
Config::default()
.with_min_level(Level::Trace) // limit log level
.with_tag("mytag") // logs will show under mytag tag
.with_filter( // configure messages for specific crate
FilterBuilder::new()
.parse("debug,hello::crate=error")
.build())
);
trace!("this is a verbose {}", "message");
error!("this is printed by default");
}
```
To allow all logs, use the default configuration with min level Trace:
```rust
#[macro_use] extern crate log;
extern crate android_logger;
use log::Level;
use android_logger::Config;
fn native_activity_create() {
android_logger::init_once(
Config::default().with_min_level(Level::Trace));
}
```
There is a caveat that this library can only be initialized once
(hence the `init_once` function name). However, Android native activity can be
re-created every time the screen is rotated, resulting in multiple initialization calls.
Therefore this library will only log a warning for subsequent `init_once` calls.
This library ensures that logged messages do not overflow Android log message limits
by efficiently splitting messages into chunks.
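To illustrate the initialization caveat above (this snippet is not part of the upstream README; the function name and log message are invented), a second `init_once` call leaves the first configuration in place, mirroring the crate's `multiple_init` test:

```rust
use android_logger::Config;
use log::Level;

// Illustrative entry point; a real app would call this from its native
// activity's creation hook.
fn on_native_activity_create() {
    // The first call installs the logger and sets the max level to Trace.
    android_logger::init_once(Config::default().with_min_level(Level::Trace));

    // A later call (e.g. after a screen rotation re-creates the activity)
    // does not replace the logger or its configuration: Trace stays active.
    android_logger::init_once(Config::default().with_min_level(Level::Error));

    log::info!("still logged, because the first configuration won");
}
```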
## License
Licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
### Contribution
Unless you explicitly state otherwise, any contribution intentionally
submitted for inclusion in the work by you, as defined in the Apache-2.0
license, shall be dual licensed as above, without any additional terms or
conditions.

third_party/rust/android_logger/src/lib.rs

@@ -0,0 +1,635 @@
// Copyright 2016 The android_logger Developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//! A logger which writes to android output.
//!
//! ## Example
//!
//! ```
//! #[macro_use] extern crate log;
//! extern crate android_logger;
//!
//! use log::Level;
//! use android_logger::Config;
//!
//! /// Android code may not have an obvious `main`; this is just an example.
//! fn main() {
//! android_logger::init_once(
//! Config::default().with_min_level(Level::Trace),
//! );
//!
//! debug!("this is a debug {}", "message");
//! error!("this is printed by default");
//! }
//! ```
//!
//! ## Example with module path filter
//!
//! It is possible to limit log messages to output from a specific crate,
//! and override the logcat tag name (by default, the crate name is used):
//!
//! ```
//! #[macro_use] extern crate log;
//! extern crate android_logger;
//!
//! use log::Level;
//! use android_logger::{Config,FilterBuilder};
//!
//! fn main() {
//! android_logger::init_once(
//! Config::default()
//! .with_min_level(Level::Trace)
//! .with_tag("mytag")
//! .with_filter(FilterBuilder::new().parse("debug,hello::crate=trace").build()),
//! );
//!
//! // ..
//! }
//! ```
//!
//! ## Example with a custom log formatter
//!
//! ```
//! use android_logger::Config;
//!
//! android_logger::init_once(
//! Config::default()
//! .with_min_level(log::Level::Trace)
//! .format(|f, record| write!(f, "my_app: {}", record.args()))
//! )
//! ```
#[cfg(target_os = "android")]
extern crate android_log_sys as log_ffi;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
extern crate env_logger;
use std::sync::RwLock;
#[cfg(target_os = "android")]
use log_ffi::LogPriority;
use log::{Level, Log, Metadata, Record};
use std::ffi::{CStr, CString};
use std::mem;
use std::fmt;
use std::ptr;
pub use env_logger::filter::{Filter, Builder as FilterBuilder};
pub use env_logger::fmt::Formatter;
pub(crate) type FormatFn = Box<dyn Fn(&mut dyn fmt::Write, &Record) -> fmt::Result + Sync + Send>;
/// Output log to android system.
#[cfg(target_os = "android")]
fn android_log(prio: log_ffi::LogPriority, tag: &CStr, msg: &CStr) {
unsafe {
log_ffi::__android_log_write(
prio as log_ffi::c_int,
tag.as_ptr() as *const log_ffi::c_char,
msg.as_ptr() as *const log_ffi::c_char,
)
};
}
/// Dummy output placeholder for tests.
#[cfg(not(target_os = "android"))]
fn android_log(_priority: Level, _tag: &CStr, _msg: &CStr) {}
/// Underlying android logger backend
pub struct AndroidLogger {
config: RwLock<Config>,
}
impl AndroidLogger {
/// Create new logger instance from config
pub fn new(config: Config) -> AndroidLogger {
AndroidLogger {
config: RwLock::new(config),
}
}
}
lazy_static! {
static ref ANDROID_LOGGER: AndroidLogger = AndroidLogger::default();
}
const LOGGING_TAG_MAX_LEN: usize = 23;
const LOGGING_MSG_MAX_LEN: usize = 4000;
impl Default for AndroidLogger {
/// Create a new logger with default config
fn default() -> AndroidLogger {
AndroidLogger {
config: RwLock::new(Config::default()),
}
}
}
impl Log for AndroidLogger {
fn enabled(&self, _: &Metadata) -> bool {
true
}
fn log(&self, record: &Record) {
let config = self.config
.read()
.expect("failed to acquire android_log filter lock for read");
if !config.filter_matches(record) {
return;
}
// tag must not exceed LOGGING_TAG_MAX_LEN
#[allow(deprecated)] // created an issue #35 for this
let mut tag_bytes: [u8; LOGGING_TAG_MAX_LEN + 1] = unsafe { mem::uninitialized() };
let module_path = record.module_path().unwrap_or_default().to_owned();
// If no tag was specified, use module name
let custom_tag = &config.tag;
let tag = custom_tag.as_ref().map(|s| s.as_bytes()).unwrap_or(module_path.as_bytes());
// truncate the tag here to fit into LOGGING_TAG_MAX_LEN
self.fill_tag_bytes(&mut tag_bytes, tag);
// use stack array as C string
let tag: &CStr = unsafe { CStr::from_ptr(mem::transmute(tag_bytes.as_ptr())) };
// message must not exceed LOGGING_MSG_MAX_LEN
// therefore split log message into multiple log calls
let mut writer = PlatformLogWriter::new(record.level(), tag);
// If a custom tag is used, add the module path to the message.
// Use PlatformLogWriter to output chunks if they exceed max size.
let _ = match (custom_tag, &config.custom_format) {
(_, Some(format)) => format(&mut writer, record),
(Some(_), _) => fmt::write(
&mut writer,
format_args!("{}: {}", module_path, *record.args()),
),
_ => fmt::write(&mut writer, *record.args()),
};
// output the remaining message (this would usually be the most common case)
writer.flush();
}
fn flush(&self) {}
}
impl AndroidLogger {
fn fill_tag_bytes(&self, array: &mut [u8], tag: &[u8]) {
if tag.len() > LOGGING_TAG_MAX_LEN {
for (input, output) in tag.iter()
.take(LOGGING_TAG_MAX_LEN - 2)
.chain(b"..\0".iter())
.zip(array.iter_mut())
{
*output = *input;
}
} else {
for (input, output) in tag.iter()
.chain(b"\0".iter())
.zip(array.iter_mut())
{
*output = *input;
}
}
}
}
/// Filter for android logger.
pub struct Config {
log_level: Option<Level>,
filter: Option<env_logger::filter::Filter>,
tag: Option<CString>,
custom_format: Option<FormatFn>,
}
impl Default for Config {
fn default() -> Self {
Config {
log_level: None,
filter: None,
tag: None,
custom_format: None,
}
}
}
impl Config {
/// Change the minimum log level.
///
/// All values above the set level are logged. For example, if
/// `Warn` is set, the `Error` is logged too, but `Info` isn't.
pub fn with_min_level(mut self, level: Level) -> Self {
self.log_level = Some(level);
self
}
fn filter_matches(&self, record: &Record) -> bool {
if let Some(ref filter) = self.filter {
filter.matches(&record)
} else {
true
}
}
pub fn with_filter(mut self, filter: env_logger::filter::Filter) -> Self {
self.filter = Some(filter);
self
}
pub fn with_tag<S: Into<Vec<u8>>>(mut self, tag: S) -> Self {
self.tag = Some(CString::new(tag).expect("Can't convert tag to CString"));
self
}
/// Sets the format function for formatting the log output.
/// ```
/// # use android_logger::Config;
/// android_logger::init_once(
/// Config::default()
/// .with_min_level(log::Level::Trace)
/// .format(|f, record| write!(f, "my_app: {}", record.args()))
/// )
/// ```
pub fn format<F>(mut self, format: F) -> Self
where
F: Fn(&mut dyn fmt::Write, &Record) -> fmt::Result + Sync + Send + 'static,
{
self.custom_format = Some(Box::new(format));
self
}
}
struct PlatformLogWriter<'a> {
#[cfg(target_os = "android")] priority: LogPriority,
#[cfg(not(target_os = "android"))] priority: Level,
len: usize,
last_newline_index: usize,
tag: &'a CStr,
buffer: [u8; LOGGING_MSG_MAX_LEN + 1],
}
impl<'a> PlatformLogWriter<'a> {
#[cfg(target_os = "android")]
pub fn new(level: Level, tag: &CStr) -> PlatformLogWriter {
#[allow(deprecated)] // created an issue #35 for this
PlatformLogWriter {
priority: match level {
Level::Warn => LogPriority::WARN,
Level::Info => LogPriority::INFO,
Level::Debug => LogPriority::DEBUG,
Level::Error => LogPriority::ERROR,
Level::Trace => LogPriority::VERBOSE,
},
len: 0,
last_newline_index: 0,
tag,
buffer: unsafe { mem::uninitialized() },
}
}
#[cfg(not(target_os = "android"))]
pub fn new(level: Level, tag: &CStr) -> PlatformLogWriter {
#[allow(deprecated)] // created an issue #35 for this
PlatformLogWriter {
priority: level,
len: 0,
last_newline_index: 0,
tag,
buffer: unsafe { mem::uninitialized() },
}
}
/// Flush some bytes to android logger.
///
/// If there is a newline, flush up to it.
/// If there was no newline, flush all.
///
/// Not guaranteed to flush everything.
fn temporal_flush(&mut self) {
let total_len = self.len;
if total_len == 0 {
return;
}
if self.last_newline_index > 0 {
let copy_from_index = self.last_newline_index;
let remaining_chunk_len = total_len - copy_from_index;
self.output_specified_len(copy_from_index);
self.copy_bytes_to_start(copy_from_index, remaining_chunk_len);
self.len = remaining_chunk_len;
} else {
self.output_specified_len(total_len);
self.len = 0;
}
self.last_newline_index = 0;
}
/// Flush everything remaining to android logger.
fn flush(&mut self) {
let total_len = self.len;
if total_len == 0 {
return;
}
self.output_specified_len(total_len);
self.len = 0;
self.last_newline_index = 0;
}
/// Output buffer up until the \0 which will be placed at `len` position.
fn output_specified_len(&mut self, len: usize) {
let mut last_byte: u8 = b'\0';
mem::swap(&mut last_byte, unsafe {
self.buffer.get_unchecked_mut(len)
});
let msg: &CStr = unsafe { CStr::from_ptr(mem::transmute(self.buffer.as_ptr())) };
android_log(self.priority, self.tag, msg);
*unsafe { self.buffer.get_unchecked_mut(len) } = last_byte;
}
/// Copy `len` bytes from `index` position to starting position.
fn copy_bytes_to_start(&mut self, index: usize, len: usize) {
let src = unsafe { self.buffer.as_ptr().offset(index as isize) };
let dst = self.buffer.as_mut_ptr();
unsafe { ptr::copy(src, dst, len) };
}
}
impl<'a> fmt::Write for PlatformLogWriter<'a> {
fn write_str(&mut self, s: &str) -> fmt::Result {
let mut incomming_bytes = s.as_bytes();
while !incomming_bytes.is_empty() {
let len = self.len;
// write everything possible to buffer and mark last \n
let new_len = len + incomming_bytes.len();
let last_newline = self.buffer[len..LOGGING_MSG_MAX_LEN]
.iter_mut()
.zip(incomming_bytes)
.enumerate()
.fold(None, |acc, (i, (output, input))| {
*output = *input;
if *input == b'\n' {
Some(i)
} else {
acc
}
});
// update last \n index
if let Some(newline) = last_newline {
self.last_newline_index = len + newline;
}
// calculate how many bytes were written
let written_len = if new_len <= LOGGING_MSG_MAX_LEN {
// if the len was not exceeded
self.len = new_len;
new_len - len // written len
} else {
// if new length was exceeded
self.len = LOGGING_MSG_MAX_LEN;
self.temporal_flush();
LOGGING_MSG_MAX_LEN - len // written len
};
incomming_bytes = &incomming_bytes[written_len..];
}
Ok(())
}
}
/// Send a log record to Android logging backend.
///
/// This action does not require initialization. However, without initialization it
/// will use the default filter, which allows all logs.
pub fn log(record: &Record) {
ANDROID_LOGGER.log(record)
}
/// Initializes the global logger with an android logger.
///
/// This can be called many times, but will only initialize logging once,
/// and will not replace any other previously initialized logger.
///
/// It is ok to call this at the activity creation, and it will be
/// repeatedly called on every lifecycle restart (i.e. screen rotation).
pub fn init_once(config: Config) {
if let Err(err) = log::set_logger(&*ANDROID_LOGGER) {
debug!("android_logger: log::set_logger failed: {}", err);
} else {
if let Some(level) = config.log_level {
log::set_max_level(level.to_level_filter());
}
*ANDROID_LOGGER
.config
.write()
.expect("failed to acquire android_log filter lock for write") = config;
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::fmt::Write;
use std::sync::atomic::{AtomicBool, Ordering};
#[test]
fn check_config_values() {
// Filter is checked in config_filter_match below.
let config = Config::default()
.with_min_level(Level::Trace)
.with_tag("my_app");
assert_eq!(config.log_level, Some(Level::Trace));
assert_eq!(config.tag, Some(CString::new("my_app").unwrap()));
}
#[test]
fn log_calls_formatter() {
static FORMAT_FN_WAS_CALLED: AtomicBool = AtomicBool::new(false);
let config = Config::default()
.with_min_level(Level::Info)
.format(|_, _| {
FORMAT_FN_WAS_CALLED.store(true, Ordering::SeqCst);
Ok(())
});
let logger = AndroidLogger::new(config);
logger.log(&Record::builder().level(Level::Info).build());
assert!(FORMAT_FN_WAS_CALLED.load(Ordering::SeqCst));
}
#[test]
fn logger_always_enabled() {
let logger = AndroidLogger::new(Config::default());
assert!(logger.enabled(&log::MetadataBuilder::new().build()));
}
// Test whether the filter gets called correctly. Not meant to be exhaustive for all filter
// options, as these are handled directly by the filter itself.
#[test]
fn config_filter_match() {
let info_record = Record::builder().level(Level::Info).build();
let debug_record = Record::builder().level(Level::Debug).build();
let info_all_filter = env_logger::filter::Builder::new().parse("info").build();
let info_all_config = Config::default().with_filter(info_all_filter);
assert!(info_all_config.filter_matches(&info_record));
assert!(!info_all_config.filter_matches(&debug_record));
}
#[test]
fn fill_tag_bytes_truncates_long_tag() {
let logger = AndroidLogger::new(Config::default());
let too_long_tag: [u8; LOGGING_TAG_MAX_LEN + 20] = [b'a'; LOGGING_TAG_MAX_LEN + 20];
let mut result: [u8; LOGGING_TAG_MAX_LEN + 1] = Default::default();
logger.fill_tag_bytes(&mut result, &too_long_tag);
let mut expected_result = [b'a'; LOGGING_TAG_MAX_LEN - 2].to_vec();
expected_result.extend("..\0".as_bytes());
assert_eq!(result.to_vec(), expected_result);
}
#[test]
fn fill_tag_bytes_keeps_short_tag() {
let logger = AndroidLogger::new(Config::default());
let short_tag: [u8; 3] = [b'a'; 3];
let mut result: [u8; LOGGING_TAG_MAX_LEN + 1] = Default::default();
logger.fill_tag_bytes(&mut result, &short_tag);
let mut expected_result = short_tag.to_vec();
expected_result.push(0);
assert_eq!(result.to_vec()[..4], expected_result);
}
#[test]
fn platform_log_writer_init_values() {
let tag = CStr::from_bytes_with_nul(b"tag\0").unwrap();
let writer = PlatformLogWriter::new(Level::Warn, &tag);
assert_eq!(writer.tag, tag);
// Android uses LogPriority instead, which doesn't implement equality checks
#[cfg(not(target_os = "android"))]
assert_eq!(writer.priority, Level::Warn);
}
#[test]
fn temporal_flush() {
let mut writer = get_tag_writer();
writer
.write_str("12\n\n567\n90")
.expect("Unable to write to PlatformLogWriter");
assert_eq!(writer.len, 10);
writer.temporal_flush();
// Should have flushed up until the last newline.
assert_eq!(writer.len, 3);
assert_eq!(writer.last_newline_index, 0);
assert_eq!(&writer.buffer.to_vec()[..writer.len], "\n90".as_bytes());
writer.temporal_flush();
// Should have flushed all remaining bytes.
assert_eq!(writer.len, 0);
assert_eq!(writer.last_newline_index, 0);
}
#[test]
fn flush() {
let mut writer = get_tag_writer();
writer
.write_str("abcdefghij\n\nklm\nnopqr\nstuvwxyz")
.expect("Unable to write to PlatformLogWriter");
writer.flush();
assert_eq!(writer.last_newline_index, 0);
assert_eq!(writer.len, 0);
}
#[test]
fn last_newline_index() {
let mut writer = get_tag_writer();
writer
.write_str("12\n\n567\n90")
.expect("Unable to write to PlatformLogWriter");
assert_eq!(writer.last_newline_index, 7);
}
#[test]
fn output_specified_len_leaves_buffer_unchanged() {
let mut writer = get_tag_writer();
let log_string = "abcdefghij\n\nklm\nnopqr\nstuvwxyz";
writer
.write_str(log_string)
.expect("Unable to write to PlatformLogWriter");
writer.output_specified_len(5);
assert_eq!(
writer.buffer[..log_string.len()].to_vec(),
log_string.as_bytes()
);
}
#[test]
fn copy_bytes_to_start() {
let mut writer = get_tag_writer();
writer
.write_str("0123456789")
.expect("Unable to write to PlatformLogWriter");
writer.copy_bytes_to_start(3, 2);
assert_eq!(writer.buffer[..10].to_vec(), "3423456789".as_bytes());
}
#[test]
fn copy_bytes_to_start_nop() {
let test_string = "Test_string_with\n\n\n\nnewlines\n";
let mut writer = get_tag_writer();
writer
.write_str(test_string)
.expect("Unable to write to PlatformLogWriter");
writer.copy_bytes_to_start(0, 20);
writer.copy_bytes_to_start(10, 0);
assert_eq!(
writer.buffer[..test_string.len()].to_vec(),
test_string.as_bytes()
);
}
fn get_tag_writer() -> PlatformLogWriter<'static> {
PlatformLogWriter::new(Level::Warn, &CStr::from_bytes_with_nul(b"tag\0").unwrap())
}
}

third_party/rust/android_logger/tests/config_log_level.rs

@@ -0,0 +1,9 @@
extern crate android_logger;
extern crate log;
#[test]
fn config_log_level() {
android_logger::init_once(android_logger::Config::default().with_min_level(log::Level::Trace));
assert_eq!(log::max_level(), log::LevelFilter::Trace);
}

third_party/rust/android_logger/tests/default_init.rs

@@ -0,0 +1,10 @@
extern crate android_logger;
extern crate log;
#[test]
fn default_init() {
android_logger::init_once(Default::default());
// android_logger has default log level "off"
assert_eq!(log::max_level(), log::LevelFilter::Off);
}

third_party/rust/android_logger/tests/multiple_init.rs

@@ -0,0 +1,12 @@
extern crate android_logger;
extern crate log;
#[test]
fn multiple_init() {
android_logger::init_once(android_logger::Config::default().with_min_level(log::Level::Trace));
// Second initialization should be silently ignored
android_logger::init_once(android_logger::Config::default().with_min_level(log::Level::Error));
assert_eq!(log::max_level(), log::LevelFilter::Trace);
}

third_party/rust/dashmap/.cargo-checksum.json

@@ -0,0 +1 @@
{"files":{"Cargo.toml":"5c8eee21158a25c0f9a586e1739dbf956a8a514dce1b7f5001ddbff5bac35065","LICENSE":"16692e8cee4aa06e3913787497eba2d47c42002014136f5da67be6ee640e28a3","README.md":"666c5a8c9b9404937a193fdf0a1648ac5370723661d55f3804406cfef2c0df1c","rust-toolchain":"e043627c837e03bbb64749ab458b0eb0c93249b2aa4fcd8b2d2d8dd9be1ccfd3","src/iter.rs":"12e09b86c3cc7e8e0db058c31af00a166f46e2c2c182cea5034e43dc6a3d54a2","src/iter_set.rs":"5327da951dc93d30b293aeb7a805666ee4b8e6364881348ab5d86b23c29a0e13","src/lib.rs":"409d1993a6b60e93f7092de111673500e82f0689752a4ef1ecb15149e6bed2ec","src/lock.rs":"a22f2cac6262c729e62a9597b0dae4a04b7662ace5f0960f669f57f9e756e9f1","src/mapref/entry.rs":"290a3f5e1eb49e049286e7de3724130b0c8f2aa69d735b2d8a51e79d08aefe28","src/mapref/mod.rs":"15bd45cfc642a9e3e77bbfbf2d9fad9c5fac0ff6904214a3c501e262242ec8b0","src/mapref/multiple.rs":"3fe7dac633ef085381b7d0c899a4ebeec0b88da7adb4088d1c6c735e9ab05845","src/mapref/one.rs":"ccfd10ca6ca56bae8dcf4c0ac5683b66ecd881b6c1a9883a2d97c02dce6d1bd5","src/rayon/map.rs":"edfe572719ed30a4213993c42a3e1eac9657f12ba27a11e80829519d6a7bfd74","src/rayon/set.rs":"21fe2ca8c58c8ff1bc753f5e2eb6131afd598211eea375f2ac7190cd0f9abcba","src/read_only.rs":"6d3d2d6d3ed13bedc8ece1ed7522acf9d2fb216395adcfa76df8c1dbf52327bd","src/serde.rs":"9a7d240a9c5d093cb8d4ff02ffb4dcdfb4cbac47bac66412bb74fb801f3623f6","src/set.rs":"332df646590e398452b251440d9b2df3e58597799d7c1c016ffefdd5c0c66964","src/setref/mod.rs":"cc39e406a333dc6c04398f4b1336fb400e3a8360c387466d8a91f5d7f4ed40a7","src/setref/multiple.rs":"2270749e83f80dbb4761448f0629ecd02b0b4268f76834236d1167844e6d5e37","src/setref/one.rs":"2af9493c296a3d59dac3a9fed0298d9ca27fa50b0693f82f4377c571e28691c5","src/t.rs":"1e9567ebb665cd422d8934e79a024232ba1a7cb88a48471d47b05653509226bd","src/util.rs":"7a8096a713cf04e60d6c9e38cd366314ed74472abab4bb8a3a9ed081a0e0301b"},"package":"e77a43b28d0668df09411cb0bc9a8c2adc40f9a048afe863e05fd43251e8e39c"}

third_party/rust/dashmap/Cargo.toml

@@ -0,0 +1,45 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
edition = "2018"
name = "dashmap"
version = "4.0.2"
authors = ["Acrimon <joel.wejdenstal@gmail.com>"]
description = "Blazing fast concurrent HashMap for Rust."
homepage = "https://github.com/xacrimon/dashmap"
documentation = "https://docs.rs/dashmap"
readme = "README.md"
keywords = ["atomic", "concurrent", "hashmap"]
categories = ["concurrency", "algorithms", "data-structures"]
license = "MIT"
repository = "https://github.com/xacrimon/dashmap"
[package.metadata.docs.rs]
features = ["rayon", "raw-api", "serde"]
[dependencies.cfg-if]
version = "1.0.0"
[dependencies.num_cpus]
version = "1.13.0"
[dependencies.rayon]
version = "1.5.0"
optional = true
[dependencies.serde]
version = "1.0.118"
features = ["derive"]
optional = true
[features]
default = []
raw-api = []

third_party/rust/dashmap/LICENSE

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2019 Acrimon
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

third_party/rust/dashmap/README.md

@@ -0,0 +1,73 @@
# DashMap
Blazingly fast concurrent map in Rust.
DashMap is an implementation of a concurrent associative array/hashmap in Rust.
DashMap tries to implement an easy to use API similar to `std::collections::HashMap`
with some slight changes to handle concurrency.
DashMap tries to be very simple to use and to be a direct replacement for `RwLock<HashMap<K, V>>`.
To accomplish this, all methods take `&self` instead of modifying methods taking `&mut self`.
This allows you to put a DashMap in an `Arc<T>` and share it between threads while being able to modify it.
DashMap puts great effort into performance and aims to be as fast as possible.
If you have any suggestions or tips do not hesitate to open an issue or a PR.
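As a small, hedged illustration of that `&self`-based API (not taken from the upstream README; the key range and thread count are arbitrary), the map can be shared through an `Arc` and written to from several threads:

```rust
use std::sync::Arc;
use std::thread;

use dashmap::DashMap;

fn main() {
    // Every mutating method takes `&self`, so the map can sit behind an Arc
    // and be modified from several threads without an outer RwLock.
    let map: Arc<DashMap<u32, u32>> = Arc::new(DashMap::new());

    let handles: Vec<_> = (0..4)
        .map(|i| {
            let map = Arc::clone(&map);
            thread::spawn(move || {
                map.insert(i, i * 10);
            })
        })
        .collect();

    for handle in handles {
        handle.join().unwrap();
    }

    assert_eq!(map.len(), 4);
}
```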
[![version](https://img.shields.io/crates/v/dashmap)](https://crates.io/crates/dashmap)
[![documentation](https://docs.rs/dashmap/badge.svg)](https://docs.rs/dashmap)
[![downloads](https://img.shields.io/crates/d/dashmap)](https://crates.io/crates/dashmap)
[![minimum rustc version](https://img.shields.io/badge/rustc-1.44.1-orange.svg)](https://crates.io/crates/dashmap)
## Cargo features
- `serde` - Enables serde support.
- `raw-api` - Enables the unstable raw-shard api.
- `rayon` - Enables rayon support.
## Support me
[![Foo](https://c5.patreon.com/external/logo/become_a_patron_button@2x.png)](https://patreon.com/acrimon)
Creating and testing open-source software like DashMap takes up a large portion of my time
and comes with costs such as test hardware. Please consider supporting me and everything I make for the public
to enable me to continue doing this.
If you want to support me please head over and take a look at my [patreon](https://www.patreon.com/acrimon).
## Contributing
DashMap gladly accepts contributions!
Do not hesitate to open issues or PRs.
I will take a look as soon as I have time for it.
That said, I do not get paid (yet) to work on open-source. This means
that my time is limited and my work here comes after my personal life.
## Performance
A comprehensive benchmark suite including DashMap can be found [here](https://github.com/xacrimon/conc-map-bench).
## Special thanks
- [Jon Gjengset](https://github.com/jonhoo)
- [Yato](https://github.com/RustyYato)
- [Karl Bergström](https://github.com/kabergstrom)
- [Dylan DPC](https://github.com/Dylan-DPC)
- [Lokathor](https://github.com/Lokathor)
- [namibj](https://github.com/namibj)
## License
This project is licensed under MIT.


@ -0,0 +1 @@
stable-2020-06-18

third_party/rust/dashmap/src/iter.rs vendored Normal file

@ -0,0 +1,309 @@
use super::mapref::multiple::{RefMulti, RefMutMulti};
use super::util;
use crate::lock::{RwLockReadGuard, RwLockWriteGuard};
use crate::t::Map;
use crate::util::SharedValue;
use crate::{DashMap, HashMap};
use core::hash::{BuildHasher, Hash};
use core::mem;
use std::collections::hash_map;
use std::collections::hash_map::RandomState;
use std::sync::Arc;
/// Iterator over a DashMap yielding key value pairs.
///
/// # Examples
///
/// ```
/// use dashmap::DashMap;
///
/// let map = DashMap::new();
/// map.insert("hello", "world");
/// map.insert("alex", "steve");
/// let pairs: Vec<(&'static str, &'static str)> = map.into_iter().collect();
/// assert_eq!(pairs.len(), 2);
/// ```
pub struct OwningIter<K, V, S = RandomState> {
map: DashMap<K, V, S>,
shard_i: usize,
current: Option<GuardOwningIter<K, V>>,
}
impl<K: Eq + Hash, V, S: BuildHasher + Clone> OwningIter<K, V, S> {
pub(crate) fn new(map: DashMap<K, V, S>) -> Self {
Self {
map,
shard_i: 0,
current: None,
}
}
}
type GuardOwningIter<K, V> = hash_map::IntoIter<K, SharedValue<V>>;
impl<K: Eq + Hash, V, S: BuildHasher + Clone> Iterator for OwningIter<K, V, S> {
type Item = (K, V);
fn next(&mut self) -> Option<Self::Item> {
loop {
if let Some(current) = self.current.as_mut() {
if let Some((k, v)) = current.next() {
return Some((k, v.into_inner()));
}
}
if self.shard_i == self.map._shard_count() {
return None;
}
//let guard = unsafe { self.map._yield_read_shard(self.shard_i) };
let mut shard_wl = unsafe { self.map._yield_write_shard(self.shard_i) };
let hasher = self.map._hasher();
let map = mem::replace(&mut *shard_wl, HashMap::with_hasher(hasher));
drop(shard_wl);
let iter = map.into_iter();
//unsafe { ptr::write(&mut self.current, Some((arcee, iter))); }
self.current = Some(iter);
self.shard_i += 1;
}
}
}
unsafe impl<K, V, S> Send for OwningIter<K, V, S>
where
K: Eq + Hash + Send,
V: Send,
S: BuildHasher + Clone + Send,
{
}
unsafe impl<K, V, S> Sync for OwningIter<K, V, S>
where
K: Eq + Hash + Sync,
V: Sync,
S: BuildHasher + Clone + Sync,
{
}
type GuardIter<'a, K, V, S> = (
Arc<RwLockReadGuard<'a, HashMap<K, V, S>>>,
hash_map::Iter<'a, K, SharedValue<V>>,
);
type GuardIterMut<'a, K, V, S> = (
Arc<RwLockWriteGuard<'a, HashMap<K, V, S>>>,
hash_map::IterMut<'a, K, SharedValue<V>>,
);
/// Iterator over a DashMap yielding immutable references.
///
/// # Examples
///
/// ```
/// use dashmap::DashMap;
///
/// let map = DashMap::new();
/// map.insert("hello", "world");
/// assert_eq!(map.iter().count(), 1);
/// ```
pub struct Iter<'a, K, V, S = RandomState, M = DashMap<K, V, S>> {
map: &'a M,
shard_i: usize,
current: Option<GuardIter<'a, K, V, S>>,
}
unsafe impl<'a, 'i, K, V, S, M> Send for Iter<'i, K, V, S, M>
where
K: 'a + Eq + Hash + Send,
V: 'a + Send,
S: 'a + BuildHasher + Clone,
M: Map<'a, K, V, S>,
{
}
unsafe impl<'a, 'i, K, V, S, M> Sync for Iter<'i, K, V, S, M>
where
K: 'a + Eq + Hash + Sync,
V: 'a + Sync,
S: 'a + BuildHasher + Clone,
M: Map<'a, K, V, S>,
{
}
impl<'a, K: Eq + Hash, V, S: 'a + BuildHasher + Clone, M: Map<'a, K, V, S>> Iter<'a, K, V, S, M> {
pub(crate) fn new(map: &'a M) -> Self {
Self {
map,
shard_i: 0,
current: None,
}
}
}
impl<'a, K: Eq + Hash, V, S: 'a + BuildHasher + Clone, M: Map<'a, K, V, S>> Iterator
for Iter<'a, K, V, S, M>
{
type Item = RefMulti<'a, K, V, S>;
fn next(&mut self) -> Option<Self::Item> {
loop {
if let Some(current) = self.current.as_mut() {
if let Some((k, v)) = current.1.next() {
let guard = current.0.clone();
return Some(RefMulti::new(guard, k, v.get()));
}
}
if self.shard_i == self.map._shard_count() {
return None;
}
let guard = unsafe { self.map._yield_read_shard(self.shard_i) };
let sref: &HashMap<K, V, S> = unsafe { util::change_lifetime_const(&*guard) };
let iter = sref.iter();
self.current = Some((Arc::new(guard), iter));
self.shard_i += 1;
}
}
}
/// Iterator over a DashMap yielding mutable references.
///
/// # Examples
///
/// ```
/// use dashmap::DashMap;
///
/// let map = DashMap::new();
/// map.insert("Johnny", 21);
/// map.iter_mut().for_each(|mut r| *r += 1);
/// assert_eq!(*map.get("Johnny").unwrap(), 22);
/// ```
pub struct IterMut<'a, K, V, S = RandomState, M = DashMap<K, V, S>> {
map: &'a M,
shard_i: usize,
current: Option<GuardIterMut<'a, K, V, S>>,
}
unsafe impl<'a, 'i, K, V, S, M> Send for IterMut<'i, K, V, S, M>
where
K: 'a + Eq + Hash + Send,
V: 'a + Send,
S: 'a + BuildHasher + Clone,
M: Map<'a, K, V, S>,
{
}
unsafe impl<'a, 'i, K, V, S, M> Sync for IterMut<'i, K, V, S, M>
where
K: 'a + Eq + Hash + Sync,
V: 'a + Sync,
S: 'a + BuildHasher + Clone,
M: Map<'a, K, V, S>,
{
}
impl<'a, K: Eq + Hash, V, S: 'a + BuildHasher + Clone, M: Map<'a, K, V, S>>
IterMut<'a, K, V, S, M>
{
pub(crate) fn new(map: &'a M) -> Self {
Self {
map,
shard_i: 0,
current: None,
}
}
}
impl<'a, K: Eq + Hash, V, S: 'a + BuildHasher + Clone, M: Map<'a, K, V, S>> Iterator
for IterMut<'a, K, V, S, M>
{
type Item = RefMutMulti<'a, K, V, S>;
fn next(&mut self) -> Option<Self::Item> {
loop {
if let Some(current) = self.current.as_mut() {
if let Some((k, v)) = current.1.next() {
let guard = current.0.clone();
unsafe {
let k = util::change_lifetime_const(k);
let v = &mut *v.as_ptr();
return Some(RefMutMulti::new(guard, k, v));
}
}
}
if self.shard_i == self.map._shard_count() {
return None;
}
let mut guard = unsafe { self.map._yield_write_shard(self.shard_i) };
let sref: &mut HashMap<K, V, S> = unsafe { util::change_lifetime_mut(&mut *guard) };
let iter = sref.iter_mut();
self.current = Some((Arc::new(guard), iter));
self.shard_i += 1;
}
}
}
#[cfg(test)]
mod tests {
use crate::DashMap;
#[test]
fn iter_mut_manual_count() {
let map = DashMap::new();
map.insert("Johnny", 21);
assert_eq!(map.len(), 1);
let mut c = 0;
for shard in map.shards() {
c += shard.write().iter_mut().count();
}
assert_eq!(c, 1);
}
#[test]
fn iter_mut_count() {
let map = DashMap::new();
map.insert("Johnny", 21);
assert_eq!(map.len(), 1);
assert_eq!(map.iter_mut().count(), 1);
}
#[test]
fn iter_count() {
let map = DashMap::new();
map.insert("Johnny", 21);
assert_eq!(map.len(), 1);
assert_eq!(map.iter().count(), 1);
}
}


@ -0,0 +1,71 @@
use crate::setref::multiple::RefMulti;
use crate::t::Map;
use core::hash::{BuildHasher, Hash};
pub struct OwningIter<K, S> {
inner: crate::iter::OwningIter<K, (), S>,
}
impl<K: Eq + Hash, S: BuildHasher + Clone> OwningIter<K, S> {
pub(crate) fn new(inner: crate::iter::OwningIter<K, (), S>) -> Self {
Self { inner }
}
}
impl<K: Eq + Hash, S: BuildHasher + Clone> Iterator for OwningIter<K, S> {
type Item = K;
fn next(&mut self) -> Option<Self::Item> {
self.inner.next().map(|(k, _)| k)
}
}
unsafe impl<K, S> Send for OwningIter<K, S>
where
K: Eq + Hash + Send,
S: BuildHasher + Clone + Send,
{
}
unsafe impl<K, S> Sync for OwningIter<K, S>
where
K: Eq + Hash + Sync,
S: BuildHasher + Clone + Sync,
{
}
pub struct Iter<'a, K, S, M> {
inner: crate::iter::Iter<'a, K, (), S, M>,
}
unsafe impl<'a, 'i, K, S, M> Send for Iter<'i, K, S, M>
where
K: 'a + Eq + Hash + Send,
S: 'a + BuildHasher + Clone,
M: Map<'a, K, (), S>,
{
}
unsafe impl<'a, 'i, K, S, M> Sync for Iter<'i, K, S, M>
where
K: 'a + Eq + Hash + Sync,
S: 'a + BuildHasher + Clone,
M: Map<'a, K, (), S>,
{
}
impl<'a, K: Eq + Hash, S: 'a + BuildHasher + Clone, M: Map<'a, K, (), S>> Iter<'a, K, S, M> {
pub(crate) fn new(inner: crate::iter::Iter<'a, K, (), S, M>) -> Self {
Self { inner }
}
}
impl<'a, K: Eq + Hash, S: 'a + BuildHasher + Clone, M: Map<'a, K, (), S>> Iterator
for Iter<'a, K, S, M>
{
type Item = RefMulti<'a, K, S>;
fn next(&mut self) -> Option<Self::Item> {
self.inner.next().map(RefMulti::new)
}
}

third_party/rust/dashmap/src/lib.rs vendored Normal file

@ -0,0 +1,993 @@
#![allow(clippy::type_complexity)]
pub mod iter;
pub mod iter_set;
pub mod lock;
pub mod mapref;
mod read_only;
#[cfg(feature = "serde")]
mod serde;
mod set;
pub mod setref;
mod t;
mod util;
#[cfg(feature = "rayon")]
pub mod rayon {
pub mod map;
pub mod set;
}
use cfg_if::cfg_if;
use core::borrow::Borrow;
use core::fmt;
use core::hash::{BuildHasher, Hash, Hasher};
use core::iter::FromIterator;
use core::ops::{BitAnd, BitOr, Shl, Shr, Sub};
use iter::{Iter, IterMut, OwningIter};
use lock::{RwLock, RwLockReadGuard, RwLockWriteGuard};
use mapref::entry::{Entry, OccupiedEntry, VacantEntry};
use mapref::multiple::RefMulti;
use mapref::one::{Ref, RefMut};
pub use read_only::ReadOnlyView;
pub use set::DashSet;
use std::collections::hash_map::RandomState;
pub use t::Map;
cfg_if! {
if #[cfg(feature = "raw-api")] {
pub use util::SharedValue;
} else {
use util::SharedValue;
}
}
pub(crate) type HashMap<K, V, S> = std::collections::HashMap<K, SharedValue<V>, S>;
fn shard_amount() -> usize {
(num_cpus::get() * 4).next_power_of_two()
}
fn ncb(shard_amount: usize) -> usize {
shard_amount.trailing_zeros() as usize
}
/// DashMap is an implementation of a concurrent associative array/hashmap in Rust.
///
/// DashMap tries to implement an easy to use API similar to `std::collections::HashMap`
/// with some slight changes to handle concurrency.
///
/// DashMap tries to be very simple to use and to be a direct replacement for `RwLock<HashMap<K, V, S>>`.
/// To accomplish this, all methods take `&self`; modifying methods take `&self` instead of `&mut self`.
/// This allows you to put a DashMap in an `Arc<T>` and share it between threads while being able to modify it.
///
/// Documentation mentioning locking behaviour acts in the reference frame of the calling thread.
/// This means that it is safe to ignore it across multiple threads.
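///
/// # Examples
///
/// A minimal usage sketch (only `new`, `insert` and `get` are used):
///
/// ```
/// use dashmap::DashMap;
///
/// let map = DashMap::new();
/// map.insert("answer", 42);
/// assert_eq!(*map.get("answer").unwrap(), 42);
/// ```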
pub struct DashMap<K, V, S = RandomState> {
shift: usize,
shards: Box<[RwLock<HashMap<K, V, S>>]>,
hasher: S,
}
impl<K: Eq + Hash + Clone, V: Clone, S: Clone> Clone for DashMap<K, V, S> {
fn clone(&self) -> Self {
let mut inner_shards = Vec::new();
for shard in self.shards.iter() {
let shard = shard.read();
inner_shards.push(RwLock::new((*shard).clone()));
}
Self {
shift: self.shift,
shards: inner_shards.into_boxed_slice(),
hasher: self.hasher.clone(),
}
}
}
impl<K, V, S> Default for DashMap<K, V, S>
where
K: Eq + Hash,
S: Default + BuildHasher + Clone,
{
fn default() -> Self {
Self::with_hasher(Default::default())
}
}
impl<'a, K: 'a + Eq + Hash, V: 'a> DashMap<K, V, RandomState> {
/// Creates a new DashMap with a capacity of 0.
///
/// # Examples
///
/// ```
/// use dashmap::DashMap;
///
/// let reviews = DashMap::new();
/// reviews.insert("Veloren", "What a fantastic game!");
/// ```
pub fn new() -> Self {
DashMap::with_hasher(RandomState::default())
}
/// Creates a new DashMap with a specified starting capacity.
///
/// # Examples
///
/// ```
/// use dashmap::DashMap;
///
/// let mappings = DashMap::with_capacity(2);
/// mappings.insert(2, 4);
/// mappings.insert(8, 16);
/// ```
pub fn with_capacity(capacity: usize) -> Self {
DashMap::with_capacity_and_hasher(capacity, RandomState::default())
}
}
impl<'a, K: 'a + Eq + Hash, V: 'a, S: BuildHasher + Clone> DashMap<K, V, S> {
/// Wraps this `DashMap` into a read-only view. This view allows you to obtain raw references to the stored values.
pub fn into_read_only(self) -> ReadOnlyView<K, V, S> {
ReadOnlyView::new(self)
}
/// Creates a new DashMap with a capacity of 0 and the provided hasher.
///
/// # Examples
///
/// ```
/// use dashmap::DashMap;
/// use std::collections::hash_map::RandomState;
///
/// let s = RandomState::new();
/// let reviews = DashMap::with_hasher(s);
/// reviews.insert("Veloren", "What a fantastic game!");
/// ```
pub fn with_hasher(hasher: S) -> Self {
Self::with_capacity_and_hasher(0, hasher)
}
/// Creates a new DashMap with a specified starting capacity and hasher.
///
/// # Examples
///
/// ```
/// use dashmap::DashMap;
/// use std::collections::hash_map::RandomState;
///
/// let s = RandomState::new();
/// let mappings = DashMap::with_capacity_and_hasher(2, s);
/// mappings.insert(2, 4);
/// mappings.insert(8, 16);
/// ```
pub fn with_capacity_and_hasher(mut capacity: usize, hasher: S) -> Self {
let shard_amount = shard_amount();
let shift = util::ptr_size_bits() - ncb(shard_amount);
if capacity != 0 {
capacity = (capacity + (shard_amount - 1)) & !(shard_amount - 1);
}
let cps = capacity / shard_amount;
let shards = (0..shard_amount)
.map(|_| RwLock::new(HashMap::with_capacity_and_hasher(cps, hasher.clone())))
.collect();
Self {
shift,
shards,
hasher,
}
}
/// Hash a given item to produce a usize.
/// Uses the provided or default HashBuilder.
pub fn hash_usize<T: Hash>(&self, item: &T) -> usize {
let mut hasher = self.hasher.build_hasher();
item.hash(&mut hasher);
hasher.finish() as usize
}
cfg_if! {
if #[cfg(feature = "raw-api")] {
/// Allows you to peek at the inner shards that store your data.
/// You should probably not use this unless you know what you are doing.
///
/// Requires the `raw-api` feature to be enabled.
///
/// # Examples
///
/// ```
/// use dashmap::DashMap;
///
/// let map = DashMap::<(), ()>::new();
/// println!("Amount of shards: {}", map.shards().len());
/// ```
pub fn shards(&self) -> &[RwLock<HashMap<K, V, S>>] {
&self.shards
}
} else {
#[allow(dead_code)]
pub(crate) fn shards(&self) -> &[RwLock<HashMap<K, V, S>>] {
&self.shards
}
}
}
cfg_if! {
if #[cfg(feature = "raw-api")] {
/// Finds which shard a certain key is stored in.
/// You should probably not use this unless you know what you are doing.
/// Note that shard selection is dependent on the default or provided HashBuilder.
///
/// Requires the `raw-api` feature to be enabled.
///
/// # Examples
///
/// ```
/// use dashmap::DashMap;
///
/// let map = DashMap::new();
/// map.insert("coca-cola", 1.4);
/// println!("coca-cola is stored in shard: {}", map.determine_map("coca-cola"));
/// ```
pub fn determine_map<Q>(&self, key: &Q) -> usize
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
let hash = self.hash_usize(&key);
self.determine_shard(hash)
}
}
}
cfg_if! {
if #[cfg(feature = "raw-api")] {
/// Finds which shard a certain hash is stored in.
///
/// Requires the `raw-api` feature to be enabled.
///
/// # Examples
///
/// ```
/// use dashmap::DashMap;
///
/// let map: DashMap<i32, i32> = DashMap::new();
/// let key = "key";
/// let hash = map.hash_usize(&key);
/// println!("hash is stored in shard: {}", map.determine_shard(hash));
/// ```
pub fn determine_shard(&self, hash: usize) -> usize {
// Leave the high 7 bits for the HashBrown SIMD tag.
(hash << 7) >> self.shift
}
} else {
pub(crate) fn determine_shard(&self, hash: usize) -> usize {
// Leave the high 7 bits for the HashBrown SIMD tag.
(hash << 7) >> self.shift
}
}
}
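// Worked example (assuming a 64-bit target with 16 shards, i.e. four logical CPUs):
// `ncb(16)` is 4, so `shift` is 64 - 4 = 60 and `determine_shard(hash)` evaluates
// `(hash << 7) >> 60`. Shifting left by 7 drops the hash bits that hashbrown keeps
// for its SIMD tag; shifting right keeps the next 4 bits, an index in `0..16`.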
/// Returns a reference to the map's [`BuildHasher`].
///
/// # Examples
///
/// ```rust
/// use dashmap::DashMap;
/// use std::collections::hash_map::RandomState;
///
/// let hasher = RandomState::new();
/// let map: DashMap<i32, i32> = DashMap::new();
/// let hasher: &RandomState = map.hasher();
/// ```
///
/// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html
pub fn hasher(&self) -> &S {
&self.hasher
}
/// Inserts a key and a value into the map. Returns the old value associated with the key if there was one.
///
/// **Locking behaviour:** May deadlock if called when holding any sort of reference into the map.
///
/// # Examples
///
/// ```
/// use dashmap::DashMap;
///
/// let map = DashMap::new();
/// map.insert("I am the key!", "And I am the value!");
/// ```
pub fn insert(&self, key: K, value: V) -> Option<V> {
self._insert(key, value)
}
/// Removes an entry from the map, returning the key and value if they existed in the map.
///
/// **Locking behaviour:** May deadlock if called when holding any sort of reference into the map.
///
/// # Examples
///
/// ```
/// use dashmap::DashMap;
///
/// let soccer_team = DashMap::new();
/// soccer_team.insert("Jack", "Goalie");
/// assert_eq!(soccer_team.remove("Jack").unwrap().1, "Goalie");
/// ```
pub fn remove<Q>(&self, key: &Q) -> Option<(K, V)>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
self._remove(key)
}
/// Removes an entry from the map, returning the key and value
/// if the entry existed and the provided conditional function returned true.
///
/// **Locking behaviour:** May deadlock if called when holding any sort of reference into the map.
///
/// ```
/// use dashmap::DashMap;
///
/// let soccer_team = DashMap::new();
/// soccer_team.insert("Sam", "Forward");
/// soccer_team.remove_if("Sam", |_, position| position == &"Goalie");
/// assert!(soccer_team.contains_key("Sam"));
/// ```
/// ```
/// use dashmap::DashMap;
///
/// let soccer_team = DashMap::new();
/// soccer_team.insert("Sam", "Forward");
/// soccer_team.remove_if("Sam", |_, position| position == &"Forward");
/// assert!(!soccer_team.contains_key("Sam"));
/// ```
pub fn remove_if<Q>(&self, key: &Q, f: impl FnOnce(&K, &V) -> bool) -> Option<(K, V)>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
self._remove_if(key, f)
}
/// Creates an iterator over a DashMap yielding immutable references.
///
/// **Locking behaviour:** May deadlock if called when holding a mutable reference into the map.
///
/// # Examples
///
/// ```
/// use dashmap::DashMap;
///
/// let words = DashMap::new();
/// words.insert("hello", "world");
/// assert_eq!(words.iter().count(), 1);
/// ```
pub fn iter(&'a self) -> Iter<'a, K, V, S, DashMap<K, V, S>> {
self._iter()
}
/// Iterator over a DashMap yielding mutable references.
///
/// **Locking behaviour:** May deadlock if called when holding any sort of reference into the map.
///
/// # Examples
///
/// ```
/// use dashmap::DashMap;
///
/// let map = DashMap::new();
/// map.insert("Johnny", 21);
/// map.iter_mut().for_each(|mut r| *r += 1);
/// assert_eq!(*map.get("Johnny").unwrap(), 22);
/// ```
pub fn iter_mut(&'a self) -> IterMut<'a, K, V, S, DashMap<K, V, S>> {
self._iter_mut()
}
/// Get an immutable reference to an entry in the map
///
/// **Locking behaviour:** May deadlock if called when holding a mutable reference into the map.
///
/// # Examples
///
/// ```
/// use dashmap::DashMap;
///
/// let youtubers = DashMap::new();
/// youtubers.insert("Bosnian Bill", 457000);
/// assert_eq!(*youtubers.get("Bosnian Bill").unwrap(), 457000);
/// ```
pub fn get<Q>(&'a self, key: &Q) -> Option<Ref<'a, K, V, S>>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
self._get(key)
}
/// Get a mutable reference to an entry in the map
///
/// **Locking behaviour:** May deadlock if called when holding any sort of reference into the map.
///
/// # Examples
///
/// ```
/// use dashmap::DashMap;
///
/// let class = DashMap::new();
/// class.insert("Albin", 15);
/// *class.get_mut("Albin").unwrap() -= 1;
/// assert_eq!(*class.get("Albin").unwrap(), 14);
/// ```
pub fn get_mut<Q>(&'a self, key: &Q) -> Option<RefMut<'a, K, V, S>>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
self._get_mut(key)
}
/// Remove excess capacity to reduce memory usage.
///
/// **Locking behaviour:** May deadlock if called when holding any sort of reference into the map.
pub fn shrink_to_fit(&self) {
self._shrink_to_fit();
}
/// Retain elements for which the given predicate returns true
/// and discard those for which it returns false.
///
/// **Locking behaviour:** May deadlock if called when holding any sort of reference into the map.
///
/// # Examples
///
/// ```
/// use dashmap::DashMap;
///
/// let people = DashMap::new();
/// people.insert("Albin", 15);
/// people.insert("Jones", 22);
/// people.insert("Charlie", 27);
/// people.retain(|_, v| *v > 20);
/// assert_eq!(people.len(), 2);
/// ```
pub fn retain(&self, f: impl FnMut(&K, &mut V) -> bool) {
self._retain(f);
}
/// Fetches the total number of key-value pairs stored in the map.
///
/// **Locking behaviour:** May deadlock if called when holding a mutable reference into the map.
///
/// # Examples
///
/// ```
/// use dashmap::DashMap;
///
/// let people = DashMap::new();
/// people.insert("Albin", 15);
/// people.insert("Jones", 22);
/// people.insert("Charlie", 27);
/// assert_eq!(people.len(), 3);
/// ```
pub fn len(&self) -> usize {
self._len()
}
/// Checks if the map is empty or not.
///
/// **Locking behaviour:** May deadlock if called when holding a mutable reference into the map.
///
/// # Examples
///
/// ```
/// use dashmap::DashMap;
///
/// let map = DashMap::<(), ()>::new();
/// assert!(map.is_empty());
/// ```
pub fn is_empty(&self) -> bool {
self._is_empty()
}
/// Removes all key-value pairs in the map.
///
/// **Locking behaviour:** May deadlock if called when holding any sort of reference into the map.
///
/// # Examples
///
/// ```
/// use dashmap::DashMap;
///
/// let stats = DashMap::new();
/// stats.insert("Goals", 4);
/// assert!(!stats.is_empty());
/// stats.clear();
/// assert!(stats.is_empty());
/// ```
pub fn clear(&self) {
self._clear();
}
/// Returns how many key-value pairs the map can store without reallocating.
///
/// **Locking behaviour:** May deadlock if called when holding a mutable reference into the map.
pub fn capacity(&self) -> usize {
self._capacity()
}
/// Modify a specific value according to a function.
///
/// **Locking behaviour:** May deadlock if called when holding any sort of reference into the map.
///
/// # Examples
///
/// ```
/// use dashmap::DashMap;
///
/// let stats = DashMap::new();
/// stats.insert("Goals", 4);
/// stats.alter("Goals", |_, v| v * 2);
/// assert_eq!(*stats.get("Goals").unwrap(), 8);
/// ```
///
/// # Panics
///
/// If the given closure panics, then `alter` will abort the process
pub fn alter<Q>(&self, key: &Q, f: impl FnOnce(&K, V) -> V)
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
self._alter(key, f);
}
/// Modify every value in the map according to a function.
///
/// **Locking behaviour:** May deadlock if called when holding any sort of reference into the map.
///
/// # Examples
///
/// ```
/// use dashmap::DashMap;
///
/// let stats = DashMap::new();
/// stats.insert("Wins", 4);
/// stats.insert("Losses", 2);
/// stats.alter_all(|_, v| v + 1);
/// assert_eq!(*stats.get("Wins").unwrap(), 5);
/// assert_eq!(*stats.get("Losses").unwrap(), 3);
/// ```
///
/// # Panics
///
/// If the given closure panics, then `alter_all` will abort the process
pub fn alter_all(&self, f: impl FnMut(&K, V) -> V) {
self._alter_all(f);
}
/// Checks if the map contains a specific key.
///
/// **Locking behaviour:** May deadlock if called when holding a mutable reference into the map.
///
/// # Examples
///
/// ```
/// use dashmap::DashMap;
///
/// let team_sizes = DashMap::new();
/// team_sizes.insert("Dakota Cherries", 23);
/// assert!(team_sizes.contains_key("Dakota Cherries"));
/// ```
pub fn contains_key<Q>(&self, key: &Q) -> bool
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
self._contains_key(key)
}
/// Advanced entry API that tries to mimic `std::collections::HashMap`.
/// See the documentation on `dashmap::mapref::entry` for more details.
///
/// **Locking behaviour:** May deadlock if called when holding any sort of reference into the map.
pub fn entry(&'a self, key: K) -> Entry<'a, K, V, S> {
self._entry(key)
}
}
impl<'a, K: 'a + Eq + Hash, V: 'a, S: 'a + BuildHasher + Clone> Map<'a, K, V, S>
for DashMap<K, V, S>
{
fn _shard_count(&self) -> usize {
self.shards.len()
}
unsafe fn _get_read_shard(&'a self, i: usize) -> &'a HashMap<K, V, S> {
debug_assert!(i < self.shards.len());
self.shards.get_unchecked(i).get()
}
unsafe fn _yield_read_shard(&'a self, i: usize) -> RwLockReadGuard<'a, HashMap<K, V, S>> {
debug_assert!(i < self.shards.len());
self.shards.get_unchecked(i).read()
}
unsafe fn _yield_write_shard(&'a self, i: usize) -> RwLockWriteGuard<'a, HashMap<K, V, S>> {
debug_assert!(i < self.shards.len());
self.shards.get_unchecked(i).write()
}
fn _insert(&self, key: K, value: V) -> Option<V> {
let hash = self.hash_usize(&key);
let idx = self.determine_shard(hash);
let mut shard = unsafe { self._yield_write_shard(idx) };
shard
.insert(key, SharedValue::new(value))
.map(|v| v.into_inner())
}
fn _remove<Q>(&self, key: &Q) -> Option<(K, V)>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
let hash = self.hash_usize(&key);
let idx = self.determine_shard(hash);
let mut shard = unsafe { self._yield_write_shard(idx) };
shard.remove_entry(key).map(|(k, v)| (k, v.into_inner()))
}
fn _remove_if<Q>(&self, key: &Q, f: impl FnOnce(&K, &V) -> bool) -> Option<(K, V)>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
let hash = self.hash_usize(&key);
let idx = self.determine_shard(hash);
let mut shard = unsafe { self._yield_write_shard(idx) };
if let Some((k, v)) = shard.get_key_value(key) {
if f(k, v.get()) {
shard.remove_entry(key).map(|(k, v)| (k, v.into_inner()))
} else {
None
}
} else {
None
}
}
fn _iter(&'a self) -> Iter<'a, K, V, S, DashMap<K, V, S>> {
Iter::new(self)
}
fn _iter_mut(&'a self) -> IterMut<'a, K, V, S, DashMap<K, V, S>> {
IterMut::new(self)
}
fn _get<Q>(&'a self, key: &Q) -> Option<Ref<'a, K, V, S>>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
let hash = self.hash_usize(&key);
let idx = self.determine_shard(hash);
let shard = unsafe { self._yield_read_shard(idx) };
if let Some((kptr, vptr)) = shard.get_key_value(key) {
unsafe {
let kptr = util::change_lifetime_const(kptr);
let vptr = util::change_lifetime_const(vptr);
Some(Ref::new(shard, kptr, vptr.get()))
}
} else {
None
}
}
fn _get_mut<Q>(&'a self, key: &Q) -> Option<RefMut<'a, K, V, S>>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
let hash = self.hash_usize(&key);
let idx = self.determine_shard(hash);
let shard = unsafe { self._yield_write_shard(idx) };
if let Some((kptr, vptr)) = shard.get_key_value(key) {
unsafe {
let kptr = util::change_lifetime_const(kptr);
let vptr = &mut *vptr.as_ptr();
Some(RefMut::new(shard, kptr, vptr))
}
} else {
None
}
}
fn _shrink_to_fit(&self) {
self.shards.iter().for_each(|s| s.write().shrink_to_fit());
}
fn _retain(&self, mut f: impl FnMut(&K, &mut V) -> bool) {
self.shards
.iter()
.for_each(|s| s.write().retain(|k, v| f(k, v.get_mut())));
}
fn _len(&self) -> usize {
self.shards.iter().map(|s| s.read().len()).sum()
}
fn _capacity(&self) -> usize {
self.shards.iter().map(|s| s.read().capacity()).sum()
}
fn _alter<Q>(&self, key: &Q, f: impl FnOnce(&K, V) -> V)
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
if let Some(mut r) = self.get_mut(key) {
util::map_in_place_2(r.pair_mut(), f);
}
}
fn _alter_all(&self, mut f: impl FnMut(&K, V) -> V) {
self.shards.iter().for_each(|s| {
s.write()
.iter_mut()
.for_each(|(k, v)| util::map_in_place_2((k, v.get_mut()), &mut f));
});
}
fn _entry(&'a self, key: K) -> Entry<'a, K, V, S> {
let hash = self.hash_usize(&key);
let idx = self.determine_shard(hash);
let shard = unsafe { self._yield_write_shard(idx) };
if let Some((kptr, vptr)) = shard.get_key_value(&key) {
unsafe {
let kptr = util::change_lifetime_const(kptr);
let vptr = &mut *vptr.as_ptr();
Entry::Occupied(OccupiedEntry::new(shard, key, (kptr, vptr)))
}
} else {
Entry::Vacant(VacantEntry::new(shard, key))
}
}
fn _hasher(&self) -> S {
self.hasher.clone()
}
}
impl<K: Eq + Hash + fmt::Debug, V: fmt::Debug, S: BuildHasher + Clone> fmt::Debug
for DashMap<K, V, S>
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut pmap = f.debug_map();
for r in self {
let (k, v) = r.pair();
pmap.entry(k, v);
}
pmap.finish()
}
}
impl<'a, K: 'a + Eq + Hash, V: 'a, S: BuildHasher + Clone> Shl<(K, V)> for &'a DashMap<K, V, S> {
type Output = Option<V>;
fn shl(self, pair: (K, V)) -> Self::Output {
self.insert(pair.0, pair.1)
}
}
impl<'a, K: 'a + Eq + Hash, V: 'a, S: BuildHasher + Clone, Q> Shr<&Q> for &'a DashMap<K, V, S>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
type Output = Ref<'a, K, V, S>;
fn shr(self, key: &Q) -> Self::Output {
self.get(key).unwrap()
}
}
impl<'a, K: 'a + Eq + Hash, V: 'a, S: BuildHasher + Clone, Q> BitOr<&Q> for &'a DashMap<K, V, S>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
type Output = RefMut<'a, K, V, S>;
fn bitor(self, key: &Q) -> Self::Output {
self.get_mut(key).unwrap()
}
}
impl<'a, K: 'a + Eq + Hash, V: 'a, S: BuildHasher + Clone, Q> Sub<&Q> for &'a DashMap<K, V, S>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
type Output = Option<(K, V)>;
fn sub(self, key: &Q) -> Self::Output {
self.remove(key)
}
}
impl<'a, K: 'a + Eq + Hash, V: 'a, S: BuildHasher + Clone, Q> BitAnd<&Q> for &'a DashMap<K, V, S>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
type Output = bool;
fn bitand(self, key: &Q) -> Self::Output {
self.contains_key(key)
}
}
impl<'a, K: Eq + Hash, V, S: BuildHasher + Clone> IntoIterator for DashMap<K, V, S> {
type Item = (K, V);
type IntoIter = OwningIter<K, V, S>;
fn into_iter(self) -> Self::IntoIter {
OwningIter::new(self)
}
}
impl<'a, K: Eq + Hash, V, S: BuildHasher + Clone> IntoIterator for &'a DashMap<K, V, S> {
type Item = RefMulti<'a, K, V, S>;
type IntoIter = Iter<'a, K, V, S, DashMap<K, V, S>>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
impl<K: Eq + Hash, V, S: BuildHasher + Clone> Extend<(K, V)> for DashMap<K, V, S> {
fn extend<I: IntoIterator<Item = (K, V)>>(&mut self, intoiter: I) {
for pair in intoiter.into_iter() {
self.insert(pair.0, pair.1);
}
}
}
impl<K: Eq + Hash, V> FromIterator<(K, V)> for DashMap<K, V, RandomState> {
fn from_iter<I: IntoIterator<Item = (K, V)>>(intoiter: I) -> Self {
let mut map = DashMap::new();
map.extend(intoiter);
map
}
}
#[cfg(test)]
mod tests {
use crate::DashMap;
use std::collections::hash_map::RandomState;
#[test]
fn test_basic() {
let dm = DashMap::new();
dm.insert(0, 0);
assert_eq!(dm.get(&0).unwrap().value(), &0);
}
#[test]
fn test_default() {
let dm: DashMap<u32, u32> = DashMap::default();
dm.insert(0, 0);
assert_eq!(dm.get(&0).unwrap().value(), &0);
}
#[test]
fn test_multiple_hashes() {
let dm: DashMap<u32, u32> = DashMap::default();
for i in 0..100 {
dm.insert(0, i);
dm.insert(i, i);
}
for i in 1..100 {
let r = dm.get(&i).unwrap();
assert_eq!(i, *r.value());
assert_eq!(i, *r.key());
}
let r = dm.get(&0).unwrap();
assert_eq!(99, *r.value());
}
#[test]
fn test_more_complex_values() {
#[derive(Hash, PartialEq, Debug, Clone)]
struct T0 {
s: String,
u: u8,
}
let dm = DashMap::new();
let range = 0..10;
for i in range {
let t = T0 {
s: i.to_string(),
u: i as u8,
};
dm.insert(i, t.clone());
assert_eq!(&t, dm.get(&i).unwrap().value());
}
}
#[test]
fn test_different_hashers_randomstate() {
let dm_hm_default: DashMap<u32, u32, RandomState> =
DashMap::with_hasher(RandomState::new());
for i in 0..10 {
dm_hm_default.insert(i, i);
assert_eq!(i, *dm_hm_default.get(&i).unwrap().value());
}
}
}

third_party/rust/dashmap/src/lock.rs vendored Normal file

@ -0,0 +1,557 @@
use core::cell::UnsafeCell;
use core::default::Default;
use core::fmt;
use core::marker::PhantomData;
use core::mem;
use core::ops::{Deref, DerefMut};
use core::ptr::NonNull;
use core::sync::atomic::{spin_loop_hint as cpu_relax, AtomicUsize, Ordering};
pub struct RwLock<T: ?Sized> {
lock: AtomicUsize,
data: UnsafeCell<T>,
}
const READER: usize = 1 << 2;
const UPGRADED: usize = 1 << 1;
const WRITER: usize = 1;
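// The lock word packs three fields: bit 0 (`WRITER`) is set while a writer holds the
// lock, bit 1 (`UPGRADED`) is set while an upgradeable read guard exists, and the
// remaining bits count active readers, each reader adding `READER` (1 << 2).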
#[derive(Debug)]
pub struct RwLockReadGuard<'a, T: 'a + ?Sized> {
lock: &'a AtomicUsize,
data: NonNull<T>,
}
unsafe impl<'a, T: Send> Send for RwLockReadGuard<'a, T> {}
unsafe impl<'a, T: Sync> Sync for RwLockReadGuard<'a, T> {}
#[derive(Debug)]
pub struct RwLockWriteGuard<'a, T: 'a + ?Sized> {
lock: &'a AtomicUsize,
data: NonNull<T>,
#[doc(hidden)]
_invariant: PhantomData<&'a mut T>,
}
unsafe impl<'a, T: Send> Send for RwLockWriteGuard<'a, T> {}
unsafe impl<'a, T: Sync> Sync for RwLockWriteGuard<'a, T> {}
#[derive(Debug)]
pub struct RwLockUpgradeableGuard<'a, T: 'a + ?Sized> {
lock: &'a AtomicUsize,
data: NonNull<T>,
#[doc(hidden)]
_invariant: PhantomData<&'a mut T>,
}
unsafe impl<T: ?Sized + Send> Send for RwLock<T> {}
unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}
impl<T> RwLock<T> {
pub const fn new(user_data: T) -> RwLock<T> {
RwLock {
lock: AtomicUsize::new(0),
data: UnsafeCell::new(user_data),
}
}
pub fn into_inner(self) -> T {
let RwLock { data, .. } = self;
data.into_inner()
}
}
impl<T: ?Sized> RwLock<T> {
pub fn read(&self) -> RwLockReadGuard<T> {
loop {
match self.try_read() {
Some(guard) => return guard,
None => cpu_relax(),
}
}
}
pub fn try_read(&self) -> Option<RwLockReadGuard<T>> {
let value = self.lock.fetch_add(READER, Ordering::Acquire);
// We check the UPGRADED bit here so that new readers are prevented when an UPGRADED lock is held.
// This helps reduce writer starvation.
if value & (WRITER | UPGRADED) != 0 {
// Lock is taken, undo.
self.lock.fetch_sub(READER, Ordering::Release);
None
} else {
Some(RwLockReadGuard {
lock: &self.lock,
data: unsafe { NonNull::new_unchecked(self.data.get()) },
})
}
}
/// # Safety
///
/// This is only safe if the lock is currently locked in read mode and the number of readers is not 0.
pub unsafe fn force_read_decrement(&self) {
debug_assert!(self.lock.load(Ordering::Relaxed) & !WRITER > 0);
self.lock.fetch_sub(READER, Ordering::Release);
}
/// # Safety
///
/// The lock must be locked in write mode.
pub unsafe fn force_write_unlock(&self) {
debug_assert_eq!(self.lock.load(Ordering::Relaxed) & !(WRITER | UPGRADED), 0);
self.lock.fetch_and(!(WRITER | UPGRADED), Ordering::Release);
}
fn try_write_internal(&self, strong: bool) -> Option<RwLockWriteGuard<T>> {
if compare_exchange(
&self.lock,
0,
WRITER,
Ordering::Acquire,
Ordering::Relaxed,
strong,
)
.is_ok()
{
Some(RwLockWriteGuard {
lock: &self.lock,
data: unsafe { NonNull::new_unchecked(self.data.get()) },
_invariant: PhantomData,
})
} else {
None
}
}
pub fn write(&self) -> RwLockWriteGuard<T> {
loop {
match self.try_write_internal(false) {
Some(guard) => return guard,
None => cpu_relax(),
}
}
}
pub fn try_write(&self) -> Option<RwLockWriteGuard<T>> {
self.try_write_internal(true)
}
pub fn upgradeable_read(&self) -> RwLockUpgradeableGuard<T> {
loop {
match self.try_upgradeable_read() {
Some(guard) => return guard,
None => cpu_relax(),
}
}
}
pub fn try_upgradeable_read(&self) -> Option<RwLockUpgradeableGuard<T>> {
if self.lock.fetch_or(UPGRADED, Ordering::Acquire) & (WRITER | UPGRADED) == 0 {
Some(RwLockUpgradeableGuard {
lock: &self.lock,
data: unsafe { NonNull::new_unchecked(self.data.get()) },
_invariant: PhantomData,
})
} else {
None
}
}
/// # Safety
/// Write locks may not be used in combination with this method.
pub unsafe fn get(&self) -> &T {
&*self.data.get()
}
pub fn get_mut(&mut self) -> &mut T {
unsafe { &mut *self.data.get() }
}
}
impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.try_read() {
Some(guard) => write!(f, "RwLock {{ data: ")
.and_then(|()| (&*guard).fmt(f))
.and_then(|()| write!(f, "}}")),
None => write!(f, "RwLock {{ <locked> }}"),
}
}
}
impl<T: ?Sized + Default> Default for RwLock<T> {
fn default() -> RwLock<T> {
RwLock::new(Default::default())
}
}
impl<'rwlock, T: ?Sized> RwLockUpgradeableGuard<'rwlock, T> {
fn try_upgrade_internal(self, strong: bool) -> Result<RwLockWriteGuard<'rwlock, T>, Self> {
if compare_exchange(
&self.lock,
UPGRADED,
WRITER,
Ordering::Acquire,
Ordering::Relaxed,
strong,
)
.is_ok()
{
let out = Ok(RwLockWriteGuard {
lock: &self.lock,
data: self.data,
_invariant: PhantomData,
});
mem::forget(self);
out
} else {
Err(self)
}
}
pub fn upgrade(mut self) -> RwLockWriteGuard<'rwlock, T> {
loop {
self = match self.try_upgrade_internal(false) {
Ok(guard) => return guard,
Err(e) => e,
};
cpu_relax();
}
}
pub fn try_upgrade(self) -> Result<RwLockWriteGuard<'rwlock, T>, Self> {
self.try_upgrade_internal(true)
}
pub fn downgrade(self) -> RwLockReadGuard<'rwlock, T> {
self.lock.fetch_add(READER, Ordering::Acquire);
RwLockReadGuard {
lock: &self.lock,
data: self.data,
}
}
}
impl<'rwlock, T: ?Sized> RwLockWriteGuard<'rwlock, T> {
pub fn downgrade(self) -> RwLockReadGuard<'rwlock, T> {
self.lock.fetch_add(READER, Ordering::Acquire);
RwLockReadGuard {
lock: &self.lock,
data: self.data,
}
}
}
impl<'rwlock, T: ?Sized> Deref for RwLockReadGuard<'rwlock, T> {
type Target = T;
fn deref(&self) -> &T {
unsafe { self.data.as_ref() }
}
}
impl<'rwlock, T: ?Sized> Deref for RwLockUpgradeableGuard<'rwlock, T> {
type Target = T;
fn deref(&self) -> &T {
unsafe { self.data.as_ref() }
}
}
impl<'rwlock, T: ?Sized> Deref for RwLockWriteGuard<'rwlock, T> {
type Target = T;
fn deref(&self) -> &T {
unsafe { self.data.as_ref() }
}
}
impl<'rwlock, T: ?Sized> DerefMut for RwLockWriteGuard<'rwlock, T> {
fn deref_mut(&mut self) -> &mut T {
unsafe { self.data.as_mut() }
}
}
impl<'rwlock, T: ?Sized> Drop for RwLockReadGuard<'rwlock, T> {
fn drop(&mut self) {
debug_assert!(self.lock.load(Ordering::Relaxed) & !(WRITER | UPGRADED) > 0);
self.lock.fetch_sub(READER, Ordering::Release);
}
}
impl<'rwlock, T: ?Sized> Drop for RwLockUpgradeableGuard<'rwlock, T> {
fn drop(&mut self) {
debug_assert_eq!(
self.lock.load(Ordering::Relaxed) & (WRITER | UPGRADED),
UPGRADED
);
self.lock.fetch_sub(UPGRADED, Ordering::AcqRel);
}
}
impl<'rwlock, T: ?Sized> Drop for RwLockWriteGuard<'rwlock, T> {
fn drop(&mut self) {
debug_assert_eq!(self.lock.load(Ordering::Relaxed) & WRITER, WRITER);
self.lock.fetch_and(!(WRITER | UPGRADED), Ordering::Release);
}
}
fn compare_exchange(
atomic: &AtomicUsize,
current: usize,
new: usize,
success: Ordering,
failure: Ordering,
strong: bool,
) -> Result<usize, usize> {
if strong {
atomic.compare_exchange(current, new, success, failure)
} else {
atomic.compare_exchange_weak(current, new, success, failure)
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::prelude::v1::*;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread;
#[derive(Eq, PartialEq, Debug)]
struct NonCopy(i32);
#[test]
fn smoke() {
let l = RwLock::new(());
drop(l.read());
drop(l.write());
drop((l.read(), l.read()));
drop(l.write());
}
#[cfg(not(target_arch = "wasm32"))]
#[test]
fn test_rw_arc() {
let arc = Arc::new(RwLock::new(0));
let arc2 = arc.clone();
let (tx, rx) = channel();
thread::spawn(move || {
let mut lock = arc2.write();
for _ in 0..10 {
let tmp = *lock;
*lock = -1;
thread::yield_now();
*lock = tmp + 1;
}
tx.send(()).unwrap();
});
let mut children = Vec::new();
for _ in 0..5 {
let arc3 = arc.clone();
children.push(thread::spawn(move || {
let lock = arc3.read();
assert!(*lock >= 0);
}));
}
for r in children {
assert!(r.join().is_ok());
}
rx.recv().unwrap();
let lock = arc.read();
assert_eq!(*lock, 10);
}
#[cfg(not(target_arch = "wasm32"))]
#[test]
fn test_rw_access_in_unwind() {
let arc = Arc::new(RwLock::new(1));
let arc2 = arc.clone();
let _ = thread::spawn(move || {
struct Unwinder {
i: Arc<RwLock<isize>>,
}
impl Drop for Unwinder {
fn drop(&mut self) {
let mut lock = self.i.write();
*lock += 1;
}
}
let _u = Unwinder { i: arc2 };
panic!();
})
.join();
let lock = arc.read();
assert_eq!(*lock, 2);
}
#[test]
fn test_rwlock_unsized() {
let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
{
let b = &mut *rw.write();
b[0] = 4;
b[2] = 5;
}
let comp: &[i32] = &[4, 2, 5];
assert_eq!(&*rw.read(), comp);
}
#[test]
fn test_rwlock_try_write() {
use std::mem::drop;
let lock = RwLock::new(0isize);
let read_guard = lock.read();
let write_result = lock.try_write();
match write_result {
None => (),
Some(_) => panic!("try_write should not succeed while read_guard is in scope"),
}
drop(read_guard);
}
#[test]
fn test_rw_try_read() {
let m = RwLock::new(0);
mem::forget(m.write());
assert!(m.try_read().is_none());
}
#[test]
fn test_into_inner() {
let m = RwLock::new(NonCopy(10));
assert_eq!(m.into_inner(), NonCopy(10));
}
#[test]
fn test_into_inner_drop() {
struct Foo(Arc<AtomicUsize>);
impl Drop for Foo {
fn drop(&mut self) {
self.0.fetch_add(1, Ordering::SeqCst);
}
}
let num_drops = Arc::new(AtomicUsize::new(0));
let m = RwLock::new(Foo(num_drops.clone()));
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
{
let _inner = m.into_inner();
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
}
assert_eq!(num_drops.load(Ordering::SeqCst), 1);
}
#[test]
fn test_force_read_decrement() {
let m = RwLock::new(());
::std::mem::forget(m.read());
::std::mem::forget(m.read());
::std::mem::forget(m.read());
assert!(m.try_write().is_none());
unsafe {
m.force_read_decrement();
m.force_read_decrement();
}
assert!(m.try_write().is_none());
unsafe {
m.force_read_decrement();
}
assert!(m.try_write().is_some());
}
#[test]
fn test_force_write_unlock() {
let m = RwLock::new(());
::std::mem::forget(m.write());
assert!(m.try_read().is_none());
unsafe {
m.force_write_unlock();
}
assert!(m.try_read().is_some());
}
#[test]
fn test_upgrade_downgrade() {
let m = RwLock::new(());
{
let _r = m.read();
let upg = m.try_upgradeable_read().unwrap();
assert!(m.try_read().is_none());
assert!(m.try_write().is_none());
assert!(upg.try_upgrade().is_err());
}
{
let w = m.write();
assert!(m.try_upgradeable_read().is_none());
let _r = w.downgrade();
assert!(m.try_upgradeable_read().is_some());
assert!(m.try_read().is_some());
assert!(m.try_write().is_none());
}
{
let _u = m.upgradeable_read();
assert!(m.try_upgradeable_read().is_none());
}
assert!(m.try_upgradeable_read().unwrap().try_upgrade().is_ok());
}
}


@ -0,0 +1,198 @@
use super::one::RefMut;
use crate::lock::RwLockWriteGuard;
use crate::util;
use crate::util::SharedValue;
use crate::HashMap;
use core::hash::{BuildHasher, Hash};
use core::mem;
use core::ptr;
use std::collections::hash_map::RandomState;
pub enum Entry<'a, K, V, S = RandomState> {
Occupied(OccupiedEntry<'a, K, V, S>),
Vacant(VacantEntry<'a, K, V, S>),
}
impl<'a, K: Eq + Hash, V, S: BuildHasher> Entry<'a, K, V, S> {
/// Apply a function to the stored value if it exists.
pub fn and_modify(self, f: impl FnOnce(&mut V)) -> Self {
match self {
Entry::Occupied(mut entry) => {
f(entry.get_mut());
Entry::Occupied(entry)
}
Entry::Vacant(entry) => Entry::Vacant(entry),
}
}
/// Get the key of the entry.
pub fn key(&self) -> &K {
match *self {
Entry::Occupied(ref entry) => entry.key(),
Entry::Vacant(ref entry) => entry.key(),
}
}
/// Consume the entry and return its key.
pub fn into_key(self) -> K {
match self {
Entry::Occupied(entry) => entry.into_key(),
Entry::Vacant(entry) => entry.into_key(),
}
}
/// Return a mutable reference to the element if it exists,
/// otherwise insert the default and return a mutable reference to that.
pub fn or_default(self) -> RefMut<'a, K, V, S>
where
V: Default,
{
match self {
Entry::Occupied(entry) => entry.into_ref(),
Entry::Vacant(entry) => entry.insert(V::default()),
}
}
/// Return a mutable reference to the element if it exists,
/// otherwise insert the provided value and return a mutable reference to that.
pub fn or_insert(self, value: V) -> RefMut<'a, K, V, S> {
match self {
Entry::Occupied(entry) => entry.into_ref(),
Entry::Vacant(entry) => entry.insert(value),
}
}
/// Return a mutable reference to the element if it exists,
/// otherwise insert the result of a provided function and return a mutable reference to that.
pub fn or_insert_with(self, value: impl FnOnce() -> V) -> RefMut<'a, K, V, S> {
match self {
Entry::Occupied(entry) => entry.into_ref(),
Entry::Vacant(entry) => entry.insert(value()),
}
}
pub fn or_try_insert_with<E>(
self,
value: impl FnOnce() -> Result<V, E>,
) -> Result<RefMut<'a, K, V, S>, E> {
match self {
Entry::Occupied(entry) => Ok(entry.into_ref()),
Entry::Vacant(entry) => Ok(entry.insert(value()?)),
}
}
}
pub struct VacantEntry<'a, K, V, S> {
shard: RwLockWriteGuard<'a, HashMap<K, V, S>>,
key: K,
}
unsafe impl<'a, K: Eq + Hash + Send, V: Send, S: BuildHasher> Send for VacantEntry<'a, K, V, S> {}
unsafe impl<'a, K: Eq + Hash + Send + Sync, V: Send + Sync, S: BuildHasher> Sync
for VacantEntry<'a, K, V, S>
{
}
impl<'a, K: Eq + Hash, V, S: BuildHasher> VacantEntry<'a, K, V, S> {
pub(crate) fn new(shard: RwLockWriteGuard<'a, HashMap<K, V, S>>, key: K) -> Self {
Self { shard, key }
}
pub fn insert(mut self, value: V) -> RefMut<'a, K, V, S> {
unsafe {
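// `ptr::read` makes a bitwise copy of the key so the freshly inserted entry can be
// looked up again after `self.key` is moved into the map; `mem::forget(c)` below
// keeps that copy from being dropped a second time.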
let c: K = ptr::read(&self.key);
self.shard.insert(self.key, SharedValue::new(value));
let (k, v) = self.shard.get_key_value(&c).unwrap();
let k = util::change_lifetime_const(k);
let v = &mut *v.as_ptr();
let r = RefMut::new(self.shard, k, v);
mem::forget(c);
r
}
}
pub fn into_key(self) -> K {
self.key
}
pub fn key(&self) -> &K {
&self.key
}
}
pub struct OccupiedEntry<'a, K, V, S> {
shard: RwLockWriteGuard<'a, HashMap<K, V, S>>,
elem: (&'a K, &'a mut V),
key: K,
}
unsafe impl<'a, K: Eq + Hash + Send, V: Send, S: BuildHasher> Send for OccupiedEntry<'a, K, V, S> {}
unsafe impl<'a, K: Eq + Hash + Send + Sync, V: Send + Sync, S: BuildHasher> Sync
for OccupiedEntry<'a, K, V, S>
{
}
impl<'a, K: Eq + Hash, V, S: BuildHasher> OccupiedEntry<'a, K, V, S> {
pub(crate) fn new(
shard: RwLockWriteGuard<'a, HashMap<K, V, S>>,
key: K,
elem: (&'a K, &'a mut V),
) -> Self {
Self { shard, elem, key }
}
pub fn get(&self) -> &V {
self.elem.1
}
pub fn get_mut(&mut self) -> &mut V {
self.elem.1
}
pub fn insert(&mut self, value: V) -> V {
mem::replace(self.elem.1, value)
}
pub fn into_ref(self) -> RefMut<'a, K, V, S> {
RefMut::new(self.shard, self.elem.0, self.elem.1)
}
pub fn into_key(self) -> K {
self.key
}
pub fn key(&self) -> &K {
self.elem.0
}
pub fn remove(mut self) -> V {
self.shard.remove(self.elem.0).unwrap().into_inner()
}
pub fn remove_entry(mut self) -> (K, V) {
let (k, v) = self.shard.remove_entry(self.elem.0).unwrap();
(k, v.into_inner())
}
pub fn replace_entry(mut self, value: V) -> (K, V) {
let nk = self.key;
let (k, v) = self.shard.remove_entry(self.elem.0).unwrap();
self.shard.insert(nk, SharedValue::new(value));
(k, v.into_inner())
}
}


@ -0,0 +1,3 @@
pub mod entry;
pub mod multiple;
pub mod one;


@ -0,0 +1,120 @@
use crate::lock::{RwLockReadGuard, RwLockWriteGuard};
use crate::HashMap;
use core::hash::BuildHasher;
use core::hash::Hash;
use core::ops::{Deref, DerefMut};
use std::collections::hash_map::RandomState;
use std::sync::Arc;
// -- Shared
pub struct RefMulti<'a, K, V, S = RandomState> {
_guard: Arc<RwLockReadGuard<'a, HashMap<K, V, S>>>,
k: &'a K,
v: &'a V,
}
unsafe impl<'a, K: Eq + Hash + Send, V: Send, S: BuildHasher> Send for RefMulti<'a, K, V, S> {}
unsafe impl<'a, K: Eq + Hash + Send + Sync, V: Send + Sync, S: BuildHasher> Sync
for RefMulti<'a, K, V, S>
{
}
impl<'a, K: Eq + Hash, V, S: BuildHasher> RefMulti<'a, K, V, S> {
pub(crate) fn new(
guard: Arc<RwLockReadGuard<'a, HashMap<K, V, S>>>,
k: &'a K,
v: &'a V,
) -> Self {
Self {
_guard: guard,
k,
v,
}
}
pub fn key(&self) -> &K {
self.k
}
pub fn value(&self) -> &V {
self.v
}
pub fn pair(&self) -> (&K, &V) {
(self.k, self.v)
}
}
impl<'a, K: Eq + Hash, V, S: BuildHasher> Deref for RefMulti<'a, K, V, S> {
type Target = V;
fn deref(&self) -> &V {
self.value()
}
}
// --
// -- Unique
pub struct RefMutMulti<'a, K, V, S = RandomState> {
_guard: Arc<RwLockWriteGuard<'a, HashMap<K, V, S>>>,
k: &'a K,
v: &'a mut V,
}
unsafe impl<'a, K: Eq + Hash + Send, V: Send, S: BuildHasher> Send for RefMutMulti<'a, K, V, S> {}
unsafe impl<'a, K: Eq + Hash + Send + Sync, V: Send + Sync, S: BuildHasher> Sync
for RefMutMulti<'a, K, V, S>
{
}
impl<'a, K: Eq + Hash, V, S: BuildHasher> RefMutMulti<'a, K, V, S> {
pub(crate) fn new(
guard: Arc<RwLockWriteGuard<'a, HashMap<K, V, S>>>,
k: &'a K,
v: &'a mut V,
) -> Self {
Self {
_guard: guard,
k,
v,
}
}
pub fn key(&self) -> &K {
self.k
}
pub fn value(&self) -> &V {
self.v
}
pub fn value_mut(&mut self) -> &mut V {
self.v
}
pub fn pair(&self) -> (&K, &V) {
(self.k, self.v)
}
pub fn pair_mut(&mut self) -> (&K, &mut V) {
(self.k, self.v)
}
}
impl<'a, K: Eq + Hash, V, S: BuildHasher> Deref for RefMutMulti<'a, K, V, S> {
type Target = V;
fn deref(&self) -> &V {
self.value()
}
}
impl<'a, K: Eq + Hash, V, S: BuildHasher> DerefMut for RefMutMulti<'a, K, V, S> {
fn deref_mut(&mut self) -> &mut V {
self.value_mut()
}
}
// --


@ -0,0 +1,114 @@
use crate::lock::{RwLockReadGuard, RwLockWriteGuard};
use crate::HashMap;
use core::hash::{BuildHasher, Hash};
use core::ops::{Deref, DerefMut};
use std::collections::hash_map::RandomState;
// -- Shared
pub struct Ref<'a, K, V, S = RandomState> {
_guard: RwLockReadGuard<'a, HashMap<K, V, S>>,
k: &'a K,
v: &'a V,
}
unsafe impl<'a, K: Eq + Hash + Send, V: Send, S: BuildHasher> Send for Ref<'a, K, V, S> {}
unsafe impl<'a, K: Eq + Hash + Send + Sync, V: Send + Sync, S: BuildHasher> Sync
for Ref<'a, K, V, S>
{
}
impl<'a, K: Eq + Hash, V, S: BuildHasher> Ref<'a, K, V, S> {
pub(crate) fn new(guard: RwLockReadGuard<'a, HashMap<K, V, S>>, k: &'a K, v: &'a V) -> Self {
Self {
_guard: guard,
k,
v,
}
}
pub fn key(&self) -> &K {
self.k
}
pub fn value(&self) -> &V {
self.v
}
pub fn pair(&self) -> (&K, &V) {
(self.k, self.v)
}
}
impl<'a, K: Eq + Hash, V, S: BuildHasher> Deref for Ref<'a, K, V, S> {
type Target = V;
fn deref(&self) -> &V {
self.value()
}
}
// --
// -- Unique
pub struct RefMut<'a, K, V, S = RandomState> {
guard: RwLockWriteGuard<'a, HashMap<K, V, S>>,
k: &'a K,
v: &'a mut V,
}
unsafe impl<'a, K: Eq + Hash + Send, V: Send, S: BuildHasher> Send for RefMut<'a, K, V, S> {}
unsafe impl<'a, K: Eq + Hash + Send + Sync, V: Send + Sync, S: BuildHasher> Sync
for RefMut<'a, K, V, S>
{
}
impl<'a, K: Eq + Hash, V, S: BuildHasher> RefMut<'a, K, V, S> {
pub(crate) fn new(
guard: RwLockWriteGuard<'a, HashMap<K, V, S>>,
k: &'a K,
v: &'a mut V,
) -> Self {
Self { guard, k, v }
}
pub fn key(&self) -> &K {
self.k
}
pub fn value(&self) -> &V {
self.v
}
pub fn value_mut(&mut self) -> &mut V {
self.v
}
pub fn pair(&self) -> (&K, &V) {
(self.k, self.v)
}
pub fn pair_mut(&mut self) -> (&K, &mut V) {
(self.k, self.v)
}
pub fn downgrade(self) -> Ref<'a, K, V, S> {
Ref::new(self.guard.downgrade(), self.k, self.v)
}
}
impl<'a, K: Eq + Hash, V, S: BuildHasher> Deref for RefMut<'a, K, V, S> {
type Target = V;
fn deref(&self) -> &V {
self.value()
}
}
impl<'a, K: Eq + Hash, V, S: BuildHasher> DerefMut for RefMut<'a, K, V, S> {
fn deref_mut(&mut self) -> &mut V {
self.value_mut()
}
}
// --


@ -0,0 +1,221 @@
use crate::lock::RwLock;
use crate::mapref::multiple::{RefMulti, RefMutMulti};
use crate::util;
use crate::{DashMap, HashMap};
use core::hash::{BuildHasher, Hash};
use rayon::iter::plumbing::UnindexedConsumer;
use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelExtend, ParallelIterator};
use std::collections::hash_map::RandomState;
use std::sync::Arc;
impl<K, V, S> ParallelExtend<(K, V)> for DashMap<K, V, S>
where
K: Send + Sync + Eq + Hash,
V: Send + Sync,
S: Send + Sync + Clone + BuildHasher,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = (K, V)>,
{
(&*self).par_extend(par_iter);
}
}
// Since we don't actually need mutability, we can implement this on a
// reference, similar to `io::Write for &File`.
impl<K, V, S> ParallelExtend<(K, V)> for &'_ DashMap<K, V, S>
where
K: Send + Sync + Eq + Hash,
V: Send + Sync,
S: Send + Sync + Clone + BuildHasher,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = (K, V)>,
{
let &mut map = self;
par_iter.into_par_iter().for_each(move |(key, value)| {
map.insert(key, value);
});
}
}
impl<K, V, S> FromParallelIterator<(K, V)> for DashMap<K, V, S>
where
K: Send + Sync + Eq + Hash,
V: Send + Sync,
S: Send + Sync + Clone + Default + BuildHasher,
{
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = (K, V)>,
{
let map = Self::default();
(&map).par_extend(par_iter);
map
}
}
// Implementation note: while the shards will iterate in parallel, we flatten
// sequentially within each shard (`flat_map_iter`), because the standard
// `HashMap` only implements `ParallelIterator` by collecting to a `Vec` first.
// There is real parallel support in the `hashbrown/rayon` feature, but we don't
// always use that map.
impl<K, V, S> IntoParallelIterator for DashMap<K, V, S>
where
K: Send + Eq + Hash,
V: Send,
S: Send + Clone + BuildHasher,
{
type Iter = OwningIter<K, V, S>;
type Item = (K, V);
fn into_par_iter(self) -> Self::Iter {
OwningIter {
shards: self.shards,
}
}
}
pub struct OwningIter<K, V, S = RandomState> {
shards: Box<[RwLock<HashMap<K, V, S>>]>,
}
impl<K, V, S> ParallelIterator for OwningIter<K, V, S>
where
K: Send + Eq + Hash,
V: Send,
S: Send + Clone + BuildHasher,
{
type Item = (K, V);
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
Vec::from(self.shards)
.into_par_iter()
.flat_map_iter(|shard| {
shard
.into_inner()
.into_iter()
.map(|(k, v)| (k, v.into_inner()))
})
.drive_unindexed(consumer)
}
}
// This impl also enables `IntoParallelRefIterator::par_iter`
impl<'a, K, V, S> IntoParallelIterator for &'a DashMap<K, V, S>
where
K: Send + Sync + Eq + Hash,
V: Send + Sync,
S: Send + Sync + Clone + BuildHasher,
{
type Iter = Iter<'a, K, V, S>;
type Item = RefMulti<'a, K, V, S>;
fn into_par_iter(self) -> Self::Iter {
Iter {
shards: &self.shards,
}
}
}
pub struct Iter<'a, K, V, S = RandomState> {
shards: &'a [RwLock<HashMap<K, V, S>>],
}
impl<'a, K, V, S> ParallelIterator for Iter<'a, K, V, S>
where
K: Send + Sync + Eq + Hash,
V: Send + Sync,
S: Send + Sync + Clone + BuildHasher,
{
type Item = RefMulti<'a, K, V, S>;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.shards
.into_par_iter()
.flat_map_iter(|shard| {
let guard = shard.read();
let sref: &'a HashMap<K, V, S> = unsafe { util::change_lifetime_const(&*guard) };
let guard = Arc::new(guard);
sref.iter().map(move |(k, v)| {
let guard = Arc::clone(&guard);
RefMulti::new(guard, k, v.get())
})
})
.drive_unindexed(consumer)
}
}
// This impl also enables `IntoParallelRefMutIterator::par_iter_mut`
impl<'a, K, V, S> IntoParallelIterator for &'a mut DashMap<K, V, S>
where
K: Send + Sync + Eq + Hash,
V: Send + Sync,
S: Send + Sync + Clone + BuildHasher,
{
type Iter = IterMut<'a, K, V, S>;
type Item = RefMutMulti<'a, K, V, S>;
fn into_par_iter(self) -> Self::Iter {
IterMut {
shards: &self.shards,
}
}
}
impl<'a, K, V, S> DashMap<K, V, S>
where
K: Send + Sync + Eq + Hash,
V: Send + Sync,
S: Send + Sync + Clone + BuildHasher,
{
// Unlike `IntoParallelRefMutIterator::par_iter_mut`, we only _need_ `&self`.
pub fn par_iter_mut(&self) -> IterMut<'_, K, V, S> {
IterMut {
shards: &self.shards,
}
}
}
pub struct IterMut<'a, K, V, S = RandomState> {
shards: &'a [RwLock<HashMap<K, V, S>>],
}
impl<'a, K, V, S> ParallelIterator for IterMut<'a, K, V, S>
where
K: Send + Sync + Eq + Hash,
V: Send + Sync,
S: Send + Sync + Clone + BuildHasher,
{
type Item = RefMutMulti<'a, K, V, S>;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.shards
.into_par_iter()
.flat_map_iter(|shard| {
let mut guard = shard.write();
let sref: &'a mut HashMap<K, V, S> =
unsafe { util::change_lifetime_mut(&mut *guard) };
let guard = Arc::new(guard);
sref.iter_mut().map(move |(k, v)| {
let guard = Arc::clone(&guard);
RefMutMulti::new(guard, k, v.get_mut())
})
})
.drive_unindexed(consumer)
}
}


@ -0,0 +1,121 @@
use crate::setref::multiple::RefMulti;
use crate::DashSet;
use core::hash::{BuildHasher, Hash};
use rayon::iter::plumbing::UnindexedConsumer;
use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelExtend, ParallelIterator};
use std::collections::hash_map::RandomState;
impl<K, S> ParallelExtend<K> for DashSet<K, S>
where
K: Send + Sync + Eq + Hash,
S: Send + Sync + Clone + BuildHasher,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = K>,
{
(&*self).par_extend(par_iter);
}
}
// Since we don't actually need mutability, we can implement this on a
// reference, similar to `io::Write for &File`.
impl<K, S> ParallelExtend<K> for &'_ DashSet<K, S>
where
K: Send + Sync + Eq + Hash,
S: Send + Sync + Clone + BuildHasher,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = K>,
{
let &mut set = self;
par_iter.into_par_iter().for_each(move |key| {
set.insert(key);
});
}
}
impl<K, S> FromParallelIterator<K> for DashSet<K, S>
where
K: Send + Sync + Eq + Hash,
S: Send + Sync + Clone + Default + BuildHasher,
{
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = K>,
{
let set = Self::default();
(&set).par_extend(par_iter);
set
}
}
impl<K, S> IntoParallelIterator for DashSet<K, S>
where
K: Send + Eq + Hash,
S: Send + Clone + BuildHasher,
{
type Iter = OwningIter<K, S>;
type Item = K;
fn into_par_iter(self) -> Self::Iter {
OwningIter {
inner: self.inner.into_par_iter(),
}
}
}
pub struct OwningIter<K, S = RandomState> {
inner: super::map::OwningIter<K, (), S>,
}
impl<K, S> ParallelIterator for OwningIter<K, S>
where
K: Send + Eq + Hash,
S: Send + Clone + BuildHasher,
{
type Item = K;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.inner.map(|(k, _)| k).drive_unindexed(consumer)
}
}
// This impl also enables `IntoParallelRefIterator::par_iter`
impl<'a, K, S> IntoParallelIterator for &'a DashSet<K, S>
where
K: Send + Sync + Eq + Hash,
S: Send + Sync + Clone + BuildHasher,
{
type Iter = Iter<'a, K, S>;
type Item = RefMulti<'a, K, S>;
fn into_par_iter(self) -> Self::Iter {
Iter {
inner: (&self.inner).into_par_iter(),
}
}
}
pub struct Iter<'a, K, S = RandomState> {
inner: super::map::Iter<'a, K, (), S>,
}
impl<'a, K, S> ParallelIterator for Iter<'a, K, S>
where
K: Send + Sync + Eq + Hash,
S: Send + Sync + Clone + BuildHasher,
{
type Item = RefMulti<'a, K, S>;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.inner.map(RefMulti::new).drive_unindexed(consumer)
}
}
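The same pattern works for sets; a hedged sketch (illustrative names, again assuming the `rayon` feature is enabled):

use dashmap::DashSet;
use rayon::prelude::*;

// `&DashSet` implements `IntoParallelIterator` (see above), so `par_iter()`
// yields `RefMulti` guards that deref to the stored keys.
fn long_words(words: &DashSet<String>) -> usize {
    words.par_iter().filter(|word| word.len() > 7).count()
}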


@ -0,0 +1,243 @@
use crate::t::Map;
use crate::{DashMap, HashMap};
use core::borrow::Borrow;
use core::fmt;
use core::hash::{BuildHasher, Hash};
use std::collections::hash_map::RandomState;
/// A read-only view into a `DashMap`. Allows you to obtain raw references to the stored values.
pub struct ReadOnlyView<K, V, S = RandomState> {
map: DashMap<K, V, S>,
}
impl<K: Eq + Hash + Clone, V: Clone, S: Clone> Clone for ReadOnlyView<K, V, S> {
fn clone(&self) -> Self {
Self {
map: self.map.clone(),
}
}
}
impl<K: Eq + Hash + fmt::Debug, V: fmt::Debug, S: BuildHasher + Clone> fmt::Debug
for ReadOnlyView<K, V, S>
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.map.fmt(f)
}
}
impl<K, V, S> ReadOnlyView<K, V, S> {
pub(crate) fn new(map: DashMap<K, V, S>) -> Self {
Self { map }
}
/// Consumes this `ReadOnlyView`, returning the underlying `DashMap`.
pub fn into_inner(self) -> DashMap<K, V, S> {
self.map
}
}
impl<'a, K: 'a + Eq + Hash, V: 'a, S: BuildHasher + Clone> ReadOnlyView<K, V, S> {
/// Returns the number of elements in the map.
pub fn len(&self) -> usize {
self.map.len()
}
/// Returns `true` if the map contains no elements.
pub fn is_empty(&self) -> bool {
self.map.is_empty()
}
/// Returns the number of elements the map can hold without reallocating.
pub fn capacity(&self) -> usize {
self.map.capacity()
}
/// Returns `true` if the map contains a value for the specified key.
pub fn contains_key<Q>(&'a self, key: &Q) -> bool
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
let hash = self.map.hash_usize(&key);
let idx = self.map.determine_shard(hash);
let shard = unsafe { self.map._get_read_shard(idx) };
shard.contains_key(key)
}
/// Returns a reference to the value corresponding to the key.
pub fn get<Q>(&'a self, key: &Q) -> Option<&'a V>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
let hash = self.map.hash_usize(&key);
let idx = self.map.determine_shard(hash);
let shard = unsafe { self.map._get_read_shard(idx) };
shard.get(key).map(|v| v.get())
}
/// Returns the key-value pair corresponding to the supplied key.
pub fn get_key_value<Q>(&'a self, key: &Q) -> Option<(&'a K, &'a V)>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
let hash = self.map.hash_usize(&key);
let idx = self.map.determine_shard(hash);
let shard = unsafe { self.map._get_read_shard(idx) };
shard.get_key_value(key).map(|(k, v)| (k, v.get()))
}
fn shard_read_iter(&'a self) -> impl Iterator<Item = &'a HashMap<K, V, S>> + 'a {
(0..self.map._shard_count())
.map(move |shard_i| unsafe { self.map._get_read_shard(shard_i) })
}
/// An iterator visiting all key-value pairs in arbitrary order. The iterator element type is `(&'a K, &'a V)`.
pub fn iter(&'a self) -> impl Iterator<Item = (&'a K, &'a V)> + 'a {
self.shard_read_iter()
.flat_map(|shard| shard.iter())
.map(|(k, v)| (k, v.get()))
}
/// An iterator visiting all keys in arbitrary order. The iterator element type is `&'a K`.
pub fn keys(&'a self) -> impl Iterator<Item = &'a K> + 'a {
self.shard_read_iter().flat_map(|shard| shard.keys())
}
/// An iterator visiting all values in arbitrary order. The iterator element type is `&'a V`.
pub fn values(&'a self) -> impl Iterator<Item = &'a V> + 'a {
self.shard_read_iter()
.flat_map(|shard| shard.values())
.map(|v| v.get())
}
}
#[cfg(test)]
mod tests {
use crate::DashMap;
fn construct_sample_map() -> DashMap<i32, String> {
let map = DashMap::new();
map.insert(1, "one".to_string());
map.insert(10, "ten".to_string());
map.insert(27, "twenty seven".to_string());
map.insert(45, "forty five".to_string());
map
}
#[test]
fn test_properties() {
let map = construct_sample_map();
let view = map.clone().into_read_only();
assert_eq!(view.is_empty(), map.is_empty());
assert_eq!(view.len(), map.len());
assert_eq!(view.capacity(), map.capacity());
let new_map = view.into_inner();
assert_eq!(new_map.is_empty(), map.is_empty());
assert_eq!(new_map.len(), map.len());
assert_eq!(new_map.capacity(), map.capacity());
}
#[test]
fn test_get() {
let map = construct_sample_map();
let view = map.clone().into_read_only();
for key in map.iter().map(|entry| *entry.key()) {
assert!(view.contains_key(&key));
let map_entry = map.get(&key).unwrap();
assert_eq!(view.get(&key).unwrap(), map_entry.value());
let key_value: (&i32, &String) = view.get_key_value(&key).unwrap();
assert_eq!(key_value.0, map_entry.key());
assert_eq!(key_value.1, map_entry.value());
}
}
#[test]
fn test_iters() {
let map = construct_sample_map();
let view = map.clone().into_read_only();
let mut visited_items = Vec::new();
for (key, value) in view.iter() {
assert!(map.contains_key(key));
let map_entry = map.get(key).unwrap();
assert_eq!(key, map_entry.key());
assert_eq!(value, map_entry.value());
visited_items.push((key, value));
}
let mut visited_keys = Vec::new();
for key in view.keys() {
assert!(map.contains_key(key));
let map_entry = map.get(key).unwrap();
assert_eq!(key, map_entry.key());
assert_eq!(view.get(key).unwrap(), map_entry.value());
visited_keys.push(key);
}
let mut visited_values = Vec::new();
for value in view.values() {
visited_values.push(value);
}
for entry in map.iter() {
let key = entry.key();
let value = entry.value();
assert!(visited_keys.contains(&key));
assert!(visited_values.contains(&value));
assert!(visited_items.contains(&(key, value)));
}
}
}
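A short sketch of how a caller might use this view (illustrative names, not part of the vendored file): freezing a map trades mutability for plain `&V` access with no per-call shard locking, and `into_inner` hands the map back when writes are needed again.

use dashmap::DashMap;

fn frozen_lookup(map: DashMap<String, u32>) -> Option<u32> {
    // `into_read_only` consumes the map, so no other handle can mutate it.
    let view = map.into_read_only();
    let hit = view.get("some-key").copied(); // plain Option<&u32>, no guard
    let _map = view.into_inner(); // mutation is possible again from here on
    hit
}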

third_party/rust/dashmap/src/serde.rs vendored Normal file

@ -0,0 +1,148 @@
use crate::{DashMap, DashSet};
use core::fmt;
use core::hash::Hash;
use core::marker::PhantomData;
use serde::de::{Deserialize, MapAccess, SeqAccess, Visitor};
use serde::ser::{Serialize, SerializeMap, SerializeSeq, Serializer};
use serde::Deserializer;
pub struct DashMapVisitor<K, V> {
marker: PhantomData<fn() -> DashMap<K, V>>,
}
impl<K, V> DashMapVisitor<K, V>
where
K: Eq + Hash,
{
fn new() -> Self {
DashMapVisitor {
marker: PhantomData,
}
}
}
impl<'de, K, V> Visitor<'de> for DashMapVisitor<K, V>
where
K: Deserialize<'de> + Eq + Hash,
V: Deserialize<'de>,
{
type Value = DashMap<K, V>;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("a DashMap")
}
fn visit_map<M>(self, mut access: M) -> Result<Self::Value, M::Error>
where
M: MapAccess<'de>,
{
let map = DashMap::with_capacity(access.size_hint().unwrap_or(0));
while let Some((key, value)) = access.next_entry()? {
map.insert(key, value);
}
Ok(map)
}
}
impl<'de, K, V> Deserialize<'de> for DashMap<K, V>
where
K: Deserialize<'de> + Eq + Hash,
V: Deserialize<'de>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_map(DashMapVisitor::<K, V>::new())
}
}
impl<K, V> Serialize for DashMap<K, V>
where
K: Serialize + Eq + Hash,
V: Serialize,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut map = serializer.serialize_map(Some(self.len()))?;
for ref_multi in self.iter() {
map.serialize_entry(ref_multi.key(), ref_multi.value())?;
}
map.end()
}
}
pub struct DashSetVisitor<K> {
marker: PhantomData<fn() -> DashSet<K>>,
}
impl<K> DashSetVisitor<K>
where
K: Eq + Hash,
{
fn new() -> Self {
DashSetVisitor {
marker: PhantomData,
}
}
}
impl<'de, K> Visitor<'de> for DashSetVisitor<K>
where
K: Deserialize<'de> + Eq + Hash,
{
type Value = DashSet<K>;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("a DashSet")
}
fn visit_seq<M>(self, mut access: M) -> Result<Self::Value, M::Error>
where
M: SeqAccess<'de>,
{
let map = DashSet::with_capacity(access.size_hint().unwrap_or(0));
while let Some(key) = access.next_element()? {
map.insert(key);
}
Ok(map)
}
}
impl<'de, K> Deserialize<'de> for DashSet<K>
where
K: Deserialize<'de> + Eq + Hash,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_seq(DashSetVisitor::<K>::new())
}
}
impl<K> Serialize for DashSet<K>
where
K: Serialize + Eq + Hash,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut seq = serializer.serialize_seq(Some(self.len()))?;
for ref_multi in self.iter() {
seq.serialize_element(ref_multi.key())?;
}
seq.end()
}
}
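A round-trip sketch using the impls above (not part of the vendored file), assuming `dashmap` is built with its `serde` feature and `serde_json` is available; names are illustrative:

use dashmap::DashMap;

fn round_trip() -> serde_json::Result<()> {
    let map: DashMap<String, u32> = DashMap::new();
    map.insert("answer".to_string(), 42);
    // A DashMap serializes as a JSON object, e.g. {"answer":42}.
    let json = serde_json::to_string(&map)?;
    let back: DashMap<String, u32> = serde_json::from_str(&json)?;
    assert_eq!(back.get("answer").as_deref(), Some(&42));
    Ok(())
}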

third_party/rust/dashmap/src/set.rs vendored Normal file

@ -0,0 +1,458 @@
use crate::iter_set::{Iter, OwningIter};
#[cfg(feature = "raw-api")]
use crate::lock::RwLock;
use crate::setref::one::Ref;
use crate::DashMap;
#[cfg(feature = "raw-api")]
use crate::HashMap;
use cfg_if::cfg_if;
use core::borrow::Borrow;
use core::fmt;
use core::hash::{BuildHasher, Hash};
use core::iter::FromIterator;
use std::collections::hash_map::RandomState;
/// DashSet is a thin wrapper around [`DashMap`] using `()` as the value type. It exposes
/// methods and types that are more convenient to work with on a set.
///
/// [`DashMap`]: struct.DashMap.html
pub struct DashSet<K, S = RandomState> {
pub(crate) inner: DashMap<K, (), S>,
}
impl<K: Eq + Hash + fmt::Debug, S: BuildHasher + Clone> fmt::Debug for DashSet<K, S> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&self.inner, f)
}
}
impl<K: Eq + Hash + Clone, S: Clone> Clone for DashSet<K, S> {
fn clone(&self) -> Self {
Self {
inner: self.inner.clone(),
}
}
fn clone_from(&mut self, source: &Self) {
self.inner.clone_from(&source.inner)
}
}
impl<K, S> Default for DashSet<K, S>
where
K: Eq + Hash,
S: Default + BuildHasher + Clone,
{
fn default() -> Self {
Self::with_hasher(Default::default())
}
}
impl<'a, K: 'a + Eq + Hash> DashSet<K, RandomState> {
/// Creates a new DashSet with a capacity of 0.
///
/// # Examples
///
/// ```
/// use dashmap::DashSet;
///
/// let games = DashSet::new();
/// games.insert("Veloren");
/// ```
pub fn new() -> Self {
Self::with_hasher(RandomState::default())
}
/// Creates a new DashSet with a specified starting capacity.
///
/// # Examples
///
/// ```
/// use dashmap::DashSet;
///
/// let numbers = DashSet::with_capacity(2);
/// numbers.insert(2);
/// numbers.insert(8);
/// ```
pub fn with_capacity(capacity: usize) -> Self {
Self::with_capacity_and_hasher(capacity, RandomState::default())
}
}
impl<'a, K: 'a + Eq + Hash, S: BuildHasher + Clone> DashSet<K, S> {
/// Creates a new DashSet with a capacity of 0 and the provided hasher.
///
/// # Examples
///
/// ```
/// use dashmap::DashSet;
/// use std::collections::hash_map::RandomState;
///
/// let s = RandomState::new();
/// let games = DashSet::with_hasher(s);
/// games.insert("Veloren");
/// ```
pub fn with_hasher(hasher: S) -> Self {
Self::with_capacity_and_hasher(0, hasher)
}
/// Creates a new DashSet with a specified starting capacity and hasher.
///
/// # Examples
///
/// ```
/// use dashmap::DashSet;
/// use std::collections::hash_map::RandomState;
///
/// let s = RandomState::new();
/// let numbers = DashSet::with_capacity_and_hasher(2, s);
/// numbers.insert(2);
/// numbers.insert(8);
/// ```
pub fn with_capacity_and_hasher(capacity: usize, hasher: S) -> Self {
Self {
inner: DashMap::with_capacity_and_hasher(capacity, hasher),
}
}
/// Hash a given item to produce a usize.
/// Uses the provided or default HashBuilder.
pub fn hash_usize<T: Hash>(&self, item: &T) -> usize {
self.inner.hash_usize(item)
}
cfg_if! {
if #[cfg(feature = "raw-api")] {
/// Allows you to peek at the inner shards that store your data.
/// You should probably not use this unless you know what you are doing.
///
/// Requires the `raw-api` feature to be enabled.
///
/// # Examples
///
/// ```
/// use dashmap::DashSet;
///
/// let set = DashSet::<()>::new();
/// println!("Amount of shards: {}", set.shards().len());
/// ```
pub fn shards(&self) -> &[RwLock<HashMap<K, (), S>>] {
self.inner.shards()
}
}
}
cfg_if! {
if #[cfg(feature = "raw-api")] {
/// Finds which shard a certain key is stored in.
/// You should probably not use this unless you know what you are doing.
/// Note that shard selection is dependent on the default or provided HashBuilder.
///
/// Requires the `raw-api` feature to be enabled.
///
/// # Examples
///
/// ```
/// use dashmap::DashSet;
///
/// let set = DashSet::new();
/// set.insert("coca-cola");
/// println!("coca-cola is stored in shard: {}", set.determine_map("coca-cola"));
/// ```
pub fn determine_map<Q>(&self, key: &Q) -> usize
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
self.inner.determine_map(key)
}
}
}
cfg_if! {
if #[cfg(feature = "raw-api")] {
/// Finds which shard a certain hash is stored in.
///
/// Requires the `raw-api` feature to be enabled.
///
/// # Examples
///
/// ```
/// use dashmap::DashSet;
///
/// let set: DashSet<i32> = DashSet::new();
/// let key = "key";
/// let hash = set.hash_usize(&key);
/// println!("hash is stored in shard: {}", set.determine_shard(hash));
/// ```
pub fn determine_shard(&self, hash: usize) -> usize {
self.inner.determine_shard(hash)
}
}
}
/// Inserts a key into the set. Returns true if the key was not already in the set.
///
/// # Examples
///
/// ```
/// use dashmap::DashSet;
///
/// let set = DashSet::new();
/// set.insert("I am the key!");
/// ```
pub fn insert(&self, key: K) -> bool {
self.inner.insert(key, ()).is_none()
}
/// Removes an entry from the set, returning the key if it existed in the set.
///
/// # Examples
///
/// ```
/// use dashmap::DashSet;
///
/// let soccer_team = DashSet::new();
/// soccer_team.insert("Jack");
/// assert_eq!(soccer_team.remove("Jack").unwrap(), "Jack");
/// ```
pub fn remove<Q>(&self, key: &Q) -> Option<K>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
self.inner.remove(key).map(|(k, _)| k)
}
/// Removes an entry from the set, returning the key
/// if the entry existed and the provided conditional function returned true.
///
/// ```
/// use dashmap::DashSet;
///
/// let soccer_team = DashSet::new();
/// soccer_team.insert("Sam");
/// soccer_team.remove_if("Sam", |player| player.starts_with("Ja"));
/// assert!(soccer_team.contains("Sam"));
/// ```
/// ```
/// use dashmap::DashSet;
///
/// let soccer_team = DashSet::new();
/// soccer_team.insert("Sam");
/// soccer_team.remove_if("Jacob", |player| player.starts_with("Ja"));
/// assert!(!soccer_team.contains("Jacob"));
/// ```
pub fn remove_if<Q>(&self, key: &Q, f: impl FnOnce(&K) -> bool) -> Option<K>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
// TODO: Don't create another closure around f
self.inner.remove_if(key, |k, _| f(k)).map(|(k, _)| k)
}
/// Creates an iterator over the DashSet yielding immutable references to the keys.
///
/// # Examples
///
/// ```
/// use dashmap::DashSet;
///
/// let words = DashSet::new();
/// words.insert("hello");
/// assert_eq!(words.iter().count(), 1);
/// ```
pub fn iter(&'a self) -> Iter<'a, K, S, DashMap<K, (), S>> {
let iter = self.inner.iter();
Iter::new(iter)
}
/// Get a reference to an entry in the set
///
/// # Examples
///
/// ```
/// use dashmap::DashSet;
///
/// let youtubers = DashSet::new();
/// youtubers.insert("Bosnian Bill");
/// assert_eq!(*youtubers.get("Bosnian Bill").unwrap(), "Bosnian Bill");
/// ```
pub fn get<Q>(&'a self, key: &Q) -> Option<Ref<'a, K, S>>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
self.inner.get(key).map(Ref::new)
}
/// Remove excess capacity to reduce memory usage.
pub fn shrink_to_fit(&self) {
self.inner.shrink_to_fit()
}
/// Retains elements for which the given predicate returns true
/// and discards elements for which it returns false.
///
/// # Examples
///
/// ```
/// use dashmap::DashSet;
///
/// let people = DashSet::new();
/// people.insert("Albin");
/// people.insert("Jones");
/// people.insert("Charlie");
/// people.retain(|name| name.contains('i'));
/// assert_eq!(people.len(), 2);
/// ```
pub fn retain(&self, mut f: impl FnMut(&K) -> bool) {
self.inner.retain(|k, _| f(k))
}
/// Fetches the total number of keys stored in the set.
///
/// # Examples
///
/// ```
/// use dashmap::DashSet;
///
/// let people = DashSet::new();
/// people.insert("Albin");
/// people.insert("Jones");
/// people.insert("Charlie");
/// assert_eq!(people.len(), 3);
/// ```
pub fn len(&self) -> usize {
self.inner.len()
}
/// Checks if the set is empty or not.
///
/// # Examples
///
/// ```
/// use dashmap::DashSet;
///
/// let map = DashSet::<()>::new();
/// assert!(map.is_empty());
/// ```
pub fn is_empty(&self) -> bool {
self.inner.is_empty()
}
/// Removes all keys in the set.
///
/// # Examples
///
/// ```
/// use dashmap::DashSet;
///
/// let people = DashSet::new();
/// people.insert("Albin");
/// assert!(!people.is_empty());
/// people.clear();
/// assert!(people.is_empty());
/// ```
pub fn clear(&self) {
self.inner.clear()
}
/// Returns how many keys the set can store without reallocating.
pub fn capacity(&self) -> usize {
self.inner.capacity()
}
/// Checks if the set contains a specific key.
///
/// # Examples
///
/// ```
/// use dashmap::DashSet;
///
/// let people = DashSet::new();
/// people.insert("Dakota Cherries");
/// assert!(people.contains("Dakota Cherries"));
/// ```
pub fn contains<Q>(&self, key: &Q) -> bool
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
self.inner.contains_key(key)
}
}
impl<'a, K: Eq + Hash, S: BuildHasher + Clone> IntoIterator for DashSet<K, S> {
type Item = K;
type IntoIter = OwningIter<K, S>;
fn into_iter(self) -> Self::IntoIter {
OwningIter::new(self.inner.into_iter())
}
}
impl<K: Eq + Hash, S: BuildHasher + Clone> Extend<K> for DashSet<K, S> {
fn extend<T: IntoIterator<Item = K>>(&mut self, iter: T) {
let iter = iter.into_iter().map(|k| (k, ()));
self.inner.extend(iter)
}
}
impl<K: Eq + Hash> FromIterator<K> for DashSet<K, RandomState> {
fn from_iter<I: IntoIterator<Item = K>>(iter: I) -> Self {
let mut set = DashSet::new();
set.extend(iter);
set
}
}
#[cfg(test)]
mod tests {
use crate::DashSet;
#[test]
fn test_basic() {
let set = DashSet::new();
set.insert(0);
assert_eq!(set.get(&0).as_deref(), Some(&0));
}
#[test]
fn test_default() {
let set: DashSet<u32> = DashSet::default();
set.insert(0);
assert_eq!(set.get(&0).as_deref(), Some(&0));
}
#[test]
fn test_multiple_hashes() {
let set = DashSet::<u32>::default();
for i in 0..100 {
assert!(set.insert(i));
}
for i in 0..100 {
assert!(!set.insert(i));
}
for i in 0..100 {
assert_eq!(Some(i), set.remove(&i));
}
for i in 0..100 {
assert_eq!(None, set.remove(&i));
}
}
}
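A brief concurrent-usage sketch (illustrative only, not part of the vendored file): because every method above takes `&self`, an `Arc` is enough to share a `DashSet` across threads, with no external locking.

use dashmap::DashSet;
use std::sync::Arc;
use std::thread;

fn distinct_ids() -> usize {
    let seen = Arc::new(DashSet::new());
    let handles: Vec<_> = (0..4)
        .map(|_| {
            let seen = Arc::clone(&seen);
            thread::spawn(move || {
                for id in 0..100u32 {
                    // `insert` returns true only for the first thread to add a key.
                    seen.insert(id % 50);
                }
            })
        })
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }
    seen.len() // 50 distinct keys, regardless of interleaving
}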


@ -0,0 +1,2 @@
pub mod multiple;
pub mod one;


@ -0,0 +1,25 @@
use crate::mapref;
use core::hash::{BuildHasher, Hash};
use core::ops::Deref;
use std::collections::hash_map::RandomState;
pub struct RefMulti<'a, K, S = RandomState> {
inner: mapref::multiple::RefMulti<'a, K, (), S>,
}
impl<'a, K: Eq + Hash, S: BuildHasher> RefMulti<'a, K, S> {
pub(crate) fn new(inner: mapref::multiple::RefMulti<'a, K, (), S>) -> Self {
Self { inner }
}
pub fn key(&self) -> &K {
self.inner.key()
}
}
impl<'a, K: Eq + Hash, S: BuildHasher> Deref for RefMulti<'a, K, S> {
type Target = K;
fn deref(&self) -> &K {
self.key()
}
}


@ -0,0 +1,29 @@
use crate::mapref;
use core::hash::{BuildHasher, Hash};
use core::ops::Deref;
use std::collections::hash_map::RandomState;
pub struct Ref<'a, K, S = RandomState> {
inner: mapref::one::Ref<'a, K, (), S>,
}
unsafe impl<'a, K: Eq + Hash + Send, S: BuildHasher> Send for Ref<'a, K, S> {}
unsafe impl<'a, K: Eq + Hash + Send + Sync, S: BuildHasher> Sync for Ref<'a, K, S> {}
impl<'a, K: Eq + Hash, S: BuildHasher> Ref<'a, K, S> {
pub(crate) fn new(inner: mapref::one::Ref<'a, K, (), S>) -> Self {
Self { inner }
}
pub fn key(&self) -> &K {
self.inner.key()
}
}
impl<'a, K: Eq + Hash, S: BuildHasher> Deref for Ref<'a, K, S> {
type Target = K;
fn deref(&self) -> &K {
self.key()
}
}

third_party/rust/dashmap/src/t.rs vendored Normal file

@ -0,0 +1,95 @@
//! Central map trait to ease modifications and extensions down the road.
use crate::iter::{Iter, IterMut};
use crate::lock::{RwLockReadGuard, RwLockWriteGuard};
use crate::mapref::entry::Entry;
use crate::mapref::one::{Ref, RefMut};
use crate::HashMap;
use core::borrow::Borrow;
use core::hash::{BuildHasher, Hash};
/// Implementation detail that is exposed due to generic constraints in public types.
pub trait Map<'a, K: 'a + Eq + Hash, V: 'a, S: 'a + Clone + BuildHasher> {
fn _shard_count(&self) -> usize;
/// # Safety
///
/// The index must not be out of bounds.
unsafe fn _get_read_shard(&'a self, i: usize) -> &'a HashMap<K, V, S>;
/// # Safety
///
/// The index must not be out of bounds.
unsafe fn _yield_read_shard(&'a self, i: usize) -> RwLockReadGuard<'a, HashMap<K, V, S>>;
/// # Safety
///
/// The index must not be out of bounds.
unsafe fn _yield_write_shard(&'a self, i: usize) -> RwLockWriteGuard<'a, HashMap<K, V, S>>;
fn _insert(&self, key: K, value: V) -> Option<V>;
fn _remove<Q>(&self, key: &Q) -> Option<(K, V)>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized;
fn _remove_if<Q>(&self, key: &Q, f: impl FnOnce(&K, &V) -> bool) -> Option<(K, V)>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized;
fn _iter(&'a self) -> Iter<'a, K, V, S, Self>
where
Self: Sized;
fn _iter_mut(&'a self) -> IterMut<'a, K, V, S, Self>
where
Self: Sized;
fn _get<Q>(&'a self, key: &Q) -> Option<Ref<'a, K, V, S>>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized;
fn _get_mut<Q>(&'a self, key: &Q) -> Option<RefMut<'a, K, V, S>>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized;
fn _shrink_to_fit(&self);
fn _retain(&self, f: impl FnMut(&K, &mut V) -> bool);
fn _len(&self) -> usize;
fn _capacity(&self) -> usize;
fn _alter<Q>(&self, key: &Q, f: impl FnOnce(&K, V) -> V)
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized;
fn _alter_all(&self, f: impl FnMut(&K, V) -> V);
fn _entry(&'a self, key: K) -> Entry<'a, K, V, S>;
fn _hasher(&self) -> S;
// provided
fn _clear(&self) {
self._retain(|_, _| false)
}
fn _contains_key<Q>(&'a self, key: &Q) -> bool
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
self._get(key).is_some()
}
fn _is_empty(&self) -> bool {
self._len() == 0
}
}

third_party/rust/dashmap/src/util.rs vendored Normal file

@ -0,0 +1,102 @@
//! This module is full of hackery and dark magic.
//! Either spend a day fixing it and quietly submit a PR or don't mention it to anybody.
use core::cell::UnsafeCell;
use core::{mem, ptr};
pub const fn ptr_size_bits() -> usize {
mem::size_of::<usize>() * 8
}
pub fn map_in_place_2<T, U, F: FnOnce(U, T) -> T>((k, v): (U, &mut T), f: F) {
unsafe {
// # Safety
//
// If the closure panics, we must abort otherwise we could double drop `T`
let _promote_panic_to_abort = AbortOnPanic;
ptr::write(v, f(k, ptr::read(v)));
}
}
/// # Safety
///
/// Requires that you ensure the reference does not become invalid.
/// The object has to outlive the reference.
pub unsafe fn change_lifetime_const<'a, 'b, T>(x: &'a T) -> &'b T {
&*(x as *const T)
}
/// # Safety
///
/// Requires that you ensure the reference does not become invalid.
/// The object has to outlive the reference.
pub unsafe fn change_lifetime_mut<'a, 'b, T>(x: &'a mut T) -> &'b mut T {
&mut *(x as *mut T)
}
/// A simple wrapper around `T`
///
/// This is to prevent UB when using `HashMap::get_key_value`, because
/// `HashMap` doesn't expose an API to get the key and value, where
/// the value is a `&mut T`.
///
/// See [#10](https://github.com/xacrimon/dashmap/issues/10) for details
///
/// This type is meant to be an implementation detail, but must be exposed due to the `DashMap::shards` method.
#[repr(transparent)]
pub struct SharedValue<T> {
value: UnsafeCell<T>,
}
impl<T: Clone> Clone for SharedValue<T> {
fn clone(&self) -> Self {
let inner = self.get().clone();
Self {
value: UnsafeCell::new(inner),
}
}
}
unsafe impl<T: Send> Send for SharedValue<T> {}
unsafe impl<T: Sync> Sync for SharedValue<T> {}
impl<T> SharedValue<T> {
/// Create a new `SharedValue<T>`
pub const fn new(value: T) -> Self {
Self {
value: UnsafeCell::new(value),
}
}
/// Get a shared reference to `T`
pub fn get(&self) -> &T {
unsafe { &*self.value.get() }
}
/// Get a unique reference to `T`
pub fn get_mut(&mut self) -> &mut T {
unsafe { &mut *self.value.get() }
}
/// Unwraps the value
pub fn into_inner(self) -> T {
self.value.into_inner()
}
/// Get a mutable raw pointer to the underlying value
pub(crate) fn as_ptr(&self) -> *mut T {
self.value.get()
}
}
struct AbortOnPanic;
impl Drop for AbortOnPanic {
fn drop(&mut self) {
if std::thread::panicking() {
std::process::abort()
}
}
}

File diff suppressed because one or more lines are too long


@ -1,5 +1,7 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "adler"
version = "1.0.2"
@ -52,9 +54,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
[[package]]
name = "cc"
version = "1.0.68"
version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a72c244c1ff497a746a7e1fb3d14bd08420ecda70c8f25c7112f2781652d787"
checksum = "e70cc2f62c6ce1868963827bd677764c62d07c3d9a3e1fb1177ee1a9ab199eb2"
[[package]]
name = "cfg-if"
@ -97,9 +99,9 @@ dependencies = [
[[package]]
name = "env_logger"
version = "0.7.1"
version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36"
checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3"
dependencies = [
"atty",
"humantime",
@ -152,7 +154,7 @@ dependencies = [
[[package]]
name = "glean-core"
version = "39.0.0"
version = "40.0.0"
dependencies = [
"bincode",
"chrono",
@ -174,21 +176,18 @@ dependencies = [
[[package]]
name = "hermit-abi"
version = "0.1.18"
version = "0.1.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c"
checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
dependencies = [
"libc",
]
[[package]]
name = "humantime"
version = "1.3.0"
version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f"
dependencies = [
"quick-error",
]
checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
[[package]]
name = "id-arena"
@ -230,9 +229,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
version = "0.2.95"
version = "0.2.98"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "789da6d93f1b866ffe175afc5322a4d76c038605a1c3319bb57b06967ca98a36"
checksum = "320cfe77175da3a483efed4bc0adc1968ca050b098ce4f2f1c13a56626128790"
[[package]]
name = "lmdb-rkv"
@ -319,9 +318,9 @@ dependencies = [
[[package]]
name = "once_cell"
version = "1.7.2"
version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3"
checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56"
[[package]]
name = "ordered-float"
@ -377,19 +376,13 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5"
[[package]]
name = "proc-macro2"
version = "1.0.27"
version = "1.0.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038"
checksum = "5c7ed8b8c7b886ea3ed7dde405212185f423ab44682667c8c6dd14aa1d9f6612"
dependencies = [
"unicode-xid",
]
[[package]]
name = "quick-error"
version = "1.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0"
[[package]]
name = "quote"
version = "1.0.9"
@ -401,9 +394,9 @@ dependencies = [
[[package]]
name = "rand"
version = "0.8.3"
version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e"
checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8"
dependencies = [
"libc",
"rand_chacha",
@ -413,9 +406,9 @@ dependencies = [
[[package]]
name = "rand_chacha"
version = "0.3.0"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d"
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
dependencies = [
"ppv-lite86",
"rand_core",
@ -423,27 +416,27 @@ dependencies = [
[[package]]
name = "rand_core"
version = "0.6.2"
version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7"
checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7"
dependencies = [
"getrandom",
]
[[package]]
name = "rand_hc"
version = "0.3.0"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73"
checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7"
dependencies = [
"rand_core",
]
[[package]]
name = "redox_syscall"
version = "0.2.8"
version = "0.2.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "742739e41cd49414de871ea5e549afb7e2a3ac77b589bcbebe8c82fab37147fc"
checksum = "5ab49abadf3f9e1c4bc499e8845e152ad87d2ad2d30371841171169e9d75feee"
dependencies = [
"bitflags",
]
@ -519,9 +512,9 @@ dependencies = [
[[package]]
name = "syn"
version = "1.0.72"
version = "1.0.74"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1e8cdbefb79a9a5a65e0db8b47b723ee907b7c7f8496c76a1770b5c310bab82"
checksum = "1873d832550d4588c3dbc20f01361ab00bfe741048f71e3fecf145a7cc18b29c"
dependencies = [
"proc-macro2",
"quote",
@ -553,18 +546,18 @@ dependencies = [
[[package]]
name = "thiserror"
version = "1.0.25"
version = "1.0.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fa6f76457f59514c7eeb4e59d891395fab0b2fd1d40723ae737d64153392e9c6"
checksum = "93119e4feac1cbe6c798c34d3a53ea0026b0b1de6a120deef895137c0529bfe2"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.25"
version = "1.0.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a36768c0fbf1bb15eca10defa29526bda730a2376c2ab4393ccfa16fb1a318d"
checksum = "060d69a0afe7796bf42e9e2ff91f5ee691fb15c53d38b4b62a9a53eb23164745"
dependencies = [
"proc-macro2",
"quote",
@ -583,9 +576,9 @@ dependencies = [
[[package]]
name = "tinyvec"
version = "1.2.0"
version = "1.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b5220f05bb7de7f3f53c7c065e1199b3172696fe2db9f9c4d8ad9b4ee74c342"
checksum = "848a1e1181b9f6753b5e96a092749e29b11d19ede67dfbbd6c7dc7e0f49b5338"
dependencies = [
"tinyvec_macros",
]
@ -607,9 +600,9 @@ dependencies = [
[[package]]
name = "unicode-normalization"
version = "0.1.18"
version = "0.1.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "33717dca7ac877f497014e10d73f3acf948c342bee31b5ca7892faf94ccc6b49"
checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9"
dependencies = [
"tinyvec",
]


@ -13,7 +13,7 @@
[package]
edition = "2018"
name = "glean-core"
version = "39.0.0"
version = "40.0.0"
authors = ["Jan-Erik Rediger <jrediger@mozilla.com>", "The Glean Team <glean-team@mozilla.com>"]
include = ["/README.md", "/LICENSE", "/src", "/examples", "/tests", "/Cargo.toml"]
description = "A modern Telemetry library"
@ -22,7 +22,7 @@ keywords = ["telemetry"]
license = "MPL-2.0"
repository = "https://github.com/mozilla/glean"
[package.metadata.glean]
glean-parser = "3.4.0"
glean-parser = "3.6.0"
[dependencies.bincode]
version = "1.2.1"
@ -63,7 +63,7 @@ version = "0.1.0"
version = "0.1.12"
[dev-dependencies.env_logger]
version = "0.7.1"
version = "0.8.0"
features = ["termcolor", "atty", "humantime"]
default-features = false


@ -149,7 +149,7 @@ mod backend {
if should_delete {
log::debug!("Need to delete remaining LMDB files.");
delete_lmdb_database(&path);
delete_lmdb_database(path);
}
log::debug!("Migration ended. Safe-mode database in {}", path.display());
@ -296,7 +296,7 @@ impl Database {
fn open_rkv(path: &Path) -> Result<Rkv> {
fs::create_dir_all(&path)?;
let rkv = rkv_new(&path)?;
let rkv = rkv_new(path)?;
migrate(path, &rkv);
log::info!("Database initialized");


@ -234,7 +234,7 @@ fn validate_source_tags(tags: &Vec<String>) -> bool {
return false;
}
tags.iter().all(|x| validate_tag(&x))
tags.iter().all(|x| validate_tag(x))
}
#[cfg(test)]


@ -26,6 +26,7 @@ use crate::Lifetime;
/// Note: the cases in this enum must be kept in sync with the ones
/// in the platform-specific code (e.g. `ErrorType.kt`) and with the
/// metrics in the registry files.
// When adding a new error type ensure it's also added to `ErrorType::iter()` below.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum ErrorType {
/// For when the value to be recorded does not match the metric-specific restrictions
@ -48,6 +49,27 @@ impl ErrorType {
ErrorType::InvalidOverflow => "invalid_overflow",
}
}
/// Return an iterator over all possible error types.
///
/// ```
/// # use glean_core::ErrorType;
/// let errors = ErrorType::iter();
/// let all_errors = errors.collect::<Vec<_>>();
/// assert_eq!(4, all_errors.len());
/// ```
pub fn iter() -> impl Iterator<Item = Self> {
// N.B.: This has no compile-time guarantees that it is complete.
// New `ErrorType` variants will need to be added manually.
[
ErrorType::InvalidValue,
ErrorType::InvalidLabel,
ErrorType::InvalidState,
ErrorType::InvalidOverflow,
]
.iter()
.copied()
}
}
impl TryFrom<i32> for ErrorType {

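A hedged sketch of how a caller might use the new iterator (the helper below is hypothetical and not part of this patch); it relies only on `ErrorType::iter()` and the derived `Debug` impl shown above:

use glean_core::ErrorType;
use std::collections::HashMap;

fn zeroed_error_counts() -> HashMap<String, u32> {
    // One zeroed counter per known error type.
    ErrorType::iter().map(|e| (format!("{:?}", e), 0)).collect()
}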

@ -208,7 +208,7 @@ impl EventDatabase {
store.push(event.clone());
self.write_event_to_disk(store_name, &event_json);
if store.len() == glean.get_max_events() {
stores_to_submit.push(&store_name);
stores_to_submit.push(store_name);
}
}
}
@ -366,7 +366,7 @@ mod test {
let t = tempfile::tempdir().unwrap();
{
let db = EventDatabase::new(&t.path()).unwrap();
let db = EventDatabase::new(t.path()).unwrap();
db.write_event_to_disk("events", "{\"timestamp\": 500");
db.write_event_to_disk("events", "{\"timestamp\"");
db.write_event_to_disk(
@ -376,7 +376,7 @@ mod test {
}
{
let db = EventDatabase::new(&t.path()).unwrap();
let db = EventDatabase::new(t.path()).unwrap();
db.load_events_from_disk().unwrap();
let events = &db.event_stores.read().unwrap()["events"];
assert_eq!(1, events.len());
@ -448,11 +448,8 @@ mod test {
extra: Some(data),
};
assert_eq!(
event_empty,
serde_json::from_str(&event_empty_json).unwrap()
);
assert_eq!(event_data, serde_json::from_str(&event_data_json).unwrap());
assert_eq!(event_empty, serde_json::from_str(event_empty_json).unwrap());
assert_eq!(event_data, serde_json::from_str(event_data_json).unwrap());
}
#[test]


@ -401,7 +401,7 @@ impl Glean {
///
/// Whether at least one ping was generated.
pub fn on_ready_to_submit_pings(&self) -> bool {
self.event_data_store.flush_pending_events_on_startup(&self)
self.event_data_store.flush_pending_events_on_startup(self)
}
/// Sets whether upload is enabled or not.
@ -560,7 +560,7 @@ impl Glean {
/// Gets a handle to the database.
pub fn storage(&self) -> &Database {
&self.data_store.as_ref().expect("No database found")
self.data_store.as_ref().expect("No database found")
}
/// Gets a handle to the event database.
@ -614,7 +614,7 @@ impl Glean {
/// The snapshot in a string encoded as JSON. If the snapshot is empty, returns an empty string.
pub fn snapshot(&mut self, store_name: &str, clear_store: bool) -> String {
StorageManager
.snapshot(&self.storage(), store_name, clear_store)
.snapshot(self.storage(), store_name, clear_store)
.unwrap_or_else(|| String::from(""))
}
@ -653,7 +653,7 @@ impl Glean {
let ping_maker = PingMaker::new();
let doc_id = Uuid::new_v4().to_string();
let url_path = self.make_path(&ping.name, &doc_id);
match ping_maker.collect(self, &ping, reason, &doc_id, &url_path) {
match ping_maker.collect(self, ping, reason, &doc_id, &url_path) {
None => {
log::info!(
"No content for ping '{}', therefore no ping queued.",
@ -668,10 +668,10 @@ impl Glean {
// be included in the *next* metrics ping.
self.additional_metrics
.pings_submitted
.get(&ping.name)
.add(&self, 1);
.get(ping.name)
.add(self, 1);
if let Err(e) = ping_maker.store_ping(&self.get_data_path(), &ping) {
if let Err(e) = ping_maker.store_ping(self.get_data_path(), &ping) {
log::warn!("IO error while writing ping to file: {}. Enqueuing upload of what we have in memory.", e);
self.additional_metrics.io_errors.add(self, 1);
// `serde_json::to_string` only fails if serialization of the content
@ -772,8 +772,8 @@ impl Glean {
branch: String,
extra: Option<HashMap<String, String>>,
) {
let metric = metrics::ExperimentMetric::new(&self, experiment_id);
metric.set_active(&self, branch, extra);
let metric = metrics::ExperimentMetric::new(self, experiment_id);
metric.set_active(self, branch, extra);
}
/// Indicates that an experiment is no longer running.
@ -782,8 +782,8 @@ impl Glean {
///
/// * `experiment_id` - The id of the active experiment to deactivate (maximum 30 bytes).
pub fn set_experiment_inactive(&self, experiment_id: String) {
let metric = metrics::ExperimentMetric::new(&self, experiment_id);
metric.set_inactive(&self);
let metric = metrics::ExperimentMetric::new(self, experiment_id);
metric.set_inactive(self);
}
/// Persists [`Lifetime::Ping`] data that might be in memory in case
@ -995,8 +995,8 @@ impl Glean {
///
/// if the requested experiment is active, `None` otherwise.
pub fn test_get_experiment_data_as_json(&self, experiment_id: String) -> Option<String> {
let metric = metrics::ExperimentMetric::new(&self, experiment_id);
metric.test_get_value_as_json_string(&self)
let metric = metrics::ExperimentMetric::new(self, experiment_id);
metric.test_get_value_as_json_string(self)
}
/// **Test-only API (exported for FFI purposes).**
@ -1025,7 +1025,7 @@ impl Glean {
/// If Glean was configured with `use_core_mps: false`, this has no effect.
pub fn start_metrics_ping_scheduler(&self) {
if self.schedule_metrics_pings {
scheduler::schedule(&self);
scheduler::schedule(self);
}
}
}


@ -117,7 +117,7 @@ impl DatetimeMetric {
return;
}
let value = value.unwrap_or_else(|| local_now_with_offset_and_record(&glean));
let value = value.unwrap_or_else(|| local_now_with_offset_and_record(glean));
let value = Metric::Datetime(value, self.time_unit);
glean.storage().record(glean, &self.meta, &value)
}


@ -159,7 +159,7 @@ where
let label = self.static_label(label);
self.new_metric_with_name(combine_base_identifier_and_label(
&self.submetric.meta().name,
&label,
label,
))
}
None => self.new_metric_with_dynamic_label(label.to_string()),
@ -222,7 +222,7 @@ pub fn validate_dynamic_label(
for store in &meta.send_in_pings {
glean
.storage()
.iter_store_from(lifetime, store, Some(&prefix), &mut snapshotter);
.iter_store_from(lifetime, store, Some(prefix), &mut snapshotter);
}
let error = if label_count >= MAX_LABELS {


@ -64,7 +64,7 @@ impl StringMetric {
/// Non-exported API used for crate-internal storage.
/// Gets the current-stored value as a string, or None if there is no value.
pub(crate) fn get_value(&self, glean: &Glean, storage_name: &str) -> Option<String> {
self.test_get_value(&glean, &storage_name)
self.test_get_value(glean, storage_name)
}
/// **Test-only API (exported for FFI purposes).**


@ -69,7 +69,7 @@ impl UuidMetric {
return;
}
if let Ok(uuid) = uuid::Uuid::parse_str(&value) {
if let Ok(uuid) = uuid::Uuid::parse_str(value) {
self.set(glean, uuid);
} else {
let msg = format!("Unexpected UUID value '{}'", value);


@ -112,7 +112,7 @@ impl PingMaker {
let start_time_data = start_time
.get_value(glean, INTERNAL_STORAGE)
.unwrap_or_else(|| glean.start_time());
let end_time_data = local_now_with_offset_and_record(&glean);
let end_time_data = local_now_with_offset_and_record(glean);
// Update the start time with the current time.
start_time.set(glean, Some(end_time_data));


@ -52,7 +52,7 @@ impl MetricsPingSubmitter for GleanMetricsPingSubmitter {
fn submit_metrics_ping(&self, glean: &Glean, reason: Option<&str>, now: DateTime<FixedOffset>) {
glean.submit_ping_by_name("metrics", reason);
// Always update the collection date, irrespective of the ping being sent.
get_last_sent_time_metric().set(&glean, Some(now));
get_last_sent_time_metric().set(glean, Some(now));
}
}
@ -84,7 +84,7 @@ pub fn schedule(glean: &Glean) {
let submitter = GleanMetricsPingSubmitter {};
let scheduler = GleanMetricsPingScheduler {};
schedule_internal(&glean, submitter, scheduler, now)
schedule_internal(glean, submitter, scheduler, now)
}
/// Tells the scheduler task to exit quickly and cleanly.
@ -101,21 +101,21 @@ fn schedule_internal(
now: DateTime<FixedOffset>,
) {
let last_sent_build_metric = get_last_sent_build_metric();
if let Some(last_sent_build) = last_sent_build_metric.get_value(&glean, INTERNAL_STORAGE) {
if let Some(last_sent_build) = last_sent_build_metric.get_value(glean, INTERNAL_STORAGE) {
// If `app_build` is longer than StringMetric's max length, we will always
// treat it as a changed build when really it isn't.
// This will be externally-observable as InvalidOverflow errors on both the core
// `client_info.app_build` metric and the scheduler's internal metric.
if last_sent_build != glean.app_build {
last_sent_build_metric.set(&glean, &glean.app_build);
last_sent_build_metric.set(glean, &glean.app_build);
log::info!("App build changed. Sending 'metrics' ping");
submitter.submit_metrics_ping(&glean, Some("upgrade"), now);
submitter.submit_metrics_ping(glean, Some("upgrade"), now);
scheduler.start_scheduler(submitter, now, When::Reschedule);
return;
}
}
let last_sent_time = get_last_sent_time_metric().get_value(&glean, INTERNAL_STORAGE);
let last_sent_time = get_last_sent_time_metric().get_value(glean, INTERNAL_STORAGE);
if let Some(last_sent) = last_sent_time {
log::info!("The 'metrics' ping was last sent on {}", last_sent);
}
@ -137,7 +137,7 @@ fn schedule_internal(
} else if now > now.date().and_hms(SCHEDULED_HOUR, 0, 0) {
// Case #2
log::info!("Sending the 'metrics' ping immediately, {}", now);
submitter.submit_metrics_ping(&glean, Some("overdue"), now);
submitter.submit_metrics_ping(glean, Some("overdue"), now);
scheduler.start_scheduler(submitter, now, When::Reschedule);
} else {
// Case #3


@ -86,7 +86,7 @@ impl StorageManager {
let mut snapshotter = |metric_id: &[u8], metric: &Metric| {
let metric_id = String::from_utf8_lossy(metric_id).into_owned();
if metric_id.contains('/') {
snapshot_labeled_metrics(&mut snapshot, &metric_id, &metric);
snapshot_labeled_metrics(&mut snapshot, &metric_id, metric);
} else {
let map = snapshot
.entry(metric.ping_section().into())
@ -95,9 +95,9 @@ impl StorageManager {
}
};
storage.iter_store_from(Lifetime::Ping, &store_name, None, &mut snapshotter);
storage.iter_store_from(Lifetime::Application, &store_name, None, &mut snapshotter);
storage.iter_store_from(Lifetime::User, &store_name, None, &mut snapshotter);
storage.iter_store_from(Lifetime::Ping, store_name, None, &mut snapshotter);
storage.iter_store_from(Lifetime::Application, store_name, None, &mut snapshotter);
storage.iter_store_from(Lifetime::User, store_name, None, &mut snapshotter);
if clear_store {
if let Err(e) = storage.clear_ping_lifetime_storage(store_name) {
@ -139,7 +139,7 @@ impl StorageManager {
}
};
storage.iter_store_from(metric_lifetime, &store_name, None, &mut snapshotter);
storage.iter_store_from(metric_lifetime, store_name, None, &mut snapshotter);
snapshot
}


@ -306,7 +306,7 @@ impl PingUploadManager {
Ok(request) => Some(request),
Err(e) => {
log::warn!("Error trying to build ping request: {}", e);
self.directory_manager.delete_file(&document_id);
self.directory_manager.delete_file(document_id);
// Record the error.
// Currently the only possible error is PingBodyOverflow.
@ -420,7 +420,7 @@ impl PingUploadManager {
deleting = true;
}
if deleting && self.directory_manager.delete_file(&document_id) {
if deleting && self.directory_manager.delete_file(document_id) {
self.upload_metrics
.deleted_pings_after_quota_hit
.add(glean, 1);
@ -668,7 +668,7 @@ impl PingUploadManager {
document_id,
status
);
self.enqueue_ping_from_file(glean, &document_id);
self.enqueue_ping_from_file(glean, document_id);
self.recoverable_failure_count
.fetch_add(1, Ordering::SeqCst);
}


@ -103,7 +103,7 @@ pub(crate) fn local_now_with_offset_and_record(glean: &Glean) -> DateTime<FixedO
glean
.additional_metrics
.invalid_timezone_offset
.add(&glean, 1);
.add(glean, 1);
}
now


@ -179,9 +179,7 @@ fn test_that_truncation_works() {
assert_eq!(
t.expected_result,
metric
.test_get_value_as_string(&glean, &store_name)
.unwrap()
metric.test_get_value_as_string(&glean, store_name).unwrap()
);
}
}


@ -0,0 +1 @@
{"files":{"Cargo.toml":"15923ad1a32a492f6bd0e393eb8cee6a220612cabc49aa340ef500574f60ed82","LICENSE":"1f256ecad192880510e84ad60474eab7589218784b9a50bc7ceee34c2b91f1d5","README.md":"bfe00cc2501c9b15d5bc463c6db30ebbf8d7b5d6c555cf3827ae529fc9e7d6cc","cbindgen.toml":"ac25d1bc2ab7d6afaf25cfa0d35233f93b01f7129088cdd1fa89b9d987a8c564","glean.h":"591e297a1a9abcd3b517c291cadc507fae773f6777f536709cbef3eb63a394f9","src/boolean.rs":"0d1d59d0c13cdb63592a9513f2abcf3d1a8260d6523cc7d1af40cfcb4c75572a","src/byte_buffer.rs":"eeb6df25da7b393517f0c993e1e99a0acbccd7678b1127ce0e471d0e53a4bb45","src/counter.rs":"4d8f41285e4a9dbfa2733cdf937905006b475c0af7a501df73fde4ca77818e82","src/custom_distribution.rs":"b0b3b23289e413e7d150e8dae8217e6bd409cbbab68abb127f146041bcbfaf45","src/datetime.rs":"a5c1c993605b1a8ff044d88de4f4a385aff1a781cb8cb45573b90882da801fae","src/event.rs":"ef6dd4f0493ae223e4f7091d5779e46b33ea9864c2a4e5953811a7d9e8884c32","src/fd_logger.rs":"0f8197bb086f49413cca30a72bae029f663842fc3b78ceef6d0854a070b1cdfd","src/ffi_string_ext.rs":"389ae94dcdace1f56ca2c87207e865edda6d42da45733f1027e0a4dcfa86c492","src/from_raw.rs":"b17515a58d7e303ee746ea54c1c1c6d0523dc4de0bd8157dfaba2a610da637bb","src/handlemap_ext.rs":"3b1b088a2de197a0c3eaae0f2f0915d53602f51c13d27f48297f52cd885d5abc","src/jwe.rs":"72adff64900ca16d6527e8d6a436ac2ba85f738d6e92e33f3d71df32b485d0c3","src/labeled.rs":"93630c68c00b0b9fa74ff2483fbf92bb62620ec003705d495381dbddadbb5c54","src/lib.rs":"13a8d2fc4220a22a6fa277b6f0a2503b0f93fe023baa3c66e65ba1cf0d12e2ff","src/macros.rs":"e11614edb156552617354d7f6120c8e60ffeb6632ebc19d2b7c6c3e88523b01b","src/memory_distribution.rs":"08ef15e340f2e7ab2f4f83cd8e883c864d4affb94458e80198c106393bfb6bd8","src/ping_type.rs":"6401bcf4342ec1e4ba3782e2b67b3320aa96c9eddc03fc3c75ecc54e2f08a619","src/quantity.rs":"f72781ea642b5f7e363e9fecaded143d1afd772575603763543f1df3448ec337","src/string.rs":"199a238f3524eb36643d82b63df5c7f013adedb6af80632a2675f8562f34e692","src/string_list.rs":"12e2fbbdc08a1b8da1885fb14acd59ab27c8b598a24dc15a4eaca16636540a54","src/timespan.rs":"b7ac51dbfd5169d8c688c3fd2db51e38b6173c925ca14d7b0e8353f225b30a50","src/timing_distribution.rs":"4b5962729fb0b4c9ebf65a5fc5af105357652fcc282c6f8840f328452ba86ac6","src/upload.rs":"320c6e97df0a87040d2a269765401cd67da50f0a226c95a9a314f22452116f7c","src/uuid.rs":"c9ea7225fac53b55a8aeef39cd33470228c0a178185aa74b8fa652657994e404","src/weak.rs":"0199f4ef38d667f0b9f8ef3c5505ff15cd6e911bc83c27e7e9954fdffe1be0bb"},"package":"0da7ce4e6115c834a8da23c9978e007c047284eb136689d76538ecb0ef6ff69c"}

third_party/rust/glean-ffi/Cargo.toml vendored Normal file

@ -0,0 +1,71 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
edition = "2018"
name = "glean-ffi"
version = "40.0.0"
authors = ["Jan-Erik Rediger <jrediger@mozilla.com>", "The Glean Team <glean-team@mozilla.com>"]
include = ["/README.md", "/LICENSE", "/src", "/tests", "/Cargo.toml", "/cbindgen.toml", "/glean.h"]
description = "FFI layer for Glean, a modern Telemetry library"
readme = "README.md"
keywords = ["telemetry"]
license = "MPL-2.0"
repository = "https://github.com/mozilla/glean"
[lib]
name = "glean_ffi"
crate-type = ["lib"]
[dependencies.ffi-support]
version = "0.4.0"
[dependencies.glean-core]
version = "40.0.0"
[dependencies.log]
version = "0.4.8"
[dependencies.once_cell]
version = "1.2.0"
[dependencies.serde]
version = "1.0.104"
features = ["derive"]
[dependencies.serde_json]
version = "1.0.44"
[dependencies.uuid]
version = "0.8.1"
features = ["v4"]
[features]
rkv-safe-mode = ["glean-core/rkv-safe-mode"]
[target."cfg(not(target_os = \"android\"))".dependencies.env_logger]
version = "0.8.0"
features = ["termcolor", "atty", "humantime"]
default-features = false
[target."cfg(target_os = \"android\")".dependencies.android_logger]
version = "0.10.0"
default-features = false
[target."cfg(target_os = \"ios\")".dependencies.oslog]
version = "0.1.0"
features = ["logger"]
default-features = false
[target."cfg(unix)".dependencies.libc]
version = "0.2.82"
[badges.circle-ci]
branch = "main"
repository = "mozilla/glean"
[badges.maintenance]
status = "actively-developed"

third_party/rust/glean-ffi/LICENSE vendored Normal file

@ -0,0 +1,373 @@
Mozilla Public License Version 2.0
==================================
1. Definitions
--------------
1.1. "Contributor"
means each individual or legal entity that creates, contributes to
the creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used
by a Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached
the notice in Exhibit A, the Executable Form of such Source Code
Form, and Modifications of such Source Code Form, in each case
including portions thereof.
1.5. "Incompatible With Secondary Licenses"
means
(a) that the initial Contributor has attached the notice described
in Exhibit B to the Covered Software; or
(b) that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the
terms of a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in
a separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible,
whether at the time of the initial grant or subsequently, any and
all of the rights conveyed by this License.
1.10. "Modifications"
means any of the following:
(a) any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered
Software; or
(b) any new file in Source Code Form that contains any Covered
Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the
License, by the making, using, selling, offering for sale, having
made, import, or transfer of either its Contributions or its
Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU
Lesser General Public License, Version 2.1, the GNU Affero General
Public License, Version 3.0, or any later versions of those
licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that
controls, is controlled by, or is under common control with You. For
purposes of this definition, "control" means (a) the power, direct
or indirect, to cause the direction or management of such entity,
whether by contract or otherwise, or (b) ownership of more than
fifty percent (50%) of the outstanding shares or beneficial
ownership of such entity.
2. License Grants and Conditions
--------------------------------
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
(a) under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
(b) under Patent Claims of such Contributor to make, use, sell, offer
for sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
(a) for any code that a Contributor has removed from Covered Software;
or
(b) for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
(c) under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights
to grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
in Section 2.1.
3. Responsibilities
-------------------
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
(a) such Covered Software must also be made available in Source Code
Form, as described in Section 3.1, and You must inform recipients of
the Executable Form how they can obtain a copy of such Source Code
Form by reasonable means in a timely manner, at a charge no more
than the cost of distribution to the recipient; and
(b) You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter
the recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty,
or limitations of liability) contained within the Source Code Form of
the Covered Software, except that You may alter any license notices to
the extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
---------------------------------------------------
If it is impossible for You to comply with any of the terms of this
License with respect to some or all of the Covered Software due to
statute, judicial order, or regulation then You must: (a) comply with
the terms of this License to the maximum extent possible; and (b)
describe the limitations and the code they affect. Such description must
be placed in a text file included with all distributions of the Covered
Software under this License. Except to the extent prohibited by statute
or regulation, such description must be sufficiently detailed for a
recipient of ordinary skill to be able to understand it.
5. Termination
--------------
5.1. The rights granted under this License will terminate automatically
if You fail to comply with any of its terms. However, if You become
compliant, then the rights granted under this License from a particular
Contributor are reinstated (a) provisionally, unless and until such
Contributor explicitly and finally terminates Your grants, and (b) on an
ongoing basis, if such Contributor fails to notify You of the
non-compliance by some reasonable means prior to 60 days after You have
come back into compliance. Moreover, Your grants from a particular
Contributor are reinstated on an ongoing basis if such Contributor
notifies You of the non-compliance by some reasonable means, this is the
first time You have received notice of non-compliance with this License
from such Contributor, and You become compliant prior to 30 days after
Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all
end user license agreements (excluding distributors and resellers) which
have been validly granted by You or Your distributors under this License
prior to termination shall survive termination.
************************************************************************
* *
* 6. Disclaimer of Warranty *
* ------------------------- *
* *
* Covered Software is provided under this License on an "as is" *
* basis, without warranty of any kind, either expressed, implied, or *
* statutory, including, without limitation, warranties that the *
* Covered Software is free of defects, merchantable, fit for a *
* particular purpose or non-infringing. The entire risk as to the *
* quality and performance of the Covered Software is with You. *
* Should any Covered Software prove defective in any respect, You *
* (not any Contributor) assume the cost of any necessary servicing, *
* repair, or correction. This disclaimer of warranty constitutes an *
* essential part of this License. No use of any Covered Software is *
* authorized under this License except under this disclaimer. *
* *
************************************************************************
************************************************************************
* *
* 7. Limitation of Liability *
* -------------------------- *
* *
* Under no circumstances and under no legal theory, whether tort *
* (including negligence), contract, or otherwise, shall any *
* Contributor, or anyone who distributes Covered Software as *
* permitted above, be liable to You for any direct, indirect, *
* special, incidental, or consequential damages of any character *
* including, without limitation, damages for lost profits, loss of *
* goodwill, work stoppage, computer failure or malfunction, or any *
* and all other commercial damages or losses, even if such party *
* shall have been informed of the possibility of such damages. This *
* limitation of liability shall not apply to liability for death or *
* personal injury resulting from such party's negligence to the *
* extent applicable law prohibits such limitation. Some *
* jurisdictions do not allow the exclusion or limitation of *
* incidental or consequential damages, so this exclusion and *
* limitation may not apply to You. *
* *
************************************************************************
8. Litigation
-------------
Any litigation relating to this License may be brought only in the
courts of a jurisdiction where the defendant maintains its principal
place of business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions.
Nothing in this Section shall prevent a party's ability to bring
cross-claims or counter-claims.
9. Miscellaneous
----------------
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides
that the language of a contract shall be construed against the drafter
shall not be used to construe this License against a Contributor.
10. Versions of the License
---------------------------
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
-------------------------------------------
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular
file, then You may include the notice in a location (such as a LICENSE
file in a relevant directory) where a recipient would be likely to look
for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
---------------------------------------------------------
This Source Code Form is "Incompatible With Secondary Licenses", as
defined by the Mozilla Public License, v. 2.0.

24
third_party/rust/glean-ffi/README.md vendored Normal file
View File

@ -0,0 +1,24 @@
# Glean SDK
The `Glean SDK` is a modern approach for a Telemetry library and is part of the [Glean project](https://docs.telemetry.mozilla.org/concepts/glean/glean.html).
## `glean-ffi`
This library provides the FFI (Foreign function interface) for the Glean SDK.
Platform-specific code wraps this library to provide a platform-specific Glean API.
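As a rough illustration of what such a wrapper does under the hood, the sketch below initializes Glean through the C API declared in the vendored `glean.h`. It is only a sketch: the data directory, application name, and error handling are placeholder values chosen for the example, not values this crate prescribes.

```c
#include <stdio.h>
#include "glean.h"

int main(void) {
    /* Placeholder configuration values; a real language binding fills these
     * in from its own settings. */
    struct FfiConfiguration cfg = {
        .data_dir = "/tmp/glean-data",
        .package_name = "org.example.app",
        .language_binding_name = "C",
        .upload_enabled = 1,
        .max_events = NULL, /* NULL here leaves the event limit unset */
        .delay_ping_lifetime_io = 0,
    };

    if (!glean_initialize(&cfg)) {
        fprintf(stderr, "Glean failed to initialize\n");
        return 1;
    }
    return 0;
}
```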
## Documentation
All documentation is available online:
* [The Glean SDK Book][book]
* [API documentation][apidocs]
[book]: https://mozilla.github.io/glean/
[apidocs]: https://mozilla.github.io/glean/docs/glean_ffi/index.html
## License
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/

View File

@ -0,0 +1,24 @@
header = """/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/* DO NOT MODIFY THIS MANUALLY! This file was generated using cbindgen.
* To generate this file:
* 1. Get the latest cbindgen using `cargo install --force cbindgen`
* a. Alternatively, you can clone `https://github.com/eqrion/cbindgen` and use a tagged release
* 2. Run `make cbindgen`
*/
"""
language = "C"
[parse.expand]
crates = ["glean-ffi"]
[parse]
parse_deps = true
include = ["glean-core", "ffi-support"]
[enum]
prefix_with_name = true

810
third_party/rust/glean-ffi/glean.h vendored Normal file
View File

@ -0,0 +1,810 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/* DO NOT MODIFY THIS MANUALLY! This file was generated using cbindgen.
* To generate this file:
* 1. Get the latest cbindgen using `cargo install --force cbindgen`
* a. Alternatively, you can clone `https://github.com/eqrion/cbindgen` and use a tagged release
* 2. Run `make cbindgen`
*/
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
/**
* A recoverable error.
*/
#define UPLOAD_RESULT_RECOVERABLE 1
/**
* An unrecoverable error.
*/
#define UPLOAD_RESULT_UNRECOVERABLE 2
/**
* An HTTP response code.
*
* The actual response code is encoded in the lower bits.
*/
#define UPLOAD_RESULT_HTTP_STATUS 32768
/**
* The supported metrics' lifetimes.
*
* A metric's lifetime determines when its stored data gets reset.
*/
enum Lifetime {
/**
* The metric is reset with each sent ping
*/
Lifetime_Ping,
/**
* The metric is reset on application restart
*/
Lifetime_Application,
/**
* The metric is reset with each user profile
*/
Lifetime_User,
};
typedef int32_t Lifetime;
/**
* Different resolutions supported by the memory related metric types (e.g.
* MemoryDistributionMetric).
*/
enum MemoryUnit {
/**
* 1 byte
*/
MemoryUnit_Byte,
/**
* 2^10 bytes
*/
MemoryUnit_Kilobyte,
/**
* 2^20 bytes
*/
MemoryUnit_Megabyte,
/**
* 2^30 bytes
*/
MemoryUnit_Gigabyte,
};
typedef int32_t MemoryUnit;
/**
* Different resolutions supported by the time related
* metric types (e.g. DatetimeMetric).
*/
enum TimeUnit {
/**
* Truncate to nanosecond precision.
*/
TimeUnit_Nanosecond,
/**
* Truncate to microsecond precision.
*/
TimeUnit_Microsecond,
/**
* Truncate to millisecond precision.
*/
TimeUnit_Millisecond,
/**
* Truncate to second precision.
*/
TimeUnit_Second,
/**
* Truncate to minute precision.
*/
TimeUnit_Minute,
/**
* Truncate to hour precision.
*/
TimeUnit_Hour,
/**
* Truncate to day precision.
*/
TimeUnit_Day,
};
typedef int32_t TimeUnit;
/**
* `FfiStr<'a>` is a safe (`#[repr(transparent)]`) wrapper around a
* nul-terminated `*const c_char` (e.g. a C string). Conceptually, it is
* similar to [`std::ffi::CStr`], except that it may be used in the signatures
* of extern "C" functions.
*
* Functions accepting strings should use this instead of accepting a C string
* directly. This allows us to write those functions using safe code without
* allowing safe Rust to cause memory unsafety.
*
* A single function for constructing these from Rust ([`FfiStr::from_raw`])
* has been provided. Most of the time, this should not be necessary, and users
* should accept `FfiStr` in the parameter list directly.
*
* ## Caveats
*
* An effort has been made to make this struct hard to misuse; however, it is
* still possible if the `'static` lifetime is manually specified in the
* struct. E.g.
*
* ```rust,no_run
* # use ffi_support::FfiStr;
* // NEVER DO THIS
* #[no_mangle]
* extern "C" fn never_do_this(s: FfiStr<'static>) {
* // save `s` somewhere, and access it after this
* // function returns.
* }
* ```
*
* Instead, one of the following patterns should be used:
*
* ```
* # use ffi_support::FfiStr;
* #[no_mangle]
* extern "C" fn valid_use_1(s: FfiStr<'_>) {
* // Use of `s` after this function returns is impossible
* }
* // Alternative:
* #[no_mangle]
* extern "C" fn valid_use_2(s: FfiStr) {
* // Use of `s` after this function returns is impossible
* }
* ```
*/
typedef const char *FfiStr;
/**
* Configuration over FFI.
*
* **CAUTION**: This must match _exactly_ the definition on the Kotlin side.
* If this side is changed, the Kotlin side needs to be changed, too.
*/
typedef struct FfiConfiguration {
FfiStr data_dir;
FfiStr package_name;
FfiStr language_binding_name;
uint8_t upload_enabled;
const int32_t *max_events;
uint8_t delay_ping_lifetime_io;
} FfiConfiguration;
typedef const char *const *RawStringArray;
/**
* ByteBuffer is a struct that represents an array of bytes to be sent over the FFI boundaries.
* There are several cases when you might want to use this, but the primary one for us
* is for returning protobuf-encoded data to Swift and Java. The type is currently rather
* limited (implementing almost no functionality); however, in the future it may be
* expanded.
*
* ## Caveats
*
* Note that the order of the fields is `len` (an i32) then `data` (a `*mut u8`); getting
* this wrong on the other side of the FFI will cause memory corruption and crashes.
* `i32` is used for the length instead of `u64` and `usize` because JNA has interop
* issues with both these types.
*
* ByteBuffer does not implement Drop. This is intentional. Memory passed into it will
* be leaked if it is not explicitly destroyed by calling [`ByteBuffer::destroy`]. This
* is because in the future, we may allow its use for passing data into Rust code.
* ByteBuffer assuming ownership of the data would make this a problem.
*
* ## Layout/fields
*
* This struct's fields are not `pub` (mostly so that we can soundly implement `Send`, but also so
* that we can verify Rust users are constructing them appropriately), but the fields, their types, and
* their order are *very much* a part of the public API of this type. Consumers on the other side
* of the FFI will need to know its layout.
*
* If this were a C struct, it would look like
*
* ```c,no_run
* struct ByteBuffer {
* int32_t len;
* uint8_t *data; // note: nullable
* };
* ```
*
* In Rust, there are two fields, in this order: `len: i32`, and `data: *mut u8`.
*
* ### Description of fields
*
* `data` is a pointer to an array of `len` bytes. Note that `data` can be a null pointer and therefore
* should be checked.
*
* The bytes array is allocated on the heap and must be freed on it as well. Critically, if there
* are multiple Rust packages being used in the same application, it *must be freed on the
* same heap that allocated it*, or you will corrupt both heaps.
*/
typedef struct ByteBuffer {
int32_t len;
uint8_t *data;
} ByteBuffer;
/**
* An FFI-compatible representation for the PingUploadTask.
*
* This is exposed as a C-compatible tagged union, like this:
*
* ```c
* enum FfiPingUploadTask_Tag {
* FfiPingUploadTask_Upload,
* FfiPingUploadTask_Wait,
* FfiPingUploadTask_Done,
* };
* typedef uint8_t FfiPingUploadTask_Tag;
*
* typedef struct {
* FfiPingUploadTask_Tag tag;
* char *document_id;
* char *path;
* char *body;
* char *headers;
* } FfiPingUploadTask_Upload_Body;
*
* typedef union {
* FfiPingUploadTask_Tag tag;
* FfiPingUploadTask_Upload_Body upload;
* } FfiPingUploadTask;
*
* ```
*
* It is therefore always valid to read the `tag` field of the returned union (always the first
* field in memory).
*
* Language bindings should turn this into proper language types (e.g. enums/structs) and
* copy out data.
*
* String fields are encoded into null-terminated UTF-8 C strings.
*
* * The language binding should copy out the data and turn these into their equivalent string type.
* * The language binding should _not_ free these fields individually.
* Instead `glean_process_ping_upload_response` will receive the whole enum, taking care of
* freeing the memory.
*
*
* The order of variants should be the same as in `glean-core/src/upload/mod.rs`
* and `glean-core/android/src/main/java/mozilla/telemetry/glean/net/Upload.kt`.
*
*/
enum FfiPingUploadTask_Tag {
FfiPingUploadTask_Upload,
FfiPingUploadTask_Wait,
FfiPingUploadTask_Done,
};
typedef uint8_t FfiPingUploadTask_Tag;
typedef struct FfiPingUploadTask_Upload_Body {
FfiPingUploadTask_Tag tag;
char *document_id;
char *path;
struct ByteBuffer body;
char *headers;
} FfiPingUploadTask_Upload_Body;
typedef union FfiPingUploadTask {
FfiPingUploadTask_Tag tag;
FfiPingUploadTask_Upload_Body upload;
struct {
FfiPingUploadTask_Tag wait_tag;
uint64_t wait;
};
} FfiPingUploadTask;
typedef const int64_t *RawInt64Array;
typedef const int32_t *RawIntArray;
/**
* Identifier for a running timer.
*/
typedef uint64_t TimerId;
/**
* Initialize the logging system based on the target platform. This ensures
* that logging is shown when executing the Glean SDK unit tests.
*/
void glean_enable_logging(void);
/**
* Initialize the logging system to send JSON messages to a file descriptor
* (Unix) or file handle (Windows).
*
* Not available on Android and iOS.
*
* `fd` is a writable file descriptor (on Unix) or file handle (on Windows).
*
* # Safety
* Unsafe because the fd u64 passed in will be interpreted as either a file
* descriptor (Unix) or file handle (Windows) without any checking.
*/
void glean_enable_logging_to_fd(uint64_t fd);
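/**
 * An illustrative sketch, not part of the generated API: one way a Unix host
 * could wire up the JSON logging stream using a pipe. `fds` and the reader
 * loop are assumptions made for the example (requires `<unistd.h>` for
 * `pipe`); on Windows a file handle would be passed instead.
 *
 * ```c
 * int fds[2];
 * if (pipe(fds) == 0) {
 *     glean_enable_logging_to_fd((uint64_t)fds[1]);
 *     // Read newline-delimited JSON records from fds[0] and forward them
 *     // to the host application's own logging system.
 * }
 * ```
 */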
/**
* # Safety
*
* A valid and non-null configuration object is required for this function.
*/
uint8_t glean_initialize(const struct FfiConfiguration *cfg);
uint8_t glean_on_ready_to_submit_pings(void);
uint8_t glean_is_upload_enabled(void);
void glean_set_upload_enabled(uint8_t flag);
uint8_t glean_submit_ping_by_name(FfiStr ping_name, FfiStr reason);
char *glean_ping_collect(uint64_t ping_type_handle, FfiStr reason);
void glean_set_experiment_active(FfiStr experiment_id,
FfiStr branch,
RawStringArray extra_keys,
RawStringArray extra_values,
int32_t extra_len);
void glean_set_experiment_inactive(FfiStr experiment_id);
uint8_t glean_experiment_test_is_active(FfiStr experiment_id);
char *glean_experiment_test_get_data(FfiStr experiment_id);
void glean_clear_application_lifetime_metrics(void);
/**
* Try to unblock the RLB dispatcher to start processing queued tasks.
*
* **Note**: glean-core does not have its own dispatcher at the moment.
* This tries to detect the RLB and, if loaded, instructs the RLB dispatcher to flush.
* This allows the usage of both the RLB and other language bindings (e.g. Kotlin/Swift)
* within the same application.
*/
void glean_flush_rlb_dispatcher(void);
void glean_set_dirty_flag(uint8_t flag);
uint8_t glean_is_dirty_flag_set(void);
void glean_handle_client_active(void);
void glean_handle_client_inactive(void);
void glean_test_clear_all_stores(void);
void glean_destroy_glean(void);
uint8_t glean_is_first_run(void);
void glean_get_upload_task(union FfiPingUploadTask *result);
/**
* Process and free a `FfiPingUploadTask`.
*
* We need to pass the whole task instead of only the document id,
* so that we can free the strings properly on Drop.
*
* After this call returns, the `task` should not be used further by the caller.
*
* # Safety
*
* A valid and non-null upload task object is required for this function.
*/
void glean_process_ping_upload_response(union FfiPingUploadTask *task, uint32_t status);
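/**
 * An illustrative sketch, not part of the generated API: one way a language
 * binding might drive the upload loop with the two functions above.
 * `send_ping` (performs the HTTP request and returns the response status
 * code) and `sleep_ms` are hypothetical helpers; the status value follows the
 * `UPLOAD_RESULT_HTTP_STATUS` encoding documented above, with the HTTP status
 * in the lower bits.
 *
 * ```c
 * union FfiPingUploadTask task;
 * for (;;) {
 *     glean_get_upload_task(&task);
 *     if (task.tag == FfiPingUploadTask_Upload) {
 *         uint32_t status = send_ping(task.upload.path, task.upload.body, task.upload.headers);
 *         glean_process_ping_upload_response(&task, UPLOAD_RESULT_HTTP_STATUS | status);
 *     } else if (task.tag == FfiPingUploadTask_Wait) {
 *         sleep_ms(task.wait); // back off before asking for the next task
 *     } else { // FfiPingUploadTask_Done
 *         break;
 *     }
 * }
 * ```
 */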
/**
* # Safety
*
* A valid and non-null configuration object is required for this function.
*/
uint8_t glean_initialize_for_subprocess(const struct FfiConfiguration *cfg);
uint8_t glean_set_debug_view_tag(FfiStr tag);
void glean_set_log_pings(uint8_t value);
uint8_t glean_set_source_tags(RawStringArray raw_tags, int32_t tags_count);
uint64_t glean_get_timestamp_ms(void);
/**
* Public destructor for strings managed by the other side of the FFI.
*
* # Safety
*
* This will free the string pointer it gets passed in as an argument,
* and thus can be wildly unsafe if misused.
*
* See the documentation of `ffi_support::destroy_c_string` and
* `ffi_support::define_string_destructor!` for further info.
*/
void glean_str_free(char *s);
void glean_destroy_boolean_metric(uint64_t v);
uint64_t glean_new_boolean_metric(FfiStr category,
FfiStr name,
RawStringArray send_in_pings,
int32_t send_in_pings_len,
Lifetime lifetime,
uint8_t disabled);
void glean_boolean_set(uint64_t metric_id, uint8_t value);
uint8_t glean_boolean_test_has_value(uint64_t metric_id, FfiStr storage_name);
uint8_t glean_boolean_test_get_value(uint64_t metric_id, FfiStr storage_name);
void glean_destroy_counter_metric(uint64_t v);
uint64_t glean_new_counter_metric(FfiStr category,
FfiStr name,
RawStringArray send_in_pings,
int32_t send_in_pings_len,
Lifetime lifetime,
uint8_t disabled);
int32_t glean_counter_test_get_num_recorded_errors(uint64_t metric_id,
int32_t error_type,
FfiStr storage_name);
void glean_counter_add(uint64_t metric_id, int32_t amount);
uint8_t glean_counter_test_has_value(uint64_t metric_id, FfiStr storage_name);
int32_t glean_counter_test_get_value(uint64_t metric_id, FfiStr storage_name);
void glean_destroy_custom_distribution_metric(uint64_t v);
uint64_t glean_new_custom_distribution_metric(FfiStr category,
FfiStr name,
RawStringArray send_in_pings,
int32_t send_in_pings_len,
Lifetime lifetime,
uint8_t disabled,
uint64_t range_min,
uint64_t range_max,
uint64_t bucket_count,
int32_t histogram_type);
int32_t glean_custom_distribution_test_get_num_recorded_errors(uint64_t metric_id,
int32_t error_type,
FfiStr storage_name);
void glean_custom_distribution_accumulate_samples(uint64_t metric_id,
RawInt64Array raw_samples,
int32_t num_samples);
uint8_t glean_custom_distribution_test_has_value(uint64_t metric_id, FfiStr storage_name);
char *glean_custom_distribution_test_get_value_as_json_string(uint64_t metric_id,
FfiStr storage_name);
void glean_destroy_datetime_metric(uint64_t v);
uint64_t glean_new_datetime_metric(FfiStr category,
FfiStr name,
RawStringArray send_in_pings,
int32_t send_in_pings_len,
Lifetime lifetime,
uint8_t disabled,
TimeUnit time_unit);
int32_t glean_datetime_test_get_num_recorded_errors(uint64_t metric_id,
int32_t error_type,
FfiStr storage_name);
void glean_datetime_set(uint64_t metric_id,
int32_t year,
uint32_t month,
uint32_t day,
uint32_t hour,
uint32_t minute,
uint32_t second,
int64_t nano,
int32_t offset_seconds);
uint8_t glean_datetime_test_has_value(uint64_t metric_id, FfiStr storage_name);
char *glean_datetime_test_get_value_as_string(uint64_t metric_id, FfiStr storage_name);
void glean_destroy_event_metric(uint64_t v);
int32_t glean_event_test_get_num_recorded_errors(uint64_t metric_id,
int32_t error_type,
FfiStr storage_name);
uint64_t glean_new_event_metric(FfiStr category,
FfiStr name,
RawStringArray send_in_pings,
int32_t send_in_pings_len,
int32_t lifetime,
uint8_t disabled,
RawStringArray extra_keys,
int32_t extra_keys_len);
void glean_event_record(uint64_t metric_id,
uint64_t timestamp,
RawIntArray extra_keys,
RawStringArray extra_values,
int32_t extra_len);
uint8_t glean_event_test_has_value(uint64_t metric_id, FfiStr storage_name);
char *glean_event_test_get_value_as_json_string(uint64_t metric_id, FfiStr storage_name);
void glean_destroy_jwe_metric(uint64_t v);
uint64_t glean_new_jwe_metric(FfiStr category,
FfiStr name,
RawStringArray send_in_pings,
int32_t send_in_pings_len,
Lifetime lifetime,
uint8_t disabled);
int32_t glean_jwe_test_get_num_recorded_errors(uint64_t metric_id,
int32_t error_type,
FfiStr storage_name);
void glean_jwe_set_with_compact_representation(uint64_t metric_id, FfiStr value);
void glean_jwe_set(uint64_t metric_id,
FfiStr header,
FfiStr key,
FfiStr init_vector,
FfiStr cipher_text,
FfiStr auth_tag);
uint8_t glean_jwe_test_has_value(uint64_t metric_id, FfiStr storage_name);
char *glean_jwe_test_get_value(uint64_t metric_id, FfiStr storage_name);
char *glean_jwe_test_get_value_as_json_string(uint64_t metric_id, FfiStr storage_name);
void glean_destroy_labeled_counter_metric(uint64_t v);
/**
* Create a new labeled metric.
*/
uint64_t glean_new_labeled_counter_metric(FfiStr category,
FfiStr name,
RawStringArray send_in_pings,
int32_t send_in_pings_len,
int32_t lifetime,
uint8_t disabled,
RawStringArray labels,
int32_t label_count);
/**
* Create a new instance of the sub-metric of this labeled metric.
*/
uint64_t glean_labeled_counter_metric_get(uint64_t handle, FfiStr label);
int32_t glean_labeled_counter_test_get_num_recorded_errors(uint64_t metric_id,
int32_t error_type,
FfiStr storage_name);
void glean_destroy_labeled_boolean_metric(uint64_t v);
/**
* Create a new labeled metric.
*/
uint64_t glean_new_labeled_boolean_metric(FfiStr category,
FfiStr name,
RawStringArray send_in_pings,
int32_t send_in_pings_len,
int32_t lifetime,
uint8_t disabled,
RawStringArray labels,
int32_t label_count);
/**
* Create a new instance of the sub-metric of this labeled metric.
*/
uint64_t glean_labeled_boolean_metric_get(uint64_t handle, FfiStr label);
int32_t glean_labeled_boolean_test_get_num_recorded_errors(uint64_t metric_id,
int32_t error_type,
FfiStr storage_name);
void glean_destroy_labeled_string_metric(uint64_t v);
/**
* Create a new labeled metric.
*/
uint64_t glean_new_labeled_string_metric(FfiStr category,
FfiStr name,
RawStringArray send_in_pings,
int32_t send_in_pings_len,
int32_t lifetime,
uint8_t disabled,
RawStringArray labels,
int32_t label_count);
/**
* Create a new instance of the sub-metric of this labeled metric.
*/
uint64_t glean_labeled_string_metric_get(uint64_t handle, FfiStr label);
int32_t glean_labeled_string_test_get_num_recorded_errors(uint64_t metric_id,
int32_t error_type,
FfiStr storage_name);
void glean_destroy_memory_distribution_metric(uint64_t v);
uint64_t glean_new_memory_distribution_metric(FfiStr category,
FfiStr name,
RawStringArray send_in_pings,
int32_t send_in_pings_len,
Lifetime lifetime,
uint8_t disabled,
MemoryUnit memory_unit);
int32_t glean_memory_distribution_test_get_num_recorded_errors(uint64_t metric_id,
int32_t error_type,
FfiStr storage_name);
void glean_memory_distribution_accumulate(uint64_t metric_id, uint64_t sample);
void glean_memory_distribution_accumulate_samples(uint64_t metric_id,
RawInt64Array raw_samples,
int32_t num_samples);
uint8_t glean_memory_distribution_test_has_value(uint64_t metric_id, FfiStr storage_name);
char *glean_memory_distribution_test_get_value_as_json_string(uint64_t metric_id,
FfiStr storage_name);
void glean_destroy_ping_type(uint64_t v);
uint64_t glean_new_ping_type(FfiStr ping_name,
uint8_t include_client_id,
uint8_t send_if_empty,
RawStringArray reason_codes,
int32_t reason_codes_len);
uint8_t glean_test_has_ping_type(FfiStr ping_name);
void glean_register_ping_type(uint64_t ping_type_handle);
void glean_destroy_quantity_metric(uint64_t v);
uint64_t glean_new_quantity_metric(FfiStr category,
FfiStr name,
RawStringArray send_in_pings,
int32_t send_in_pings_len,
Lifetime lifetime,
uint8_t disabled);
int32_t glean_quantity_test_get_num_recorded_errors(uint64_t metric_id,
int32_t error_type,
FfiStr storage_name);
void glean_quantity_set(uint64_t metric_id, int64_t value);
uint8_t glean_quantity_test_has_value(uint64_t metric_id, FfiStr storage_name);
int64_t glean_quantity_test_get_value(uint64_t metric_id, FfiStr storage_name);
void glean_destroy_string_metric(uint64_t v);
uint64_t glean_new_string_metric(FfiStr category,
FfiStr name,
RawStringArray send_in_pings,
int32_t send_in_pings_len,
Lifetime lifetime,
uint8_t disabled);
int32_t glean_string_test_get_num_recorded_errors(uint64_t metric_id,
int32_t error_type,
FfiStr storage_name);
void glean_string_set(uint64_t metric_id, FfiStr value);
uint8_t glean_string_test_has_value(uint64_t metric_id, FfiStr storage_name);
char *glean_string_test_get_value(uint64_t metric_id, FfiStr storage_name);
void glean_destroy_string_list_metric(uint64_t v);
uint64_t glean_new_string_list_metric(FfiStr category,
FfiStr name,
RawStringArray send_in_pings,
int32_t send_in_pings_len,
Lifetime lifetime,
uint8_t disabled);
int32_t glean_string_list_test_get_num_recorded_errors(uint64_t metric_id,
int32_t error_type,
FfiStr storage_name);
void glean_string_list_add(uint64_t metric_id, FfiStr value);
void glean_string_list_set(uint64_t metric_id, RawStringArray values, int32_t values_len);
uint8_t glean_string_list_test_has_value(uint64_t metric_id, FfiStr storage_name);
char *glean_string_list_test_get_value_as_json_string(uint64_t metric_id, FfiStr storage_name);
void glean_destroy_timespan_metric(uint64_t v);
uint64_t glean_new_timespan_metric(FfiStr category,
FfiStr name,
RawStringArray send_in_pings,
int32_t send_in_pings_len,
Lifetime lifetime,
uint8_t disabled,
int32_t time_unit);
int32_t glean_timespan_test_get_num_recorded_errors(uint64_t metric_id,
int32_t error_type,
FfiStr storage_name);
void glean_timespan_set_start(uint64_t metric_id, uint64_t start_time);
void glean_timespan_set_stop(uint64_t metric_id, uint64_t stop_time);
void glean_timespan_cancel(uint64_t metric_id);
void glean_timespan_set_raw_nanos(uint64_t metric_id, uint64_t elapsed_nanos);
uint8_t glean_timespan_test_has_value(uint64_t metric_id, FfiStr storage_name);
uint64_t glean_timespan_test_get_value(uint64_t metric_id, FfiStr storage_name);
void glean_destroy_timing_distribution_metric(uint64_t v);
uint64_t glean_new_timing_distribution_metric(FfiStr category,
FfiStr name,
RawStringArray send_in_pings,
int32_t send_in_pings_len,
Lifetime lifetime,
uint8_t disabled,
TimeUnit time_unit);
int32_t glean_timing_distribution_test_get_num_recorded_errors(uint64_t metric_id,
int32_t error_type,
FfiStr storage_name);
TimerId glean_timing_distribution_set_start(uint64_t metric_id, uint64_t start_time);
void glean_timing_distribution_set_stop_and_accumulate(uint64_t metric_id,
TimerId timer_id,
uint64_t stop_time);
void glean_timing_distribution_cancel(uint64_t metric_id, TimerId timer_id);
void glean_timing_distribution_accumulate_samples(uint64_t metric_id,
RawInt64Array raw_samples,
int32_t num_samples);
uint8_t glean_timing_distribution_test_has_value(uint64_t metric_id, FfiStr storage_name);
char *glean_timing_distribution_test_get_value_as_json_string(uint64_t metric_id,
FfiStr storage_name);
void glean_destroy_uuid_metric(uint64_t v);
uint64_t glean_new_uuid_metric(FfiStr category,
FfiStr name,
RawStringArray send_in_pings,
int32_t send_in_pings_len,
Lifetime lifetime,
uint8_t disabled);
int32_t glean_uuid_test_get_num_recorded_errors(uint64_t metric_id,
int32_t error_type,
FfiStr storage_name);
void glean_uuid_set(uint64_t metric_id, FfiStr value);
uint8_t glean_uuid_test_has_value(uint64_t metric_id, FfiStr storage_name);
char *glean_uuid_test_get_value(uint64_t metric_id, FfiStr storage_name);

View File

@ -0,0 +1,41 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use ffi_support::FfiStr;
use crate::{define_metric, handlemap_ext::HandleMapExtension, with_glean_value, Lifetime};
define_metric!(BooleanMetric => BOOLEAN_METRICS {
new -> glean_new_boolean_metric(),
destroy -> glean_destroy_boolean_metric,
});
#[no_mangle]
pub extern "C" fn glean_boolean_set(metric_id: u64, value: u8) {
with_glean_value(|glean| {
BOOLEAN_METRICS.call_infallible(metric_id, |metric| {
metric.set(glean, value != 0);
})
})
}
#[no_mangle]
pub extern "C" fn glean_boolean_test_has_value(metric_id: u64, storage_name: FfiStr) -> u8 {
with_glean_value(|glean| {
BOOLEAN_METRICS.call_infallible(metric_id, |metric| {
metric
.test_get_value(glean, storage_name.as_str())
.is_some()
})
})
}
#[no_mangle]
pub extern "C" fn glean_boolean_test_get_value(metric_id: u64, storage_name: FfiStr) -> u8 {
with_glean_value(|glean| {
BOOLEAN_METRICS.call_infallible(metric_id, |metric| {
metric.test_get_value(glean, storage_name.as_str()).unwrap()
})
})
}

View File

@ -0,0 +1,142 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
//! ByteBuffer is a struct that represents an array of bytes to be sent over the FFI boundaries.
//!
//! This is a copy of the same struct from [ffi-support],
//! with the difference that the length is encoded as a `i32`.
//!
//! [ffi-support]: https://docs.rs/ffi-support/0.4.0/src/ffi_support/lib.rs.html#390-393
/// ByteBuffer is a struct that represents an array of bytes to be sent over the FFI boundaries.
/// There are several cases when you might want to use this, but the primary one for us
/// is for returning protobuf-encoded data to Swift and Java. The type is currently rather
/// limited (implementing almost no functionality); however, in the future it may be
/// expanded.
///
/// ## Caveats
///
/// Note that the order of the fields is `len` (an i32) then `data` (a `*mut u8`); getting
/// this wrong on the other side of the FFI will cause memory corruption and crashes.
/// `i32` is used for the length instead of `u64` and `usize` because JNA has interop
/// issues with both these types.
///
/// ByteBuffer does not implement Drop. This is intentional. Memory passed into it will
/// be leaked if it is not explicitly destroyed by calling [`ByteBuffer::destroy`]. This
/// is because in the future, we may allow its use for passing data into Rust code.
/// ByteBuffer assuming ownership of the data would make this a problem.
///
/// ## Layout/fields
///
/// This struct's fields are not `pub` (mostly so that we can soundly implement `Send`, but also so
/// that we can verify Rust users are constructing them appropriately), but the fields, their types, and
/// their order are *very much* a part of the public API of this type. Consumers on the other side
/// of the FFI will need to know its layout.
///
/// If this were a C struct, it would look like
///
/// ```c,no_run
/// struct ByteBuffer {
/// int32_t len;
/// uint8_t *data; // note: nullable
/// };
/// ```
///
/// In Rust, there are two fields, in this order: `len: i32`, and `data: *mut u8`.
///
/// ### Description of fields
///
/// `data` is a pointer to an array of `len` bytes. Note that `data` can be a null pointer and therefore
/// should be checked.
///
/// The bytes array is allocated on the heap and must be freed on it as well. Critically, if there
/// are multiple Rust packages being used in the same application, it *must be freed on the
/// same heap that allocated it*, or you will corrupt both heaps.
#[repr(C)]
pub struct ByteBuffer {
len: i32,
data: *mut u8,
}
impl From<Vec<u8>> for ByteBuffer {
#[inline]
fn from(bytes: Vec<u8>) -> Self {
Self::from_vec(bytes)
}
}
impl ByteBuffer {
/// Creates a `ByteBuffer` of the requested size, zero-filled.
///
/// The contents of the vector will not be dropped. Instead, `destroy` must
/// be called later to reclaim this memory or it will be leaked.
///
/// ## Caveats
///
/// This will panic if the buffer length (`usize`) cannot fit into a `i32`.
#[inline]
pub fn new_with_size(size: usize) -> Self {
let mut buf = vec![];
buf.reserve_exact(size);
buf.resize(size, 0);
ByteBuffer::from_vec(buf)
}
/// Creates a `ByteBuffer` instance from a `Vec` instance.
///
/// The contents of the vector will not be dropped. Instead, `destroy` must
/// be called later to reclaim this memory or it will be leaked.
///
/// ## Caveats
///
/// This will panic if the buffer length (`usize`) cannot fit into a `i32`.
#[inline]
pub fn from_vec(bytes: Vec<u8>) -> Self {
use std::convert::TryFrom;
let mut buf = bytes.into_boxed_slice();
let data = buf.as_mut_ptr();
let len = i32::try_from(buf.len()).expect("buffer length cannot fit into a i32.");
std::mem::forget(buf);
Self { len, data }
}
/// Convert this `ByteBuffer` into a Vec<u8>. This is the only way
/// to access the data from inside the buffer.
#[inline]
pub fn into_vec(self) -> Vec<u8> {
if self.data.is_null() {
vec![]
} else {
// This is correct because we convert to a Box<[u8]> first, which is
// a design constraint of RawVec.
unsafe { Vec::from_raw_parts(self.data, self.len as usize, self.len as usize) }
}
}
/// Reclaim memory stored in this ByteBuffer.
///
/// ## Caveats
///
/// This is safe so long as the buffer is empty, or the data was allocated
/// by Rust code, e.g. this is a ByteBuffer created by
/// `ByteBuffer::from_vec` or `Default::default`.
///
/// If the ByteBuffer were passed into Rust (which you shouldn't do, since
/// there's no way to see the data in Rust currently), then calling `destroy`
/// is fundamentally broken.
#[inline]
pub fn destroy(self) {
drop(self.into_vec())
}
}
impl Default for ByteBuffer {
#[inline]
fn default() -> Self {
Self {
len: 0,
data: std::ptr::null_mut(),
}
}
}

View File

@ -0,0 +1,35 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use ffi_support::FfiStr;
use crate::{define_metric, handlemap_ext::HandleMapExtension, with_glean_value, Lifetime};
define_metric!(CounterMetric => COUNTER_METRICS {
new -> glean_new_counter_metric(),
test_get_num_recorded_errors -> glean_counter_test_get_num_recorded_errors,
destroy -> glean_destroy_counter_metric,
add -> glean_counter_add(amount: i32),
});
#[no_mangle]
pub extern "C" fn glean_counter_test_has_value(metric_id: u64, storage_name: FfiStr) -> u8 {
with_glean_value(|glean| {
COUNTER_METRICS.call_infallible(metric_id, |metric| {
metric
.test_get_value(glean, storage_name.as_str())
.is_some()
})
})
}
#[no_mangle]
pub extern "C" fn glean_counter_test_get_value(metric_id: u64, storage_name: FfiStr) -> i32 {
with_glean_value(|glean| {
COUNTER_METRICS.call_infallible(metric_id, |metric| {
metric.test_get_value(glean, storage_name.as_str()).unwrap()
})
})
}

View File

@ -0,0 +1,65 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::os::raw::c_char;
use ffi_support::FfiStr;
use crate::{
define_metric, from_raw_int64_array, handlemap_ext::HandleMapExtension, with_glean_value,
Lifetime, RawInt64Array,
};
define_metric!(CustomDistributionMetric => CUSTOM_DISTRIBUTION_METRICS {
new -> glean_new_custom_distribution_metric(range_min: u64, range_max: u64, bucket_count: u64, histogram_type: i32),
test_get_num_recorded_errors -> glean_custom_distribution_test_get_num_recorded_errors,
destroy -> glean_destroy_custom_distribution_metric,
});
#[no_mangle]
pub extern "C" fn glean_custom_distribution_accumulate_samples(
metric_id: u64,
raw_samples: RawInt64Array,
num_samples: i32,
) {
with_glean_value(|glean| {
CUSTOM_DISTRIBUTION_METRICS.call_infallible_mut(metric_id, |metric| {
// The Kotlin code is sending Long(s), which are 64 bits, as there's
// currently no stable UInt type. The positive part of [Int] would not
// be enough to represent the values coming in.
// Here Long(s) are handled as i64 and then cast in `accumulate_samples_signed`
// to u32.
let samples = from_raw_int64_array(raw_samples, num_samples);
metric.accumulate_samples_signed(glean, samples);
})
})
}
#[no_mangle]
pub extern "C" fn glean_custom_distribution_test_has_value(
metric_id: u64,
storage_name: FfiStr,
) -> u8 {
with_glean_value(|glean| {
CUSTOM_DISTRIBUTION_METRICS.call_infallible(metric_id, |metric| {
metric
.test_get_value(glean, storage_name.as_str())
.is_some()
})
})
}
#[no_mangle]
pub extern "C" fn glean_custom_distribution_test_get_value_as_json_string(
metric_id: u64,
storage_name: FfiStr,
) -> *mut c_char {
with_glean_value(|glean| {
CUSTOM_DISTRIBUTION_METRICS.call_infallible(metric_id, |metric| {
metric
.test_get_value_as_json_string(glean, storage_name.as_str())
.unwrap()
})
})
}

View File

@ -0,0 +1,81 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::os::raw::c_char;
use ffi_support::FfiStr;
use crate::{
define_metric, handlemap_ext::HandleMapExtension, with_glean_value, Lifetime, TimeUnit,
};
define_metric!(DatetimeMetric => DATETIME_METRICS {
new -> glean_new_datetime_metric(time_unit: TimeUnit),
test_get_num_recorded_errors -> glean_datetime_test_get_num_recorded_errors,
destroy -> glean_destroy_datetime_metric,
});
#[no_mangle]
pub extern "C" fn glean_datetime_set(
metric_id: u64,
year: i32,
month: u32,
day: u32,
hour: u32,
minute: u32,
second: u32,
nano: i64,
offset_seconds: i32,
) {
// Convert and truncate the nanos to u32, as that's what the underlying
// library uses. Unfortunately, not all platforms have unsigned integers
// so we need to work with what we have.
if nano < 0 || nano > i64::from(std::u32::MAX) {
log::error!("Unexpected `nano` value coming from platform code {}", nano);
return;
}
// We are within the u32 boundaries for nano, so we should be OK converting.
let converted_nanos = nano as u32;
with_glean_value(|glean| {
DATETIME_METRICS.call_infallible(metric_id, |metric| {
metric.set_with_details(
glean,
year,
month,
day,
hour,
minute,
second,
converted_nanos,
offset_seconds,
);
})
})
}
#[no_mangle]
pub extern "C" fn glean_datetime_test_has_value(metric_id: u64, storage_name: FfiStr) -> u8 {
with_glean_value(|glean| {
DATETIME_METRICS.call_infallible(metric_id, |metric| {
metric
.test_get_value_as_string(glean, storage_name.as_str())
.is_some()
})
})
}
#[no_mangle]
pub extern "C" fn glean_datetime_test_get_value_as_string(
metric_id: u64,
storage_name: FfiStr,
) -> *mut c_char {
with_glean_value(|glean| {
DATETIME_METRICS.call_infallible(metric_id, |metric| {
metric
.test_get_value_as_string(glean, storage_name.as_str())
.unwrap()
})
})
}

93
third_party/rust/glean-ffi/src/event.rs vendored Normal file
View File

@ -0,0 +1,93 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::convert::TryFrom;
use std::os::raw::c_char;
use ffi_support::FfiStr;
use glean_core::metrics::EventMetric;
use glean_core::{CommonMetricData, Lifetime};
use crate::ffi_string_ext::FallibleToString;
use crate::handlemap_ext::HandleMapExtension;
use crate::{
define_metric, from_raw_int_array_and_string_array, from_raw_string_array, with_glean_value,
RawIntArray, RawStringArray,
};
define_metric!(EventMetric => EVENT_METRICS {
test_get_num_recorded_errors -> glean_event_test_get_num_recorded_errors,
destroy -> glean_destroy_event_metric,
});
#[no_mangle]
pub extern "C" fn glean_new_event_metric(
category: FfiStr,
name: FfiStr,
send_in_pings: RawStringArray,
send_in_pings_len: i32,
lifetime: i32,
disabled: u8,
extra_keys: RawStringArray,
extra_keys_len: i32,
) -> u64 {
EVENT_METRICS.insert_with_log(|| {
let name = name.to_string_fallible()?;
let category = category.to_string_fallible()?;
let send_in_pings = from_raw_string_array(send_in_pings, send_in_pings_len)?;
let lifetime = Lifetime::try_from(lifetime)?;
let extra_keys = from_raw_string_array(extra_keys, extra_keys_len)?;
Ok(EventMetric::new(
CommonMetricData {
name,
category,
send_in_pings,
lifetime,
disabled: disabled != 0,
..Default::default()
},
extra_keys,
))
})
}
#[no_mangle]
pub extern "C" fn glean_event_record(
metric_id: u64,
timestamp: u64,
extra_keys: RawIntArray,
extra_values: RawStringArray,
extra_len: i32,
) {
with_glean_value(|glean| {
EVENT_METRICS.call_with_log(metric_id, |metric| {
let extra = from_raw_int_array_and_string_array(extra_keys, extra_values, extra_len)?;
metric.record(glean, timestamp, extra);
Ok(())
})
})
}
#[no_mangle]
pub extern "C" fn glean_event_test_has_value(metric_id: u64, storage_name: FfiStr) -> u8 {
with_glean_value(|glean| {
EVENT_METRICS.call_infallible(metric_id, |metric| {
metric.test_has_value(glean, storage_name.as_str())
})
})
}
#[no_mangle]
pub extern "C" fn glean_event_test_get_value_as_json_string(
metric_id: u64,
storage_name: FfiStr,
) -> *mut c_char {
with_glean_value(|glean| {
EVENT_METRICS.call_infallible(metric_id, |metric| {
metric.test_get_value_as_json_string(glean, storage_name.as_str())
})
})
}

View File

@ -0,0 +1,83 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::fs::File;
use std::io::Write;
use std::sync::RwLock;
#[cfg(target_os = "windows")]
use std::os::windows::io::FromRawHandle;
#[cfg(target_os = "windows")]
use std::ffi::c_void;
#[cfg(not(target_os = "windows"))]
use std::os::unix::io::FromRawFd;
use serde::Serialize;
/// An implementation of log::Log that writes log messages in JSON format to a
/// file descriptor/handle. The logging level is ignored in this implementation:
/// it is up to the receiver of these log messages (on the language binding
/// side) to filter the log messages based on their level.
/// The JSON payload of each message is an object with the following keys:
/// - `level` (string): One of the logging levels defined here:
/// https://docs.rs/log/0.4.11/log/enum.Level.html
/// - `message` (string): The logging message.
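/// For example, a single emitted line might look like this (values are
/// illustrative): `{"level":"INFO","message":"Glean initialized"}`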
pub struct FdLogger {
pub file: RwLock<File>,
}
#[derive(Serialize)]
struct FdLoggingRecord {
level: String,
message: String,
}
#[cfg(target_os = "windows")]
unsafe fn get_file_from_fd(fd: u64) -> File {
File::from_raw_handle(fd as *mut c_void)
}
#[cfg(not(target_os = "windows"))]
unsafe fn get_file_from_fd(fd: u64) -> File {
File::from_raw_fd(fd as i32)
}
impl FdLogger {
pub unsafe fn new(fd: u64) -> Self {
FdLogger {
file: RwLock::new(get_file_from_fd(fd)),
}
}
}
impl log::Log for FdLogger {
fn enabled(&self, _metadata: &log::Metadata) -> bool {
// This logger always emits logging messages of any level, and the
// language binding consuming these messages is responsible for
// filtering and routing them.
true
}
fn log(&self, record: &log::Record) {
// Normally, classes implementing the Log trait would filter based on
// the log level here. But in this case, we want to emit all log
// messages and let the logging system in the language binding filter
// and route them.
let payload = FdLoggingRecord {
level: record.level().to_string(),
message: record.args().to_string(),
};
let _ = writeln!(
self.file.write().unwrap(),
"{}",
serde_json::to_string(&payload).unwrap()
);
}
fn flush(&self) {
let _ = self.file.write().unwrap().flush();
}
}

View File

@ -0,0 +1,21 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use ffi_support::FfiStr;
use glean_core::Result;
pub trait FallibleToString {
/// Convert to a string or fail with an appropriate error.
fn to_string_fallible(&self) -> Result<String>;
}
/// This allows converting a `FfiStr` (effectively a null-terminated C-like string)
/// to a Rust string, failing when the pointer is null or contains invalid UTF-8 characters.
impl<'a> FallibleToString for FfiStr<'a> {
fn to_string_fallible(&self) -> Result<String> {
self.as_opt_str()
.map(|s| s.to_string())
.ok_or_else(glean_core::Error::utf8_error)
}
}

View File

@ -0,0 +1,344 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::collections::HashMap;
use std::os::raw::c_char;
use ffi_support::FfiStr;
use crate::ffi_string_ext::FallibleToString;
pub type RawStringArray = *const *const c_char;
pub type RawIntArray = *const i32;
pub type RawInt64Array = *const i64;
/// Creates a vector of strings from a raw C-like string array.
///
/// Returns an error if any of the strings contain invalid UTF-8 characters.
///
/// # Safety
///
/// * We check the array pointer for validity (non-null).
/// * FfiStr checks each individual char pointer for validity (non-null).
/// * We discard invalid char pointers (null pointer).
/// * Invalid UTF-8 in any string will return an error from this function.
pub fn from_raw_string_array(arr: RawStringArray, len: i32) -> glean_core::Result<Vec<String>> {
unsafe {
if arr.is_null() || len <= 0 {
return Ok(vec![]);
}
let arr_ptrs = std::slice::from_raw_parts(arr, len as usize);
arr_ptrs
.iter()
.map(|&p| {
// Drop invalid strings
FfiStr::from_raw(p).to_string_fallible()
})
.collect()
}
}
/// Creates a HashMap<i32, String> from a pair of C int and string arrays.
///
/// Returns an error if any of the strings contain invalid UTF-8 characters.
///
/// # Safety
///
/// * We check the array pointer for validity (non-null).
/// * FfiStr checks each individual char pointer for validity (non-null).
/// * We discard invalid char pointers (null pointer).
/// * Invalid UTF-8 in any string will return an error from this function.
pub fn from_raw_int_array_and_string_array(
keys: RawIntArray,
values: RawStringArray,
len: i32,
) -> glean_core::Result<Option<HashMap<i32, String>>> {
unsafe {
if keys.is_null() || values.is_null() || len <= 0 {
return Ok(None);
}
let keys_ptrs = std::slice::from_raw_parts(keys, len as usize);
let values_ptrs = std::slice::from_raw_parts(values, len as usize);
let res: glean_core::Result<_> = keys_ptrs
.iter()
.zip(values_ptrs.iter())
.map(|(&k, &v)| FfiStr::from_raw(v).to_string_fallible().map(|s| (k, s)))
.collect();
res.map(Some)
}
}
/// Creates a HashMap<String, String> from a pair of C string arrays.
///
/// Returns an error if any of the strings contain invalid UTF-8 characters.
///
/// # Safety
///
/// * We check the array pointer for validity (non-null).
/// * FfiStr checks each individual char pointer for validity (non-null).
/// * We discard invalid char pointers (null pointer).
/// * Invalid UTF-8 in any string will return an error from this function.
pub fn from_raw_string_array_and_string_array(
keys: RawStringArray,
values: RawStringArray,
len: i32,
) -> glean_core::Result<Option<HashMap<String, String>>> {
unsafe {
if keys.is_null() || values.is_null() || len <= 0 {
return Ok(None);
}
let keys_ptrs = std::slice::from_raw_parts(keys, len as usize);
let values_ptrs = std::slice::from_raw_parts(values, len as usize);
let res: glean_core::Result<_> = keys_ptrs
.iter()
.zip(values_ptrs.iter())
.map(|(&k, &v)| {
let k = FfiStr::from_raw(k).to_string_fallible()?;
let v = FfiStr::from_raw(v).to_string_fallible()?;
Ok((k, v))
})
.collect();
res.map(Some)
}
}
/// Creates a Vec<i64> from a raw C int64 array.
///
/// This will return an empty `Vec` if the input is empty.
///
/// # Safety
///
/// * We check the array pointer for validity (non-null).
pub fn from_raw_int64_array(values: RawInt64Array, len: i32) -> Vec<i64> {
unsafe {
if values.is_null() || len <= 0 {
return vec![];
}
let value_slice = std::slice::from_raw_parts(values, len as usize);
value_slice.to_vec()
}
}
#[cfg(test)]
mod test {
use super::*;
use std::ffi::CString;
mod raw_string_array {
use super::*;
#[test]
fn parsing_valid_array() {
let expected = vec!["first", "second"];
let array: Vec<CString> = expected
.iter()
.map(|&s| CString::new(&*s).unwrap())
.collect();
let ptr_array: Vec<*const _> = array.iter().map(|s| s.as_ptr()).collect();
let list = from_raw_string_array(ptr_array.as_ptr(), expected.len() as i32).unwrap();
assert_eq!(expected, list);
}
#[test]
fn parsing_empty_array() {
let expected: Vec<String> = vec![];
// Testing a null pointer (length longer to ensure the null pointer is checked)
let list = from_raw_string_array(std::ptr::null(), 2).unwrap();
assert_eq!(expected, list);
// Need a (filled) vector to obtain a valid pointer.
let array = vec![CString::new("glean").unwrap()];
let ptr_array: Vec<*const _> = array.iter().map(|s| s.as_ptr()).collect();
// Check the length with a valid pointer.
let list = from_raw_string_array(ptr_array.as_ptr(), 0).unwrap();
assert_eq!(expected, list);
}
#[test]
fn parsing_invalid_utf8_fails() {
// CAREFUL! We're manually constructing a nul-terminated byte string containing invalid UTF-8.
// Need a (filled) vector to obtain a valid pointer.
let array = vec![
// -1 is definitely an invalid UTF-8 codepoint
// Let's not break anything and append the nul terminator
vec![0x67, 0x6c, -1, 0x65, 0x61, 0x6e, 0x00],
];
let ptr_array: Vec<*const _> = array.iter().map(|s| s.as_ptr()).collect();
let list = from_raw_string_array(ptr_array.as_ptr(), array.len() as i32);
assert!(list.is_err());
}
}
mod raw_int_string_array {
use super::*;
#[test]
fn parsing_valid_array() {
let mut expected_map = HashMap::new();
expected_map.insert(7, "seven".to_string());
expected_map.insert(8, "eight".to_string());
let int_array = vec![7, 8];
let str_array = vec![
CString::new("seven").unwrap(),
CString::new("eight").unwrap(),
];
let ptr_array: Vec<*const _> = str_array.iter().map(|s| s.as_ptr()).collect();
let map = from_raw_int_array_and_string_array(
int_array.as_ptr(),
ptr_array.as_ptr(),
expected_map.len() as i32,
)
.unwrap();
assert_eq!(Some(expected_map), map);
}
#[test]
fn parsing_empty_array() {
// Testing a null pointer (length longer to ensure the null pointer is checked)
let result =
from_raw_int_array_and_string_array(std::ptr::null(), std::ptr::null(), 2).unwrap();
assert_eq!(None, result);
// Need a (filled) vector to obtain a valid pointer.
let int_array = vec![1];
let result =
from_raw_int_array_and_string_array(int_array.as_ptr(), std::ptr::null(), 2)
.unwrap();
assert_eq!(None, result);
let array = vec![CString::new("glean").unwrap()];
let ptr_array: Vec<*const _> = array.iter().map(|s| s.as_ptr()).collect();
let result =
from_raw_int_array_and_string_array(std::ptr::null(), ptr_array.as_ptr(), 2)
.unwrap();
assert_eq!(None, result);
// Check the length with valid pointers.
let result =
from_raw_int_array_and_string_array(int_array.as_ptr(), ptr_array.as_ptr(), 0)
.unwrap();
assert_eq!(None, result);
}
#[test]
fn parsing_invalid_utf8_fails() {
// CAREFUL! We're manually constructing a nul-terminated byte string containing invalid UTF-8.
// Need a (filled) vector to obtain a valid pointer.
let int_array = vec![1];
let array = vec![
// -1 is definitely an invalid UTF-8 codepoint
// Let's not break anything and append the nul terminator
vec![0x67, 0x6c, -1, 0x65, 0x61, 0x6e, 0x00],
];
let ptr_array: Vec<*const _> = array.iter().map(|s| s.as_ptr()).collect();
let map = from_raw_int_array_and_string_array(
int_array.as_ptr(),
ptr_array.as_ptr(),
array.len() as i32,
);
assert!(map.is_err());
}
}
mod raw_string_string_array {
use super::*;
#[test]
fn parsing_valid_array() {
let mut expected_map = HashMap::new();
expected_map.insert("one".to_string(), "seven".to_string());
expected_map.insert("two".to_string(), "eight".to_string());
let key_array = vec![CString::new("one").unwrap(), CString::new("two").unwrap()];
let ptr_key_array: Vec<*const _> = key_array.iter().map(|s| s.as_ptr()).collect();
let str_array = vec![
CString::new("seven").unwrap(),
CString::new("eight").unwrap(),
];
let ptr_array: Vec<*const _> = str_array.iter().map(|s| s.as_ptr()).collect();
let map = from_raw_string_array_and_string_array(
ptr_key_array.as_ptr(),
ptr_array.as_ptr(),
expected_map.len() as i32,
)
.unwrap();
assert_eq!(Some(expected_map), map);
}
#[test]
fn parsing_empty_array() {
// Test a null pointer (with a non-zero length, so the null-pointer check is actually exercised)
let result =
from_raw_string_array_and_string_array(std::ptr::null(), std::ptr::null(), 2)
.unwrap();
assert_eq!(None, result);
// Need a (filled) vector to obtain a valid pointer.
let key_array = vec![CString::new("one").unwrap()];
let ptr_key_array: Vec<*const _> = key_array.iter().map(|s| s.as_ptr()).collect();
let str_array = vec![CString::new("seven").unwrap()];
let ptr_array: Vec<*const _> = str_array.iter().map(|s| s.as_ptr()).collect();
let result =
from_raw_string_array_and_string_array(ptr_key_array.as_ptr(), std::ptr::null(), 2)
.unwrap();
assert_eq!(None, result);
let result =
from_raw_int_array_and_string_array(std::ptr::null(), ptr_array.as_ptr(), 2)
.unwrap();
assert_eq!(None, result);
// Check the length with valid pointers.
let result = from_raw_string_array_and_string_array(
ptr_key_array.as_ptr(),
ptr_array.as_ptr(),
0,
)
.unwrap();
assert_eq!(None, result);
}
#[test]
fn parsing_invalid_utf8_fails() {
// CAREFUL! We're manually constructing nul-terminated C strings here.
// Need a (filled) vector to obtain a valid pointer.
let key_array = vec![CString::new("one").unwrap()];
let ptr_key_array: Vec<*const _> = key_array.iter().map(|s| s.as_ptr()).collect();
let array = vec![
// -1 (i.e. the byte 0xFF) is never valid in UTF-8
// Append the nul terminator so the C string stays well-formed
vec![0x67, 0x6c, -1, 0x65, 0x61, 0x6e, 0x00],
];
let ptr_array: Vec<*const _> = array.iter().map(|s| s.as_ptr()).collect();
let map = from_raw_string_array_and_string_array(
ptr_key_array.as_ptr(),
ptr_array.as_ptr(),
array.len() as i32,
);
assert!(map.is_err());
}
}
}

third_party/rust/glean-ffi/src/handlemap_ext.rs vendored Normal file
@ -0,0 +1,150 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
//! Glean does not pass errors through the FFI component upwards.
//! Most errors are not propagated; they are only logged for debugging.
//! Errors should be recoverable and the platform-side should mostly ignore them.
//! Additionally the platform-side can work asynchronously anyway, leaving us with no good way to
//! pass back errors.
//!
//! The `HandleMapExtension` extension trait adds methods that log potential errors.
//!
//! **Note**: the platform-side still needs to check for null pointers or other default values before
//! using returned values.
//! This is only relevant for the creation of the main object and of metrics, as those are the only
//! cases where we return something potentially fallible.
use std::panic::UnwindSafe;
use ffi_support::{ConcurrentHandleMap, ExternError, IntoFfi};
pub fn handle_result<R, F>(callback: F) -> R::Value
where
F: UnwindSafe + FnOnce() -> Result<R, glean_core::Error>,
R: IntoFfi,
{
let mut error = ffi_support::ExternError::success();
let res = ffi_support::abort_on_panic::call_with_result(&mut error, callback);
log_if_error(error);
res
}
/// Warns if an error occurred and then releases the allocated memory.
///
/// This is a helper for the case where we aren't exposing this back over the FFI.
///
/// Adapted from the `consume_and_log_if_error` method, but with a changed log message.
///
/// We assume we're not inside a `catch_unwind`, and so we wrap inside one ourselves.
pub fn log_if_error(error: ExternError) {
if !error.get_code().is_success() {
// in practice this should never panic, but you never know...
ffi_support::abort_on_panic::call_with_output(|| {
log::error!(
"Glean failed ({:?}): {}",
error.get_code(),
error.get_message().as_str()
);
unsafe {
error.manually_release();
}
})
}
}
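// Editor's note: illustrative sketch only, not part of the upstream glean-ffi
// sources. It exercises the `handle_result` contract described above: a
// fallible closure is turned into an FFI value, with errors logged and the
// default FFI value (`0` for a `u8`) returned instead.
#[cfg(test)]
#[test]
fn handle_result_example() {
    // Success: the Rust `bool` is converted into its FFI representation.
    let ok: u8 = handle_result(|| Ok(true));
    assert_eq!(1, ok);

    // Failure: the error is logged and the default FFI value is returned.
    let err: u8 = handle_result(|| -> Result<bool, glean_core::Error> {
        Err(glean_core::Error::not_initialized())
    });
    assert_eq!(0, err);
}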
pub trait HandleMapExtension {
type Output;
/// Insert a newly constructed object and return a handle to it.
///
/// This will catch and log any errors.
/// This will not panic on errors in the constructor.
///
/// On success, it returns a new valid handle.
/// On failure, it returns the default FFI value for a handle (`0`).
fn insert_with_log<F>(&self, constructor: F) -> u64
where
F: UnwindSafe + FnOnce() -> Result<Self::Output, glean_core::Error>;
/// Call an infallible callback with the object identified by a handle.
///
/// This will ignore panics.
///
/// On success, it converts the callback return value into an FFI value and returns it.
/// On failure, it will return the default FFI value.
fn call_infallible<R, F>(&self, h: u64, callback: F) -> R::Value
where
F: UnwindSafe + FnOnce(&Self::Output) -> R,
R: IntoFfi;
/// Call an infallible callback with the object identified by a handle.
///
/// This will ignore panics.
///
/// On success, it converts the callback return value into an FFI value and returns it.
/// On failure, it will return the default FFI value.
fn call_infallible_mut<R, F>(&self, h: u64, callback: F) -> R::Value
where
F: UnwindSafe + FnOnce(&mut Self::Output) -> R,
R: IntoFfi;
/// Call a callback with the object identified by a handle.
///
/// This will catch and log any errors of the callback.
/// This will not panic on errors in the callback.
///
/// On success, it converts the callback return value into an FFI value and returns it.
/// On failure, it will return the default FFI value.
fn call_with_log<R, F>(&self, h: u64, callback: F) -> R::Value
where
F: UnwindSafe + FnOnce(&Self::Output) -> Result<R, glean_core::Error>,
R: IntoFfi;
}
impl<T> HandleMapExtension for ConcurrentHandleMap<T> {
type Output = T;
fn insert_with_log<F>(&self, constructor: F) -> u64
where
F: UnwindSafe + FnOnce() -> Result<Self::Output, glean_core::Error>,
{
let mut error = ExternError::success();
let res = self.insert_with_result(&mut error, constructor);
log_if_error(error);
res
}
fn call_infallible<R, F>(&self, h: u64, callback: F) -> R::Value
where
F: UnwindSafe + FnOnce(&Self::Output) -> R,
R: IntoFfi,
{
let mut error = ExternError::success();
let res = self.call_with_output(&mut error, h, callback);
debug_assert!(error.get_code().is_success());
res
}
fn call_infallible_mut<R, F>(&self, h: u64, callback: F) -> R::Value
where
F: UnwindSafe + FnOnce(&mut Self::Output) -> R,
R: IntoFfi,
{
let mut error = ExternError::success();
let res = self.call_with_output_mut(&mut error, h, callback);
debug_assert!(error.get_code().is_success());
res
}
fn call_with_log<R, F>(&self, h: u64, callback: F) -> R::Value
where
F: UnwindSafe + FnOnce(&Self::Output) -> Result<R, glean_core::Error>,
R: IntoFfi,
{
let mut error = ExternError::success();
let res = self.call_with_result(&mut error, h, callback);
log_if_error(error);
res
}
}
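// Editor's note: illustrative sketch only, not part of the upstream glean-ffi
// sources. The extension methods are exercised here on a standalone map; the
// metric modules use the same pattern on their global, lazily created maps.
#[cfg(test)]
#[test]
fn handle_map_extension_example() {
    let map: ConcurrentHandleMap<String> = ConcurrentHandleMap::new();

    // A failing constructor is logged and yields the invalid handle `0`.
    let bad = map.insert_with_log(|| Err(glean_core::Error::not_initialized()));
    assert_eq!(0, bad);

    // A successful constructor yields a usable handle.
    let handle = map.insert_with_log(|| Ok(String::from("glean")));
    assert_ne!(0, handle);

    // `call_infallible` converts the callback's return value for FFI use.
    let len = map.call_infallible(handle, |s| s.len() as i32);
    assert_eq!(5, len);
}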

third_party/rust/glean-ffi/src/jwe.rs vendored Normal file
@ -0,0 +1,83 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::os::raw::c_char;
use ffi_support::FfiStr;
use crate::ffi_string_ext::FallibleToString;
use crate::{define_metric, handlemap_ext::HandleMapExtension, with_glean_value, Lifetime};
define_metric!(JweMetric => JWE_METRICS {
new -> glean_new_jwe_metric(),
test_get_num_recorded_errors -> glean_jwe_test_get_num_recorded_errors,
destroy -> glean_destroy_jwe_metric,
});
#[no_mangle]
pub extern "C" fn glean_jwe_set_with_compact_representation(metric_id: u64, value: FfiStr) {
with_glean_value(|glean| {
JWE_METRICS.call_with_log(metric_id, |metric| {
let value = value.to_string_fallible()?;
metric.set_with_compact_representation(glean, value);
Ok(())
})
})
}
#[no_mangle]
pub extern "C" fn glean_jwe_set(
metric_id: u64,
header: FfiStr,
key: FfiStr,
init_vector: FfiStr,
cipher_text: FfiStr,
auth_tag: FfiStr,
) {
with_glean_value(|glean| {
JWE_METRICS.call_with_log(metric_id, |metric| {
let header = header.to_string_fallible()?;
let key = key.to_string_fallible()?;
let init_vector = init_vector.to_string_fallible()?;
let cipher_text = cipher_text.to_string_fallible()?;
let auth_tag = auth_tag.to_string_fallible()?;
metric.set(glean, header, key, init_vector, cipher_text, auth_tag);
Ok(())
})
})
}
#[no_mangle]
pub extern "C" fn glean_jwe_test_has_value(metric_id: u64, storage_name: FfiStr) -> u8 {
with_glean_value(|glean| {
JWE_METRICS.call_infallible(metric_id, |metric| {
metric
.test_get_value(glean, storage_name.as_str())
.is_some()
})
})
}
#[no_mangle]
pub extern "C" fn glean_jwe_test_get_value(metric_id: u64, storage_name: FfiStr) -> *mut c_char {
with_glean_value(|glean| {
JWE_METRICS.call_infallible(metric_id, |metric| {
metric.test_get_value(glean, storage_name.as_str()).unwrap()
})
})
}
#[no_mangle]
pub extern "C" fn glean_jwe_test_get_value_as_json_string(
metric_id: u64,
storage_name: FfiStr,
) -> *mut c_char {
with_glean_value(|glean| {
JWE_METRICS.call_infallible(metric_id, |metric| {
metric
.test_get_value_as_json_string(glean, storage_name.as_str())
.unwrap()
})
})
}
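// Editor's note: illustrative comment only, not part of the upstream glean-ffi
// sources. The compact representation is simply the five dot-separated JWE
// parts, so the two setters record the same data:
//
//     header.key.init_vector.cipher_text.auth_tag
//
// A caller holding the individual parts could build it with e.g.
// `format!("{}.{}.{}.{}.{}", header, key, init_vector, cipher_text, auth_tag)`
// and pass the result to `glean_jwe_set_with_compact_representation`, or pass
// the parts separately to `glean_jwe_set`.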

third_party/rust/glean-ffi/src/labeled.rs vendored Normal file
@ -0,0 +1,138 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::convert::TryFrom;
use glean_core::{metrics::*, CommonMetricData, Lifetime};
use crate::boolean::BOOLEAN_METRICS;
use crate::counter::COUNTER_METRICS;
use crate::string::STRING_METRICS;
use crate::*;
/// Generate FFI functions for labeled metrics.
///
/// This can be used to reduce the amount of duplicated boilerplate around calling
/// `LabeledMetric::new` and `LabeledMetric::get`.
/// The constructor function takes the general common metadata.
///
/// If any additional data needs to be passed in, this macro cannot be used.
///
/// Arguments:
///
/// * `metric` - The metric type, e.g. `CounterMetric`.
/// * `global` - The name of the newly constructed global to hold instances of the labeled metric.
/// * `metric_global` - The name of the map to hold instances of the underlying metric type.
/// * `new_name` - Function name to create a new labeled metric of this type.
/// * `destroy_name` - Function name to destroy the labeled metric.
/// * `get_name` - Function name to get a new instance of the underlying metric.
/// * `test_get_num_recorded_errors` - Function name to get the number of errors recorded for this metric in tests.
macro_rules! impl_labeled_metric {
($metric:ty, $global:ident, $metric_global:ident, $new_name:ident, $destroy_name:ident, $get_name:ident, $test_get_num_recorded_errors:ident) => {
static $global: once_cell::sync::Lazy<ConcurrentHandleMap<LabeledMetric<$metric>>> =
once_cell::sync::Lazy::new(ConcurrentHandleMap::new);
$crate::define_infallible_handle_map_deleter!($global, $destroy_name);
/// Create a new labeled metric.
#[no_mangle]
pub extern "C" fn $new_name(
category: FfiStr,
name: FfiStr,
send_in_pings: RawStringArray,
send_in_pings_len: i32,
lifetime: i32,
disabled: u8,
labels: RawStringArray,
label_count: i32,
) -> u64 {
$global.insert_with_log(|| {
let name = name.to_string_fallible()?;
let category = category.to_string_fallible()?;
let send_in_pings = from_raw_string_array(send_in_pings, send_in_pings_len)?;
let labels = from_raw_string_array(labels, label_count)?;
let labels = if labels.is_empty() {
None
} else {
Some(labels)
};
let lifetime = Lifetime::try_from(lifetime)?;
Ok(LabeledMetric::new(
<$metric>::new(CommonMetricData {
name,
category,
send_in_pings,
lifetime,
disabled: disabled != 0,
..Default::default()
}),
labels,
))
})
}
/// Create a new instance of the sub-metric of this labeled metric.
#[no_mangle]
pub extern "C" fn $get_name(handle: u64, label: FfiStr) -> u64 {
$global.call_infallible_mut(handle, |labeled| {
let metric = labeled.get(label.as_str());
$metric_global.insert_with_log(|| Ok(metric))
})
}
#[no_mangle]
pub extern "C" fn $test_get_num_recorded_errors(
metric_id: u64,
error_type: i32,
storage_name: FfiStr,
) -> i32 {
crate::with_glean_value(|glean| {
crate::HandleMapExtension::call_infallible(&*$global, metric_id, |metric| {
let error_type = std::convert::TryFrom::try_from(error_type).unwrap();
let storage_name =
crate::FallibleToString::to_string_fallible(&storage_name).unwrap();
glean_core::test_get_num_recorded_errors(
glean,
&metric.get_submetric().meta(),
error_type,
Some(&storage_name),
)
.unwrap_or(0)
})
})
}
};
}
// Create the required FFI functions for LabeledMetric<CounterMetric>
impl_labeled_metric!(
CounterMetric,
LABELED_COUNTER,
COUNTER_METRICS,
glean_new_labeled_counter_metric,
glean_destroy_labeled_counter_metric,
glean_labeled_counter_metric_get,
glean_labeled_counter_test_get_num_recorded_errors
);
// Create the required FFI functions for LabeledMetric<BooleanMetric>
impl_labeled_metric!(
BooleanMetric,
LABELED_BOOLEAN,
BOOLEAN_METRICS,
glean_new_labeled_boolean_metric,
glean_destroy_labeled_boolean_metric,
glean_labeled_boolean_metric_get,
glean_labeled_boolean_test_get_num_recorded_errors
);
// Create the required FFI functions for LabeledMetric<StringMetric>
impl_labeled_metric!(
StringMetric,
LABELED_STRING,
STRING_METRICS,
glean_new_labeled_string_metric,
glean_destroy_labeled_string_metric,
glean_labeled_string_metric_get,
glean_labeled_string_test_get_num_recorded_errors
);

third_party/rust/glean-ffi/src/lib.rs vendored Normal file
@ -0,0 +1,542 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
#![deny(broken_intra_doc_links)]
use std::convert::TryFrom;
use std::ffi::CStr;
use std::os::raw::c_char;
use std::panic::UnwindSafe;
use std::path::PathBuf;
use ffi_support::{define_string_destructor, ConcurrentHandleMap, FfiStr, IntoFfi};
#[cfg(all(not(target_os = "android"), not(target_os = "ios")))]
use once_cell::sync::OnceCell;
pub use glean_core::metrics::MemoryUnit;
pub use glean_core::metrics::TimeUnit;
pub use glean_core::upload::ffi_upload_result::*;
use glean_core::Glean;
pub use glean_core::Lifetime;
mod macros;
mod boolean;
pub mod byte_buffer;
mod counter;
mod custom_distribution;
mod datetime;
mod event;
mod ffi_string_ext;
mod from_raw;
mod handlemap_ext;
mod jwe;
mod labeled;
mod memory_distribution;
pub mod ping_type;
mod quantity;
mod string;
mod string_list;
mod timespan;
mod timing_distribution;
pub mod upload;
mod uuid;
#[cfg(all(not(target_os = "android"), not(target_os = "ios")))]
mod fd_logger;
#[cfg(unix)]
#[macro_use]
mod weak;
use ffi_string_ext::FallibleToString;
use from_raw::*;
use handlemap_ext::HandleMapExtension;
use ping_type::PING_TYPES;
use upload::FfiPingUploadTask;
/// Execute the callback with a reference to the Glean singleton, returning a `Result`.
///
/// The callback returns a `Result<T, E>` while:
///
/// - Catching panics, and logging them.
/// - Converting `T` to a C-compatible type using [`IntoFfi`].
/// - Logging `E` and returning a default value.
pub(crate) fn with_glean<R, F>(callback: F) -> R::Value
where
F: UnwindSafe + FnOnce(&Glean) -> Result<R, glean_core::Error>,
R: IntoFfi,
{
let mut error = ffi_support::ExternError::success();
let res =
ffi_support::abort_on_panic::call_with_result(
&mut error,
|| match glean_core::global_glean() {
Some(glean) => {
let glean = glean.lock().unwrap();
callback(&glean)
}
None => Err(glean_core::Error::not_initialized()),
},
);
handlemap_ext::log_if_error(error);
res
}
/// Execute the callback with a mutable reference to the Glean singleton, returning a `Result`.
///
/// The callback returns a `Result<T, E>` while:
///
/// - Catching panics, and logging them.
/// - Converting `T` to a C-compatible type using [`IntoFfi`].
/// - Logging `E` and returning a default value.
pub(crate) fn with_glean_mut<R, F>(callback: F) -> R::Value
where
F: UnwindSafe + FnOnce(&mut Glean) -> Result<R, glean_core::Error>,
R: IntoFfi,
{
let mut error = ffi_support::ExternError::success();
let res =
ffi_support::abort_on_panic::call_with_result(
&mut error,
|| match glean_core::global_glean() {
Some(glean) => {
let mut glean = glean.lock().unwrap();
callback(&mut glean)
}
None => Err(glean_core::Error::not_initialized()),
},
);
handlemap_ext::log_if_error(error);
res
}
/// Execute the callback with a reference to the Glean singleton, returning a value.
///
/// The callback returns a value while:
///
/// - Catching panics, and logging them.
/// - Converting the returned value to a C-compatible type using [`IntoFfi`].
pub(crate) fn with_glean_value<R, F>(callback: F) -> R::Value
where
F: UnwindSafe + FnOnce(&Glean) -> R,
R: IntoFfi,
{
with_glean(|glean| Ok(callback(glean)))
}
/// Execute the callback with a mutable reference to the Glean singleton, returning a value.
///
/// The callback returns a value while:
///
/// - Catching panics, and logging them.
/// - Converting the returned value to a C-compatible type using [`IntoFfi`].
pub(crate) fn with_glean_value_mut<R, F>(callback: F) -> R::Value
where
F: UnwindSafe + FnOnce(&mut Glean) -> R,
R: IntoFfi,
{
with_glean_mut(|glean| Ok(callback(glean)))
}
/// Initialize the logging system based on the target platform. This ensures
/// that logging is shown when executing the Glean SDK unit tests.
#[no_mangle]
pub extern "C" fn glean_enable_logging() {
#[cfg(target_os = "android")]
{
let _ = std::panic::catch_unwind(|| {
android_logger::init_once(
android_logger::Config::default()
.with_min_level(log::Level::Debug)
.with_tag("libglean_ffi"),
);
log::trace!("Android logging should be hooked up!")
});
}
// On iOS enable logging with a level filter.
#[cfg(target_os = "ios")]
{
// Debug logging in debug mode.
// (Note: `debug_assertions` is the next best thing to determine if this is a debug build)
#[cfg(debug_assertions)]
let level = log::LevelFilter::Debug;
#[cfg(not(debug_assertions))]
let level = log::LevelFilter::Info;
let logger = oslog::OsLogger::new("org.mozilla.glean").level_filter(level);
match logger.init() {
Ok(_) => log::trace!("os_log should be hooked up!"),
// Please note that this is only expected to fail during unit tests,
// where the logger might have already been initialized by a previous
// test. So it's fine to print with the "logger".
Err(_) => log::warn!("os_log was already initialized"),
};
}
// Make sure logging does something on non-Android, non-iOS platforms as well. Use
// the RUST_LOG environment variable to set the desired log level, e.g.
// setting RUST_LOG=debug sets the log level to debug.
#[cfg(all(not(target_os = "android"), not(target_os = "ios")))]
{
match env_logger::try_init() {
Ok(_) => log::trace!("stdout logging should be hooked up!"),
// Please note that this is only expected to fail during unit tests,
// where the logger might have already been initialized by a previous
// test. So it's fine to print with the "logger".
Err(_) => log::warn!("stdout logging was already initialized"),
};
}
}
#[cfg(all(not(target_os = "android"), not(target_os = "ios")))]
static FD_LOGGER: OnceCell<fd_logger::FdLogger> = OnceCell::new();
/// Initialize the logging system to send JSON messages to a file descriptor
/// (Unix) or file handle (Windows).
///
/// Not available on Android and iOS.
///
/// `fd` is a writable file descriptor (on Unix) or file handle (on Windows).
///
/// # Safety
/// Unsafe because the fd u64 passed in will be interpreted as either a file
/// descriptor (Unix) or file handle (Windows) without any checking.
#[cfg(all(not(target_os = "android"), not(target_os = "ios")))]
#[no_mangle]
pub unsafe extern "C" fn glean_enable_logging_to_fd(fd: u64) {
// Set up logging to a file descriptor/handle. For this usage, the
// language binding should setup a pipe and pass in the descriptor to
// the writing side of the pipe as the `fd` parameter. Log messages are
// written as JSON to the file descriptor.
if FD_LOGGER.set(fd_logger::FdLogger::new(fd)).is_ok() {
// Set the level so everything goes through to the language
// binding side where it will be filtered by the language
// binding's logging system.
if log::set_logger(FD_LOGGER.get().unwrap()).is_ok() {
log::set_max_level(log::LevelFilter::Debug);
}
}
}
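// Editor's note: illustrative, Unix-only sketch; not part of the upstream
// glean-ffi sources. A language binding would create a pipe, hand the write
// end to Glean and read newline-delimited JSON log records from the read end:
//
//     let mut fds = [0 as libc::c_int; 2];
//     if unsafe { libc::pipe(fds.as_mut_ptr()) } == 0 {
//         let (read_fd, write_fd) = (fds[0], fds[1]);
//         unsafe { glean_enable_logging_to_fd(write_fd as u64) };
//         // ...spawn a thread that reads and forwards log records from `read_fd`.
//     }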
/// Configuration over FFI.
///
/// **CAUTION**: This must match _exactly_ the definition on the Kotlin side.
/// If this side is changed, the Kotlin side needs to be changed, too.
#[repr(C)]
pub struct FfiConfiguration<'a> {
pub data_dir: FfiStr<'a>,
pub package_name: FfiStr<'a>,
pub language_binding_name: FfiStr<'a>,
pub upload_enabled: u8,
pub max_events: Option<&'a i32>,
pub delay_ping_lifetime_io: u8,
}
/// Convert the FFI-compatible configuration object into the proper Rust configuration object.
impl TryFrom<&FfiConfiguration<'_>> for glean_core::Configuration {
type Error = glean_core::Error;
fn try_from(cfg: &FfiConfiguration) -> Result<Self, Self::Error> {
let data_path = cfg.data_dir.to_string_fallible()?;
let data_path = PathBuf::from(data_path);
let application_id = cfg.package_name.to_string_fallible()?;
let language_binding_name = cfg.language_binding_name.to_string_fallible()?;
let upload_enabled = cfg.upload_enabled != 0;
let max_events = cfg.max_events.filter(|&&i| i >= 0).map(|m| *m as usize);
let delay_ping_lifetime_io = cfg.delay_ping_lifetime_io != 0;
let app_build = "unknown".to_string();
let use_core_mps = false;
Ok(Self {
upload_enabled,
data_path,
application_id,
language_binding_name,
max_events,
delay_ping_lifetime_io,
app_build,
use_core_mps,
})
}
}
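// Editor's note: illustrative sketch only, not part of the upstream glean-ffi
// sources. It builds an `FfiConfiguration` from Rust (assuming ffi_support's
// `FfiStr::from_cstr` constructor) to show the conversion above; in production
// the struct is filled in by the language binding on the other side of the FFI.
#[cfg(test)]
#[test]
fn ffi_configuration_conversion_example() {
    use std::ffi::CString;

    let data_dir = CString::new("/tmp/glean-example").unwrap();
    let package_name = CString::new("org.example.app").unwrap();
    let language_binding_name = CString::new("Rust").unwrap();
    let max_events: i32 = 500;

    let cfg = FfiConfiguration {
        data_dir: FfiStr::from_cstr(&data_dir),
        package_name: FfiStr::from_cstr(&package_name),
        language_binding_name: FfiStr::from_cstr(&language_binding_name),
        upload_enabled: 1,
        max_events: Some(&max_events),
        delay_ping_lifetime_io: 0,
    };

    let core_cfg = glean_core::Configuration::try_from(&cfg).unwrap();
    assert!(core_cfg.upload_enabled);
    assert_eq!(Some(500), core_cfg.max_events);
}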
/// # Safety
///
/// A valid and non-null configuration object is required for this function.
#[no_mangle]
pub unsafe extern "C" fn glean_initialize(cfg: *const FfiConfiguration) -> u8 {
assert!(!cfg.is_null());
handlemap_ext::handle_result(|| {
// We can create a reference to the FfiConfiguration struct:
// 1. We did a null check
// 2. We're not holding on to it beyond this function
// and we copy out all data when needed.
let glean_cfg = glean_core::Configuration::try_from(&*cfg)?;
let glean = Glean::new(glean_cfg)?;
glean_core::setup_glean(glean)?;
log::info!("Glean initialized");
Ok(true)
})
}
#[no_mangle]
pub extern "C" fn glean_on_ready_to_submit_pings() -> u8 {
with_glean_value(|glean| glean.on_ready_to_submit_pings())
}
#[no_mangle]
pub extern "C" fn glean_is_upload_enabled() -> u8 {
with_glean_value(|glean| glean.is_upload_enabled())
}
#[no_mangle]
pub extern "C" fn glean_set_upload_enabled(flag: u8) {
with_glean_value_mut(|glean| glean.set_upload_enabled(flag != 0));
// The return value of set_upload_enabled is an implementation detail
// that isn't exposed over FFI.
}
#[no_mangle]
pub extern "C" fn glean_submit_ping_by_name(ping_name: FfiStr, reason: FfiStr) -> u8 {
with_glean(|glean| {
Ok(glean.submit_ping_by_name(&ping_name.to_string_fallible()?, reason.as_opt_str()))
})
}
#[no_mangle]
pub extern "C" fn glean_ping_collect(ping_type_handle: u64, reason: FfiStr) -> *mut c_char {
with_glean_value(|glean| {
PING_TYPES.call_infallible(ping_type_handle, |ping_type| {
let ping_maker = glean_core::ping::PingMaker::new();
let data = ping_maker
.collect_string(glean, ping_type, reason.as_opt_str())
.unwrap_or_else(|| String::from(""));
log::info!("Ping({}): {}", ping_type.name.as_str(), data);
data
})
})
}
#[no_mangle]
pub extern "C" fn glean_set_experiment_active(
experiment_id: FfiStr,
branch: FfiStr,
extra_keys: RawStringArray,
extra_values: RawStringArray,
extra_len: i32,
) {
with_glean(|glean| {
let experiment_id = experiment_id.to_string_fallible()?;
let branch = branch.to_string_fallible()?;
let extra = from_raw_string_array_and_string_array(extra_keys, extra_values, extra_len)?;
glean.set_experiment_active(experiment_id, branch, extra);
Ok(())
})
}
#[no_mangle]
pub extern "C" fn glean_set_experiment_inactive(experiment_id: FfiStr) {
with_glean(|glean| {
let experiment_id = experiment_id.to_string_fallible()?;
glean.set_experiment_inactive(experiment_id);
Ok(())
})
}
#[no_mangle]
pub extern "C" fn glean_experiment_test_is_active(experiment_id: FfiStr) -> u8 {
with_glean(|glean| {
let experiment_id = experiment_id.to_string_fallible()?;
Ok(glean.test_is_experiment_active(experiment_id))
})
}
#[no_mangle]
pub extern "C" fn glean_experiment_test_get_data(experiment_id: FfiStr) -> *mut c_char {
with_glean(|glean| {
let experiment_id = experiment_id.to_string_fallible()?;
Ok(glean.test_get_experiment_data_as_json(experiment_id))
})
}
#[no_mangle]
pub extern "C" fn glean_clear_application_lifetime_metrics() {
with_glean_value(|glean| glean.clear_application_lifetime_metrics());
}
/// Try to unblock the RLB dispatcher to start processing queued tasks.
///
/// **Note**: glean-core does not have its own dispatcher at the moment.
/// This tries to detect the RLB and, if loaded, instructs the RLB dispatcher to flush.
/// This allows the usage of both the RLB and other language bindings (e.g. Kotlin/Swift)
/// within the same application.
#[no_mangle]
pub extern "C" fn glean_flush_rlb_dispatcher() {
#[cfg(unix)]
#[allow(non_upper_case_globals)]
{
weak!(fn rlb_flush_dispatcher() -> ());
if let Some(f) = rlb_flush_dispatcher.get() {
// SAFETY:
//
// We did a dynamic lookup for this symbol.
// This is only called if we found it.
// We don't pass any data and don't read any return value, thus no data we directly
// depend on will be corruptable.
unsafe {
f();
}
} else {
log::info!("No RLB symbol found. Not trying to flush the RLB dispatcher.");
}
}
}
#[no_mangle]
pub extern "C" fn glean_set_dirty_flag(flag: u8) {
with_glean_value_mut(|glean| glean.set_dirty_flag(flag != 0));
}
#[no_mangle]
pub extern "C" fn glean_is_dirty_flag_set() -> u8 {
with_glean_value(|glean| glean.is_dirty_flag_set())
}
#[no_mangle]
pub extern "C" fn glean_handle_client_active() {
with_glean_value_mut(|glean| glean.handle_client_active());
}
#[no_mangle]
pub extern "C" fn glean_handle_client_inactive() {
with_glean_value_mut(|glean| glean.handle_client_inactive());
}
#[no_mangle]
pub extern "C" fn glean_test_clear_all_stores() {
with_glean_value(|glean| glean.test_clear_all_stores())
}
#[no_mangle]
pub extern "C" fn glean_destroy_glean() {
with_glean_value_mut(|glean| glean.destroy_db())
}
#[no_mangle]
pub extern "C" fn glean_is_first_run() -> u8 {
with_glean_value(|glean| glean.is_first_run())
}
// Unfortunately, the way we use CFFI in Python ("out-of-line", "ABI mode") does not
// allow return values to be `union`s, so we need to use an output parameter instead of
// a return value to get the task. The output data will be consumed and freed by the
// `glean_process_ping_upload_response` below.
//
// Arguments:
//
// * `result`: the object the output task will be written to.
#[no_mangle]
pub extern "C" fn glean_get_upload_task(result: *mut FfiPingUploadTask) {
with_glean_value(|glean| {
let ffi_task = FfiPingUploadTask::from(glean.get_upload_task());
unsafe {
std::ptr::write(result, ffi_task);
}
});
}
/// Process and free a `FfiPingUploadTask`.
///
/// We need to pass the whole task instead of only the document id,
/// so that we can free the strings properly on Drop.
///
/// After this function returns, the `task` must not be used further by the caller.
///
/// # Safety
///
/// A valid and non-null upload task object is required for this function.
#[no_mangle]
pub unsafe extern "C" fn glean_process_ping_upload_response(
task: *mut FfiPingUploadTask,
status: u32,
) {
// Safety:
// * We null-check the passed task before dereferencing.
// * We replace data behind the pointer with another valid variant.
// * We gracefully handle invalid data in strings.
if task.is_null() {
return;
}
// Take out task and replace with valid value.
// This value should never be read again on the FFI side,
// but as it controls the memory, we put something valid in place, just in case.
let task = std::ptr::replace(task, FfiPingUploadTask::Done);
with_glean(|glean| {
if let FfiPingUploadTask::Upload { document_id, .. } = task {
assert!(!document_id.is_null());
let document_id_str = CStr::from_ptr(document_id)
.to_str()
.map_err(|_| glean_core::Error::utf8_error())?;
glean.process_ping_upload_response(document_id_str, status.into());
};
Ok(())
});
}
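// Editor's note: illustrative sketch only, not part of the upstream glean-ffi
// sources; it assumes Glean has already been initialized. Real language
// bindings drive this loop through C; the "uploader" here just pretends every
// request came back as HTTP 200.
#[allow(dead_code)]
fn example_upload_loop() {
    use std::mem::MaybeUninit;

    loop {
        let mut slot = MaybeUninit::<FfiPingUploadTask>::uninit();
        glean_get_upload_task(slot.as_mut_ptr());
        // Safety: with Glean initialized, `glean_get_upload_task` writes a valid variant.
        let mut task = unsafe { slot.assume_init() };

        if matches!(task, FfiPingUploadTask::Done) {
            break;
        }
        if matches!(task, FfiPingUploadTask::Upload { .. }) {
            // A real uploader would read `path`, `body` and `headers` here and
            // perform the HTTP request before reporting its result.
            let status = UPLOAD_RESULT_HTTP_STATUS | 200;
            // Safety: `task` is a valid, exclusively owned upload task.
            unsafe { glean_process_ping_upload_response(&mut task, status) };
        }
        // `FfiPingUploadTask::Wait` is simply retried on the next iteration
        // (a real binding would back off for the indicated time first).
    }
}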
/// # Safety
///
/// A valid and non-null configuration object is required for this function.
#[no_mangle]
pub unsafe extern "C" fn glean_initialize_for_subprocess(cfg: *const FfiConfiguration) -> u8 {
assert!(!cfg.is_null());
handlemap_ext::handle_result(|| {
// We can create a reference to the FfiConfiguration struct:
// 1. We did a null check
// 2. We're not holding on to it beyond this function
// and we copy out all data when needed.
let glean_cfg = glean_core::Configuration::try_from(&*cfg)?;
let glean = Glean::new_for_subprocess(&glean_cfg, true)?;
glean_core::setup_glean(glean)?;
log::info!("Glean initialized for subprocess");
Ok(true)
})
}
#[no_mangle]
pub extern "C" fn glean_set_debug_view_tag(tag: FfiStr) -> u8 {
with_glean_mut(|glean| {
let tag = tag.to_string_fallible()?;
Ok(glean.set_debug_view_tag(&tag))
})
}
#[no_mangle]
pub extern "C" fn glean_set_log_pings(value: u8) {
with_glean_mut(|glean| Ok(glean.set_log_pings(value != 0)));
}
#[no_mangle]
pub extern "C" fn glean_set_source_tags(raw_tags: RawStringArray, tags_count: i32) -> u8 {
with_glean_mut(|glean| {
let tags = from_raw_string_array(raw_tags, tags_count)?;
Ok(glean.set_source_tags(tags))
})
}
#[no_mangle]
pub extern "C" fn glean_get_timestamp_ms() -> u64 {
glean_core::get_timestamp_ms()
}
define_string_destructor!(glean_str_free);

third_party/rust/glean-ffi/src/macros.rs vendored Normal file
@ -0,0 +1,116 @@
#[macro_export]
macro_rules! define_infallible_handle_map_deleter {
($HANDLE_MAP_NAME:ident, $destructor_name:ident) => {
#[no_mangle]
pub extern "C" fn $destructor_name(v: u64) {
let mut error = ffi_support::ExternError::success();
let res = ffi_support::abort_on_panic::call_with_result(&mut error, || {
let map: &$crate::ConcurrentHandleMap<_> = &*$HANDLE_MAP_NAME;
map.delete_u64(v)
});
$crate::handlemap_ext::log_if_error(error);
res
}
};
}
/// Define the global handle map, constructor and destructor functions and any user-defined
/// functions for a new metric
///
/// This allows defining the most common functionality and simple operations for a metric type.
/// More complex operations should be written as plain functions directly.
///
/// # Arguments
///
/// * `$metric_type` - metric type to use from glean_core, e.g. `CounterMetric`.
/// * `$metric_map` - name to use for the global metric map; should be all uppercase, e.g. `COUNTER_METRICS`.
/// * `$new_fn(...)` - (optional) name of the constructor function, followed by all additional (non-common) arguments.
/// * `$test_get_num_recorded_errors` - (optional) name of the test_get_num_recorded_errors function
/// * `$destroy` - name of the destructor function.
///
/// Additional simple functions can be defined as a mapping `$op -> $op_fn`:
///
/// * `$op` - function on the metric type to call.
/// * `$op_fn` - FFI function name for the operation, followed by its arguments.
/// Arguments are converted into the target type using `TryFrom::try_from`.
#[macro_export]
macro_rules! define_metric {
($metric_type:ident => $metric_map:ident {
$(new -> $new_fn:ident($($new_argname:ident: $new_argtyp:ty),* $(,)*),)?
$(test_get_num_recorded_errors -> $test_get_num_recorded_errors_fn:ident,)?
destroy -> $destroy_fn:ident,
$(
$op:ident -> $op_fn:ident($($op_argname:ident: $op_argtyp:ty),* $(,)*)
),* $(,)*
}) => {
pub static $metric_map: once_cell::sync::Lazy<ffi_support::ConcurrentHandleMap<glean_core::metrics::$metric_type>> = once_cell::sync::Lazy::new(ffi_support::ConcurrentHandleMap::new);
$crate::define_infallible_handle_map_deleter!($metric_map, $destroy_fn);
$(
#[no_mangle]
pub extern "C" fn $new_fn(
category: ffi_support::FfiStr,
name: ffi_support::FfiStr,
send_in_pings: crate::RawStringArray,
send_in_pings_len: i32,
lifetime: Lifetime,
disabled: u8,
$($new_argname: $new_argtyp),*
) -> u64 {
$metric_map.insert_with_log(|| {
let name = crate::FallibleToString::to_string_fallible(&name)?;
let category = crate::FallibleToString::to_string_fallible(&category)?;
let send_in_pings = crate::from_raw_string_array(send_in_pings, send_in_pings_len)?;
let lifetime = std::convert::TryFrom::try_from(lifetime)?;
$(
let $new_argname = std::convert::TryFrom::try_from($new_argname)?;
)*
Ok(glean_core::metrics::$metric_type::new(glean_core::CommonMetricData {
name,
category,
send_in_pings,
lifetime,
disabled: disabled != 0,
..Default::default()
}, $($new_argname),*))
})
}
)?
$(
#[no_mangle]
pub extern "C" fn $test_get_num_recorded_errors_fn(
metric_id: u64,
error_type: i32,
storage_name: FfiStr
) -> i32 {
crate::HandleMapExtension::call_infallible(&*$metric_map, metric_id, |metric| {
crate::with_glean_value(|glean| {
let error_type = std::convert::TryFrom::try_from(error_type).unwrap();
let storage_name = crate::FallibleToString::to_string_fallible(&storage_name).unwrap();
glean_core::test_get_num_recorded_errors(
&glean,
glean_core::metrics::MetricType::meta(metric),
error_type,
Some(&storage_name)
).unwrap_or(0)
})
})
}
)?
$(
#[no_mangle]
pub extern "C" fn $op_fn( metric_id: u64, $($op_argname: $op_argtyp),*) {
crate::with_glean_value(|glean| {
crate::HandleMapExtension::call_infallible(&*$metric_map, metric_id, |metric| {
metric.$op(&glean, $($op_argname),*);
})
})
}
)*
}
}
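// Editor's note: illustrative only, not part of the upstream glean-ffi sources.
// A metric module wires itself up roughly like this (see e.g. `quantity.rs` for
// a real invocation); each `$op -> $op_fn(...)` line expands to a `#[no_mangle]`
// FFI function that forwards to the metric method:
//
//     define_metric!(CounterMetric => COUNTER_METRICS {
//         new -> glean_new_counter_metric(),
//         test_get_num_recorded_errors -> glean_counter_test_get_num_recorded_errors,
//         destroy -> glean_destroy_counter_metric,
//         add -> glean_counter_add(amount: i32),
//     });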

third_party/rust/glean-ffi/src/memory_distribution.rs vendored Normal file
@ -0,0 +1,66 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::os::raw::c_char;
use ffi_support::FfiStr;
use crate::{
define_metric, from_raw_int64_array, handlemap_ext::HandleMapExtension, with_glean_value,
Lifetime, MemoryUnit, RawInt64Array,
};
define_metric!(MemoryDistributionMetric => MEMORY_DISTRIBUTION_METRICS {
new -> glean_new_memory_distribution_metric(memory_unit: MemoryUnit),
test_get_num_recorded_errors -> glean_memory_distribution_test_get_num_recorded_errors,
destroy -> glean_destroy_memory_distribution_metric,
accumulate -> glean_memory_distribution_accumulate(sample: u64),
});
#[no_mangle]
pub extern "C" fn glean_memory_distribution_accumulate_samples(
metric_id: u64,
raw_samples: RawInt64Array,
num_samples: i32,
) {
with_glean_value(|glean| {
MEMORY_DISTRIBUTION_METRICS.call_infallible_mut(metric_id, |metric| {
// The Kotlin code is sending Long(s), which are 64 bits, as there's
// currently no stable UInt type. The positive part of [Int] would not
// be enough to represent the values coming in.
// Here Long(s) are handled as i64 and then cast to u32 in
// `accumulate_samples_signed`.
let samples = from_raw_int64_array(raw_samples, num_samples);
metric.accumulate_samples_signed(glean, samples);
})
})
}
#[no_mangle]
pub extern "C" fn glean_memory_distribution_test_has_value(
metric_id: u64,
storage_name: FfiStr,
) -> u8 {
with_glean_value(|glean| {
MEMORY_DISTRIBUTION_METRICS.call_infallible(metric_id, |metric| {
metric
.test_get_value(glean, storage_name.as_str())
.is_some()
})
})
}
#[no_mangle]
pub extern "C" fn glean_memory_distribution_test_get_value_as_json_string(
metric_id: u64,
storage_name: FfiStr,
) -> *mut c_char {
with_glean_value(|glean| {
MEMORY_DISTRIBUTION_METRICS.call_infallible(metric_id, |metric| {
metric
.test_get_value_as_json_string(glean, storage_name.as_str())
.unwrap()
})
})
}

third_party/rust/glean-ffi/src/ping_type.rs vendored Normal file
@ -0,0 +1,48 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use ffi_support::{ConcurrentHandleMap, FfiStr};
use once_cell::sync::Lazy;
use glean_core::metrics::PingType;
use crate::ffi_string_ext::FallibleToString;
use crate::handlemap_ext::HandleMapExtension;
use crate::{from_raw_string_array, with_glean_value, with_glean_value_mut, RawStringArray};
pub(crate) static PING_TYPES: Lazy<ConcurrentHandleMap<PingType>> =
Lazy::new(ConcurrentHandleMap::new);
crate::define_infallible_handle_map_deleter!(PING_TYPES, glean_destroy_ping_type);
#[no_mangle]
pub extern "C" fn glean_new_ping_type(
ping_name: FfiStr,
include_client_id: u8,
send_if_empty: u8,
reason_codes: RawStringArray,
reason_codes_len: i32,
) -> u64 {
PING_TYPES.insert_with_log(|| {
let ping_name = ping_name.to_string_fallible()?;
let reason_codes = from_raw_string_array(reason_codes, reason_codes_len)?;
Ok(PingType::new(
ping_name,
include_client_id != 0,
send_if_empty != 0,
reason_codes,
))
})
}
#[no_mangle]
pub extern "C" fn glean_test_has_ping_type(ping_name: FfiStr) -> u8 {
with_glean_value(|glean| glean.get_ping_by_name(ping_name.as_str()).is_some() as u8)
}
#[no_mangle]
pub extern "C" fn glean_register_ping_type(ping_type_handle: u64) {
PING_TYPES.call_infallible(ping_type_handle, |ping_type| {
with_glean_value_mut(|glean| glean.register_ping_type(ping_type))
})
}

third_party/rust/glean-ffi/src/quantity.rs vendored Normal file
@ -0,0 +1,35 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use ffi_support::FfiStr;
use crate::{define_metric, handlemap_ext::HandleMapExtension, with_glean_value, Lifetime};
define_metric!(QuantityMetric => QUANTITY_METRICS {
new -> glean_new_quantity_metric(),
test_get_num_recorded_errors -> glean_quantity_test_get_num_recorded_errors,
destroy -> glean_destroy_quantity_metric,
set -> glean_quantity_set(value: i64),
});
#[no_mangle]
pub extern "C" fn glean_quantity_test_has_value(metric_id: u64, storage_name: FfiStr) -> u8 {
with_glean_value(|glean| {
QUANTITY_METRICS.call_infallible(metric_id, |metric| {
metric
.test_get_value(glean, storage_name.as_str())
.is_some()
})
})
}
#[no_mangle]
pub extern "C" fn glean_quantity_test_get_value(metric_id: u64, storage_name: FfiStr) -> i64 {
with_glean_value(|glean| {
QUANTITY_METRICS.call_infallible(metric_id, |metric| {
metric.test_get_value(glean, storage_name.as_str()).unwrap()
})
})
}

third_party/rust/glean-ffi/src/string.rs vendored Normal file
@ -0,0 +1,49 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::os::raw::c_char;
use ffi_support::FfiStr;
use crate::{
define_metric, ffi_string_ext::FallibleToString, handlemap_ext::HandleMapExtension,
with_glean_value, Lifetime,
};
define_metric!(StringMetric => STRING_METRICS {
new -> glean_new_string_metric(),
test_get_num_recorded_errors -> glean_string_test_get_num_recorded_errors,
destroy -> glean_destroy_string_metric,
});
#[no_mangle]
pub extern "C" fn glean_string_set(metric_id: u64, value: FfiStr) {
with_glean_value(|glean| {
STRING_METRICS.call_with_log(metric_id, |metric| {
let value = value.to_string_fallible()?;
metric.set(glean, value);
Ok(())
})
})
}
#[no_mangle]
pub extern "C" fn glean_string_test_has_value(metric_id: u64, storage_name: FfiStr) -> u8 {
with_glean_value(|glean| {
STRING_METRICS.call_infallible(metric_id, |metric| {
metric
.test_get_value(glean, storage_name.as_str())
.is_some()
})
})
}
#[no_mangle]
pub extern "C" fn glean_string_test_get_value(metric_id: u64, storage_name: FfiStr) -> *mut c_char {
with_glean_value(|glean| {
STRING_METRICS.call_infallible(metric_id, |metric| {
metric.test_get_value(glean, storage_name.as_str()).unwrap()
})
})
}

third_party/rust/glean-ffi/src/string_list.rs vendored Normal file
@ -0,0 +1,65 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::os::raw::c_char;
use ffi_support::FfiStr;
use crate::{
define_metric, ffi_string_ext::FallibleToString, from_raw_string_array,
handlemap_ext::HandleMapExtension, with_glean_value, Lifetime, RawStringArray,
};
define_metric!(StringListMetric => STRING_LIST_METRICS {
new -> glean_new_string_list_metric(),
test_get_num_recorded_errors -> glean_string_list_test_get_num_recorded_errors,
destroy -> glean_destroy_string_list_metric,
});
#[no_mangle]
pub extern "C" fn glean_string_list_add(metric_id: u64, value: FfiStr) {
with_glean_value(|glean| {
STRING_LIST_METRICS.call_with_log(metric_id, |metric| {
let value = value.to_string_fallible()?;
metric.add(glean, value);
Ok(())
})
})
}
#[no_mangle]
pub extern "C" fn glean_string_list_set(metric_id: u64, values: RawStringArray, values_len: i32) {
with_glean_value(|glean| {
STRING_LIST_METRICS.call_with_log(metric_id, |metric| {
let values = from_raw_string_array(values, values_len)?;
metric.set(glean, values);
Ok(())
})
})
}
#[no_mangle]
pub extern "C" fn glean_string_list_test_has_value(metric_id: u64, storage_name: FfiStr) -> u8 {
with_glean_value(|glean| {
STRING_LIST_METRICS.call_infallible(metric_id, |metric| {
metric
.test_get_value(glean, storage_name.as_str())
.is_some()
})
})
}
#[no_mangle]
pub extern "C" fn glean_string_list_test_get_value_as_json_string(
metric_id: u64,
storage_name: FfiStr,
) -> *mut c_char {
with_glean_value(|glean| {
STRING_LIST_METRICS.call_infallible(metric_id, |metric| {
metric
.test_get_value_as_json_string(glean, storage_name.as_str())
.unwrap()
})
})
}

third_party/rust/glean-ffi/src/timespan.rs vendored Normal file
@ -0,0 +1,70 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::time::Duration;
use ffi_support::FfiStr;
use crate::{define_metric, handlemap_ext::HandleMapExtension, with_glean_value, Lifetime};
define_metric!(TimespanMetric => TIMESPAN_METRICS {
new -> glean_new_timespan_metric(time_unit: i32),
test_get_num_recorded_errors -> glean_timespan_test_get_num_recorded_errors,
destroy -> glean_destroy_timespan_metric,
});
#[no_mangle]
pub extern "C" fn glean_timespan_set_start(metric_id: u64, start_time: u64) {
with_glean_value(|glean| {
TIMESPAN_METRICS.call_infallible_mut(metric_id, |metric| {
metric.set_start(glean, start_time);
})
})
}
#[no_mangle]
pub extern "C" fn glean_timespan_set_stop(metric_id: u64, stop_time: u64) {
with_glean_value(|glean| {
TIMESPAN_METRICS.call_infallible_mut(metric_id, |metric| {
metric.set_stop(glean, stop_time);
})
})
}
#[no_mangle]
pub extern "C" fn glean_timespan_cancel(metric_id: u64) {
TIMESPAN_METRICS.call_infallible_mut(metric_id, |metric| {
metric.cancel();
})
}
#[no_mangle]
pub extern "C" fn glean_timespan_set_raw_nanos(metric_id: u64, elapsed_nanos: u64) {
let elapsed = Duration::from_nanos(elapsed_nanos);
with_glean_value(|glean| {
TIMESPAN_METRICS.call_infallible(metric_id, |metric| {
metric.set_raw(glean, elapsed);
})
})
}
#[no_mangle]
pub extern "C" fn glean_timespan_test_has_value(metric_id: u64, storage_name: FfiStr) -> u8 {
with_glean_value(|glean| {
TIMESPAN_METRICS.call_infallible(metric_id, |metric| {
metric
.test_get_value(glean, storage_name.as_str())
.is_some()
})
})
}
#[no_mangle]
pub extern "C" fn glean_timespan_test_get_value(metric_id: u64, storage_name: FfiStr) -> u64 {
with_glean_value(|glean| {
TIMESPAN_METRICS.call_infallible(metric_id, |metric| {
metric.test_get_value(glean, storage_name.as_str()).unwrap()
})
})
}

third_party/rust/glean-ffi/src/timing_distribution.rs vendored Normal file
@ -0,0 +1,92 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::os::raw::c_char;
use ffi_support::FfiStr;
use crate::{
define_metric, from_raw_int64_array, handlemap_ext::HandleMapExtension, with_glean_value,
Lifetime, RawInt64Array, TimeUnit,
};
use glean_core::metrics::TimerId;
define_metric!(TimingDistributionMetric => TIMING_DISTRIBUTION_METRICS {
new -> glean_new_timing_distribution_metric(time_unit: TimeUnit),
test_get_num_recorded_errors -> glean_timing_distribution_test_get_num_recorded_errors,
destroy -> glean_destroy_timing_distribution_metric,
});
#[no_mangle]
pub extern "C" fn glean_timing_distribution_set_start(metric_id: u64, start_time: u64) -> TimerId {
TIMING_DISTRIBUTION_METRICS
.call_infallible_mut(metric_id, |metric| metric.set_start(start_time))
}
#[no_mangle]
pub extern "C" fn glean_timing_distribution_set_stop_and_accumulate(
metric_id: u64,
timer_id: TimerId,
stop_time: u64,
) {
with_glean_value(|glean| {
TIMING_DISTRIBUTION_METRICS.call_infallible_mut(metric_id, |metric| {
metric.set_stop_and_accumulate(glean, timer_id, stop_time);
})
})
}
#[no_mangle]
pub extern "C" fn glean_timing_distribution_cancel(metric_id: u64, timer_id: TimerId) {
TIMING_DISTRIBUTION_METRICS.call_infallible_mut(metric_id, |metric| {
metric.cancel(timer_id);
})
}
#[no_mangle]
pub extern "C" fn glean_timing_distribution_accumulate_samples(
metric_id: u64,
raw_samples: RawInt64Array,
num_samples: i32,
) {
with_glean_value(|glean| {
TIMING_DISTRIBUTION_METRICS.call_infallible_mut(metric_id, |metric| {
// The Kotlin code is sending Long(s), which are 64 bits, as there's
// currently no stable UInt type. The positive part of [Int] would not
// be enough to represent the values coming in.
// Here Long(s) are handled as i64 and then cast to u32 in
// `accumulate_samples_signed`.
let samples = from_raw_int64_array(raw_samples, num_samples);
metric.accumulate_samples_signed(glean, samples);
})
})
}
#[no_mangle]
pub extern "C" fn glean_timing_distribution_test_has_value(
metric_id: u64,
storage_name: FfiStr,
) -> u8 {
with_glean_value(|glean| {
TIMING_DISTRIBUTION_METRICS.call_infallible(metric_id, |metric| {
metric
.test_get_value(glean, storage_name.as_str())
.is_some()
})
})
}
#[no_mangle]
pub extern "C" fn glean_timing_distribution_test_get_value_as_json_string(
metric_id: u64,
storage_name: FfiStr,
) -> *mut c_char {
with_glean_value(|glean| {
TIMING_DISTRIBUTION_METRICS.call_infallible(metric_id, |metric| {
metric
.test_get_value_as_json_string(glean, storage_name.as_str())
.unwrap()
})
})
}

third_party/rust/glean-ffi/src/upload.rs vendored Normal file
@ -0,0 +1,193 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
//! FFI compatible types for the upload mechanism.
//!
//! These are used in the `glean_get_upload_task` and `glean_process_ping_upload_response`
//! functions.
use std::ffi::CString;
use std::os::raw::c_char;
use ffi_support::IntoFfi;
use crate::{byte_buffer::ByteBuffer, glean_str_free};
use glean_core::upload::PingUploadTask;
/// Result values of attempted ping uploads encoded for FFI use.
///
/// These are exposed as C `define`s, e.g.:
///
/// ```c
/// #define UPLOAD_RESULT_RECOVERABLE 1
/// #define UPLOAD_RESULT_UNRECOVERABLE 2
/// #define UPLOAD_RESULT_HTTP_STATUS 0x8000
/// ```
///
/// The language binding needs to replicate these constants exactly.
///
/// The `HTTP_STATUS` result can carry additional data (the HTTP response code).
/// This is encoded in the lower bits.
///
/// The FFI layer can convert from a 32-bit integer (`u32`) representing the upload result (and
/// associated HTTP response code) into the Glean-compatible `UploadResult` type.
///
/// These are defined in `glean-core/src/upload/result.rs`,
/// but for cbindgen to also export them in header files we need to define them here as constants.
///
/// Inline tests ensure they match across crates.
#[allow(dead_code)]
pub mod upload_result {
/// A recoverable error.
pub const UPLOAD_RESULT_RECOVERABLE: u32 = 0x1;
/// An unrecoverable error.
pub const UPLOAD_RESULT_UNRECOVERABLE: u32 = 0x2;
/// An HTTP response code.
///
/// The actual response code is encoded in the lower bits.
pub const UPLOAD_RESULT_HTTP_STATUS: u32 = 0x8000;
}
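// Editor's note: illustrative test only, not part of the upstream glean-ffi
// sources. An HTTP response is reported by OR-ing the status code into the
// lower bits of `UPLOAD_RESULT_HTTP_STATUS`, e.g. a 404 becomes 0x8194.
#[cfg(test)]
#[test]
fn http_status_encoding_example() {
    let encoded = upload_result::UPLOAD_RESULT_HTTP_STATUS | 404;
    assert_eq!(0x8194, encoded);
}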
/// A FFI-compatible representation for the PingUploadTask.
///
/// This is exposed as a C-compatible tagged union, like this:
///
/// ```c
/// enum FfiPingUploadTask_Tag {
/// FfiPingUploadTask_Upload,
/// FfiPingUploadTask_Wait,
/// FfiPingUploadTask_Done,
/// };
/// typedef uint8_t FfiPingUploadTask_Tag;
///
/// typedef struct {
/// FfiPingUploadTask_Tag tag;
/// char *document_id;
/// char *path;
/// char *body;
/// char *headers;
/// } FfiPingUploadTask_Upload_Body;
///
/// typedef union {
/// FfiPingUploadTask_Tag tag;
/// FfiPingUploadTask_Upload_Body upload;
/// } FfiPingUploadTask;
///
/// ```
///
/// It is therefore always valid to read the `tag` field of the returned union (always the first
/// field in memory).
///
/// Language bindings should turn this into proper language types (e.g. enums/structs) and
/// copy out data.
///
/// String fields are encoded into null-terminated UTF-8 C strings.
///
/// * The language binding should copy out the data and turn these into their equivalent string type.
/// * The language binding should _not_ free these fields individually.
/// Instead `glean_process_ping_upload_response` will receive the whole enum, taking care of
/// freeing the memory.
///
///
/// The order of variants should be the same as in `glean-core/src/upload/mod.rs`
/// and `glean-core/android/src/main/java/mozilla/telemetry/glean/net/Upload.kt`.
///
/// cbindgen:prefix-with-name
#[repr(u8)]
pub enum FfiPingUploadTask {
Upload {
document_id: *mut c_char,
path: *mut c_char,
body: ByteBuffer,
headers: *mut c_char,
},
Wait(u64),
Done,
}
impl From<PingUploadTask> for FfiPingUploadTask {
fn from(task: PingUploadTask) -> Self {
match task {
PingUploadTask::Upload(request) => {
// Safe unwraps:
// 1. CString::new(..) should not fail as we are the ones that created the strings being transformed;
// 2. serde_json::to_string(&request.headers) should not fail as request.headers is a HashMap of Strings.
let document_id = CString::new(request.document_id.to_owned()).unwrap();
let path = CString::new(request.path.to_owned()).unwrap();
let headers =
CString::new(serde_json::to_string(&request.headers).unwrap()).unwrap();
FfiPingUploadTask::Upload {
document_id: document_id.into_raw(),
path: path.into_raw(),
body: ByteBuffer::from_vec(request.body),
headers: headers.into_raw(),
}
}
PingUploadTask::Wait(time) => FfiPingUploadTask::Wait(time),
PingUploadTask::Done => FfiPingUploadTask::Done,
}
}
}
impl Drop for FfiPingUploadTask {
fn drop(&mut self) {
if let FfiPingUploadTask::Upload {
document_id,
path,
body,
headers,
} = self
{
// We need to free the previously allocated strings before dropping.
unsafe {
glean_str_free(*document_id);
glean_str_free(*path);
glean_str_free(*headers);
}
// Unfortunately, we cannot directly call `body.destroy();` as
// we're behind a mutable reference, so we have to manually take the
// ownership and drop. Moreover, `ByteBuffer::new_with_size(0)`
// does not allocate, so we are not leaking memory.
let body = std::mem::replace(body, ByteBuffer::new_with_size(0));
body.destroy();
}
}
}
unsafe impl IntoFfi for FfiPingUploadTask {
type Value = FfiPingUploadTask;
#[inline]
fn ffi_default() -> FfiPingUploadTask {
FfiPingUploadTask::Done
}
#[inline]
fn into_ffi_value(self) -> FfiPingUploadTask {
self
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn constants_match_with_glean_core() {
assert_eq!(
upload_result::UPLOAD_RESULT_RECOVERABLE,
glean_core::upload::ffi_upload_result::UPLOAD_RESULT_RECOVERABLE
);
assert_eq!(
upload_result::UPLOAD_RESULT_UNRECOVERABLE,
glean_core::upload::ffi_upload_result::UPLOAD_RESULT_UNRECOVERABLE
);
assert_eq!(
upload_result::UPLOAD_RESULT_HTTP_STATUS,
glean_core::upload::ffi_upload_result::UPLOAD_RESULT_HTTP_STATUS
);
}
}

third_party/rust/glean-ffi/src/uuid.rs vendored Normal file
@ -0,0 +1,52 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::os::raw::c_char;
use ffi_support::FfiStr;
use crate::{
define_metric, ffi_string_ext::FallibleToString, handlemap_ext::HandleMapExtension,
with_glean_value, Lifetime,
};
define_metric!(UuidMetric => UUID_METRICS {
new -> glean_new_uuid_metric(),
test_get_num_recorded_errors -> glean_uuid_test_get_num_recorded_errors,
destroy -> glean_destroy_uuid_metric,
});
#[no_mangle]
pub extern "C" fn glean_uuid_set(metric_id: u64, value: FfiStr) {
with_glean_value(|glean| {
UUID_METRICS.call_with_log(metric_id, |metric| {
let value = value.to_string_fallible()?;
metric.set_from_str(glean, &value);
Ok(())
})
})
}
#[no_mangle]
pub extern "C" fn glean_uuid_test_has_value(metric_id: u64, storage_name: FfiStr) -> u8 {
with_glean_value(|glean| {
UUID_METRICS.call_infallible(metric_id, |metric| {
metric
.test_get_value(glean, storage_name.as_str())
.is_some()
})
})
}
#[no_mangle]
pub extern "C" fn glean_uuid_test_get_value(metric_id: u64, storage_name: FfiStr) -> *mut c_char {
with_glean_value(|glean| {
UUID_METRICS.call_infallible(metric_id, |metric| {
metric
.test_get_value(glean, storage_name.as_str())
.unwrap()
.to_string()
})
})
}

third_party/rust/glean-ffi/src/weak.rs vendored Normal file
@ -0,0 +1,61 @@
//! Based on `library/std/src/sys/unix/weak.rs` from the Rust libstd code base (Rust v1.49.0).
//!
//! Original License: MIT/Apache.
//! <https://github.com/rust-lang/rust/blob/master/LICENSE-MIT>
//! <https://github.com/rust-lang/rust/blob/master/LICENSE-APACHE>
//!
//! Support for "weak linkage" to symbols on Unix
//!
//! We use `dlsym` to get a symbol value at runtime.
//! It assumes that the symbol we're looking for is linked in to our application somehow.
//! In the case of glean-ffi and the RLB this holds if everything is built as one dynamic
//! library.
use std::ffi::CStr;
use std::marker;
use std::mem;
use std::sync::atomic::{AtomicUsize, Ordering};
macro_rules! weak {
(fn $name:ident($($t:ty),*) -> $ret:ty) => (
static $name: crate::weak::Weak<unsafe extern fn($($t),*) -> $ret> =
crate::weak::Weak::new(concat!(stringify!($name), '\0'));
)
}
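// Editor's note: illustrative only, not part of the upstream glean-ffi sources.
// The single in-tree user is `glean_flush_rlb_dispatcher` in `lib.rs`, which
// boils down to:
//
//     weak!(fn rlb_flush_dispatcher() -> ());
//     if let Some(f) = rlb_flush_dispatcher.get() {
//         // Safety: the symbol was found via `dlsym`, takes no arguments and
//         // returns nothing.
//         unsafe { f() };
//     }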
pub struct Weak<F> {
name: &'static str,
addr: AtomicUsize,
_marker: marker::PhantomData<F>,
}
impl<F> Weak<F> {
pub const fn new(name: &'static str) -> Weak<F> {
Weak {
name,
addr: AtomicUsize::new(1),
_marker: marker::PhantomData,
}
}
pub fn get(&self) -> Option<F> {
assert_eq!(mem::size_of::<F>(), mem::size_of::<usize>());
unsafe {
if self.addr.load(Ordering::SeqCst) == 1 {
self.addr.store(fetch(self.name), Ordering::SeqCst);
}
match self.addr.load(Ordering::SeqCst) {
0 => None,
addr => Some(mem::transmute_copy::<usize, F>(&addr)),
}
}
}
}
unsafe fn fetch(name: &str) -> usize {
let name = match CStr::from_bytes_with_nul(name.as_bytes()) {
Ok(cstr) => cstr,
Err(..) => return 0,
};
::libc::dlsym(::libc::RTLD_DEFAULT, name.as_ptr()) as usize
}

third_party/rust/glean/.cargo-checksum.json vendored
@ -1 +1 @@
{"files":{"Cargo.toml":"33c4a9d73a7bb7561d70522ecf4be9530d52eb6e021d07c6b09f4578f6878dad","LICENSE":"1f256ecad192880510e84ad60474eab7589218784b9a50bc7ceee34c2b91f1d5","README.md":"fd9e0ca6907917ea6bec5de05e15dd21d20fae1cb7f3250467bb20231a8e1065","src/common_test.rs":"bd7ab2f6384bea8971f97ba68b11c946899303891bc534898f7aabbf27f9008a","src/configuration.rs":"4acbedba16d45f6404ccedde86e8aa33eea8c1b9554210cb69c79ff2ec9040c9","src/core_metrics.rs":"0ecf9ec7de9032f45e6c0feaebf17c614f9be88c1a28043e397eaf7d3b18ba37","src/dispatcher/global.rs":"6dae43ceb6bb8f5c6f20311ce610e4173eb880b4314aafd5791be03240a72096","src/dispatcher/mod.rs":"3254ad8f33439c1c0159e3e5b63f028f9e1a4a6c069c6d73dd82bd9cfe3e6086","src/glean_metrics.rs":"151b6e5acc12436c33c75e2e0a18f9769179d3c7fdc60a22fa02afb76feaf00f","src/lib.rs":"25b9e3dd21ff49579d31f07b1ce858f14dbb61ded3aa5623e0370d4af9f22810","src/net/http_uploader.rs":"9e8c1837ca0d3f6ea165ec936ab054173c4fe95a958710176c33b4d4d1d98beb","src/net/mod.rs":"284bcf182156c52ea25fa33bcc48d80b4970ee3c187a4ea3a06602cc34c710bf","src/pings.rs":"2dfccd84848e1933aa4f6a7a707c58ec794c8f73ef2d93ea4d4df71d4e6abc31","src/private/boolean.rs":"eeadc0529e2c69a930479f208746799b064b27facab8306c1c10c650e83fb63c","src/private/counter.rs":"75ad96cd5f53d024230357223244c0a8f58f8b32a9a4d0dbc7cc6ecd74db13b5","src/private/custom_distribution.rs":"0de9cd030a555d93352a7fd251febf4de3a2ca4eeb7666abe5baa884d59168b8","src/private/datetime.rs":"f7e68491b267452fc7d0bb50a2e2f73337d2580435b0518e4cb375a5a30d3017","src/private/denominator.rs":"746c5dfd81fe4027061d799421e35c2cf47b14b98e18e15f2e0d21379604f3f0","src/private/event.rs":"d7d7473fa4de0bcc53179aedc1dac1b5affb3f59e8595260304c06de0a40019e","src/private/labeled.rs":"ab9a89aa964d23c4b909cb222996cbd3073afaa348acfd8dac527d84d1ee9695","src/private/memory_distribution.rs":"201ce833900fca33f2e4bdd65d9055927627c5e97c9df001351ca40e8e11efae","src/private/mod.rs":"413a41942a48de3d39e9346c2a0803a3ce184978173f8a79b13a116be4abaffe","src/private/numerator.rs":"4133f4a1f2a20931176ecaa7e85a96a4d639ba1b3737441a5713c18909892a42","src/private/ping.rs":"915fc42994e0929656daee5511946ac1f56fe0d4d704e97e13795771d9890180","src/private/quantity.rs":"0fa3c6fb00a4c4d659284a87a4cfbfc5153a73e65ed802f27d74c1bd7fc06273","src/private/rate.rs":"bb7f1a1c9aa2413eb4c606f04aa58199a18d9d12a97fb6548d410f939b01ed09","src/private/recorded_experiment_data.rs":"66b2601902a2dc2b7a283717c21ce754de94fcca30d12e0398195c8ad49c90af","src/private/string.rs":"cab1b0a3a5368a1650dc253bcb5a4622f0d016913bf323c7d74c4130ab22f886","src/private/string_list.rs":"2f4df2aefdf9130a9913cd06dbf91747953ac79648af1c1b924053af18944bac","src/private/timespan.rs":"4094a6020269324e84d52a921c580ce156ea45e5af70a46540a64d23dfb2002e","src/private/timing_distribution.rs":"e6f45e4d57b1dde12b047f7f402787d69f0702fede88af1f0691663aeddaa161","src/private/uuid.rs":"7b76b815f08ac70522c65785f765c59d397f54ee257d47f8290029b456dce0ed","src/system.rs":"98aae0e0c9bf53f92fce4ca3d6040439f540023b63aab022c8c26381f04a4185","src/test.rs":"0cbe4f51fa01b1ca04e4b726e8eb729c3504086bc6b0d644e2114a5a4473165a","tests/common/mod.rs":"4837df2e771929cc077e6fb9a9239645e8e0f7bc6c9f409b71c4d147edf334fc","tests/init_fails.rs":"1e832fe454962ddb1155d546bb71b6395aa9936f848ff0fbe88affaaab7dfae3","tests/never_init.rs":"1f33b8ce7ca3514b57b48cc16d98408974c85cf8aa7d13257ffc2ad878ebb295","tests/no_time_to_init.rs":"4d61e4196d8eef23f3bcb24b59bd0b0379c1f2cb50f03434a53996ab097bfb17","tests/overflowing_preinit.rs":"81ff97745789fd5f58f86364488c011b4503648f1366b12e26755edc54323150","tests
/schema.rs":"621caef0cc7f98c79740422835485fea2343ca105d0d9a7eec6ded9cfad6232c","tests/simple.rs":"2f58d3ff90005231f2febd21f66ee41d06302618408ea990b446510449c3444f"},"package":"c8e24825d3123194a212e3daf8ddac150713cec4f1e126dc3c12ba207f0b5d77"}
{"files":{"Cargo.toml":"0d844695d5803ea743d47cc0a58a62ae8fc3cd5133cd14320879ccaa57dbffaa","LICENSE":"1f256ecad192880510e84ad60474eab7589218784b9a50bc7ceee34c2b91f1d5","README.md":"fd9e0ca6907917ea6bec5de05e15dd21d20fae1cb7f3250467bb20231a8e1065","src/common_test.rs":"bd7ab2f6384bea8971f97ba68b11c946899303891bc534898f7aabbf27f9008a","src/configuration.rs":"4acbedba16d45f6404ccedde86e8aa33eea8c1b9554210cb69c79ff2ec9040c9","src/core_metrics.rs":"0ecf9ec7de9032f45e6c0feaebf17c614f9be88c1a28043e397eaf7d3b18ba37","src/dispatcher/global.rs":"6dae43ceb6bb8f5c6f20311ce610e4173eb880b4314aafd5791be03240a72096","src/dispatcher/mod.rs":"3254ad8f33439c1c0159e3e5b63f028f9e1a4a6c069c6d73dd82bd9cfe3e6086","src/glean_metrics.rs":"151b6e5acc12436c33c75e2e0a18f9769179d3c7fdc60a22fa02afb76feaf00f","src/lib.rs":"1e40847ef95c385057924fc8472dcfe643f1fb85c748ad25625c467434212d69","src/net/http_uploader.rs":"9e8c1837ca0d3f6ea165ec936ab054173c4fe95a958710176c33b4d4d1d98beb","src/net/mod.rs":"284bcf182156c52ea25fa33bcc48d80b4970ee3c187a4ea3a06602cc34c710bf","src/pings.rs":"2dfccd84848e1933aa4f6a7a707c58ec794c8f73ef2d93ea4d4df71d4e6abc31","src/private/boolean.rs":"eeadc0529e2c69a930479f208746799b064b27facab8306c1c10c650e83fb63c","src/private/counter.rs":"0bc8a2d0df72e47b7365ff80bfc16427a5da701fd0adadeedbcce13cebcd79ce","src/private/custom_distribution.rs":"6d1271fb91e9d51a8dcf5eb9d540b3757ebe9cc998b196943ed8c729f62afc67","src/private/datetime.rs":"cb8f26f74d318e2118d6ae1b15972557eb205d4d8b24795fb0d08fdea2bc3f56","src/private/denominator.rs":"95332737f3ac80346f4811440a2141cd427692819bd04d5d3ac7374299dc20b0","src/private/event.rs":"b674ceb85351b7989bd25ed4f5d98c5c9b31e2a03f13b054a8c0dbef54190e49","src/private/labeled.rs":"2cd90d132954ee3ada43ff1ad538072ba43eece7a53ed89811a2a7b43a4819f1","src/private/memory_distribution.rs":"8b78a0e33601081d76639445c8b4875a4fe7c3aded720bb43afdabe86e0fd6ee","src/private/mod.rs":"413a41942a48de3d39e9346c2a0803a3ce184978173f8a79b13a116be4abaffe","src/private/numerator.rs":"334ac2ad3d8dd7b9f02f1ca5391b683d50fbc8c6728a12882a68bb067604c800","src/private/ping.rs":"915fc42994e0929656daee5511946ac1f56fe0d4d704e97e13795771d9890180","src/private/quantity.rs":"528675cd388010b89e6ac23c9152701c78d32c2dcd0b5e9abf1a50a52ee818a5","src/private/rate.rs":"7ddfdb3d5f2d1887b378caa3769ade92ea0fbd193f6e760f5f383c8b3e9f3aff","src/private/recorded_experiment_data.rs":"66b2601902a2dc2b7a283717c21ce754de94fcca30d12e0398195c8ad49c90af","src/private/string.rs":"c85ded40b1409793ae5b78da692bc2e708f8d55defb76ec5f515096d32f206c9","src/private/string_list.rs":"472ad79fba4b9bcde0ff5b3e05fd8e0aaa3d1d2941fc181faf2ceb90f1d518bd","src/private/timespan.rs":"19ed08aa5103b685a3a0b9f06f2c60250d55f3c8f36337f8c7bdbb2dfdb92786","src/private/timing_distribution.rs":"ee7fa0c3d5427e371b5413373cb1f5841ac10df9b7ca08316ef724e7ad3591d9","src/private/uuid.rs":"2b69ddaf3978aaa31c625c0f3eb948c44369744334aacc6b5a2b217268d244a7","src/system.rs":"98aae0e0c9bf53f92fce4ca3d6040439f540023b63aab022c8c26381f04a4185","src/test.rs":"0cbe4f51fa01b1ca04e4b726e8eb729c3504086bc6b0d644e2114a5a4473165a","tests/common/mod.rs":"4837df2e771929cc077e6fb9a9239645e8e0f7bc6c9f409b71c4d147edf334fc","tests/init_fails.rs":"1e832fe454962ddb1155d546bb71b6395aa9936f848ff0fbe88affaaab7dfae3","tests/never_init.rs":"1f33b8ce7ca3514b57b48cc16d98408974c85cf8aa7d13257ffc2ad878ebb295","tests/no_time_to_init.rs":"4d61e4196d8eef23f3bcb24b59bd0b0379c1f2cb50f03434a53996ab097bfb17","tests/overflowing_preinit.rs":"81ff97745789fd5f58f86364488c011b4503648f1366b12e26755edc54323150","tests
/schema.rs":"621caef0cc7f98c79740422835485fea2343ca105d0d9a7eec6ded9cfad6232c","tests/simple.rs":"2f58d3ff90005231f2febd21f66ee41d06302618408ea990b446510449c3444f"},"package":"4467acdfed9d396d5c8c1f47c658c6781476cfbb2e73a3c985e40204a9f4d350"}

View File

@ -13,7 +13,7 @@
[package]
edition = "2018"
name = "glean"
version = "39.0.0"
version = "40.0.0"
authors = ["Jan-Erik Rediger <jrediger@mozilla.com>", "The Glean Team <glean-team@mozilla.com>"]
include = ["/README.md", "/LICENSE", "/src", "/tests", "/Cargo.toml"]
description = "Glean SDK Rust language bindings"
@ -29,7 +29,7 @@ features = ["serde"]
version = "0.5"
[dependencies.glean-core]
version = "39.0.0"
version = "40.0.0"
[dependencies.inherent]
version = "0.1.4"
@ -60,7 +60,7 @@ features = ["v4"]
[dependencies.whatsys]
version = "0.1.2"
[dev-dependencies.env_logger]
version = "0.7.1"
version = "0.8.0"
features = ["termcolor", "atty", "humantime"]
default-features = false
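Besides the version bump to 40.0.0 and the matching glean-core requirement, the only manifest change shown here is the dev-dependency bump of env_logger from 0.7.1 to 0.8.0, which affects only the crate's own tests. As a hedged illustration (a generic idiom, not code taken from the glean test suite, and it assumes the log crate is also available), a test would typically initialize it like this:

    #[cfg(test)]
    mod tests {
        #[test]
        fn logs_are_visible() {
            // `is_test(true)` routes records through the test harness's capture;
            // `try_init` avoids panicking if another test set up the logger first.
            let _ = env_logger::builder().is_test(true).try_init();
            log::info!("logger is set up");
        }
    }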

View File

@ -296,7 +296,7 @@ fn initialize_internal(
// The next times we start, we would have them around already.
let is_first_run = glean.is_first_run();
if is_first_run {
initialize_core_metrics(&glean, &state.client_info, state.channel.clone());
initialize_core_metrics(glean, &state.client_info, state.channel.clone());
}
// Deal with any pending events so we can start recording new ones
@ -333,7 +333,7 @@ fn initialize_internal(
// Any new value will be sent in newly generated pings after startup.
if !is_first_run {
glean.clear_application_lifetime_metrics();
initialize_core_metrics(&glean, &state.client_info, state.channel.clone());
initialize_core_metrics(glean, &state.client_info, state.channel.clone());
}
});
@ -342,7 +342,7 @@ fn initialize_internal(
Ok(task_count) if task_count > 0 => {
with_glean(|glean| {
glean_metrics::error::preinit_tasks_overflow
.add_sync(&glean, task_count as i32);
.add_sync(glean, task_count as i32);
});
}
Ok(_) => {}
@ -477,7 +477,7 @@ pub fn set_upload_enabled(enabled: bool) {
glean.start_metrics_ping_scheduler();
// If uploading is being re-enabled, we have to restore the
// application-lifetime metrics.
initialize_core_metrics(&glean, &state.client_info, state.channel.clone());
initialize_core_metrics(glean, &state.client_info, state.channel.clone());
}
if old_enabled && !enabled {
@ -564,7 +564,7 @@ pub(crate) fn submit_ping_by_name_sync(ping: &str, reason: Option<&str>) {
return false;
}
glean.submit_ping_by_name(&ping, reason.as_deref())
glean.submit_ping_by_name(ping, reason.as_deref())
});
if submitted_ping {
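The lib.rs hunks above are all the same small cleanup: each closure already receives a reference to the Glean object, so writing &glean passes a double reference of the kind clippy's needless_borrow lint flags, and the extra & is dropped. A minimal standalone sketch of the pattern follows; Glean, with_glean, and record_startup here are simplified stand-ins, not the real Glean API.

    struct Glean {
        startup_count: u32,
    }

    // Hands the callback a shared reference, mirroring the closures in the diff.
    fn with_glean<R>(f: impl FnOnce(&Glean) -> R) -> R {
        // In the real bindings the instance lives in a global; a local suffices here.
        let glean = Glean { startup_count: 1 };
        f(&glean)
    }

    fn record_startup(glean: &Glean) -> u32 {
        glean.startup_count
    }

    fn main() {
        let count = with_glean(|glean| {
            // `glean` is already `&Glean`; `record_startup(&glean)` would pass a
            // `&&Glean` and trigger needless_borrow, so the plain identifier is used.
            record_startup(glean)
        });
        assert_eq!(count, 1);
    }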

View File

@ -59,7 +59,7 @@ impl glean_core::traits::Counter for CounterMetric {
crate::block_on_dispatcher();
crate::with_glean_mut(|glean| {
glean_core::test_get_num_recorded_errors(&glean, self.0.meta(), error, ping_name.into())
glean_core::test_get_num_recorded_errors(glean, self.0.meta(), error, ping_name.into())
.unwrap_or(0)
})
}

View File

@ -70,7 +70,7 @@ impl glean_core::traits::CustomDistribution for CustomDistributionMetric {
crate::block_on_dispatcher();
crate::with_glean_mut(|glean| {
glean_core::test_get_num_recorded_errors(&glean, self.0.meta(), error, ping_name.into())
glean_core::test_get_num_recorded_errors(glean, self.0.meta(), error, ping_name.into())
.unwrap_or(0)
})
}

View File

@ -56,7 +56,7 @@ impl glean_core::traits::Datetime for DatetimeMetric {
crate::block_on_dispatcher();
crate::with_glean_mut(|glean| {
glean_core::test_get_num_recorded_errors(&glean, self.0.meta(), error, ping_name.into())
glean_core::test_get_num_recorded_errors(glean, self.0.meta(), error, ping_name.into())
.unwrap_or(0)
})
}

View File

@ -57,7 +57,7 @@ impl glean_core::traits::Counter for DenominatorMetric {
crate::block_on_dispatcher();
crate::with_glean_mut(|glean| {
glean_core::test_get_num_recorded_errors(&glean, self.0.meta(), error, ping_name.into())
glean_core::test_get_num_recorded_errors(glean, self.0.meta(), error, ping_name.into())
.unwrap_or(0)
})
}

View File

@ -87,7 +87,7 @@ impl<K: traits::ExtraKeys> traits::Event for EventMetric<K> {
crate::with_glean_mut(|glean| {
glean_core::test_get_num_recorded_errors(
&glean,
glean,
self.inner.meta(),
error,
ping_name.into(),

View File

@ -136,7 +136,7 @@ where
crate::with_glean_mut(|glean| {
glean_core::test_get_num_recorded_errors(
&glean,
glean,
self.0.get_submetric().meta(),
error,
ping_name.into(),

View File

@ -58,7 +58,7 @@ impl glean_core::traits::MemoryDistribution for MemoryDistributionMetric {
crate::block_on_dispatcher();
crate::with_glean_mut(|glean| {
glean_core::test_get_num_recorded_errors(&glean, self.0.meta(), error, ping_name.into())
glean_core::test_get_num_recorded_errors(glean, self.0.meta(), error, ping_name.into())
.unwrap_or(0)
})
}

View File

@ -57,7 +57,7 @@ impl glean_core::traits::Numerator for NumeratorMetric {
crate::block_on_dispatcher();
crate::with_glean_mut(|glean| {
glean_core::test_get_num_recorded_errors(&glean, self.0.meta(), error, ping_name.into())
glean_core::test_get_num_recorded_errors(glean, self.0.meta(), error, ping_name.into())
.unwrap_or(0)
})
}

View File

@ -54,7 +54,7 @@ impl glean_core::traits::Quantity for QuantityMetric {
crate::block_on_dispatcher();
crate::with_glean_mut(|glean| {
glean_core::test_get_num_recorded_errors(&glean, self.0.meta(), error, ping_name.into())
glean_core::test_get_num_recorded_errors(glean, self.0.meta(), error, ping_name.into())
.unwrap_or(0)
})
}

View File

@ -64,7 +64,7 @@ impl glean_core::traits::Rate for RateMetric {
crate::block_on_dispatcher();
crate::with_glean_mut(|glean| {
glean_core::test_get_num_recorded_errors(&glean, self.0.meta(), error, ping_name.into())
glean_core::test_get_num_recorded_errors(glean, self.0.meta(), error, ping_name.into())
.unwrap_or(0)
})
}
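The per-metric hunks above (counter, custom_distribution, datetime, denominator, event, labeled, memory_distribution, numerator, quantity, rate) repeat that same borrow cleanup inside their test_get_num_errors helpers. As an aside that is not part of this patch, the pattern can be kept out mechanically by denying the corresponding clippy lint at the crate root, as in this hypothetical snippet:

    // Hypothetical standalone example; the attribute is illustration only and
    // is not added anywhere by this patch.
    #![deny(clippy::needless_borrow)]

    fn takes_ref(s: &str) -> usize {
        s.len()
    }

    fn main() {
        let name = String::from("glean");
        // A `takes_ref(&&name[..])`-style double borrow would now be rejected;
        // the single, necessary borrow below is fine.
        println!("{}", takes_ref(&name));
    }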

Some files were not shown because too many files have changed in this diff.