Bug 1637647 - Update glean_core to v30.0.0 for ping upload redesign r=janerik

Differential Revision: https://phabricator.services.mozilla.com/D75164
Chris H-C 2020-05-14 07:47:51 +00:00
parent 2cd3bcb9c9
commit 2cba0004c0
37 changed files with 1027 additions and 482 deletions

Cargo.lock (generated)

@ -1917,14 +1917,13 @@ dependencies = [
[[package]]
name = "glean-core"
version = "25.1.0"
version = "30.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a893aa65acba615064c53e2ab091a54227a0253852b8e4a2c813c8338f6095a7"
checksum = "7fa358765c47adf5c9aa458cd553c7f4a41e631ff0d65ec6bbc826b35b243deb"
dependencies = [
"bincode",
"chrono",
"ffi-support",
"lazy_static",
"log",
"once_cell",
"regex",

File diff suppressed because one or more lines are too long


@ -45,20 +45,11 @@ version = "1.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de"
[[package]]
name = "c2-chacha"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "214238caa1bf3a496ec3392968969cab8549f96ff30652c9e56885329315f6bb"
dependencies = [
"ppv-lite86",
]
[[package]]
name = "cc"
version = "1.0.50"
version = "1.0.52"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd"
checksum = "c3d87b23d6a92cd03af510a5ade527033f6aa6fa92161e2d5863a907d4c5e31d"
[[package]]
name = "cfg-if"
@ -68,9 +59,9 @@ checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
[[package]]
name = "chrono"
version = "0.4.10"
version = "0.4.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "31850b4a4d6bae316f7a09e691c944c28299298837edc0a03f755618c23cbc01"
checksum = "80094f509cf8b5ae86a4966a39b3ff66cd7e2a3e594accec3743ff3fabeab5b2"
dependencies = [
"num-integer",
"num-traits",
@ -80,9 +71,9 @@ dependencies = [
[[package]]
name = "ctor"
version = "0.1.13"
version = "0.1.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "47c5e5ac752e18207b12e16b10631ae5f7f68f8805f335f9b817ead83d9ffce1"
checksum = "cf6b25ee9ac1995c54d7adb2eff8cfffb7260bc774fb63c601ec65467f43cd9d"
dependencies = [
"quote",
"syn",
@ -102,18 +93,18 @@ dependencies = [
[[package]]
name = "failure"
version = "0.1.6"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8273f13c977665c5db7eb2b99ae520952fe5ac831ae4cd09d80c4c7042b5ed9"
checksum = "d32e9bd16cc02eae7db7ef620b392808b89f6a5e16bb3497d159c6b92a0f4f86"
dependencies = [
"failure_derive",
]
[[package]]
name = "failure_derive"
version = "0.1.6"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0bc225b78e0391e4b8683440bf2e63c2deeeb2ce5189eab46e2b68c6d3725d08"
checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4"
dependencies = [
"proc-macro2",
"quote",
@ -144,7 +135,7 @@ dependencies = [
[[package]]
name = "glean-core"
version = "25.1.0"
version = "30.0.0"
dependencies = [
"bincode",
"chrono",
@ -152,7 +143,6 @@ dependencies = [
"env_logger",
"ffi-support",
"iso8601",
"lazy_static",
"log",
"once_cell",
"regex",
@ -160,14 +150,14 @@ dependencies = [
"serde",
"serde_json",
"tempfile",
"uuid 0.8.1",
"uuid",
]
[[package]]
name = "hermit-abi"
version = "0.1.8"
version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1010591b26bbfe835e9faeabeb11866061cc7dcebffd56ad7d0942d0e61aefd8"
checksum = "61565ff7aaace3525556587bd2dc31d4a07071957be715e63ce7b1eccf51a8f4"
dependencies = [
"libc",
]
@ -194,9 +184,9 @@ dependencies = [
[[package]]
name = "iso8601"
version = "0.3.0"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "43e86914a73535f3f541a765adea0a9fafcf53fa6adb73662c4988fd9233766f"
checksum = "cee08a007a59a8adfc96f69738ddf59e374888dfd84b49c4b916543067644d58"
dependencies = [
"nom",
]
@ -215,15 +205,15 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
version = "0.2.67"
version = "0.2.70"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eb147597cdf94ed43ab7a9038716637d2d1bf2bc571da995d0028dec06bd3018"
checksum = "3baa92041a6fec78c687fa0cc2b3fae8884f743d672cf551bed1d6dac6988d0f"
[[package]]
name = "lmdb-rkv"
version = "0.12.3"
version = "0.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "605061e5465304475be2041f19967a900175ea1b6d8f47fbab84a84fb8c48452"
checksum = "447a296f7aca299cfbb50f4e4f3d49451549af655fb7215d7f8c0c3d64bad42b"
dependencies = [
"bitflags",
"byteorder",
@ -233,9 +223,9 @@ dependencies = [
[[package]]
name = "lmdb-rkv-sys"
version = "0.9.6"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7982ba0460e939e26a52ee12c8075deab0ebd44ed21881f656841b70e021b7c8"
checksum = "b27470ac25167b3afdfb6af8fcd3bc1be67de50ffbdaf4073378cfded6ae24a5"
dependencies = [
"cc",
"libc",
@ -265,9 +255,9 @@ checksum = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400"
[[package]]
name = "nom"
version = "4.2.3"
version = "5.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2ad2a91a8e869eeb30b9cb3119ae87773a8f4ae617f41b1eb9c154b2905f7bd6"
checksum = "0b471253da97532da4b61552249c521e01e736071f71c1a4f7ebbfbf0a06aad6"
dependencies = [
"memchr",
"version_check",
@ -327,9 +317,9 @@ checksum = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b"
[[package]]
name = "proc-macro2"
version = "1.0.9"
version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c09721c6781493a2a492a96b5a5bf19b65917fe6728884e7c44dd0c60ca3435"
checksum = "8872cf6f48eee44265156c111456a700ab3483686b3f96df4cf5481c89157319"
dependencies = [
"unicode-xid",
]
@ -342,9 +332,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0"
[[package]]
name = "quote"
version = "1.0.2"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "053a8c8bcc71fcce321828dc897a98ab9760bef03a4fc36693c231e5b3216cfe"
checksum = "42934bc9c8ab0d3b273a16d8551c8f0fcff46be73276ca083ec2414c15c4ba5e"
dependencies = [
"proc-macro2",
]
@ -364,11 +354,11 @@ dependencies = [
[[package]]
name = "rand_chacha"
version = "0.2.1"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "03a2a90da8c7523f554344f921aa97283eadf6ac484a6d2a7d0212fa7f8d6853"
checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402"
dependencies = [
"c2-chacha",
"ppv-lite86",
"rand_core",
]
@ -398,18 +388,18 @@ checksum = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84"
[[package]]
name = "regex"
version = "1.3.4"
version = "1.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "322cf97724bea3ee221b78fe25ac9c46114ebb51747ad5babd51a2fc6a8235a8"
checksum = "a6020f034922e3194c711b82a627453881bc4682166cabb07134a10c26ba7692"
dependencies = [
"regex-syntax",
]
[[package]]
name = "regex-syntax"
version = "0.6.14"
version = "0.6.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b28dfe3fe9badec5dbf0a79a9cccad2cfc2ab5484bdb3e44cbd1ae8b3ba2be06"
checksum = "7fe5bd57d1d7414c6b5ed48563a2c855d995ff777729dcd91c369ec7fea395ae"
[[package]]
name = "remove_dir_all"
@ -422,9 +412,9 @@ dependencies = [
[[package]]
name = "rkv"
version = "0.10.2"
version = "0.10.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9aab7c645d32e977e186448b0a5c2c3139a91a7f630cfd8a8c314d1d145e78bf"
checksum = "30a3dbc1f4971372545ed4175f23ef206c81e5874cd574d153646e7ee78f6793"
dependencies = [
"arrayref",
"bincode",
@ -437,29 +427,29 @@ dependencies = [
"serde",
"serde_derive",
"url",
"uuid 0.7.4",
"uuid",
]
[[package]]
name = "ryu"
version = "1.0.2"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bfa8506c1de11c9c4e4c38863ccbe02a305c8188e85a05a784c9e11e1c3910c8"
checksum = "ed3d612bc64430efeb3f7ee6ef26d590dce0c43249217bddc62112540c7941e1"
[[package]]
name = "serde"
version = "1.0.104"
version = "1.0.110"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "414115f25f818d7dfccec8ee535d76949ae78584fc4f79a6f45a904bf8ab4449"
checksum = "99e7b308464d16b56eba9964e4972a3eee817760ab60d88c3f86e1fecb08204c"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.104"
version = "1.0.110"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "128f9e303a5a29922045a830221b8f78ec74a5f544944f3d5984f8ec3895ef64"
checksum = "818fbf6bfa9a42d3bfcaca148547aa00c7b915bec71d1757aa2d44ca68771984"
dependencies = [
"proc-macro2",
"quote",
@ -468,9 +458,9 @@ dependencies = [
[[package]]
name = "serde_json"
version = "1.0.48"
version = "1.0.53"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9371ade75d4c2d6cb154141b9752cf3781ec9c05e0e5cf35060e1e70ee7b9c25"
checksum = "993948e75b189211a9b31a7528f950c6adc21f9720b6438ff80a7fa2f864cea2"
dependencies = [
"itoa",
"ryu",
@ -479,15 +469,15 @@ dependencies = [
[[package]]
name = "smallvec"
version = "1.2.0"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c2fb2ec9bcd216a5b0d0ccf31ab17b5ed1d627960edff65bbe95d3ce221cefc"
checksum = "c7cb5678e1615754284ec264d9bb5b4c27d2018577fd90ac0ceb578591ed5ee4"
[[package]]
name = "syn"
version = "1.0.16"
version = "1.0.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "123bd9499cfb380418d509322d7a6d52e5315f064fe4b3ad18a53d6b92c07859"
checksum = "4696caa4048ac7ce2bcd2e484b3cef88c1004e41b8e945a277e2c25dc0b72060"
dependencies = [
"proc-macro2",
"quote",
@ -531,12 +521,11 @@ dependencies = [
[[package]]
name = "time"
version = "0.1.42"
version = "0.1.43"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "db8dcfca086c1143c9270ac42a2bbd8a7ee477b78ac8e45b19abfb0cbede4b6f"
checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438"
dependencies = [
"libc",
"redox_syscall",
"winapi",
]
@ -575,12 +564,6 @@ dependencies = [
"percent-encoding",
]
[[package]]
name = "uuid"
version = "0.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90dbc611eb48397705a6b0f6e917da23ae517e4d127123d2cf7674206627d32a"
[[package]]
name = "uuid"
version = "0.8.1"
@ -592,9 +575,9 @@ dependencies = [
[[package]]
name = "version_check"
version = "0.1.5"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "914b1a6776c4c929a602fafd8bc742e06365d4bcbe48c30f9cca5824f70dc9dd"
checksum = "078775d0255232fb988e6fccf26ddc9d1ac274299aaedcedce21c6f72cc533ce"
[[package]]
name = "wasi"
@ -620,9 +603,9 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-util"
version = "0.1.3"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4ccfbf554c6ad11084fb7517daca16cfdcaccbdadba4fc336f032a8b12c2ad80"
checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
dependencies = [
"winapi",
]


@ -13,7 +13,7 @@
[package]
edition = "2018"
name = "glean-core"
version = "25.1.0"
version = "30.0.0"
authors = ["Jan-Erik Rediger <jrediger@mozilla.com>", "The Glean Team <glean-team@mozilla.com>"]
include = ["README.md", "LICENSE", "src/**/*", "examples/**/*", "tests/**/*", "Cargo.toml"]
description = "A modern Telemetry library"
@ -31,9 +31,6 @@ features = ["serde"]
[dependencies.ffi-support]
version = "0.4.0"
[dependencies.lazy_static]
version = "1.4.0"
[dependencies.log]
version = "0.4.8"
@ -46,7 +43,7 @@ features = ["std"]
default-features = false
[dependencies.rkv]
version = "0.10.2"
version = "0.10.3"
[dependencies.serde]
version = "1.0.104"
@ -67,13 +64,10 @@ features = ["termcolor", "atty", "humantime"]
default-features = false
[dev-dependencies.iso8601]
version = "0.3"
version = "0.4"
[dev-dependencies.tempfile]
version = "3.1.0"
[features]
upload = []
[badges.circle-ci]
branch = "master"
repository = "mozilla/glean"


@ -132,9 +132,9 @@ impl Database {
let store = self.get_store(Lifetime::Ping);
let mut iter = unwrap_or!(store.iter_start(&reader), return);
while let Some(Ok((metric_name, value))) = iter.next() {
let metric_name = match str::from_utf8(metric_name) {
Ok(metric_name) => metric_name.to_string(),
while let Some(Ok((metric_id, value))) = iter.next() {
let metric_id = match str::from_utf8(metric_id) {
Ok(metric_id) => metric_id.to_string(),
_ => continue,
};
let metric: Metric = match value.expect("Value missing in iteration") {
@ -142,7 +142,7 @@ impl Database {
_ => continue,
};
data.insert(metric_name, metric);
data.insert(metric_id, metric);
}
}
}
@ -164,7 +164,7 @@ impl Database {
/// a given labeled metric. If not provided, the entire storage for the given lifetime
/// will be iterated over.
/// * `transaction_fn`: Called for each entry being iterated over. It is
/// passed two arguments: `(metric_name: &[u8], metric: &Metric)`.
/// passed two arguments: `(metric_id: &[u8], metric: &Metric)`.
///
/// ## Panics
///
@ -204,17 +204,17 @@ impl Database {
return
);
while let Some(Ok((metric_name, value))) = iter.next() {
if !metric_name.starts_with(iter_start.as_bytes()) {
while let Some(Ok((metric_id, value))) = iter.next() {
if !metric_id.starts_with(iter_start.as_bytes()) {
break;
}
let metric_name = &metric_name[len..];
let metric_id = &metric_id[len..];
let metric: Metric = match value.expect("Value missing in iteration") {
rkv::Value::Blob(blob) => unwrap_or!(bincode::deserialize(blob), continue),
_ => continue,
};
transaction_fn(metric_name, &metric);
transaction_fn(metric_id, &metric);
}
}
@ -436,12 +436,12 @@ impl Database {
let mut metrics = Vec::new();
{
let mut iter = store.iter_from(&writer, &storage_name)?;
while let Some(Ok((metric_name, _))) = iter.next() {
if let Ok(metric_name) = std::str::from_utf8(metric_name) {
if !metric_name.starts_with(&storage_name) {
while let Some(Ok((metric_id, _))) = iter.next() {
if let Ok(metric_id) = std::str::from_utf8(metric_id) {
if !metric_id.starts_with(&storage_name) {
break;
}
metrics.push(metric_name.to_owned());
metrics.push(metric_id.to_owned());
}
}
}
@ -465,7 +465,7 @@ impl Database {
///
/// * `lifetime` - the lifetime of the storage in which to look for the metric.
/// * `storage_name` - the name of the storage to store/fetch data from.
/// * `metric_key` - the metric key/name.
/// * `metric_id` - the metric category + name.
///
/// ## Return value
///
@ -481,9 +481,9 @@ impl Database {
&self,
lifetime: Lifetime,
storage_name: &str,
metric_name: &str,
metric_id: &str,
) -> Result<()> {
let final_key = Self::get_storage_key(storage_name, Some(metric_name));
let final_key = Self::get_storage_key(storage_name, Some(metric_id));
// Lifetime::Ping data is not persisted to disk if
// Glean has `delay_ping_lifetime_io` set to true
@ -622,9 +622,9 @@ mod test {
// Verify that the data is correctly recorded.
let mut found_metrics = 0;
let mut snapshotter = |metric_name: &[u8], metric: &Metric| {
let mut snapshotter = |metric_id: &[u8], metric: &Metric| {
found_metrics += 1;
let metric_id = String::from_utf8_lossy(metric_name).into_owned();
let metric_id = String::from_utf8_lossy(metric_id).into_owned();
assert_eq!(test_metric_id, metric_id);
match metric {
Metric::String(s) => assert_eq!(test_value, s),
@ -657,9 +657,9 @@ mod test {
// Verify that the data is correctly recorded.
let mut found_metrics = 0;
let mut snapshotter = |metric_name: &[u8], metric: &Metric| {
let mut snapshotter = |metric_id: &[u8], metric: &Metric| {
found_metrics += 1;
let metric_id = String::from_utf8_lossy(metric_name).into_owned();
let metric_id = String::from_utf8_lossy(metric_id).into_owned();
assert_eq!(test_metric_id, metric_id);
match metric {
Metric::String(s) => assert_eq!(test_value, s),
@ -695,9 +695,9 @@ mod test {
// Verify that the data is correctly recorded.
let mut found_metrics = 0;
let mut snapshotter = |metric_name: &[u8], metric: &Metric| {
let mut snapshotter = |metric_id: &[u8], metric: &Metric| {
found_metrics += 1;
let metric_id = String::from_utf8_lossy(metric_name).into_owned();
let metric_id = String::from_utf8_lossy(metric_id).into_owned();
assert_eq!(test_metric_id, metric_id);
match metric {
Metric::String(s) => assert_eq!(test_value, s),
@ -743,10 +743,10 @@ mod test {
// Take a snapshot for the data, all the lifetimes.
{
let mut snapshot: HashMap<String, String> = HashMap::new();
let mut snapshotter = |metric_name: &[u8], metric: &Metric| {
let metric_name = String::from_utf8_lossy(metric_name).into_owned();
let mut snapshotter = |metric_id: &[u8], metric: &Metric| {
let metric_id = String::from_utf8_lossy(metric_id).into_owned();
match metric {
Metric::String(s) => snapshot.insert(metric_name, s.to_string()),
Metric::String(s) => snapshot.insert(metric_id, s.to_string()),
_ => panic!("Unexpected data found"),
};
};
@ -767,10 +767,10 @@ mod test {
// Take a snapshot again and check that we're only clearing the Ping lifetime.
{
let mut snapshot: HashMap<String, String> = HashMap::new();
let mut snapshotter = |metric_name: &[u8], metric: &Metric| {
let metric_name = String::from_utf8_lossy(metric_name).into_owned();
let mut snapshotter = |metric_id: &[u8], metric: &Metric| {
let metric_id = String::from_utf8_lossy(metric_id).into_owned();
match metric {
Metric::String(s) => snapshot.insert(metric_name, s.to_string()),
Metric::String(s) => snapshot.insert(metric_id, s.to_string()),
_ => panic!("Unexpected data found"),
};
};
@ -823,9 +823,9 @@ mod test {
// Verify that "telemetry_test.single_metric_retain" is still around for all lifetimes.
for lifetime in lifetimes.iter() {
let mut found_metrics = 0;
let mut snapshotter = |metric_name: &[u8], metric: &Metric| {
let mut snapshotter = |metric_id: &[u8], metric: &Metric| {
found_metrics += 1;
let metric_id = String::from_utf8_lossy(metric_name).into_owned();
let metric_id = String::from_utf8_lossy(metric_id).into_owned();
assert_eq!(format!("{}_retain", metric_id_pattern), metric_id);
match metric {
Metric::String(s) => assert_eq!("retain", s),


@ -54,6 +54,9 @@ pub enum ErrorKind {
/// Unknown error
Utf8Error,
/// Glean not initialized
NotInitialized,
#[doc(hidden)]
__NonExhaustive,
}
@ -75,6 +78,13 @@ impl Error {
kind: ErrorKind::Utf8Error,
}
}
/// Indicates an error that no global Glean object is initialized
pub fn not_initialized() -> Error {
Error {
kind: ErrorKind::NotInitialized,
}
}
}
impl std::error::Error for Error {}
@ -92,7 +102,8 @@ impl Display for Error {
MemoryUnit(m) => write!(f, "MemoryUnit conversion from {} failed", m),
HistogramType(h) => write!(f, "HistogramType conversion from {} failed", h),
OsString(s) => write!(f, "OsString conversion from {:?} failed", s),
Utf8Error => write!(f, "Invalid UTF-8 byte sequence in string."),
Utf8Error => write!(f, "Invalid UTF-8 byte sequence in string"),
NotInitialized => write!(f, "Global Glean object missing"),
__NonExhaustive => write!(f, "Unknown error"),
}
}
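
The new `NotInitialized` variant pairs with the now-fallible `global_glean()` accessor introduced further down in this commit. A minimal sketch of how a caller might combine the two; `with_glean` is a hypothetical helper, not part of this diff:

```rust
use crate::{global_glean, Error, Glean, Result};

/// Hypothetical helper: run `f` against the global Glean instance, or fail
/// with the new NotInitialized error if `setup_glean` was never called.
fn with_glean<F, R>(f: F) -> Result<R>
where
    F: FnOnce(&Glean) -> R,
{
    let mutex = global_glean().ok_or_else(Error::not_initialized)?;
    let glean = mutex.lock().unwrap(); // only fails if a holder panicked
    Ok(f(&glean))
}
```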


@ -39,7 +39,7 @@ pub enum ErrorType {
}
impl ErrorType {
/// The error type's metric name
/// The error type's metric id
pub fn as_str(&self) -> &'static str {
match self {
ErrorType::InvalidValue => "invalid_value",
@ -101,7 +101,7 @@ fn get_error_metric_for_metric(meta: &CommonMetricData, error: ErrorType) -> Cou
/// * meta - The metric's meta data
/// * error - The error type to record
/// * message - The message to log. This message is not sent with the ping.
/// It does not need to include the metric name, as that is automatically prepended to the message.
/// It does not need to include the metric id, as that is automatically prepended to the message.
/// * num_errors - The number of errors of the same type to report.
pub fn record_error<O: Into<Option<i32>>>(
glean: &Glean,


@ -13,27 +13,41 @@ use std::path::{Path, PathBuf};
use std::sync::RwLock;
use serde::{Deserialize, Serialize};
use serde_json;
use serde_json::{json, Value as JsonValue};
use crate::CommonMetricData;
use crate::Glean;
use crate::Result;
/// Represents the data for a single event.
/// Represents the recorded data for a single event.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
pub struct RecordedEventData {
pub struct RecordedEvent {
/// The timestamp of when the event was recorded.
///
/// This allows ordering events from a single process run.
pub timestamp: u64,
/// The event's category.
///
/// This is defined by users in the metrics file.
pub category: String,
/// The event's name.
///
/// This is defined by users in the metrics file.
pub name: String,
/// A map of all extra data values.
///
/// The set of allowed extra keys is defined by users in the metrics file.
#[serde(skip_serializing_if = "Option::is_none")]
pub extra: Option<HashMap<String, String>>,
}
impl RecordedEventData {
impl RecordedEvent {
/// Serialize an event to JSON, adjusting its timestamp relative to a base timestamp
pub fn serialize_relative(&self, timestamp_offset: u64) -> JsonValue {
json!(&RecordedEventData {
fn serialize_relative(&self, timestamp_offset: u64) -> JsonValue {
json!(&RecordedEvent {
timestamp: self.timestamp - timestamp_offset,
category: self.category.clone(),
name: self.name.clone(),
@ -59,7 +73,7 @@ pub struct EventDatabase {
/// Path to directory of on-disk event files
pub path: PathBuf,
/// The in-memory list of events
event_stores: RwLock<HashMap<String, Vec<RecordedEventData>>>,
event_stores: RwLock<HashMap<String, Vec<RecordedEvent>>>,
/// A lock to be held when doing operations on the filesystem
file_lock: RwLock<()>,
}
@ -124,7 +138,7 @@ impl EventDatabase {
store_name,
file.lines()
.filter_map(|line| line.ok())
.filter_map(|line| serde_json::from_str::<RecordedEventData>(&line).ok())
.filter_map(|line| serde_json::from_str::<RecordedEvent>(&line).ok())
.collect(),
);
}
@ -140,7 +154,7 @@ impl EventDatabase {
let mut ping_sent = false;
for store_name in store_names {
if let Err(err) = glean.submit_ping_by_name(&store_name, None) {
if let Err(err) = glean.submit_ping_by_name(&store_name, Some("startup")) {
log::error!(
"Error flushing existing events to the '{}' ping: {}",
store_name,
@ -172,9 +186,9 @@ impl EventDatabase {
timestamp: u64,
extra: Option<HashMap<String, String>>,
) {
// Create RecordedEventData object, and its JSON form for serialization
// Create RecordedEvent object, and its JSON form for serialization
// on disk.
let event = RecordedEventData {
let event = RecordedEvent {
timestamp,
category: meta.category.to_string(),
name: meta.name.to_string(),
@ -199,7 +213,7 @@ impl EventDatabase {
// If any of the event stores reached maximum size, submit the pings
// containing those events immediately.
for store_name in stores_to_submit {
if let Err(err) = glean.submit_ping_by_name(store_name, None) {
if let Err(err) = glean.submit_ping_by_name(store_name, Some("max_capacity")) {
log::error!(
"Got more than {} events, but could not send {} ping: {}",
glean.get_max_events(),
@ -320,8 +334,8 @@ impl EventDatabase {
&'a self,
meta: &'a CommonMetricData,
store_name: &str,
) -> Option<Vec<RecordedEventData>> {
let value: Vec<RecordedEventData> = self
) -> Option<Vec<RecordedEvent>> {
let value: Vec<RecordedEvent> = self
.event_stores
.read()
.unwrap() // safe unwrap, only error case is poisoning
@ -367,7 +381,7 @@ mod test {
#[test]
fn stable_serialization() {
let event_empty = RecordedEventData {
let event_empty = RecordedEvent {
timestamp: 2,
category: "cat".to_string(),
name: "name".to_string(),
@ -376,7 +390,7 @@ mod test {
let mut data = HashMap::new();
data.insert("a key".to_string(), "a value".to_string());
let event_data = RecordedEventData {
let event_data = RecordedEvent {
timestamp: 2,
category: "cat".to_string(),
name: "name".to_string(),
@ -414,7 +428,7 @@ mod test {
}
"#;
let event_empty = RecordedEventData {
let event_empty = RecordedEvent {
timestamp: 2,
category: "cat".to_string(),
name: "name".to_string(),
@ -423,7 +437,7 @@ mod test {
let mut data = HashMap::new();
data.insert("a key".to_string(), "a value".to_string());
let event_data = RecordedEventData {
let event_data = RecordedEvent {
timestamp: 2,
category: "cat".to_string(),
name: "name".to_string(),


@ -9,6 +9,8 @@ use serde::{Deserialize, Serialize};
use super::{Bucketing, Histogram};
use crate::util::floating_point_context::FloatingPointContext;
/// Create the possible ranges in an exponential distribution from `min` to `max` with
/// `bucket_count` buckets.
///
@ -19,6 +21,9 @@ use super::{Bucketing, Histogram};
/// That means values in a bucket `i` are `bucket[i] <= value < bucket[i+1]`.
/// It will always contain an underflow bucket (`< 1`).
fn exponential_range(min: u64, max: u64, bucket_count: usize) -> Vec<u64> {
// Set the FPU control flag to the required state within this function
let _fpc = FloatingPointContext::new();
let log_max = (max as f64).ln();
let mut ranges = Vec::with_capacity(bucket_count);


@ -8,6 +8,8 @@ use serde::{Deserialize, Serialize};
use super::{Bucketing, Histogram};
use crate::util::floating_point_context::FloatingPointContext;
/// A functional bucketing algorithm.
///
/// Bucketing is performed by a function, rather than pre-computed buckets.
@ -23,8 +25,11 @@ pub struct Functional {
impl Functional {
/// Instantiate a new functional bucketing.
fn new(log_base: f64, buckets_per_magnitutde: f64) -> Functional {
let exponent = log_base.powf(1.0 / buckets_per_magnitutde);
fn new(log_base: f64, buckets_per_magnitude: f64) -> Functional {
// Set the FPU control flag to the required state within this function
let _fpc = FloatingPointContext::new();
let exponent = log_base.powf(1.0 / buckets_per_magnitude);
Functional { exponent }
}
@ -34,11 +39,17 @@ impl Functional {
/// mathematical concept, even though the internal representation is stored and
/// sent using the minimum value in each bucket.
fn sample_to_bucket_index(&self, sample: u64) -> u64 {
// Set the FPU control flag to the required state within this function
let _fpc = FloatingPointContext::new();
((sample + 1) as f64).log(self.exponent) as u64
}
/// Determines the minimum value of a bucket, given a bucket index.
fn bucket_index_to_bucket_minimum(&self, index: u64) -> u64 {
// Set the FPU control flag to the required state within this function
let _fpc = FloatingPointContext::new();
self.exponent.powf(index as f64) as u64
}
}
@ -60,12 +71,12 @@ impl Bucketing for Functional {
impl Histogram<Functional> {
/// Create a histogram with functional buckets.
pub fn functional(log_base: f64, buckets_per_magnitutde: f64) -> Histogram<Functional> {
pub fn functional(log_base: f64, buckets_per_magnitude: f64) -> Histogram<Functional> {
Histogram {
values: HashMap::new(),
count: 0,
sum: 0,
bucketing: Functional::new(log_base, buckets_per_magnitutde),
bucketing: Functional::new(log_base, buckets_per_magnitude),
}
}
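
To make the functional bucketing concrete, here is a small standalone sketch of the math above with `log_base = 2.0` and `buckets_per_magnitude = 8.0` (the defaults used by timing distributions elsewhere in this commit), worked through for a sample of 100:

```rust
fn main() {
    let log_base: f64 = 2.0;
    let buckets_per_magnitude: f64 = 8.0;
    // Each bucket minimum is a power of this exponent: 2^(1/8) ~= 1.0905.
    let exponent = log_base.powf(1.0 / buckets_per_magnitude);

    let sample: u64 = 100;
    // Bucket index the sample falls into: log_1.0905(101) ~= 53.27 -> 53.
    let index = ((sample + 1) as f64).log(exponent) as u64;
    // Minimum value of that bucket, which is what the payload reports.
    let minimum = exponent.powf(index as f64) as u64;

    assert_eq!(index, 53);
    assert_eq!(minimum, 98);
}
```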


@ -84,18 +84,6 @@ pub trait Bucketing {
fn ranges(&self) -> &[u64];
}
/// Implement the bucketing algorithm on every object that has that algorithm using dynamic
/// dispatch.
impl Bucketing for Box<dyn Bucketing> {
fn sample_to_bucket_minimum(&self, sample: u64) -> u64 {
(**self).sample_to_bucket_minimum(sample)
}
fn ranges(&self) -> &[u64] {
(**self).ranges()
}
}
impl<B: Bucketing> Histogram<B> {
/// Get the number of buckets in this histogram.
pub fn bucket_count(&self) -> usize {
@ -149,15 +137,3 @@ impl<B: Bucketing> Histogram<B> {
res
}
}
impl<B: Bucketing + 'static> Histogram<B> {
/// Box the contained bucketing algorithm to allow for dynamic dispatch.
pub fn boxed(self) -> Histogram<Box<dyn Bucketing>> {
Histogram {
values: self.values,
count: self.count,
sum: self.sum,
bucketing: Box::new(self.bucketing),
}
}
}


@ -8,6 +8,7 @@ use super::{metrics::*, CommonMetricData, Lifetime};
pub struct CoreMetrics {
pub client_id: UuidMetric,
pub first_run_date: DatetimeMetric,
pub os: StringMetric,
}
impl CoreMetrics {
@ -33,6 +34,15 @@ impl CoreMetrics {
},
TimeUnit::Day,
),
os: StringMetric::new(CommonMetricData {
name: "os".into(),
category: "".into(),
send_in_pings: vec!["glean_client_info".into()],
lifetime: Lifetime::Application,
disabled: false,
dynamic_label: None,
}),
}
}
}


@ -16,7 +16,7 @@ use std::collections::HashMap;
use std::path::{Path, PathBuf};
use chrono::{DateTime, FixedOffset};
use lazy_static::lazy_static;
use once_cell::sync::Lazy;
use once_cell::sync::OnceCell;
use std::sync::Mutex;
use uuid::Uuid;
@ -36,8 +36,8 @@ mod internal_pings;
pub mod metrics;
pub mod ping;
pub mod storage;
#[cfg(feature = "upload")]
mod upload;
mod system;
pub mod upload;
mod util;
pub use crate::common_metric_data::{CommonMetricData, Lifetime};
@ -50,17 +50,14 @@ use crate::internal_pings::InternalPings;
use crate::metrics::{Metric, MetricType, PingType};
use crate::ping::PingMaker;
use crate::storage::StorageManager;
#[cfg(feature = "upload")]
use crate::upload::{PingUploadManager, PingUploadTask};
use crate::upload::{PingUploadManager, PingUploadTask, UploadResult};
use crate::util::{local_now_with_offset, sanitize_application_id};
const GLEAN_VERSION: &str = env!("CARGO_PKG_VERSION");
const GLEAN_SCHEMA_VERSION: u32 = 1;
const DEFAULT_MAX_EVENTS: usize = 500;
lazy_static! {
static ref KNOWN_CLIENT_ID: Uuid =
Uuid::parse_str("c0ffeec0-ffee-c0ff-eec0-ffeec0ffeec0").unwrap();
}
static KNOWN_CLIENT_ID: Lazy<Uuid> =
Lazy::new(|| Uuid::parse_str("c0ffeec0-ffee-c0ff-eec0-ffeec0ffeec0").unwrap());
// An internal ping name, not to be touched by anything else
pub(crate) const INTERNAL_STORAGE: &str = "glean_internal_info";
@ -78,17 +75,31 @@ pub(crate) const DELETION_REQUEST_PINGS_DIRECTORY: &str = "deletion_request";
static GLEAN: OnceCell<Mutex<Glean>> = OnceCell::new();
/// Get a reference to the global Glean object.
///
/// Panics if no global Glean object was set.
pub fn global_glean() -> &'static Mutex<Glean> {
GLEAN.get().unwrap()
pub fn global_glean() -> Option<&'static Mutex<Glean>> {
GLEAN.get()
}
/// Set or replace the global Glean object.
pub fn setup_glean(glean: Glean) -> Result<()> {
// The `OnceCell` type wrapping our Glean is thread-safe and can only be set once.
// Therefore even if our check for it being empty succeeds, setting it could fail if a
// concurrent thread is quicker in setting it.
// However this will not cause a bigger problem, as the second `set` operation will just fail.
// We can log it and move on.
//
// For all wrappers this is not a problem, as the Glean object is initialized exactly once on
// calling `initialize` on the global singleton and further operations check that it has been
// initialized.
if GLEAN.get().is_none() {
GLEAN.set(Mutex::new(glean)).unwrap();
if GLEAN.set(Mutex::new(glean)).is_err() {
log::error!(
"Global Glean object is initialized already. This probably happened concurrently."
)
}
} else {
// We allow overriding the global Glean object to support test mode.
// In test mode the Glean object is fully destroyed and recreated.
// This all happens behind a mutex and is therefore also thread-safe.
let mut lock = GLEAN.get().unwrap().lock().unwrap();
*lock = glean;
}
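
A standalone sketch of the `OnceCell` behavior this comment relies on: `set` succeeds only for the first caller and rejects later values, which is why the error branch above can simply log and move on:

```rust
use once_cell::sync::OnceCell;

static CELL: OnceCell<u32> = OnceCell::new();

fn main() {
    assert_eq!(CELL.set(1), Ok(()));  // the first set wins
    assert_eq!(CELL.set(2), Err(2));  // later sets fail and return the value
    assert_eq!(CELL.get(), Some(&1)); // the stored value is unchanged
}
```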
@ -161,7 +172,6 @@ pub struct Glean {
start_time: DateTime<FixedOffset>,
max_events: usize,
is_first_run: bool,
#[cfg(feature = "upload")]
upload_manager: PingUploadManager,
}
@ -171,7 +181,7 @@ impl Glean {
/// This will create the necessary directories and files in `data_path`.
/// This will also initialize the core metrics.
pub fn new(cfg: Configuration) -> Result<Self> {
log::info!("Creating new Glean");
log::info!("Creating new Glean v{}", GLEAN_VERSION);
let application_id = sanitize_application_id(&cfg.application_id);
@ -186,7 +196,6 @@ impl Glean {
event_data_store,
core_metrics: CoreMetrics::new(),
internal_pings: InternalPings::new(),
#[cfg(feature = "upload")]
upload_manager: PingUploadManager::new(&cfg.data_path),
data_path: PathBuf::from(cfg.data_path),
application_id,
@ -195,7 +204,38 @@ impl Glean {
max_events: cfg.max_events.unwrap_or(DEFAULT_MAX_EVENTS),
is_first_run: false,
};
glean.on_change_upload_enabled(cfg.upload_enabled);
// The upload enabled flag may have changed since the last run, for
// example by the changing of a config file.
if cfg.upload_enabled {
// If upload is enabled, just follow the normal code path to
// instantiate the core metrics.
glean.on_upload_enabled();
} else {
// If upload is disabled, and we've never run before, only set the
// client_id to KNOWN_CLIENT_ID, but do not send a deletion request
// ping.
// If we have run before, and if the client_id is not equal to
// the KNOWN_CLIENT_ID, do the full upload disabled operations to
// clear metrics, set the client_id to KNOWN_CLIENT_ID, and send a
// deletion request ping.
match glean
.core_metrics
.client_id
.get_value(&glean, "glean_client_info")
{
None => glean.clear_metrics(),
Some(uuid) => {
if uuid != *KNOWN_CLIENT_ID {
// Temporarily enable uploading so we can submit a
// deletion request ping.
glean.upload_enabled = true;
glean.on_upload_disabled();
}
}
}
}
Ok(glean)
}
@ -249,6 +289,8 @@ impl Glean {
// time it is set, that's indeed our "first run".
self.is_first_run = true;
}
self.set_application_lifetime_core_metrics();
}
/// Called when Glean is initialized to the point where it can correctly
@ -286,15 +328,11 @@ impl Glean {
log::info!("Upload enabled: {:?}", flag);
if self.upload_enabled != flag {
// When upload is disabled, submit a deletion-request ping
if !flag {
if let Err(err) = self.internal_pings.deletion_request.submit(self, None) {
log::error!("Failed to submit deletion-request ping on optout: {}", err);
}
if flag {
self.on_upload_enabled();
} else {
self.on_upload_disabled();
}
self.upload_enabled = flag;
self.on_change_upload_enabled(flag);
true
} else {
false
@ -308,29 +346,38 @@ impl Glean {
self.upload_enabled
}
/// Handles the changing of state when upload_enabled changes.
/// Handles the changing of state from upload disabled to enabled.
///
/// Should only be called when the state actually changes.
/// When disabling, all pending metrics, events and queued pings are cleared.
///
/// When enabling, the core Glean metrics are recreated.
/// The upload_enabled flag is set to true and the core Glean metrics are
/// recreated.
fn on_upload_enabled(&mut self) {
self.upload_enabled = true;
self.initialize_core_metrics();
}
/// Handles the changing of state from upload enabled to disabled.
///
/// # Arguments
/// Should only be called when the state actually changes.
///
/// * `flag` - When true, enable metric collection.
fn on_change_upload_enabled(&mut self, flag: bool) {
if flag {
self.initialize_core_metrics();
} else {
self.clear_metrics();
/// A deletion_request ping is sent, all pending metrics, events and queued
/// pings are cleared, and the client_id is set to KNOWN_CLIENT_ID.
/// Afterward, the upload_enabled flag is set to false.
fn on_upload_disabled(&mut self) {
// The upload_enabled flag should be true here, or the deletion ping
// won't be submitted.
if let Err(err) = self.internal_pings.deletion_request.submit(self, None) {
log::error!("Failed to submit deletion-request ping on optout: {}", err);
}
self.clear_metrics();
self.upload_enabled = false;
}
/// Clear any pending metrics when telemetry is disabled.
fn clear_metrics(&mut self) {
// Clear the pending pings queue and acquire the lock
// so that it can't be accessed until this function is done.
#[cfg(feature = "upload")]
let _lock = self.upload_manager.clear_ping_queue();
// There is only one metric that we want to survive after clearing all
@ -412,16 +459,17 @@ impl Glean {
self.max_events
}
/// Gets the next task for an uploader. Which can be either:
/// Gets the next task for an uploader.
///
/// * Wait - which means the requester should ask again later;
/// * Upload(PingRequest) - which means there is a ping to upload. This wraps the actual request object;
/// * Done - which means there are no more pings queued right now.
/// This can be one of:
///
/// * `Wait` - which means the requester should ask again later;
/// * `Upload(PingRequest)` - which means there is a ping to upload. This wraps the actual request object;
/// * `Done` - which means there are no more pings queued right now.
///
/// # Return value
///
/// `PingUploadTask` - an enum representing the possible tasks.
#[cfg(feature = "upload")]
pub fn get_upload_task(&self) -> PingUploadTask {
self.upload_manager.get_upload_task()
}
@ -430,10 +478,9 @@ impl Glean {
///
/// # Arguments
///
/// `uuid` - The UUID of the ping in question.
/// `status` - The HTTP status of the response.
#[cfg(feature = "upload")]
pub fn process_ping_upload_response(&self, uuid: &str, status: u16) {
/// * `uuid` - The UUID of the ping in question.
/// * `status` - The upload result.
pub fn process_ping_upload_response(&self, uuid: &str, status: UploadResult) {
self.upload_manager
.process_ping_upload_response(uuid, status);
}
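
Together, `get_upload_task` and `process_ping_upload_response` form the pull-based upload API this commit introduces. A hedged sketch of the loop an uploader might run against it; the `UploadResult::HttpStatus` variant and the `document_id` field on the request are assumptions for illustration, since this diff only shows the type names:

```rust
use glean_core::upload::{PingUploadTask, UploadResult};
use glean_core::Glean;

fn uploader_loop(glean: &Glean) {
    loop {
        match glean.get_upload_task() {
            // Nothing ready yet; ask again later.
            PingUploadTask::Wait => {
                std::thread::sleep(std::time::Duration::from_millis(500));
            }
            PingUploadTask::Upload(request) => {
                // Send the request over HTTP (elided here), then report the
                // outcome so the manager can delete or retry the queued ping.
                let result = UploadResult::HttpStatus(200); // assumed variant
                glean.process_ping_upload_response(&request.document_id, result);
            }
            // No more pings are queued right now.
            PingUploadTask::Done => break,
        }
    }
}
```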
@ -481,7 +528,7 @@ impl Glean {
/// * `reason`: A reason code to include in the ping
pub fn submit_ping(&self, ping: &PingType, reason: Option<&str>) -> Result<bool> {
if !self.is_upload_enabled() {
log::error!("Glean must be enabled before sending pings.");
log::error!("Glean disabled: not submitting any pings.");
return Ok(false);
}
@ -508,7 +555,6 @@ impl Glean {
return Err(e.into());
}
#[cfg(feature = "upload")]
self.upload_manager
.enqueue_ping(&doc_id, &url_path, content);
@ -611,6 +657,11 @@ impl Glean {
Ok(())
}
/// Set internally-handled application lifetime metrics.
fn set_application_lifetime_core_metrics(&self) {
self.core_metrics.os.set(self, system::OS);
}
/// ** This is not meant to be used directly.**
///
/// Clear all the metrics that have `Lifetime::Application`.
@ -619,6 +670,9 @@ impl Glean {
if let Some(data) = self.data_store.as_ref() {
data.clear_lifetime(Lifetime::Application);
}
// Set internally handled app lifetime metrics again.
self.set_application_lifetime_core_metrics();
}
/// Return whether or not this is the first run on this profile.


@ -5,9 +5,12 @@
// NOTE: This is a test-only file that contains unit tests for
// the lib.rs file.
use std::collections::HashSet;
use std::iter::FromIterator;
use super::*;
use crate::metrics::RecordedExperimentData;
use crate::metrics::StringMetric;
use crate::metrics::{StringMetric, TimeUnit, TimespanMetric, TimingDistributionMetric};
const GLOBAL_APPLICATION_ID: &str = "org.mozilla.glean.test.app";
pub fn new_glean(tempdir: Option<tempfile::TempDir>) -> (Glean, tempfile::TempDir) {
@ -585,3 +588,178 @@ fn test_dirty_bit() {
assert!(!glean.is_dirty_flag_set());
}
}
#[test]
fn test_change_metric_type_runtime() {
let dir = tempfile::tempdir().unwrap();
let (glean, _) = new_glean(Some(dir));
// We attempt to create two metrics: one with a 'string' type and the other
// with a 'timespan' type, both being sent in the same pings and having the
// same lifetime.
let metric_name = "type_swap";
let metric_category = "test";
let metric_lifetime = Lifetime::Ping;
let ping_name = "store1";
let string_metric = StringMetric::new(CommonMetricData {
name: metric_name.into(),
category: metric_category.into(),
send_in_pings: vec![ping_name.into()],
disabled: false,
lifetime: metric_lifetime,
..Default::default()
});
let string_value = "definitely-a-string!";
string_metric.set(&glean, string_value);
assert_eq!(
string_metric.test_get_value(&glean, ping_name).unwrap(),
string_value,
"Expected properly deserialized string"
);
let mut timespan_metric = TimespanMetric::new(
CommonMetricData {
name: metric_name.into(),
category: metric_category.into(),
send_in_pings: vec![ping_name.into()],
disabled: false,
lifetime: metric_lifetime,
..Default::default()
},
TimeUnit::Nanosecond,
);
let duration = 60;
timespan_metric.set_start(&glean, 0);
timespan_metric.set_stop(&glean, duration);
assert_eq!(
timespan_metric.test_get_value(&glean, ping_name).unwrap(),
60,
"Expected properly deserialized time"
);
// We expect old data to be lost forever. See the following bug comment
// https://bugzilla.mozilla.org/show_bug.cgi?id=1621757#c1 for more context.
assert_eq!(None, string_metric.test_get_value(&glean, ping_name));
}
#[test]
fn timing_distribution_truncation() {
let dir = tempfile::tempdir().unwrap();
let (glean, _) = new_glean(Some(dir));
let max_sample_time = 1000 * 1000 * 1000 * 60 * 10;
for (unit, expected_keys) in &[
(
TimeUnit::Nanosecond,
HashSet::<u64>::from_iter(vec![961_548, 939, 599_512_966_122, 1]),
),
(
TimeUnit::Microsecond,
HashSet::<u64>::from_iter(vec![939, 562_949_953_421_318, 599_512_966_122, 961_548]),
),
(
TimeUnit::Millisecond,
HashSet::<u64>::from_iter(vec![
961_548,
576_460_752_303_431_040,
599_512_966_122,
562_949_953_421_318,
]),
),
] {
let mut dist = TimingDistributionMetric::new(
CommonMetricData {
name: format!("local_metric_{:?}", unit),
category: "local".into(),
send_in_pings: vec!["baseline".into()],
..Default::default()
},
*unit,
);
for &value in &[
1,
1_000,
1_000_000,
max_sample_time,
max_sample_time * 1_000,
max_sample_time * 1_000_000,
] {
let timer_id = dist.set_start(0);
dist.set_stop_and_accumulate(&glean, timer_id, value);
}
let snapshot = dist.test_get_value(&glean, "baseline").unwrap();
let mut keys = HashSet::new();
let mut recorded_values = 0;
for (&key, &value) in &snapshot.values {
// A snapshot potentially includes buckets with a 0 count.
// We can ignore them here.
if value > 0 {
assert!(key < max_sample_time * unit.as_nanos(1));
keys.insert(key);
recorded_values += 1;
}
}
assert_eq!(4, recorded_values);
assert_eq!(keys, *expected_keys);
// The number of samples was originally designed around 1 ns to
// 10 minutes, with 8 steps per power of 2, which works out to 316 items.
// This is to ensure that still holds even when the time unit is changed.
assert!(snapshot.values.len() < 316);
}
}
#[test]
fn timing_distribution_truncation_accumulate() {
let dir = tempfile::tempdir().unwrap();
let (glean, _) = new_glean(Some(dir));
let max_sample_time = 1000 * 1000 * 1000 * 60 * 10;
for &unit in &[
TimeUnit::Nanosecond,
TimeUnit::Microsecond,
TimeUnit::Millisecond,
] {
let mut dist = TimingDistributionMetric::new(
CommonMetricData {
name: format!("local_metric_{:?}", unit),
category: "local".into(),
send_in_pings: vec!["baseline".into()],
..Default::default()
},
unit,
);
dist.accumulate_samples_signed(
&glean,
vec![
1,
1_000,
1_000_000,
max_sample_time,
max_sample_time * 1_000,
max_sample_time * 1_000_000,
],
);
let snapshot = dist.test_get_value(&glean, "baseline").unwrap();
// The number of samples was originally designed around 1 ns to
// 10 minutes, with 8 steps per power of 2, which works out to 316 items.
// This is to ensure that still holds even when the time unit is changed.
assert!(snapshot.values.len() < 316);
}
}


@ -2,14 +2,9 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::collections::HashMap;
use serde::Serialize;
use crate::error_recording::{record_error, ErrorType};
use crate::histogram::{Bucketing, Histogram, HistogramType};
use crate::metrics::Metric;
use crate::metrics::MetricType;
use crate::metrics::{DistributionData, Metric, MetricType};
use crate::storage::StorageManager;
use crate::CommonMetricData;
use crate::Glean;
@ -26,18 +21,11 @@ pub struct CustomDistributionMetric {
histogram_type: HistogramType,
}
/// A serializable representation of a snapshotted histogram.
#[derive(Debug, Serialize)]
pub struct Snapshot {
values: HashMap<u64, u64>,
sum: u64,
}
/// Create a snapshot of the histogram.
///
/// The snapshot can be serialized into the payload format.
pub(crate) fn snapshot<B: Bucketing>(hist: &Histogram<B>) -> Snapshot {
Snapshot {
pub(crate) fn snapshot<B: Bucketing>(hist: &Histogram<B>) -> DistributionData {
DistributionData {
values: hist.snapshot_values(),
sum: hist.sum(),
}
@ -161,19 +149,15 @@ impl CustomDistributionMetric {
/// Get the currently stored histogram.
///
/// This doesn't clear the stored value.
pub fn test_get_value(
&self,
glean: &Glean,
storage_name: &str,
) -> Option<Histogram<Box<dyn Bucketing>>> {
pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<DistributionData> {
match StorageManager.snapshot_metric(
glean.storage(),
storage_name,
&self.meta.identifier(glean),
) {
// Boxing the value, in order to return either of the possible buckets
Some(Metric::CustomDistributionExponential(hist)) => Some(hist.boxed()),
Some(Metric::CustomDistributionLinear(hist)) => Some(hist.boxed()),
Some(Metric::CustomDistributionExponential(hist)) => Some(snapshot(&hist)),
Some(Metric::CustomDistributionLinear(hist)) => Some(snapshot(&hist)),
_ => None,
}
}
@ -189,6 +173,6 @@ impl CustomDistributionMetric {
storage_name: &str,
) -> Option<String> {
self.test_get_value(glean, storage_name)
.map(|hist| serde_json::to_string(&snapshot(&hist)).unwrap())
.map(|snapshot| serde_json::to_string(&snapshot).unwrap())
}
}


@ -7,7 +7,7 @@ use std::collections::HashMap;
use serde_json::{json, Value as JsonValue};
use crate::error_recording::{record_error, ErrorType};
use crate::event_database::RecordedEventData;
use crate::event_database::RecordedEvent;
use crate::metrics::MetricType;
use crate::util::truncate_string_at_boundary_with_error;
use crate::CommonMetricData;
@ -116,11 +116,7 @@ impl EventMetric {
/// Get the vector of currently stored events for this event metric.
///
/// This doesn't clear the stored value.
pub fn test_get_value(
&self,
glean: &Glean,
store_name: &str,
) -> Option<Vec<RecordedEventData>> {
pub fn test_get_value(&self, glean: &Glean, store_name: &str) -> Option<Vec<RecordedEvent>> {
glean.event_storage().test_get_value(&self.meta, store_name)
}


@ -2,7 +2,7 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use lazy_static::lazy_static;
use once_cell::sync::Lazy;
use regex::Regex;
use crate::common_metric_data::CommonMetricData;
@ -13,29 +13,27 @@ use crate::Glean;
const MAX_LABELS: usize = 16;
const OTHER_LABEL: &str = "__other__";
const MAX_LABEL_LENGTH: usize = 61;
lazy_static! {
/// This regex is used for matching against labels and should allow for dots, underscores,
/// and/or hyphens. Labels are also limited to starting with either a letter or an
/// underscore character.
/// Some examples of good and bad labels:
///
/// Good:
/// * `this.is.fine`
/// * `this_is_fine_too`
/// * `this.is_still_fine`
/// * `thisisfine`
/// * `_.is_fine`
/// * `this.is-fine`
/// * `this-is-fine`
/// Bad:
/// * `this.is.not_fine_due_tu_the_length_being_too_long_i_thing.i.guess`
/// * `1.not_fine`
/// * `this.$isnotfine`
/// * `-.not_fine`
static ref LABEL_REGEX: Regex = Regex::new(
"^[a-z_][a-z0-9_-]{0,29}(\\.[a-z_][a-z0-9_-]{0,29})*$"
).unwrap();
}
/// This regex is used for matching against labels and should allow for dots, underscores,
/// and/or hyphens. Labels are also limited to starting with either a letter or an
/// underscore character.
/// Some examples of good and bad labels:
///
/// Good:
/// * `this.is.fine`
/// * `this_is_fine_too`
/// * `this.is_still_fine`
/// * `thisisfine`
/// * `_.is_fine`
/// * `this.is-fine`
/// * `this-is-fine`
/// Bad:
/// * `this.is.not_fine_due_tu_the_length_being_too_long_i_thing.i.guess`
/// * `1.not_fine`
/// * `this.$isnotfine`
/// * `-.not_fine`
static LABEL_REGEX: Lazy<Regex> =
Lazy::new(|| Regex::new("^[a-z_][a-z0-9_-]{0,29}(\\.[a-z_][a-z0-9_-]{0,29})*$").unwrap());
/// A labeled metric.
///


@ -2,15 +2,10 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::collections::HashMap;
use serde::Serialize;
use crate::error_recording::{record_error, ErrorType};
use crate::histogram::{Functional, Histogram};
use crate::metrics::memory_unit::MemoryUnit;
use crate::metrics::Metric;
use crate::metrics::MetricType;
use crate::metrics::{DistributionData, Metric, MetricType};
use crate::storage::StorageManager;
use crate::CommonMetricData;
use crate::Glean;
@ -34,18 +29,11 @@ pub struct MemoryDistributionMetric {
memory_unit: MemoryUnit,
}
/// A serializable representation of a snapshotted histogram.
#[derive(Debug, Serialize)]
pub struct Snapshot {
values: HashMap<u64, u64>,
sum: u64,
}
/// Create a snapshot of the histogram.
///
/// The snapshot can be serialized into the payload format.
pub(crate) fn snapshot(hist: &Histogram<Functional>) -> Snapshot {
Snapshot {
pub(crate) fn snapshot(hist: &Histogram<Functional>) -> DistributionData {
DistributionData {
// **Caution**: This cannot use `Histogram::snapshot_values` and needs to use the more
// specialized snapshot function.
values: hist.snapshot(),
@ -171,7 +159,7 @@ impl MemoryDistributionMetric {
if num_too_log_samples > 0 {
let msg = format!(
"Accumulated {} samples longer than 10 minutes",
"Accumulated {} samples larger than 1TB",
num_too_log_samples
);
record_error(
@ -189,17 +177,13 @@ impl MemoryDistributionMetric {
/// Get the currently stored value as an integer.
///
/// This doesn't clear the stored value.
pub fn test_get_value(
&self,
glean: &Glean,
storage_name: &str,
) -> Option<Histogram<Functional>> {
pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<DistributionData> {
match StorageManager.snapshot_metric(
glean.storage(),
storage_name,
&self.meta.identifier(glean),
) {
Some(Metric::MemoryDistribution(hist)) => Some(hist),
Some(Metric::MemoryDistribution(hist)) => Some(snapshot(&hist)),
_ => None,
}
}
@ -215,6 +199,6 @@ impl MemoryDistributionMetric {
storage_name: &str,
) -> Option<String> {
self.test_get_value(glean, storage_name)
.map(|hist| serde_json::to_string(&snapshot(&hist)).unwrap())
.map(|snapshot| serde_json::to_string(&snapshot).unwrap())
}
}


@ -4,6 +4,8 @@
//! The different metric types supported by the Glean SDK to handle data.
use std::collections::HashMap;
use chrono::{DateTime, FixedOffset};
use serde::{Deserialize, Serialize};
use serde_json::{json, Value as JsonValue};
@ -26,6 +28,7 @@ mod timespan;
mod timing_distribution;
mod uuid;
pub use crate::event_database::RecordedEvent;
use crate::histogram::{Functional, Histogram, PrecomputedExponential, PrecomputedLinear};
use crate::util::get_iso_time_string;
use crate::CommonMetricData;
@ -57,6 +60,18 @@ pub use self::timing_distribution::TimerId;
pub use self::timing_distribution::TimingDistributionMetric;
pub use self::uuid::UuidMetric;
/// A snapshot of all buckets and the accumulated sum of a distribution.
#[derive(Debug, Serialize)]
pub struct DistributionData {
/// A map containing the bucket index mapped to the accumulated count.
///
/// This can contain buckets with a count of `0`.
pub values: HashMap<u64, u64>,
/// The accumulated sum of all the samples in the distribution.
pub sum: u64,
}
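
With this shared struct, all three distribution metric types (custom, memory, and timing) snapshot into the same payload shape. A sketch of the serialized form, with invented numbers:

```rust
use glean_core::metrics::DistributionData; // path as exported in this commit

fn main() {
    let data = DistributionData {
        values: vec![(1, 2), (98, 1)].into_iter().collect(),
        sum: 297, // invented, for illustration only
    };
    // The Serialize derive above produces something like:
    // {"values":{"1":2,"98":1},"sum":297}
    println!("{}", serde_json::to_string(&data).unwrap());
}
```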
/// The available metrics.
///
/// This is the in-memory and persisted layout of a metric.


@ -28,8 +28,9 @@ impl PingType {
/// ## Arguments
///
/// * `name` - The name of the ping.
/// * `include_client_id` - Whether to include the client ID in the assembled ping when.
/// sending.
/// * `include_client_id` - Whether to include the client ID in the assembled ping when submitting.
/// * `send_if_empty` - Whether the ping should be sent empty or not.
/// * `reason_codes` - The valid reason codes for this ping.
pub fn new<A: Into<String>>(
name: A,
include_client_id: bool,


@ -4,13 +4,10 @@
use std::collections::HashMap;
use serde::Serialize;
use crate::error_recording::{record_error, ErrorType};
use crate::histogram::{Functional, Histogram};
use crate::metrics::time_unit::TimeUnit;
use crate::metrics::Metric;
use crate::metrics::MetricType;
use crate::metrics::{DistributionData, Metric, MetricType};
use crate::storage::StorageManager;
use crate::CommonMetricData;
use crate::Glean;
@ -21,8 +18,13 @@ const LOG_BASE: f64 = 2.0;
// The buckets per each order of magnitude of the logarithm.
const BUCKETS_PER_MAGNITUDE: f64 = 8.0;
// Maximum time of 10 minutes in nanoseconds. This maximum means we
// retain a maximum of 313 buckets.
// Maximum time, which means we retain a maximum of 316 buckets.
// It is automatically adjusted based on the `time_unit` parameter
// so that:
//
// - `nanosecond`: 10 minutes
// - `microsecond`: ~6.94 days
// - `millisecond`: ~19 years
const MAX_SAMPLE_TIME: u64 = 1000 * 1000 * 1000 * 60 * 10;
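
A quick arithmetic check of the limits quoted in the comment above (a standalone sketch; 365.25-day years assumed):

```rust
fn main() {
    const MAX_SAMPLE_TIME: u64 = 1000 * 1000 * 1000 * 60 * 10; // 6e11 ticks
    // The constant counts ticks of the metric's time_unit, so the real-time
    // ceiling depends on the unit:
    let ns_secs = MAX_SAMPLE_TIME as f64 / 1e9; // 600 s = 10 minutes
    let us_days = MAX_SAMPLE_TIME as f64 / 1e6 / 86_400.0; // ~6.94 days
    let ms_years = MAX_SAMPLE_TIME as f64 / 1e3 / 86_400.0 / 365.25; // ~19.0 years
    println!("{} s, {:.2} days, {:.1} years", ns_secs, us_days, ms_years);
}
```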
/// Identifier for a running timer.
@ -98,18 +100,11 @@ pub struct TimingDistributionMetric {
timings: Timings,
}
/// A serializable representation of a snapshotted histogram with a time unit.
#[derive(Debug, Serialize)]
pub struct Snapshot {
values: HashMap<u64, u64>,
sum: u64,
}
/// Create a snapshot of the histogram with a time unit.
///
/// The snapshot can be serialized into the payload format.
pub(crate) fn snapshot(hist: &Histogram<Functional>) -> Snapshot {
Snapshot {
pub(crate) fn snapshot(hist: &Histogram<Functional>) -> DistributionData {
DistributionData {
// **Caution**: This cannot use `Histogram::snapshot_values` and needs to use the more
// specialized snapshot function.
values: hist.snapshot(),
@ -174,11 +169,23 @@ impl TimingDistributionMetric {
Ok(duration) => duration,
};
if duration > MAX_SAMPLE_TIME {
let msg = "Sample is longer than 10 minutes";
let min_sample_time = self.time_unit.as_nanos(1);
let max_sample_time = self.time_unit.as_nanos(MAX_SAMPLE_TIME);
duration = if duration < min_sample_time {
// If measurement is less than the minimum, just truncate. This is
// not recorded as an error.
min_sample_time
} else if duration > max_sample_time {
let msg = format!(
"Sample is longer than the max for a time_unit of {:?} ({} ns)",
self.time_unit, max_sample_time
);
record_error(glean, &self.meta, ErrorType::InvalidOverflow, msg, None);
duration = MAX_SAMPLE_TIME;
}
max_sample_time
} else {
duration
};
if !self.should_record(glean) {
return;
@ -236,6 +243,7 @@ impl TimingDistributionMetric {
pub fn accumulate_samples_signed(&mut self, glean: &Glean, samples: Vec<i64>) {
let mut num_negative_samples = 0;
let mut num_too_long_samples = 0;
let max_sample_time = self.time_unit.as_nanos(MAX_SAMPLE_TIME);
glean.storage().record_with(glean, &self.meta, |old_value| {
let mut hist = match old_value {
@ -247,13 +255,20 @@ impl TimingDistributionMetric {
if sample < 0 {
num_negative_samples += 1;
} else {
let sample = sample as u64;
let mut sample = self.time_unit.as_nanos(sample);
if sample > MAX_SAMPLE_TIME {
let mut sample = sample as u64;
// Check the range prior to converting the incoming unit to
// nanoseconds, so we can compare against the constant
// MAX_SAMPLE_TIME.
if sample == 0 {
sample = 1;
} else if sample > MAX_SAMPLE_TIME {
num_too_long_samples += 1;
sample = MAX_SAMPLE_TIME;
}
sample = self.time_unit.as_nanos(sample);
hist.accumulate(sample);
}
}
@ -273,8 +288,8 @@ impl TimingDistributionMetric {
if num_too_long_samples > 0 {
let msg = format!(
"Accumulated {} samples longer than 10 minutes",
num_too_long_samples
"{} samples are longer than the maximum of {}",
num_too_long_samples, max_sample_time
);
record_error(
glean,
@ -291,17 +306,13 @@ impl TimingDistributionMetric {
/// Get the currently stored value as an integer.
///
/// This doesn't clear the stored value.
pub fn test_get_value(
&self,
glean: &Glean,
storage_name: &str,
) -> Option<Histogram<Functional>> {
pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<DistributionData> {
match StorageManager.snapshot_metric(
glean.storage(),
storage_name,
&self.meta.identifier(glean),
) {
Some(Metric::TimingDistribution(hist)) => Some(hist),
Some(Metric::TimingDistribution(hist)) => Some(snapshot(&hist)),
_ => None,
}
}
@ -317,7 +328,7 @@ impl TimingDistributionMetric {
storage_name: &str,
) -> Option<String> {
self.test_get_value(glean, storage_name)
.map(|hist| serde_json::to_string(&snapshot(&hist)).unwrap())
.map(|snapshot| serde_json::to_string(&snapshot).unwrap())
}
}


@ -135,9 +135,8 @@ impl PingMaker {
fn get_client_info(&self, glean: &Glean, include_client_id: bool) -> JsonValue {
// Add the "telemetry_sdk_build", which is the glean-core version.
let version = env!("CARGO_PKG_VERSION");
let mut map = json!({
"telemetry_sdk_build": version,
"telemetry_sdk_build": crate::GLEAN_VERSION,
});
// Flatten the whole thing.


@ -17,24 +17,24 @@ use crate::Lifetime;
/// Snapshot metrics from the underlying database.
pub struct StorageManager;
/// Labeled metrics are stored as `<metric name>/<label>`.
/// Labeled metrics are stored as `<metric id>/<label>`.
/// They need to go into a nested object in the final snapshot.
///
/// We therefore extract the metric name and the label from the key and construct the new object or
/// We therefore extract the metric id and the label from the key and construct the new object or
/// add to it.
fn snapshot_labeled_metrics(
snapshot: &mut HashMap<String, HashMap<String, JsonValue>>,
metric_name: &str,
metric_id: &str,
metric: &Metric,
) {
let ping_section = format!("labeled_{}", metric.ping_section());
let map = snapshot.entry(ping_section).or_insert_with(HashMap::new);
let mut s = metric_name.splitn(2, '/');
let metric_name = s.next().unwrap(); // Safe unwrap, the function is only called when the name does contain a '/'
let mut s = metric_id.splitn(2, '/');
let metric_id = s.next().unwrap(); // Safe unwrap, the function is only called when the id does contain a '/'
let label = s.next().unwrap(); // Safe unwrap, the function is only called when the id does contain a '/'
let obj = map.entry(metric_name.into()).or_insert_with(|| json!({}));
let obj = map.entry(metric_id.into()).or_insert_with(|| json!({}));
let obj = obj.as_object_mut().unwrap(); // safe unwrap, we constructed the object above
obj.insert(label.into(), metric.as_json());
}
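Labeled metrics share a single flat storage namespace, so the metric id and label are packed into one key and recovered with `splitn(2, '/')`. A quick demonstration of the key scheme (the ids are made up):

fn split_labeled_key(key: &str) -> (&str, &str) {
    let mut parts = key.splitn(2, '/');
    let metric_id = parts.next().unwrap(); // first element of a split always exists
    let label = parts.next().unwrap_or(""); // only called for keys containing '/'
    (metric_id, label)
}

fn main() {
    assert_eq!(split_labeled_key("ui.clicks/back_button"), ("ui.clicks", "back_button"));
    // splitn(2, ..) keeps any further '/' inside the label intact.
    assert_eq!(split_labeled_key("a/b/c"), ("a", "b/c"));
}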
@ -82,15 +82,15 @@ impl StorageManager {
) -> Option<JsonValue> {
let mut snapshot: HashMap<String, HashMap<String, JsonValue>> = HashMap::new();
let mut snapshotter = |metric_name: &[u8], metric: &Metric| {
let metric_name = String::from_utf8_lossy(metric_name).into_owned();
if metric_name.contains('/') {
snapshot_labeled_metrics(&mut snapshot, &metric_name, &metric);
let mut snapshotter = |metric_id: &[u8], metric: &Metric| {
let metric_id = String::from_utf8_lossy(metric_id).into_owned();
if metric_id.contains('/') {
snapshot_labeled_metrics(&mut snapshot, &metric_id, &metric);
} else {
let map = snapshot
.entry(metric.ping_section().into())
.or_insert_with(HashMap::new);
map.insert(metric_name, metric.as_json());
map.insert(metric_id, metric.as_json());
}
};
@ -132,9 +132,9 @@ impl StorageManager {
) -> Option<Metric> {
let mut snapshot: Option<Metric> = None;
let mut snapshotter = |metric_name: &[u8], metric: &Metric| {
let metric_name = String::from_utf8_lossy(metric_name).into_owned();
if metric_name == metric_id {
let mut snapshotter = |id: &[u8], metric: &Metric| {
let id = String::from_utf8_lossy(id).into_owned();
if id == metric_id {
snapshot = Some(metric.clone())
}
};
@ -177,10 +177,10 @@ impl StorageManager {
) -> Option<JsonValue> {
let mut snapshot: HashMap<String, JsonValue> = HashMap::new();
let mut snapshotter = |metric_name: &[u8], metric: &Metric| {
let metric_name = String::from_utf8_lossy(metric_name).into_owned();
if metric_name.ends_with("#experiment") {
let name = metric_name.splitn(2, '#').next().unwrap(); // safe unwrap, first field of a split always valid
let mut snapshotter = |metric_id: &[u8], metric: &Metric| {
let metric_id = String::from_utf8_lossy(metric_id).into_owned();
if metric_id.ends_with("#experiment") {
let name = metric_id.splitn(2, '#').next().unwrap(); // safe unwrap, first field of a split always valid
snapshot.insert(name.to_string(), metric.as_json());
}
};

View File

@ -0,0 +1,81 @@
// Copyright (c) 2017 The Rust Project Developers
// Licensed under the MIT License.
// Original license:
// https://github.com/RustSec/platforms-crate/blob/ebbd3403243067ba3096f31684557285e352b639/LICENSE-MIT
//
// Permission is hereby granted, free of charge, to any
// person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the
// Software without restriction, including without
// limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice
// shall be included in all copies or substantial portions
// of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! Detect and expose `target_os` as a constant.
//!
//! Code adopted from the "platforms" crate: <https://github.com/RustSec/platforms-crate>.
#[cfg(target_os = "android")]
/// `target_os` when building this crate: `android`
pub const OS: &str = "Android";
#[cfg(target_os = "ios")]
/// `target_os` when building this crate: `ios`
pub const OS: &str = "iOS";
#[cfg(target_os = "linux")]
/// `target_os` when building this crate: `linux`
pub const OS: &str = "Linux";
#[cfg(target_os = "macos")]
/// `target_os` when building this crate: `macos`
pub const OS: &str = "Darwin";
#[cfg(target_os = "windows")]
/// `target_os` when building this crate: `windows`
pub const OS: &str = "Windows";
#[cfg(target_os = "freebsd")]
/// `target_os` when building this crate: `freebsd`
pub const OS: &str = "FreeBSD";
#[cfg(target_os = "netbsd")]
/// `target_os` when building this crate: `netbsd`
pub const OS: &str = "NetBSD";
#[cfg(target_os = "openbsd")]
/// `target_os` when building this crate: `openbsd`
pub const OS: &str = "OpenBSD";
#[cfg(target_os = "solaris")]
/// `target_os` when building this crate: `solaris`
pub const OS: &str = "Solaris";
#[cfg(not(any(
target_os = "android",
target_os = "ios",
target_os = "linux",
target_os = "macos",
target_os = "windows",
target_os = "freebsd",
target_os = "netbsd",
target_os = "openbsd",
target_os = "solaris",
)))]
pub const OS: &str = "unknown";
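Exactly one of these definitions survives per compilation target, with the `not(any(...))` arm as the catch-all, so `OS` can be read unconditionally. A small sanity check of that invariant:

#[test]
fn os_constant_is_always_defined() {
    // The cfg chain guarantees a single definition with "unknown" as fallback,
    // so this compiles on every target and never observes an empty string.
    assert!(!OS.is_empty());
}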

View File

@ -9,7 +9,6 @@ use std::fs::{self, File};
use std::io::{BufRead, BufReader};
use std::path::{Path, PathBuf};
use log;
use serde_json::Value as JsonValue;
use uuid::Uuid;
@ -79,7 +78,7 @@ impl PingDirectoryManager {
};
match fs::remove_file(&path) {
Err(e) => log::error!("Error deleting file {}. {}", path.display(), e),
_ => log::info!("Files was deleted {}", path.display()),
_ => log::info!("File was deleted {}", path.display()),
};
}
@ -89,12 +88,12 @@ impl PingDirectoryManager {
///
/// ## Arguments
///
/// * `uuid` - The UUID of the ping file to be processed
pub fn process_file(&self, uuid: &str) -> Option<PingRequest> {
let path = match self.get_file_path(uuid) {
/// * `document_id` - The UUID of the ping file to be processed
pub fn process_file(&self, document_id: &str) -> Option<PingRequest> {
let path = match self.get_file_path(document_id) {
Some(path) => path,
None => {
log::error!("Cannot find ping file to process {}", uuid);
log::error!("Cannot find ping file to process {}", document_id);
return None;
}
};
@ -114,20 +113,20 @@ impl PingDirectoryManager {
let mut lines = BufReader::new(file).lines();
if let (Some(Ok(path)), Some(Ok(body))) = (lines.next(), lines.next()) {
if let Ok(parsed_body) = serde_json::from_str::<JsonValue>(&body) {
return Some(PingRequest::new(uuid, &path, parsed_body));
return Some(PingRequest::new(document_id, &path, parsed_body));
} else {
log::warn!(
"Error processing ping file: {}. Can't parse ping contents as JSON.",
uuid
document_id
);
}
} else {
log::warn!(
"Error processing ping file: {}. Ping file is not formatted as expected.",
uuid
document_id
);
}
self.delete_file(uuid);
self.delete_file(document_id);
None
}
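`process_file` expects a fixed two-line on-disk layout: the first line is the submission path, the second is the JSON ping body, and anything else gets the file deleted. A sketch of the matching writer, assuming pings are stored under their document id (`store_ping` is a hypothetical name; the real writer lives elsewhere in glean-core):

use std::fs::File;
use std::io::{self, BufWriter, Write};
use std::path::Path;

fn store_ping(dir: &Path, document_id: &str, path: &str, body: &serde_json::Value) -> io::Result<()> {
    let mut file = BufWriter::new(File::create(dir.join(document_id))?);
    // Line 1: the upload path; line 2: the compact JSON body.
    writeln!(file, "{}", path)?;
    writeln!(file, "{}", body)?;
    Ok(())
}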
@ -179,7 +178,7 @@ impl PingDirectoryManager {
// We might not be able to get the modified date for a given file,
// in which case we just put it at the end.
if let (Ok(a), Ok(b)) = (a, b) {
a.partial_cmp(b).unwrap()
a.cmp(b)
} else {
Ordering::Less
}
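Because `SystemTime` implements `Ord`, the comparator can call `cmp` directly; the old `partial_cmp(..).unwrap()` was both more verbose and a latent panic. The same comparator in isolation:

use std::cmp::Ordering;
use std::io;
use std::time::SystemTime;

// Oldest first; the fallback arm covers files whose modified time can't be read.
fn by_modified_date(a: &io::Result<SystemTime>, b: &io::Result<SystemTime>) -> Ordering {
    match (a, b) {
        (Ok(a), Ok(b)) => a.cmp(b), // SystemTime: Ord, no unwrap needed
        _ => Ordering::Less,
    }
}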
@ -203,13 +202,13 @@ impl PingDirectoryManager {
result
}
/// Get the path for a ping file based on its uuid.
/// Get the path for a ping file based on its document_id.
///
/// Will look for files in each ping directory until something is found.
/// If nothing is found, returns `None`.
fn get_file_path(&self, uuid: &str) -> Option<PathBuf> {
fn get_file_path(&self, document_id: &str) -> Option<PathBuf> {
for dir in &self.pings_dirs {
let path = dir.join(uuid);
let path = dir.join(document_id);
if path.exists() {
return Some(path);
}

View File

@ -20,17 +20,20 @@ use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock, RwLockWriteGuard};
use std::thread;
use log;
use serde_json::Value as JsonValue;
use directory::PingDirectoryManager;
use request::PingRequest;
pub use request::PingRequest;
pub use result::{ffi_upload_result, UploadResult};
mod directory;
mod request;
mod result;
/// When asking for the next ping request to upload,
/// the requester may receive one out of three possible tasks.
///
/// If new variants are added, this should be reflected in `glean-core/ffi/src/upload.rs` as well.
#[derive(PartialEq, Debug)]
pub enum PingUploadTask {
/// A PingRequest popped from the front of the queue.
@ -98,23 +101,30 @@ impl PingUploadManager {
}
/// Creates a `PingRequest` and adds it to the queue.
pub fn enqueue_ping(&self, uuid: &str, path: &str, body: JsonValue) {
pub fn enqueue_ping(&self, document_id: &str, path: &str, body: JsonValue) {
log::trace!("Enqueuing ping {} at {}", document_id, path);
let mut queue = self
.queue
.write()
.expect("Can't write to pending pings queue.");
let request = PingRequest::new(uuid, path, body);
let request = PingRequest::new(document_id, path, body);
queue.push_back(request);
}
/// Clears the pending pings queue, leaves the deletion-request pings.
pub fn clear_ping_queue(&self) -> RwLockWriteGuard<'_, VecDeque<PingRequest>> {
log::trace!("Clearing ping queue");
let mut queue = self
.queue
.write()
.expect("Can't write to pending pings queue.");
queue.retain(|ping| ping.is_deletion_request());
log::trace!(
"{} pings left in the queue (only deletion-request expected)",
queue.len()
);
queue
}
@ -125,6 +135,9 @@ impl PingUploadManager {
/// `PingUploadTask` - see [`PingUploadTask`](enum.PingUploadTask.html) for more information.
pub fn get_upload_task(&self) -> PingUploadTask {
if !self.has_processed_pings_dir() {
log::info!(
"Tried getting an upload task, but processing is ongoing. Will come back later."
);
return PingUploadTask::Wait;
}
@ -133,8 +146,18 @@ impl PingUploadManager {
.write()
.expect("Can't write to pending pings queue.");
match queue.pop_front() {
Some(request) => PingUploadTask::Upload(request),
None => PingUploadTask::Done,
Some(request) => {
log::info!(
"New upload task with id {} (path: {})",
request.document_id,
request.path
);
PingUploadTask::Upload(request)
}
None => {
log::info!("No more pings to upload! You are done.");
PingUploadTask::Done
}
}
}
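Together with `process_ping_upload_response` below, this defines the whole contract for a language binding's uploader: back off on `Wait`, attempt the request and report the outcome on `Upload`, stop on `Done`. A sketch of such a consumer loop, where `http_post` stands in for whatever HTTP client the binding uses and returns the `UploadResult` defined later in this patch:

use std::thread;
use std::time::Duration;

fn upload_loop(manager: &PingUploadManager) {
    loop {
        match manager.get_upload_task() {
            PingUploadTask::Wait => thread::sleep(Duration::from_millis(100)),
            PingUploadTask::Upload(request) => {
                // Attempt the upload and feed the outcome back, so the manager
                // can delete the ping file or re-enqueue the request.
                let result = http_post(&request.path, &request.body, &request.headers);
                manager.process_ping_upload_response(&request.document_id, result);
            }
            PingUploadTask::Done => break,
        }
    }
}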
@ -173,29 +196,32 @@ impl PingUploadManager {
///
/// # Arguments
///
/// `uuid` - The UUID of the ping in question.
/// `document_id` - The UUID of the ping in question.
/// `status` - The HTTP status of the response.
pub fn process_ping_upload_response(&self, uuid: &str, status: u16) {
pub fn process_ping_upload_response(&self, document_id: &str, status: UploadResult) {
use UploadResult::*;
match status {
200..=299 => {
log::info!("Ping {} successfully sent {}.", uuid, status);
self.directory_manager.delete_file(uuid);
HttpStatus(status @ 200..=299) => {
log::info!("Ping {} successfully sent {}.", document_id, status);
self.directory_manager.delete_file(document_id);
}
400..=499 => {
UnrecoverableFailure | HttpStatus(400..=499) => {
log::error!(
"Server returned client error code {} while attempting to send ping {}.",
status,
uuid
"Unrecoverable upload failure while attempting to send ping {}. Error was {:?}",
document_id,
status
);
self.directory_manager.delete_file(uuid);
self.directory_manager.delete_file(document_id);
}
_ => {
RecoverableFailure | HttpStatus(_) => {
log::error!(
"Server returned response code {} while attempting to send ping {}.",
status,
uuid
"Recoverable upload failure while attempting to send ping {}, will retry. Error was {:?}",
document_id,
status
);
if let Some(request) = self.directory_manager.process_file(uuid) {
if let Some(request) = self.directory_manager.process_file(document_id) {
let mut queue = self
.queue
.write()
@ -214,11 +240,12 @@ mod test {
use serde_json::json;
use super::UploadResult::*;
use super::*;
use crate::metrics::PingType;
use crate::{tests::new_glean, PENDING_PINGS_DIRECTORY};
const UUID: &str = "40e31919-684f-43b0-a5aa-e15c2d56a674"; // Just a random UUID.
const DOCUMENT_ID: &str = "40e31919-684f-43b0-a5aa-e15c2d56a674"; // Just a random UUID.
const PATH: &str = "/submit/app_id/ping_name/schema_version/doc_id";
#[test]
@ -249,7 +276,7 @@ mod test {
}
// Enqueue a ping
upload_manager.enqueue_ping(UUID, PATH, json!({}));
upload_manager.enqueue_ping(DOCUMENT_ID, PATH, json!({}));
// Try and get the next request.
// Verify request was returned
@ -273,7 +300,7 @@ mod test {
// Enqueue a ping multiple times
let n = 10;
for _ in 0..n {
upload_manager.enqueue_ping(UUID, PATH, json!({}));
upload_manager.enqueue_ping(DOCUMENT_ID, PATH, json!({}));
}
// Verify a request is returned for each submitted ping
@ -301,11 +328,11 @@ mod test {
// Enqueue a ping multiple times
for _ in 0..10 {
upload_manager.enqueue_ping(UUID, PATH, json!({}));
upload_manager.enqueue_ping(DOCUMENT_ID, PATH, json!({}));
}
// Clear the queue
let _ = upload_manager.clear_ping_queue();
drop(upload_manager.clear_ping_queue());
// Verify there really isn't any ping in the queue
assert_eq!(upload_manager.get_upload_task(), PingUploadTask::Done);
@ -332,7 +359,7 @@ mod test {
.unwrap();
// Clear the queue
let _ = glean.upload_manager.clear_ping_queue();
drop(glean.upload_manager.clear_ping_queue());
let upload_task = glean.get_upload_task();
match upload_task {
@ -410,10 +437,10 @@ mod test {
match upload_task {
PingUploadTask::Upload(request) => {
// Simulate the processing of a successful request
let uuid = request.uuid;
upload_manager.process_ping_upload_response(&uuid, 200);
let document_id = request.document_id;
upload_manager.process_ping_upload_response(&document_id, HttpStatus(200));
// Verify file was deleted
assert!(!pending_pings_dir.join(uuid).exists());
assert!(!pending_pings_dir.join(document_id).exists());
}
_ => panic!("Expected upload manager to return the next request!"),
}
@ -450,10 +477,10 @@ mod test {
match upload_task {
PingUploadTask::Upload(request) => {
// Simulate the processing of a client error
let uuid = request.uuid;
upload_manager.process_ping_upload_response(&uuid, 404);
let document_id = request.document_id;
upload_manager.process_ping_upload_response(&document_id, HttpStatus(404));
// Verify file was deleted
assert!(!pending_pings_dir.join(uuid).exists());
assert!(!pending_pings_dir.join(document_id).exists());
}
_ => panic!("Expected upload manager to return the next request!"),
}
@ -487,12 +514,12 @@ mod test {
match upload_task {
PingUploadTask::Upload(request) => {
// Simulate the processing of a server error
let uuid = request.uuid;
upload_manager.process_ping_upload_response(&uuid, 500);
let document_id = request.document_id;
upload_manager.process_ping_upload_response(&document_id, HttpStatus(500));
// Verify this ping was indeed re-enqueued
match upload_manager.get_upload_task() {
PingUploadTask::Upload(request) => {
assert_eq!(uuid, request.uuid);
assert_eq!(document_id, request.document_id);
}
_ => panic!("Expected upload manager to return the next request!"),
}
@ -503,4 +530,94 @@ mod test {
// Verify that after request is returned, none are left
assert_eq!(upload_manager.get_upload_task(), PingUploadTask::Done);
}
#[test]
fn test_processes_correctly_unrecoverable_upload_response() {
let (mut glean, dir) = new_glean(None);
// Register a ping for testing
let ping_type = PingType::new("test", true, /* send_if_empty */ true, vec![]);
glean.register_ping_type(&ping_type);
// Submit a ping
glean.submit_ping(&ping_type, None).unwrap();
// Create a new upload_manager
let upload_manager = PingUploadManager::new(&dir.path());
// Wait for processing of pending pings directory to finish.
let mut upload_task = upload_manager.get_upload_task();
while upload_task == PingUploadTask::Wait {
thread::sleep(Duration::from_millis(10));
upload_task = upload_manager.get_upload_task();
}
// Get the pending ping directory path
let pending_pings_dir = dir.path().join(PENDING_PINGS_DIRECTORY);
// Get the submitted PingRequest
match upload_task {
PingUploadTask::Upload(request) => {
// Simulate the processing of an unrecoverable failure
let document_id = request.document_id;
upload_manager.process_ping_upload_response(&document_id, UnrecoverableFailure);
// Verify file was deleted
assert!(!pending_pings_dir.join(document_id).exists());
}
_ => panic!("Expected upload manager to return the next request!"),
}
// Verify that after request is returned, none are left
assert_eq!(upload_manager.get_upload_task(), PingUploadTask::Done);
}
#[test]
fn new_pings_are_added_while_upload_in_progress() {
// Create a new upload_manager
let dir = tempfile::tempdir().unwrap();
let upload_manager = PingUploadManager::new(dir.path());
// Wait for processing of pending pings directory to finish.
while upload_manager.get_upload_task() == PingUploadTask::Wait {
thread::sleep(Duration::from_millis(10));
}
let doc1 = "684fa150-8dff-11ea-8faf-cb1ff3b11119";
let path1 = format!("/submit/app_id/test-ping/1/{}", doc1);
let doc2 = "74f14e9a-8dff-11ea-b45a-6f936923f639";
let path2 = format!("/submit/app_id/test-ping/1/{}", doc2);
// Enqueue a ping
upload_manager.enqueue_ping(doc1, &path1, json!({}));
// Try and get the first request.
let req = match upload_manager.get_upload_task() {
PingUploadTask::Upload(req) => req,
_ => panic!("Expected upload manager to return the next request!"),
};
assert_eq!(doc1, req.document_id);
// Schedule the next one while the first one is "in progress"
upload_manager.enqueue_ping(doc2, &path2, json!({}));
// Mark as processed
upload_manager.process_ping_upload_response(&req.document_id, HttpStatus(200));
// Get the second request.
let req = match upload_manager.get_upload_task() {
PingUploadTask::Upload(req) => req,
_ => panic!("Expected upload manager to return the next request!"),
};
assert_eq!(doc2, req.document_id);
// Mark as processed
upload_manager.process_ping_upload_response(&req.document_id, HttpStatus(200));
// ... and then we're done.
match upload_manager.get_upload_task() {
PingUploadTask::Done => {}
_ => panic!("Expected upload manager to return the next request!"),
}
}
}

View File

@ -7,20 +7,20 @@
use std::collections::HashMap;
use chrono::prelude::{DateTime, Utc};
use serde_json::Value as JsonValue;
use serde_json::{self, Value as JsonValue};
/// Represents a request to upload a ping.
#[derive(PartialEq, Debug, Clone)]
pub struct PingRequest {
/// The Job ID to identify this request,
/// this is the same as the ping UUID.
pub uuid: String,
pub document_id: String,
/// The path for the server to upload the ping to.
pub path: String,
/// The body of the request.
pub body: JsonValue,
/// A map with all the headers to be sent with the request.
pub headers: HashMap<String, String>,
pub headers: HashMap<&'static str, String>,
}
impl PingRequest {
@ -28,15 +28,16 @@ impl PingRequest {
///
/// Automatically creates the default request headers.
/// Clients may add more headers such as `userAgent` to this list.
pub fn new(uuid: &str, path: &str, body: JsonValue) -> Self {
pub fn new(document_id: &str, path: &str, body: JsonValue) -> Self {
Self {
uuid: uuid.into(),
document_id: document_id.into(),
path: path.into(),
body,
headers: Self::create_request_headers(),
}
}
/// Verifies if current request is for a deletion-request ping.
pub fn is_deletion_request(&self) -> bool {
// The path format should be `/submit/<app_id>/<ping_name>/<schema_version>/<doc_id>`
self.path
@ -47,19 +48,16 @@ impl PingRequest {
}
/// Creates the default request headers.
fn create_request_headers() -> HashMap<String, String> {
fn create_request_headers() -> HashMap<&'static str, String> {
let mut headers = HashMap::new();
let date: DateTime<Utc> = Utc::now();
headers.insert("Date".to_string(), date.to_string());
headers.insert("X-Client-Type".to_string(), "Glean".to_string());
headers.insert("Date", date.to_string());
headers.insert("X-Client-Type", "Glean".to_string());
headers.insert(
"Content-Type".to_string(),
"Content-Type",
"application/json; charset=utf-8".to_string(),
);
headers.insert(
"X-Client-Version".to_string(),
env!("CARGO_PKG_VERSION").to_string(),
);
headers.insert("X-Client-Version", crate::GLEAN_VERSION.to_string());
headers
}
}
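Switching the header keys to `&'static str` avoids allocating owned `String`s for what is a fixed set of header names. A quick usage sketch with a made-up document id:

fn main() {
    let request = PingRequest::new(
        "40e31919-684f-43b0-a5aa-e15c2d56a674",
        "/submit/app_id/ping_name/1/40e31919-684f-43b0-a5aa-e15c2d56a674",
        serde_json::json!({}),
    );
    assert!(request.headers.contains_key("X-Client-Type"));
    assert!(!request.is_deletion_request()); // ping name segment is not "deletion-request"
}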

View File

@ -0,0 +1,66 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
/// Result values of attempted ping uploads encoded for FFI use.
///
/// In a perfect world this would live in `glean-ffi`,
/// but because we also want to convert from pure integer values to a proper Rust enum
/// using Rust's `From` and `Into` traits, we need to have it in this crate
/// (The coherence rules don't allow implementing an external trait for an external type).
///
/// Due to restrictions of cbindgen they are re-defined in `glean-core/ffi/src/upload.rs`.
///
/// NOTE:
/// THEY MUST BE THE SAME ACROSS BOTH FILES!
pub mod ffi_upload_result {
/// A recoverable error.
pub const UPLOAD_RESULT_RECOVERABLE: u32 = 0x1;
/// An unrecoverable error.
pub const UPLOAD_RESULT_UNRECOVERABLE: u32 = 0x2;
/// An HTTP response code.
///
/// The actual response code is encoded in the lower bits.
pub const UPLOAD_RESULT_HTTP_STATUS: u32 = 0x8000;
}
use ffi_upload_result::*;
/// The result of an attempted ping upload.
#[derive(Debug)]
pub enum UploadResult {
/// A recoverable failure.
///
/// During upload something went wrong,
/// e.g. the network connection failed.
/// The upload should be retried at a later time.
RecoverableFailure,
/// An unrecoverable upload failure.
///
/// A possible cause might be a malformed URL.
UnrecoverableFailure,
/// An HTTP response code.
///
/// This can still indicate an error, depending on the status code.
HttpStatus(u32),
}
impl From<u32> for UploadResult {
fn from(status: u32) -> Self {
match status {
status if (status & UPLOAD_RESULT_HTTP_STATUS) == UPLOAD_RESULT_HTTP_STATUS => {
// Extract the status code from the lower bits.
let http_status = status & !UPLOAD_RESULT_HTTP_STATUS;
UploadResult::HttpStatus(http_status)
}
UPLOAD_RESULT_RECOVERABLE => UploadResult::RecoverableFailure,
UPLOAD_RESULT_UNRECOVERABLE => UploadResult::UnrecoverableFailure,
// Any unknown result code is treated as unrecoverable.
_ => UploadResult::UnrecoverableFailure,
}
}
}
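A small round-trip check of the encoding, assuming the constants above are in scope: the `0x8000` bit tags a value as an HTTP status, the status itself sits in the lower bits, and anything unrecognized degrades to `UnrecoverableFailure`:

fn main() {
    // HTTP 404 crossing the FFI boundary: tag bit plus status in the low bits.
    let raw = UPLOAD_RESULT_HTTP_STATUS | 404; // 0x8000 | 0x194 == 0x8194
    match UploadResult::from(raw) {
        UploadResult::HttpStatus(code) => assert_eq!(code, 404),
        other => panic!("unexpected result: {:?}", other),
    }
    // An unknown code is treated as unrecoverable rather than panicking.
    assert!(matches!(UploadResult::from(0x4), UploadResult::UnrecoverableFailure));
}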

View File

@ -111,6 +111,74 @@ pub(crate) fn truncate_string_at_boundary_with_error<S: Into<String>>(
}
}
// On i686 on Windows, the CPython interpreter sets the FPU precision control
// flag to 53 bits of precision, rather than the 64 bit default. On x86_64 on
// Windows, the CPython interpreter changes the rounding control settings. This
// causes different floating point results than on other architectures. This
// RAII-style guard makes it easy to set the correct precision and rounding control
// to match our other targets and platforms.
//
// See https://bugzilla.mozilla.org/show_bug.cgi?id=1623335 for additional context.
#[cfg(all(target_os = "windows", target_env = "gnu"))]
pub mod floating_point_context {
// `size_t` is "pointer size", which is equivalent to Rust's `usize`.
// It's defined as such in libc:
// * https://github.com/rust-lang/libc/blob/bcbfeb5516cd5bb055198dbfbddf8d626fa2be07/src/unix/mod.rs#L19
// * https://github.com/rust-lang/libc/blob/bcbfeb5516cd5bb055198dbfbddf8d626fa2be07/src/windows/mod.rs#L16
#[allow(non_camel_case_types)]
type size_t = usize;
#[link(name = "m")]
extern "C" {
// Gets and sets the floating point control word.
// See documentation here:
// https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/controlfp-s
fn _controlfp_s(current: *mut size_t, new: size_t, mask: size_t) -> size_t;
}
// Rounding control mask
const MCW_RC: size_t = 0x00000300;
// Round by truncation
const RC_CHOP: size_t = 0x00000300;
// Precision control mask
const MCW_PC: size_t = 0x00030000;
// Values for 64-bit precision
const PC_64: size_t = 0x00000000;
pub struct FloatingPointContext {
original_value: size_t,
}
impl FloatingPointContext {
pub fn new() -> Self {
let mut current: size_t = 0;
let _err = unsafe { _controlfp_s(&mut current, PC_64 | RC_CHOP, MCW_PC | MCW_RC) };
FloatingPointContext {
original_value: current,
}
}
}
impl Drop for FloatingPointContext {
fn drop(&mut self) {
let mut current: size_t = 0;
let _err = unsafe { _controlfp_s(&mut current, self.original_value, MCW_PC | MCW_RC) };
}
}
}
#[cfg(not(all(target_os = "windows", target_env = "gnu")))]
pub mod floating_point_context {
pub struct FloatingPointContext {}
impl FloatingPointContext {
pub fn new() -> Self {
FloatingPointContext {}
}
}
}
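The guard is RAII-style: constructing it switches the FPU to 64-bit precision with truncation rounding, and dropping it restores the control word that was saved. The no-op variant keeps call sites identical on other targets, e.g. (the module path here is assumed):

use crate::util::floating_point_context::FloatingPointContext;

fn histogram_math_matches_other_platforms() {
    // Pin the FPU settings for the duration of this scope; the original
    // control word is restored when `_fpc` is dropped.
    let _fpc = FloatingPointContext::new();

    // ... floating point work that must agree across targets ...
}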
#[cfg(test)]
mod test {
use super::*;

View File

@ -11,9 +11,7 @@ use std::fs::{read_dir, File};
use std::io::{BufRead, BufReader};
use std::path::Path;
use chrono;
use chrono::offset::TimeZone;
use iso8601;
use iso8601::Date::YMD;
use serde_json::Value as JsonValue;

View File

@ -42,11 +42,11 @@ mod linear {
metric.accumulate_samples_signed(&glean, vec![50]);
let val = metric
let snapshot = metric
.test_get_value(&glean, "store1")
.expect("Value should be stored");
assert_eq!(val.sum(), 50);
assert_eq!(snapshot.sum, 50);
}
// Make a new Glean instance here, which should force reloading of the data from disk
@ -128,20 +128,19 @@ mod linear {
// negative values to not trigger error reporting.
metric.accumulate_samples_signed(&glean, [1, 2, 3].to_vec());
let val = metric
let snapshot = metric
.test_get_value(&glean, "store1")
.expect("Value should be stored");
// Check that we got the right sum and number of samples.
assert_eq!(val.sum(), 6);
assert_eq!(val.count(), 3);
// Check that we got the right sum of samples.
assert_eq!(snapshot.sum, 6);
// We should get a sample in 3 buckets.
// These numbers are a bit magic, but they correspond to
// `hist.sample_to_bucket_minimum(i * kb)` for `i = 1..=3`.
assert_eq!(1, val.values()[&1]);
assert_eq!(1, val.values()[&2]);
assert_eq!(1, val.values()[&3]);
assert_eq!(1, snapshot.values[&1]);
assert_eq!(1, snapshot.values[&2]);
assert_eq!(1, snapshot.values[&3]);
// No errors should be reported.
assert!(test_get_num_recorded_errors(
@ -175,20 +174,19 @@ mod linear {
// Accumulate the samples.
metric.accumulate_samples_signed(&glean, [-1, 1, 2, 3].to_vec());
let val = metric
let snapshot = metric
.test_get_value(&glean, "store1")
.expect("Value should be stored");
// Check that we got the right sum and number of samples.
assert_eq!(val.sum(), 6);
assert_eq!(val.count(), 3);
// Check that we got the right sum of samples.
assert_eq!(snapshot.sum, 6);
// We should get a sample in 3 buckets.
// These numbers are a bit magic, but they correspond to
// `hist.sample_to_bucket_minimum(i * kb)` for `i = 1..=3`.
assert_eq!(1, val.values()[&1]);
assert_eq!(1, val.values()[&2]);
assert_eq!(1, val.values()[&3]);
assert_eq!(1, snapshot.values[&1]);
assert_eq!(1, snapshot.values[&2]);
assert_eq!(1, snapshot.values[&3]);
// 1 error should be reported.
assert_eq!(
@ -255,11 +253,11 @@ mod exponential {
metric.accumulate_samples_signed(&glean, vec![50]);
let val = metric
let snapshot = metric
.test_get_value(&glean, "store1")
.expect("Value should be stored");
assert_eq!(val.sum(), 50);
assert_eq!(snapshot.sum, 50);
}
// Make a new Glean instance here, which should force reloading of the data from disk
@ -341,20 +339,19 @@ mod exponential {
// negative values to not trigger error reporting.
metric.accumulate_samples_signed(&glean, [1, 2, 3].to_vec());
let val = metric
let snapshot = metric
.test_get_value(&glean, "store1")
.expect("Value should be stored");
// Check that we got the right sum and number of samples.
assert_eq!(val.sum(), 6);
assert_eq!(val.count(), 3);
// Check that we got the right sum of samples.
assert_eq!(snapshot.sum, 6);
// We should get a sample in 3 buckets.
// These numbers are a bit magic, but they correspond to
// `hist.sample_to_bucket_minimum(i * kb)` for `i = 1..=3`.
assert_eq!(1, val.values()[&1]);
assert_eq!(1, val.values()[&2]);
assert_eq!(1, val.values()[&3]);
assert_eq!(1, snapshot.values[&1]);
assert_eq!(1, snapshot.values[&2]);
assert_eq!(1, snapshot.values[&3]);
// No errors should be reported.
assert!(test_get_num_recorded_errors(
@ -388,20 +385,19 @@ mod exponential {
// Accumulate the samples.
metric.accumulate_samples_signed(&glean, [-1, 1, 2, 3].to_vec());
let val = metric
let snapshot = metric
.test_get_value(&glean, "store1")
.expect("Value should be stored");
// Check that we got the right sum and number of samples.
assert_eq!(val.sum(), 6);
assert_eq!(val.count(), 3);
// Check that we got the right sum of samples.
assert_eq!(snapshot.sum, 6);
// We should get a sample in 3 buckets.
// These numbers are a bit magic, but they correspond to
// `hist.sample_to_bucket_minimum(i * kb)` for `i = 1..=3`.
assert_eq!(1, val.values()[&1]);
assert_eq!(1, val.values()[&2]);
assert_eq!(1, val.values()[&3]);
assert_eq!(1, snapshot.values[&1]);
assert_eq!(1, snapshot.values[&2]);
assert_eq!(1, snapshot.values[&3]);
// 1 error should be reported.
assert_eq!(

View File

@ -180,6 +180,12 @@ fn test_sending_of_event_ping_when_it_fills_up() {
let (url, json) = &get_queued_pings(glean.get_data_path()).unwrap()[0];
assert!(url.starts_with(format!("/submit/{}/events/", glean.get_application_id()).as_str()));
assert_eq!(500, json["events"].as_array().unwrap().len());
assert_eq!(
"max_capacity",
json["ping_info"].as_object().unwrap()["reason"]
.as_str()
.unwrap()
);
for i in 0..500 {
let event = &json["events"].as_array().unwrap()[i];

View File

@ -46,11 +46,11 @@ fn serializer_should_correctly_serialize_memory_distribution() {
metric.accumulate(&glean, 100_000);
let val = metric
let snapshot = metric
.test_get_value(&glean, "store1")
.expect("Value should be stored");
assert_eq!(val.sum(), 100_000 * kb);
assert_eq!(snapshot.sum, 100_000 * kb);
}
// Make a new Glean instance here, which should force reloading of the data from disk
@ -126,22 +126,21 @@ fn the_accumulate_samples_api_correctly_stores_memory_values() {
// negative values to not trigger error reporting.
metric.accumulate_samples_signed(&glean, [1, 2, 3].to_vec());
let val = metric
let snapshot = metric
.test_get_value(&glean, "store1")
.expect("Value should be stored");
let kb = 1024;
// Check that we got the right sum and number of samples.
assert_eq!(val.sum(), 6 * kb);
assert_eq!(val.count(), 3);
// Check that we got the right sum of samples.
assert_eq!(snapshot.sum, 6 * kb);
// We should get a sample in 3 buckets.
// These numbers are a bit magic, but they correspond to
// `hist.sample_to_bucket_minimum(i * kb)` for `i = 1..=3`.
assert_eq!(1, val.values()[&1023]);
assert_eq!(1, val.values()[&2047]);
assert_eq!(1, val.values()[&3024]);
assert_eq!(1, snapshot.values[&1023]);
assert_eq!(1, snapshot.values[&2047]);
assert_eq!(1, snapshot.values[&3024]);
// No errors should be reported.
assert!(test_get_num_recorded_errors(
@ -172,22 +171,21 @@ fn the_accumulate_samples_api_correctly_handles_negative_values() {
// Accumulate the samples.
metric.accumulate_samples_signed(&glean, [-1, 1, 2, 3].to_vec());
let val = metric
let snapshot = metric
.test_get_value(&glean, "store1")
.expect("Value should be stored");
let kb = 1024;
// Check that we got the right sum and number of samples.
assert_eq!(val.sum(), 6 * kb);
assert_eq!(val.count(), 3);
// Check that we got the right sum of samples.
assert_eq!(snapshot.sum, 6 * kb);
// We should get a sample in 3 buckets.
// These numbers are a bit magic, but they correspond to
// `hist.sample_to_bucket_minimum(i * kb)` for `i = 1..=3`.
assert_eq!(1, val.values()[&1023]);
assert_eq!(1, val.values()[&2047]);
assert_eq!(1, val.values()[&3024]);
assert_eq!(1, snapshot.values[&1023]);
assert_eq!(1, snapshot.values[&2047]);
assert_eq!(1, snapshot.values[&3024]);
// 1 error should be reported.
assert_eq!(

View File

@ -5,8 +5,6 @@
mod common;
use crate::common::*;
use iso8601;
use glean_core::metrics::*;
use glean_core::ping::PingMaker;
use glean_core::{CommonMetricData, Glean, Lifetime};

View File

@ -49,11 +49,11 @@ fn serializer_should_correctly_serialize_timing_distribution() {
let id = metric.set_start(0);
metric.set_stop_and_accumulate(&glean, id, duration);
let val = metric
let snapshot = metric
.test_get_value(&glean, "store1")
.expect("Value should be stored");
assert_eq!(val.sum(), duration);
assert_eq!(snapshot.sum, duration);
}
// Make a new Glean instance here, which should force reloading of the data from disk
@ -167,22 +167,21 @@ fn the_accumulate_samples_api_correctly_stores_timing_values() {
// negative values to not trigger error reporting.
metric.accumulate_samples_signed(&glean, [1, 2, 3].to_vec());
let val = metric
let snapshot = metric
.test_get_value(&glean, "store1")
.expect("Value should be stored");
let seconds_to_nanos = 1000 * 1000 * 1000;
// Check that we got the right sum and number of samples.
assert_eq!(val.sum(), 6 * seconds_to_nanos);
assert_eq!(val.count(), 3);
assert_eq!(snapshot.sum, 6 * seconds_to_nanos);
// We should get a sample in 3 buckets.
// These numbers are a bit magic, but they correspond to
// `hist.sample_to_bucket_minimum(i * seconds_to_nanos)` for `i = 1..=3`.
assert_eq!(1, val.values()[&984_625_593]);
assert_eq!(1, val.values()[&1_969_251_187]);
assert_eq!(1, val.values()[&2_784_941_737]);
assert_eq!(1, snapshot.values[&984_625_593]);
assert_eq!(1, snapshot.values[&1_969_251_187]);
assert_eq!(1, snapshot.values[&2_784_941_737]);
// No errors should be reported.
assert!(test_get_num_recorded_errors(
@ -213,18 +212,17 @@ fn the_accumulate_samples_api_correctly_handles_negative_values() {
// Accumulate the samples.
metric.accumulate_samples_signed(&glean, [-1, 1, 2, 3].to_vec());
let val = metric
let snapshot = metric
.test_get_value(&glean, "store1")
.expect("Value should be stored");
// Check that we got the right sum and number of samples.
assert_eq!(val.sum(), 6);
assert_eq!(val.count(), 3);
assert_eq!(snapshot.sum, 6);
// We should get a sample in each of the first 3 buckets.
assert_eq!(1, val.values()[&1]);
assert_eq!(1, val.values()[&2]);
assert_eq!(1, val.values()[&3]);
assert_eq!(1, snapshot.values[&1]);
assert_eq!(1, snapshot.values[&2]);
assert_eq!(1, snapshot.values[&3]);
// 1 error should be reported.
assert_eq!(
@ -260,18 +258,17 @@ fn the_accumulate_samples_api_correctly_handles_overflowing_values() {
// Accumulate the samples.
metric.accumulate_samples_signed(&glean, [overflowing_val, 1, 2, 3].to_vec());
let val = metric
let snapshot = metric
.test_get_value(&glean, "store1")
.expect("Value should be stored");
// Overflowing values are truncated to MAX_SAMPLE_TIME and recorded.
assert_eq!(val.sum(), MAX_SAMPLE_TIME + 6);
assert_eq!(val.count(), 4);
assert_eq!(snapshot.sum, MAX_SAMPLE_TIME + 6);
// We should get a sample in each of the first 3 buckets.
assert_eq!(1, val.values()[&1]);
assert_eq!(1, val.values()[&2]);
assert_eq!(1, val.values()[&3]);
assert_eq!(1, snapshot.values[&1]);
assert_eq!(1, snapshot.values[&2]);
assert_eq!(1, snapshot.values[&3]);
// 1 error should be reported.
assert_eq!(
@ -312,7 +309,7 @@ fn large_nanoseconds_values() {
.expect("Value should be stored");
// Check that we got the right sum and number of samples.
assert_eq!(val.sum() as u64, time);
assert_eq!(val.sum, time);
}
#[test]

View File

@ -6,7 +6,7 @@ edition = "2018"
license = "MPL-2.0"
[dependencies]
glean-core = "25.1.0"
glean-core = "30.0.0"
log = "0.4"
nserror = { path = "../../../xpcom/rust/nserror" }
nsstring = { path = "../../../xpcom/rust/nsstring" }

View File

@ -7,7 +7,7 @@ publish = false
[dependencies]
chrono = "0.4.10"
glean-core = "25.1.0"
glean-core = "30.0.0"
log = "0.4"
once_cell = "1.2.0"
uuid = { version = "0.8.1", features = ["v4"] }