Bug 1916645 - update neqo to v0.9.0 r=kershaw,necko-reviewers
Differential Revision: https://phabricator.services.mozilla.com/D221019
parent fd17a9b0c5
commit bf284bbab1
@@ -90,9 +90,9 @@ git = "https://github.com/mozilla/mp4parse-rust"
 rev = "a138e40ec1c603615873e524b5b22e11c0ec4820"
 replace-with = "vendored-sources"
 
-[source."git+https://github.com/mozilla/neqo?tag=v0.8.2"]
+[source."git+https://github.com/mozilla/neqo?tag=v0.9.0"]
 git = "https://github.com/mozilla/neqo"
-tag = "v0.8.2"
+tag = "v0.9.0"
 replace-with = "vendored-sources"
 
 [source."git+https://github.com/servo/unicode-bidi?rev=ca612daf1c08c53abe07327cb3e6ef6e0a760f0c"]
Cargo.lock (generated, 31 changes)
@@ -4045,8 +4045,8 @@ dependencies = [
 
 [[package]]
 name = "neqo-bin"
-version = "0.8.2"
-source = "git+https://github.com/mozilla/neqo?tag=v0.8.2#b7e17668eb8f2fb13c1d945a9a7f79bd31257eb8"
+version = "0.9.0"
+source = "git+https://github.com/mozilla/neqo?tag=v0.9.0#28f60bd0ba3209ecba4102eec123859a3a8afd45"
 dependencies = [
  "clap",
  "clap-verbosity-flag",
@@ -4067,21 +4067,20 @@ dependencies = [
 
 [[package]]
 name = "neqo-common"
-version = "0.8.2"
-source = "git+https://github.com/mozilla/neqo?tag=v0.8.2#b7e17668eb8f2fb13c1d945a9a7f79bd31257eb8"
+version = "0.9.0"
+source = "git+https://github.com/mozilla/neqo?tag=v0.9.0#28f60bd0ba3209ecba4102eec123859a3a8afd45"
 dependencies = [
  "enum-map",
  "env_logger",
  "log",
  "qlog",
- "time 0.3.36",
- "winapi",
+ "windows",
 ]
 
 [[package]]
 name = "neqo-crypto"
-version = "0.8.2"
-source = "git+https://github.com/mozilla/neqo?tag=v0.8.2#b7e17668eb8f2fb13c1d945a9a7f79bd31257eb8"
+version = "0.9.0"
+source = "git+https://github.com/mozilla/neqo?tag=v0.9.0#28f60bd0ba3209ecba4102eec123859a3a8afd45"
 dependencies = [
  "bindgen 0.69.4",
  "log",
@@ -4095,8 +4094,8 @@ dependencies = [
 
 [[package]]
 name = "neqo-http3"
-version = "0.8.2"
-source = "git+https://github.com/mozilla/neqo?tag=v0.8.2#b7e17668eb8f2fb13c1d945a9a7f79bd31257eb8"
+version = "0.9.0"
+source = "git+https://github.com/mozilla/neqo?tag=v0.9.0#28f60bd0ba3209ecba4102eec123859a3a8afd45"
 dependencies = [
  "enumset",
  "log",
@@ -4111,8 +4110,8 @@ dependencies = [
 
 [[package]]
 name = "neqo-qpack"
-version = "0.8.2"
-source = "git+https://github.com/mozilla/neqo?tag=v0.8.2#b7e17668eb8f2fb13c1d945a9a7f79bd31257eb8"
+version = "0.9.0"
+source = "git+https://github.com/mozilla/neqo?tag=v0.9.0#28f60bd0ba3209ecba4102eec123859a3a8afd45"
 dependencies = [
  "log",
  "neqo-common",
@@ -4123,8 +4122,8 @@ dependencies = [
 
 [[package]]
 name = "neqo-transport"
-version = "0.8.2"
-source = "git+https://github.com/mozilla/neqo?tag=v0.8.2#b7e17668eb8f2fb13c1d945a9a7f79bd31257eb8"
+version = "0.9.0"
+source = "git+https://github.com/mozilla/neqo?tag=v0.9.0#28f60bd0ba3209ecba4102eec123859a3a8afd45"
 dependencies = [
  "enum-map",
  "indexmap 2.2.6",
@@ -4138,8 +4137,8 @@ dependencies = [
 
 [[package]]
 name = "neqo-udp"
-version = "0.8.2"
-source = "git+https://github.com/mozilla/neqo?tag=v0.8.2#b7e17668eb8f2fb13c1d945a9a7f79bd31257eb8"
+version = "0.9.0"
+source = "git+https://github.com/mozilla/neqo?tag=v0.9.0#28f60bd0ba3209ecba4102eec123859a3a8afd45"
 dependencies = [
  "log",
  "neqo-common",
@@ -9,11 +9,11 @@ license = "MPL-2.0"
 name = "neqo_glue"
 
 [dependencies]
-neqo-udp = { tag = "v0.8.2", git = "https://github.com/mozilla/neqo" }
-neqo-http3 = { tag = "v0.8.2", git = "https://github.com/mozilla/neqo" }
-neqo-transport = { tag = "v0.8.2", git = "https://github.com/mozilla/neqo" }
-neqo-common = { tag = "v0.8.2", git = "https://github.com/mozilla/neqo" }
-neqo-qpack = { tag = "v0.8.2", git = "https://github.com/mozilla/neqo" }
+neqo-udp = { tag = "v0.9.0", git = "https://github.com/mozilla/neqo" }
+neqo-http3 = { tag = "v0.9.0", git = "https://github.com/mozilla/neqo" }
+neqo-transport = { tag = "v0.9.0", git = "https://github.com/mozilla/neqo" }
+neqo-common = { tag = "v0.9.0", git = "https://github.com/mozilla/neqo" }
+neqo-qpack = { tag = "v0.9.0", git = "https://github.com/mozilla/neqo" }
 nserror = { path = "../../../xpcom/rust/nserror" }
 nsstring = { path = "../../../xpcom/rust/nsstring" }
 xpcom = { path = "../../../xpcom/rust/xpcom" }
@@ -28,7 +28,7 @@ uuid = { version = "1.0", features = ["v4"] }
 winapi = {version = "0.3", features = ["ws2def"] }
 
 [dependencies.neqo-crypto]
-tag = "v0.8.2"
+tag = "v0.9.0"
 git = "https://github.com/mozilla/neqo"
 default-features = false
 features = ["gecko"]
@@ -5,9 +5,7 @@
 #[cfg(not(windows))]
 use libc::{AF_INET, AF_INET6};
 use neqo_common::event::Provider;
-use neqo_common::{
-    self as common, qdebug, qerror, qlog::NeqoQlog, qwarn, Datagram, Header, IpTos, Role,
-};
+use neqo_common::{qdebug, qerror, qlog::NeqoQlog, qwarn, Datagram, Header, IpTos, Role};
 use neqo_crypto::{init, PRErrorCode};
 use neqo_http3::{
     features::extended_connect::SessionCloseReason, Error as Http3Error, Http3Client,
@@ -19,14 +17,12 @@ use neqo_transport::{
 };
 use nserror::*;
 use nsstring::*;
-use qlog::streamer::QlogStreamer;
 use std::borrow::Cow;
 use std::cell::RefCell;
 use std::cmp::{max, min};
 use std::convert::TryFrom;
 use std::convert::TryInto;
 use std::ffi::c_void;
-use std::fs::OpenOptions;
 use std::net::SocketAddr;
 use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
 use std::path::PathBuf;
@@ -282,34 +278,24 @@ impl NeqoHttp3Conn {
 
         if !qlog_dir.is_empty() {
             let qlog_dir_conv = str::from_utf8(qlog_dir).map_err(|_| NS_ERROR_INVALID_ARG)?;
-            let mut qlog_path = PathBuf::from(qlog_dir_conv);
-            qlog_path.push(format!("{}_{}.qlog", origin, Uuid::new_v4()));
+            let qlog_path = PathBuf::from(qlog_dir_conv);
 
-            // Emit warnings but to not return an error if qlog initialization
-            // fails.
-            match OpenOptions::new()
-                .write(true)
-                .create(true)
-                .truncate(true)
-                .open(&qlog_path)
-            {
-                Err(_) => qwarn!("Could not open qlog path: {}", qlog_path.display()),
-                Ok(f) => {
-                    let streamer = QlogStreamer::new(
-                        qlog::QLOG_VERSION.to_string(),
-                        Some("Firefox Client qlog".to_string()),
-                        Some("Firefox Client qlog".to_string()),
-                        None,
-                        std::time::Instant::now(),
-                        common::qlog::new_trace(Role::Client),
-                        qlog::events::EventImportance::Base,
-                        Box::new(f),
-                    );
-
-                    match NeqoQlog::enabled(streamer, &qlog_path) {
-                        Err(_) => qwarn!("Could not write to qlog path: {}", qlog_path.display()),
-                        Ok(nq) => conn.set_qlog(nq),
-                    }
+            match NeqoQlog::enabled_with_file(
+                qlog_path.clone(),
+                Role::Client,
+                Some("Firefox Client qlog".to_string()),
+                Some("Firefox Client qlog".to_string()),
+                format!("{}_{}.qlog", origin, Uuid::new_v4()),
+            ) {
+                Ok(qlog) => conn.set_qlog(qlog),
+                Err(e) => {
+                    // Emit warnings but to not return an error if qlog initialization
+                    // fails.
+                    qwarn!(
+                        "failed to create NeqoQlog at {}: {}",
+                        qlog_path.display(),
+                        e
+                    );
                 }
             }
         }
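
Aside: the hunk above is the core behavioral change in neqo_glue. neqo 0.9 exposes `NeqoQlog::enabled_with_file`, which bundles the file creation and `QlogStreamer` wiring that the old code did by hand. A minimal hedged sketch of the call shape as this patch uses it (the `conn`, `origin` and `id` names below are placeholders for the surrounding NeqoHttp3Conn state, and error handling is reduced to a warning, as in the patch):

use std::path::PathBuf;

use neqo_common::{qlog::NeqoQlog, qwarn, Role};
use neqo_http3::Http3Client;

// Hedged sketch: enable qlog for an existing client without failing setup over it.
fn enable_client_qlog(conn: &mut Http3Client, qlog_dir: &str, origin: &str, id: &str) {
    let dir = PathBuf::from(qlog_dir);
    match NeqoQlog::enabled_with_file(
        dir.clone(),                             // directory the .qlog file is created in
        Role::Client,                            // recorded in the qlog trace metadata
        Some("Firefox Client qlog".to_string()), // title
        Some("Firefox Client qlog".to_string()), // description
        format!("{origin}_{id}.qlog"),           // file name inside the directory
    ) {
        Ok(qlog) => conn.set_qlog(qlog),
        // Warn but keep the connection usable if qlog setup fails.
        Err(e) => qwarn!("failed to create NeqoQlog at {}: {}", dir.display(), e),
    }
}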
@@ -6,11 +6,11 @@ edition = "2018"
 license = "MPL-2.0"
 
 [dependencies]
-neqo-bin = { tag = "v0.8.2", git = "https://github.com/mozilla/neqo" }
-neqo-transport = { tag = "v0.8.2", git = "https://github.com/mozilla/neqo" }
-neqo-common = { tag = "v0.8.2", git = "https://github.com/mozilla/neqo" }
-neqo-http3 = { tag = "v0.8.2", git = "https://github.com/mozilla/neqo" }
-neqo-qpack = { tag = "v0.8.2", git = "https://github.com/mozilla/neqo" }
+neqo-bin = { tag = "v0.9.0", git = "https://github.com/mozilla/neqo" }
+neqo-transport = { tag = "v0.9.0", git = "https://github.com/mozilla/neqo" }
+neqo-common = { tag = "v0.9.0", git = "https://github.com/mozilla/neqo" }
+neqo-http3 = { tag = "v0.9.0", git = "https://github.com/mozilla/neqo" }
+neqo-qpack = { tag = "v0.9.0", git = "https://github.com/mozilla/neqo" }
 log = "0.4.0"
 base64 = "0.21"
 cfg-if = "1.0"
@@ -20,7 +20,7 @@ tokio = { version = "1", features = ["rt-multi-thread"] }
 mozilla-central-workspace-hack = { version = "0.1", features = ["http3server"], optional = true }
 
 [dependencies.neqo-crypto]
-tag = "v0.8.2"
+tag = "v0.9.0"
 git = "https://github.com/mozilla/neqo"
 default-features = false
 features = ["gecko"]
@@ -1 +1 @@
-{"files":{"Cargo.toml":"213791380401f74b5f2407818759035833dbbdcda76e35d791cd352651400f96","benches/main.rs":"aa39bf1f08863e3bace034a991c60a4723f1a7d30b3fc1d1f8c4d7f73bc748c3","src/bin/client.rs":"db77efd75dc0745b6dd983ab8fa3bc8f5f9111967f0d90d23cb19140a940246d","src/bin/server.rs":"2f7ab3c7a98117bd162e6fd07abef1d21791d1bb240db3aae61afa6ff72df83a","src/client/http09.rs":"868a55062e864e7c290e345e3049afbd49796ec3655259a681457540efa3650f","src/client/http3.rs":"7ffba6396ab5875cda5f3ab092d4cc34ab16adad30277b017bc667086d374d18","src/client/mod.rs":"3bf40a6dcc5fde24c823f55ee9d34a2e7d96d2d19980b234d3ec22e33771c14c","src/lib.rs":"e41fe10d5f45b4472ca97a8be531a6b959ec47f094cf2fad3f4f50954ce09046","src/server/http09.rs":"7b0b0459d2b71ecb1d4c93177304a8b7dc0a74dc4cb0a9875df18295ab04b271","src/server/http3.rs":"9d5361a724be1d0e234bbc4b3893a8830825e5886a24a40b96e3f87f35c7b968","src/server/mod.rs":"91f8cd6278c42eef20b6e16f3d903705073d741093bcdf161b58c01914aca2de","src/udp.rs":"81391238621282fae1efc4e5b28be7226733e1bfef7e790f21fb23395cb738bc"},"package":null}
+{"files":{"Cargo.toml":"a11377f6773fd9dd49cbc5e434fa67a94b78556b8da45eb831bc3898fc388e8c","benches/main.rs":"aa39bf1f08863e3bace034a991c60a4723f1a7d30b3fc1d1f8c4d7f73bc748c3","src/bin/client.rs":"db77efd75dc0745b6dd983ab8fa3bc8f5f9111967f0d90d23cb19140a940246d","src/bin/server.rs":"2f7ab3c7a98117bd162e6fd07abef1d21791d1bb240db3aae61afa6ff72df83a","src/client/http09.rs":"1849b2ba103ad0e6b365aa63a272457d798d0635db2711e0a88496feb6336d5b","src/client/http3.rs":"de98fc88347b5216911c9536420e6557c50241267064c0f62b5b77789db62ffa","src/client/mod.rs":"6423e41fc351ae36868a165e9bca172aac9c08195f67ca91b692f0ca58979c95","src/lib.rs":"3264b53d5d9d99420dab92578572ac7c4b3ece747840c115d2a0db6a420d56e8","src/server/http09.rs":"9ffb0f62c6202a2914086b7e1d8ba77e016c1b4f4a9895b268a6312a04ad70e3","src/server/http3.rs":"0bdab101bffda37257360f9a968d32ff8884b40f292878f3dc27b055e0b5864b","src/server/mod.rs":"e1edfc71853f8b5be96287391919dc84d24191e865f7b9b4a38eebfda07ce453","src/udp.rs":"9042b73c20223e1c7b45d862dea9417fc367032db09dd05d48ca06ac33638435"},"package":null}
third_party/rust/neqo-bin/Cargo.toml (vendored, 2 changes)
@@ -16,7 +16,7 @@ test = []
 edition = "2021"
 rust-version = "1.76.0"
 name = "neqo-bin"
-version = "0.8.2"
+version = "0.9.0"
 authors = ["The Neqo Authors <necko@mozilla.com>"]
 build = false
 autobins = false
third_party/rust/neqo-bin/src/client/http09.rs (vendored, 39 changes)
@@ -26,14 +26,27 @@ use neqo_transport::{
 use url::Url;
 
 use super::{get_output_file, qlog_new, Args, CloseState, Res};
+use crate::STREAM_IO_BUFFER_SIZE;
 
 pub struct Handler<'a> {
     streams: HashMap<StreamId, Option<BufWriter<File>>>,
     url_queue: VecDeque<Url>,
+    handled_urls: Vec<Url>,
     all_paths: Vec<PathBuf>,
     args: &'a Args,
     token: Option<ResumptionToken>,
     needs_key_update: bool,
+    read_buffer: Vec<u8>,
+}
+
+impl<'a> Handler<'a> {
+    fn reinit(&mut self) {
+        for url in self.handled_urls.drain(..) {
+            self.url_queue.push_front(url);
+        }
+        self.streams.clear();
+        self.all_paths.clear();
+    }
 }
 
 impl<'a> super::Handler for Handler<'a> {
@@ -78,6 +91,12 @@ impl<'a> super::Handler for Handler<'a> {
                 qdebug!("{event:?}");
                 self.download_urls(client);
             }
+            ConnectionEvent::ZeroRttRejected => {
+                qdebug!("{event:?}");
+                // All 0-RTT data was rejected. We need to retransmit it.
+                self.reinit();
+                self.download_urls(client);
+            }
             ConnectionEvent::ResumptionToken(token) => {
                 self.token = Some(token);
             }
@@ -92,10 +111,7 @@ impl<'a> super::Handler for Handler<'a> {
         }
 
         if self.args.resume && self.token.is_none() {
-            let Some(token) = client.take_resumption_token(Instant::now()) else {
-                return Ok(false);
-            };
-            self.token = Some(token);
+            self.token = client.take_resumption_token(Instant::now());
         }
 
         Ok(true)
@@ -199,10 +215,12 @@ impl<'b> Handler<'b> {
         Self {
             streams: HashMap::new(),
             url_queue,
+            handled_urls: Vec::new(),
             all_paths: Vec::new(),
             args,
             token: None,
             needs_key_update: args.key_update,
+            read_buffer: vec![0; STREAM_IO_BUFFER_SIZE],
         }
     }
 
@@ -239,6 +257,7 @@ impl<'b> Handler<'b> {
                 client.stream_close_send(client_stream_id).unwrap();
                 let out_file = get_output_file(&url, &self.args.output_dir, &mut self.all_paths);
                 self.streams.insert(client_stream_id, out_file);
+                self.handled_urls.push(url);
                 true
             }
             Err(e @ (Error::StreamLimitError | Error::ConnectionState)) => {
@@ -257,25 +276,26 @@ impl<'b> Handler<'b> {
     fn read_from_stream(
         client: &mut Connection,
         stream_id: StreamId,
+        read_buffer: &mut [u8],
         output_read_data: bool,
         maybe_out_file: &mut Option<BufWriter<File>>,
     ) -> Res<bool> {
-        let mut data = vec![0; 4096];
         loop {
-            let (sz, fin) = client.stream_recv(stream_id, &mut data)?;
+            let (sz, fin) = client.stream_recv(stream_id, read_buffer)?;
             if sz == 0 {
                 return Ok(fin);
             }
+            let read_buffer = &read_buffer[0..sz];
 
             if let Some(out_file) = maybe_out_file {
-                out_file.write_all(&data[..sz])?;
+                out_file.write_all(read_buffer)?;
             } else if !output_read_data {
-                qdebug!("READ[{stream_id}]: {sz} bytes");
+                qdebug!("READ[{stream_id}]: {} bytes", read_buffer.len());
             } else {
                 qdebug!(
                     "READ[{}]: {}",
                     stream_id,
-                    String::from_utf8(data.clone()).unwrap()
+                    std::str::from_utf8(read_buffer).unwrap()
                 );
             }
             if fin {
@@ -294,6 +314,7 @@ impl<'b> Handler<'b> {
         let fin_recvd = Self::read_from_stream(
             client,
             stream_id,
+            &mut self.read_buffer,
             self.args.output_read_data,
             maybe_out_file,
         )?;
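
The `handled_urls`/`reinit` additions above implement 0-RTT recovery: requests issued as early data are remembered, and if the server rejects 0-RTT they are moved back to the head of the queue and re-sent over 1-RTT. A stripped-down sketch of that bookkeeping with hypothetical names (the real handler also clears its per-stream output state, as the hunks above show):

use std::collections::VecDeque;

use url::Url;

struct RequestQueue {
    pending: VecDeque<Url>, // not yet sent
    handled: Vec<Url>,      // already turned into (possibly 0-RTT) requests
}

impl RequestQueue {
    fn mark_sent(&mut self, url: Url) {
        self.handled.push(url);
    }

    // On ZeroRttRejected: everything sent as early data must be re-sent,
    // so move it back into the pending queue and start over.
    fn reinit(&mut self) {
        for url in self.handled.drain(..) {
            self.pending.push_front(url);
        }
    }
}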
third_party/rust/neqo-bin/src/client/http3.rs (vendored, 54 changes)
@@ -28,18 +28,21 @@ use neqo_transport::{
 use url::Url;
 
 use super::{get_output_file, qlog_new, Args, CloseState, Res};
+use crate::STREAM_IO_BUFFER_SIZE;
 
 pub struct Handler<'a> {
     #[allow(clippy::struct_field_names)]
     url_handler: UrlHandler<'a>,
     token: Option<ResumptionToken>,
     output_read_data: bool,
+    read_buffer: Vec<u8>,
 }
 
 impl<'a> Handler<'a> {
     pub(crate) fn new(url_queue: VecDeque<Url>, args: &'a Args) -> Self {
         let url_handler = UrlHandler {
             url_queue,
+            handled_urls: Vec::new(),
             stream_handlers: HashMap::new(),
             all_paths: Vec::new(),
             handler_type: if args.test.is_some() {
@@ -54,6 +57,7 @@ impl<'a> Handler<'a> {
             url_handler,
             token: None,
             output_read_data: args.output_read_data,
+            read_buffer: vec![0; STREAM_IO_BUFFER_SIZE],
         }
     }
 }
@@ -151,6 +155,16 @@ impl super::Client for Http3Client {
     }
 }
 
+impl<'a> Handler<'a> {
+    fn reinit(&mut self) {
+        for url in self.url_handler.handled_urls.drain(..) {
+            self.url_handler.url_queue.push_front(url);
+        }
+        self.url_handler.stream_handlers.clear();
+        self.url_handler.all_paths.clear();
+    }
+}
+
 impl<'a> super::Handler for Handler<'a> {
     type Client = Http3Client;
 
@@ -182,16 +196,14 @@ impl<'a> super::Handler for Handler<'a> {
                     qwarn!("Data on unexpected stream: {stream_id}");
                 }
                 Some(handler) => loop {
-                    let mut data = vec![0; 4096];
                     let (sz, fin) = client
-                        .read_data(Instant::now(), stream_id, &mut data)
+                        .read_data(Instant::now(), stream_id, &mut self.read_buffer)
                         .expect("Read should succeed");
 
                     handler.process_data_readable(
                         stream_id,
                         fin,
-                        data,
-                        sz,
+                        &self.read_buffer[..sz],
                         self.output_read_data,
                     )?;
 
@@ -222,6 +234,13 @@ impl<'a> super::Handler for Handler<'a> {
                 }
                 Http3ClientEvent::StateChange(Http3State::Connected)
                 | Http3ClientEvent::RequestsCreatable => {
+                    qinfo!("{event:?}");
+                    self.url_handler.process_urls(client);
+                }
+                Http3ClientEvent::ZeroRttRejected => {
+                    qinfo!("{event:?}");
+                    // All 0-RTT data was rejected. We need to retransmit it.
+                    self.reinit();
                     self.url_handler.process_urls(client);
                 }
                 Http3ClientEvent::ResumptionToken(t) => self.token = Some(t),
@@ -245,8 +264,7 @@ trait StreamHandler {
         &mut self,
         stream_id: StreamId,
         fin: bool,
-        data: Vec<u8>,
-        sz: usize,
+        data: &[u8],
         output_read_data: bool,
     ) -> Res<bool>;
     fn process_data_writable(&mut self, client: &mut Http3Client, stream_id: StreamId);
@@ -275,7 +293,7 @@ impl StreamHandlerType {
             Self::Upload => Box::new(UploadStreamHandler {
                 data: vec![42; args.upload_size],
                 offset: 0,
-                chunk_size: 32768,
+                chunk_size: STREAM_IO_BUFFER_SIZE,
                 start: Instant::now(),
             }),
         }
@@ -297,21 +315,20 @@ impl StreamHandler for DownloadStreamHandler {
         &mut self,
         stream_id: StreamId,
         fin: bool,
-        data: Vec<u8>,
-        sz: usize,
+        data: &[u8],
         output_read_data: bool,
     ) -> Res<bool> {
         if let Some(out_file) = &mut self.out_file {
-            if sz > 0 {
-                out_file.write_all(&data[..sz])?;
+            if !data.is_empty() {
+                out_file.write_all(data)?;
             }
             return Ok(true);
         } else if !output_read_data {
-            qdebug!("READ[{stream_id}]: {sz} bytes");
-        } else if let Ok(txt) = String::from_utf8(data.clone()) {
+            qdebug!("READ[{stream_id}]: {} bytes", data.len());
+        } else if let Ok(txt) = std::str::from_utf8(data) {
             qdebug!("READ[{stream_id}]: {txt}");
         } else {
-            qdebug!("READ[{}]: 0x{}", stream_id, hex(&data));
+            qdebug!("READ[{}]: 0x{}", stream_id, hex(data));
         }
 
         if fin {
@@ -344,11 +361,10 @@ impl StreamHandler for UploadStreamHandler {
         &mut self,
         stream_id: StreamId,
         _fin: bool,
-        data: Vec<u8>,
-        _sz: usize,
+        data: &[u8],
         _output_read_data: bool,
     ) -> Res<bool> {
-        if let Ok(txt) = String::from_utf8(data.clone()) {
+        if let Ok(txt) = std::str::from_utf8(data) {
             let trimmed_txt = txt.trim_end_matches(char::from(0));
             let parsed: usize = trimmed_txt.parse().unwrap();
             if parsed == self.data.len() {
@@ -356,7 +372,7 @@ impl StreamHandler for UploadStreamHandler {
                 qinfo!("Stream ID: {stream_id:?}, Upload time: {upload_time:?}");
             }
         } else {
-            panic!("Unexpected data [{}]: 0x{}", stream_id, hex(&data));
+            panic!("Unexpected data [{}]: 0x{}", stream_id, hex(data));
         }
         Ok(true)
     }
@@ -383,6 +399,7 @@ impl StreamHandler for UploadStreamHandler {
 
 struct UrlHandler<'a> {
     url_queue: VecDeque<Url>,
+    handled_urls: Vec<Url>,
     stream_handlers: HashMap<StreamId, Box<dyn StreamHandler>>,
     all_paths: Vec<PathBuf>,
     handler_type: StreamHandlerType,
@@ -432,6 +449,7 @@ impl<'a> UrlHandler<'a> {
                     client_stream_id,
                 );
                 self.stream_handlers.insert(client_stream_id, handler);
+                self.handled_urls.push(url);
                 true
             }
             Err(
third_party/rust/neqo-bin/src/client/mod.rs (vendored, 60 changes)
@@ -23,14 +23,13 @@ use futures::{
     future::{select, Either},
     FutureExt, TryFutureExt,
 };
-use neqo_common::{self as common, qdebug, qerror, qinfo, qlog::NeqoQlog, qwarn, Datagram, Role};
+use neqo_common::{qdebug, qerror, qinfo, qlog::NeqoQlog, qwarn, Datagram, Role};
 use neqo_crypto::{
     constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256},
     init, Cipher, ResumptionToken,
 };
 use neqo_http3::Output;
 use neqo_transport::{AppError, CloseReason, ConnectionId, Version};
-use qlog::{events::EventImportance, streamer::QlogStreamer};
 use tokio::time::Sleep;
 use url::{Origin, Url};
 
@@ -46,7 +45,7 @@ pub enum Error {
     ArgumentError(&'static str),
     Http3Error(neqo_http3::Error),
     IoError(io::Error),
-    QlogError,
+    QlogError(qlog::Error),
     TransportError(neqo_transport::Error),
     ApplicationError(neqo_transport::AppError),
     CryptoError(neqo_crypto::Error),
@@ -71,8 +70,8 @@ impl From<neqo_http3::Error> for Error {
 }
 
 impl From<qlog::Error> for Error {
-    fn from(_err: qlog::Error) -> Self {
-        Self::QlogError
+    fn from(err: qlog::Error) -> Self {
+        Self::QlogError(err)
     }
 }
 
@@ -174,7 +173,7 @@ pub struct Args {
 
 impl Args {
     #[must_use]
-    #[cfg(feature = "bench")]
+    #[cfg(any(test, feature = "bench"))]
     #[allow(clippy::missing_panics_doc)]
     pub fn new(requests: &[u64]) -> Self {
         use std::str::FromStr;
@@ -253,6 +252,8 @@ impl Args {
                 }
                 self.shared.use_old_http = true;
                 self.resume = true;
+                // PMTUD probes inflate what we sent in 1-RTT, causing QNS to fail the test.
+                self.shared.quic_parameters.no_pmtud = true;
             }
             "multiconnect" => {
                 self.shared.use_old_http = true;
@@ -277,6 +278,11 @@ impl Args {
                 _ => exit(127),
             }
         }
+
+    #[cfg(any(test, feature = "bench"))]
+    pub fn set_qlog_dir(&mut self, dir: PathBuf) {
+        self.shared.qlog_dir = Some(dir);
+    }
 }
 
 fn get_output_file(
@@ -453,32 +459,26 @@ impl<'a, H: Handler> Runner<'a, H> {
 }
 
 fn qlog_new(args: &Args, hostname: &str, cid: &ConnectionId) -> Res<NeqoQlog> {
-    if let Some(qlog_dir) = &args.shared.qlog_dir {
-        let mut qlog_path = qlog_dir.clone();
-        let filename = format!("{hostname}-{cid}.sqlog");
-        qlog_path.push(filename);
-
-        let f = OpenOptions::new()
-            .write(true)
-            .create(true)
-            .truncate(true)
-            .open(&qlog_path)?;
-
-        let streamer = QlogStreamer::new(
-            qlog::QLOG_VERSION.to_string(),
-            Some("Example qlog".to_string()),
-            Some("Example qlog description".to_string()),
-            None,
-            std::time::Instant::now(),
-            common::qlog::new_trace(Role::Client),
-            EventImportance::Base,
-            Box::new(f),
-        );
-
-        Ok(NeqoQlog::enabled(streamer, qlog_path)?)
-    } else {
-        Ok(NeqoQlog::disabled())
-    }
+    let Some(qlog_dir) = args.shared.qlog_dir.clone() else {
+        return Ok(NeqoQlog::disabled());
+    };
+
+    // hostname might be an IPv6 address, e.g. `[::1]`. `:` is an invalid
+    // Windows file name character.
+    #[cfg(windows)]
+    let hostname: String = hostname
+        .chars()
+        .map(|c| if c == ':' { '_' } else { c })
+        .collect();
+
+    NeqoQlog::enabled_with_file(
+        qlog_dir,
+        Role::Client,
+        Some("Example qlog".to_string()),
+        Some("Example qlog description".to_string()),
+        format!("{hostname}-{cid}"),
+    )
+    .map_err(Error::QlogError)
 }
 
 pub async fn client(mut args: Args) -> Res<()> {
third_party/rust/neqo-bin/src/lib.rs (vendored, 79 changes)
@@ -24,6 +24,11 @@ pub mod client;
 pub mod server;
 pub mod udp;
 
+/// Firefox default value
+///
+/// See `network.buffer.cache.size` pref <https://searchfox.org/mozilla-central/rev/f6e3b81aac49e602f06c204f9278da30993cdc8a/modules/libpref/init/all.js#3212>
+const STREAM_IO_BUFFER_SIZE: usize = 32 * 1024;
+
 #[derive(Debug, Parser)]
 pub struct SharedArgs {
     #[command(flatten)]
@@ -65,7 +70,7 @@ pub struct SharedArgs {
     pub quic_parameters: QuicParameters,
 }
 
-#[cfg(feature = "bench")]
+#[cfg(any(test, feature = "bench"))]
 impl Default for SharedArgs {
     fn default() -> Self {
         Self {
@@ -132,7 +137,7 @@ pub struct QuicParameters {
     pub preferred_address_v6: Option<String>,
 }
 
-#[cfg(feature = "bench")]
+#[cfg(any(test, feature = "bench"))]
 impl Default for QuicParameters {
     fn default() -> Self {
         Self {
@@ -252,3 +257,73 @@ impl Display for Error {
 }
 
 impl std::error::Error for Error {}
+
+#[cfg(test)]
+mod tests {
+    use std::{fs, path::PathBuf, str::FromStr, time::SystemTime};
+
+    use crate::{client, server};
+
+    struct TempDir {
+        path: PathBuf,
+    }
+
+    impl TempDir {
+        fn new() -> Self {
+            let mut dir = std::env::temp_dir();
+            dir.push(format!(
+                "neqo-bin-test-{}",
+                SystemTime::now()
+                    .duration_since(SystemTime::UNIX_EPOCH)
+                    .unwrap()
+                    .as_secs()
+            ));
+            fs::create_dir(&dir).unwrap();
+            Self { path: dir }
+        }
+
+        fn path(&self) -> PathBuf {
+            self.path.clone()
+        }
+    }
+
+    impl Drop for TempDir {
+        fn drop(&mut self) {
+            if self.path.exists() {
+                fs::remove_dir_all(&self.path).unwrap();
+            }
+        }
+    }
+
+    #[tokio::test]
+    async fn write_qlog_file() {
+        neqo_crypto::init_db(PathBuf::from_str("../test-fixture/db").unwrap()).unwrap();
+
+        let temp_dir = TempDir::new();
+
+        let mut client_args = client::Args::new(&[1]);
+        client_args.set_qlog_dir(temp_dir.path());
+        let mut server_args = server::Args::default();
+        server_args.set_qlog_dir(temp_dir.path());
+
+        let client = client::client(client_args);
+        let server = Box::pin(server::server(server_args));
+        tokio::select! {
+            _ = client => {}
+            res = server => panic!("expect server not to terminate: {res:?}"),
+        };
+
+        // Verify that the directory contains two non-empty files
+        let entries: Vec<_> = fs::read_dir(temp_dir.path())
+            .unwrap()
+            .filter_map(Result::ok)
+            .collect();
+        assert_eq!(entries.len(), 2, "expect 2 files in the directory");
+
+        for entry in entries {
+            let metadata = entry.metadata().unwrap();
+            assert!(metadata.is_file(), "expect a file, found something else");
+            assert!(metadata.len() > 0, "expect file not be empty");
+        }
+    }
+}
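
The new `STREAM_IO_BUFFER_SIZE` constant above is what the client and server changes elsewhere in this patch key off: instead of allocating a fresh `vec![0; 4096]` for every read, each handler owns one 32 KiB buffer (matching Firefox's `network.buffer.cache.size` pref) and passes borrowed slices of it downstream. A self-contained sketch of that pattern under those assumptions, with hypothetical names:

const STREAM_IO_BUFFER_SIZE: usize = 32 * 1024;

/// Hypothetical reader illustrating the buffer-reuse pattern from this patch.
struct StreamReader {
    read_buffer: Vec<u8>, // allocated once, reused for every read
}

impl StreamReader {
    fn new() -> Self {
        Self {
            read_buffer: vec![0; STREAM_IO_BUFFER_SIZE],
        }
    }

    /// `recv` stands in for Connection::stream_recv / Http3Client::read_data,
    /// which fill a caller-provided buffer and return (bytes read, fin).
    fn drain(
        &mut self,
        mut recv: impl FnMut(&mut [u8]) -> (usize, bool),
        mut sink: impl FnMut(&[u8]),
    ) {
        loop {
            let (sz, fin) = recv(&mut self.read_buffer);
            if sz > 0 {
                // Hand on only the filled prefix, as a borrowed slice rather
                // than a freshly allocated Vec.
                sink(&self.read_buffer[..sz]);
            }
            if fin || sz == 0 {
                return;
            }
        }
    }
}

The same constant also replaces the hard-coded 32768 upload `chunk_size` and sizes the server's zero-filled response block.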
third_party/rust/neqo-bin/src/server/http09.rs (vendored, 159 changes)
@@ -4,7 +4,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use std::{cell::RefCell, collections::HashMap, fmt::Display, rc::Rc, time::Instant};
+use std::{borrow::Cow, cell::RefCell, collections::HashMap, fmt::Display, rc::Rc, time::Instant};
 
 use neqo_common::{event::Provider, hex, qdebug, qerror, qinfo, qwarn, Datagram};
 use neqo_crypto::{generate_ech_keys, random, AllowZeroRtt, AntiReplay};
@@ -15,12 +15,13 @@ use neqo_transport::{
 };
 use regex::Regex;
 
-use super::{qns_read_response, Args};
+use super::{qns_read_response, Args, ResponseData};
+use crate::STREAM_IO_BUFFER_SIZE;
 
 #[derive(Default)]
 struct HttpStreamState {
     writable: bool,
-    data_to_send: Option<(Vec<u8>, usize)>,
+    data_to_send: Option<ResponseData>,
 }
 
 pub struct HttpServer {
@@ -29,6 +30,7 @@ pub struct HttpServer {
     read_state: HashMap<StreamId, Vec<u8>>,
     is_qns_test: bool,
     regex: Regex,
+    read_buffer: Vec<u8>,
 }
 
 impl HttpServer {
@@ -72,6 +74,7 @@ impl HttpServer {
             } else {
                 Regex::new(r"GET +/(\d+)(?:\r)?\n").unwrap()
             },
+            read_buffer: vec![0; STREAM_IO_BUFFER_SIZE],
         })
     }
 
@@ -87,11 +90,63 @@ impl HttpServer {
         }
     }
 
-    fn write(&mut self, stream_id: StreamId, data: Option<Vec<u8>>, conn: &ConnectionRef) {
-        let resp = data.unwrap_or_else(|| Vec::from(&b"404 That request was nonsense\r\n"[..]));
+    fn stream_readable(&mut self, stream_id: StreamId, conn: &ConnectionRef) {
+        if !stream_id.is_client_initiated() || !stream_id.is_bidi() {
+            qdebug!("Stream {} not client-initiated bidi, ignoring", stream_id);
+            return;
+        }
+        let (sz, fin) = conn
+            .borrow_mut()
+            .stream_recv(stream_id, &mut self.read_buffer)
+            .expect("Read should succeed");
+
+        if sz == 0 {
+            if !fin {
+                qdebug!("size 0 but !fin");
+            }
+            return;
+        }
+        let read_buffer = &self.read_buffer[..sz];
+
+        let buf = self.read_state.remove(&stream_id).map_or(
+            Cow::Borrowed(read_buffer),
+            |mut existing| {
+                existing.extend_from_slice(read_buffer);
+                Cow::Owned(existing)
+            },
+        );
+
+        let Ok(msg) = std::str::from_utf8(&buf[..]) else {
+            self.save_partial(stream_id, buf.to_vec(), conn);
+            return;
+        };
+
+        let m = self.regex.captures(msg);
+        let Some(path) = m.and_then(|m| m.get(1)) else {
+            self.save_partial(stream_id, buf.to_vec(), conn);
+            return;
+        };
+
+        let resp: ResponseData = {
+            let path = path.as_str();
+            qdebug!("Path = '{path}'");
+            if self.is_qns_test {
+                match qns_read_response(path) {
+                    Ok(data) => data.into(),
+                    Err(e) => {
+                        qerror!("Failed to read {path}: {e}");
+                        b"404".to_vec().into()
+                    }
+                }
+            } else {
+                let count = path.parse().unwrap();
+                ResponseData::zeroes(count)
+            }
+        };
+
         if let Some(stream_state) = self.write_state.get_mut(&stream_id) {
             match stream_state.data_to_send {
-                None => stream_state.data_to_send = Some((resp, 0)),
+                None => stream_state.data_to_send = Some(resp),
                 Some(_) => {
                     qdebug!("Data already set, doing nothing");
                 }
@@ -104,90 +159,26 @@ impl HttpServer {
                 stream_id,
                 HttpStreamState {
                     writable: false,
-                    data_to_send: Some((resp, 0)),
+                    data_to_send: Some(resp),
                 },
             );
         }
     }
 
-    fn stream_readable(&mut self, stream_id: StreamId, conn: &ConnectionRef) {
-        if !stream_id.is_client_initiated() || !stream_id.is_bidi() {
-            qdebug!("Stream {} not client-initiated bidi, ignoring", stream_id);
-            return;
-        }
-        let mut data = vec![0; 4000];
-        let (sz, fin) = conn
-            .borrow_mut()
-            .stream_recv(stream_id, &mut data)
-            .expect("Read should succeed");
-
-        if sz == 0 {
-            if !fin {
-                qdebug!("size 0 but !fin");
-            }
-            return;
-        }
-
-        data.truncate(sz);
-        let buf = if let Some(mut existing) = self.read_state.remove(&stream_id) {
-            existing.append(&mut data);
-            existing
-        } else {
-            data
-        };
-
-        let Ok(msg) = std::str::from_utf8(&buf[..]) else {
-            self.save_partial(stream_id, buf, conn);
-            return;
-        };
-
-        let m = self.regex.captures(msg);
-        let Some(path) = m.and_then(|m| m.get(1)) else {
-            self.save_partial(stream_id, buf, conn);
-            return;
-        };
-
-        let resp = {
-            let path = path.as_str();
-            qdebug!("Path = '{path}'");
-            if self.is_qns_test {
-                match qns_read_response(path) {
-                    Ok(data) => Some(data),
-                    Err(e) => {
-                        qerror!("Failed to read {path}: {e}");
-                        Some(b"404".to_vec())
-                    }
-                }
-            } else {
-                let count = path.parse().unwrap();
-                Some(vec![b'a'; count])
-            }
-        };
-        self.write(stream_id, resp, conn);
-    }
-
     fn stream_writable(&mut self, stream_id: StreamId, conn: &ConnectionRef) {
-        match self.write_state.get_mut(&stream_id) {
-            None => {
-                qwarn!("Unknown stream {stream_id}, ignoring event");
-            }
-            Some(stream_state) => {
+        let Some(stream_state) = self.write_state.get_mut(&stream_id) else {
+            qwarn!("Unknown stream {stream_id}, ignoring event");
+            return;
+        };
+
         stream_state.writable = true;
-                if let Some((data, ref mut offset)) = &mut stream_state.data_to_send {
-                    let sent = conn
-                        .borrow_mut()
-                        .stream_send(stream_id, &data[*offset..])
-                        .unwrap();
-                    qdebug!("Wrote {}", sent);
-                    *offset += sent;
-                    if *offset == data.len() {
-                        qinfo!("Sent {sent} on {stream_id}, closing");
-                        conn.borrow_mut().stream_close_send(stream_id).unwrap();
-                        self.write_state.remove(&stream_id);
-                    } else {
-                        stream_state.writable = false;
-                    }
-                }
+        if let Some(resp) = &mut stream_state.data_to_send {
+            resp.send_h09(stream_id, conn);
+            if resp.done() {
+                conn.borrow_mut().stream_close_send(stream_id).unwrap();
+                self.write_state.remove(&stream_id);
+            } else {
+                stream_state.writable = false;
             }
         }
     }
73
third_party/rust/neqo-bin/src/server/http3.rs
vendored
73
third_party/rust/neqo-bin/src/server/http3.rs
vendored
@@ -5,23 +5,21 @@
 // except according to those terms.
 
 use std::{
-    borrow::Cow,
     cell::RefCell,
-    cmp::min,
     collections::HashMap,
     fmt::{self, Display},
     rc::Rc,
     time::Instant,
 };
 
-use neqo_common::{hex, qdebug, qerror, qinfo, qwarn, Datagram, Header};
+use neqo_common::{hex, qdebug, qerror, qinfo, Datagram, Header};
 use neqo_crypto::{generate_ech_keys, random, AntiReplay};
 use neqo_http3::{
     Http3OrWebTransportStream, Http3Parameters, Http3Server, Http3ServerEvent, StreamId,
 };
 use neqo_transport::{server::ValidateAddress, ConnectionIdGenerator};
 
-use super::{qns_read_response, Args};
+use super::{qns_read_response, Args, ResponseData};
 
 pub struct HttpServer {
     server: Http3Server,
@@ -32,8 +30,6 @@ pub struct HttpServer {
 }
 
 impl HttpServer {
-    const MESSAGE: &'static [u8] = &[0; 4096];
-
     pub fn new(
         args: &Args,
         anti_replay: AntiReplay,
@@ -127,9 +123,9 @@ impl super::HttpServer for HttpServer {
                     } else if let Ok(count) =
                         path.value().trim_matches(|p| p == '/').parse::<usize>()
                     {
-                        ResponseData::repeat(Self::MESSAGE, count)
+                        ResponseData::zeroes(count)
                     } else {
-                        ResponseData::from(Self::MESSAGE)
+                        ResponseData::from(path.value())
                     };
 
                     stream
@@ -138,7 +134,7 @@ impl super::HttpServer for HttpServer {
                             Header::new("content-length", response.remaining.to_string()),
                         ])
                         .unwrap();
-                    response.send(&stream);
+                    response.send_h3(&stream);
                     if response.done() {
                         stream.stream_close_send().unwrap();
                     } else {
@@ -148,7 +144,7 @@ impl super::HttpServer for HttpServer {
                 Http3ServerEvent::DataWritable { stream } => {
                     if self.posts.get_mut(&stream).is_none() {
                         if let Some(remaining) = self.remaining_data.get_mut(&stream.stream_id()) {
-                            remaining.send(&stream);
+                            remaining.send_h3(&stream);
                             if remaining.done() {
                                 self.remaining_data.remove(&stream.stream_id());
                                 stream.stream_close_send().unwrap();
@@ -181,60 +177,3 @@ impl super::HttpServer for HttpServer {
         self.server.has_events()
     }
 }
-
-struct ResponseData {
-    data: Cow<'static, [u8]>,
-    offset: usize,
-    remaining: usize,
-}
-
-impl From<&[u8]> for ResponseData {
-    fn from(data: &[u8]) -> Self {
-        Self::from(data.to_vec())
-    }
-}
-
-impl From<Vec<u8>> for ResponseData {
-    fn from(data: Vec<u8>) -> Self {
-        let remaining = data.len();
-        Self {
-            data: Cow::Owned(data),
-            offset: 0,
-            remaining,
-        }
-    }
-}
-
-impl ResponseData {
-    const fn repeat(buf: &'static [u8], total: usize) -> Self {
-        Self {
-            data: Cow::Borrowed(buf),
-            offset: 0,
-            remaining: total,
-        }
-    }
-
-    fn send(&mut self, stream: &Http3OrWebTransportStream) {
-        while self.remaining > 0 {
-            let end = min(self.data.len(), self.offset + self.remaining);
-            let slice = &self.data[self.offset..end];
-            match stream.send_data(slice) {
-                Ok(0) => {
-                    return;
-                }
-                Ok(sent) => {
-                    self.remaining -= sent;
-                    self.offset = (self.offset + sent) % self.data.len();
-                }
-                Err(e) => {
-                    qwarn!("Error writing to stream {}: {:?}", stream, e);
-                    return;
-                }
-            }
-        }
-    }
-
-    const fn done(&self) -> bool {
-        self.remaining == 0
-    }
-}
third_party/rust/neqo-bin/src/server/mod.rs (vendored, 100 changes)
@@ -7,7 +7,9 @@
 #![allow(clippy::future_not_send)]
 
 use std::{
+    borrow::Cow,
     cell::RefCell,
+    cmp::min,
     fmt::{self, Display},
     fs, io,
     net::{SocketAddr, ToSocketAddrs},
@@ -28,10 +30,11 @@ use neqo_crypto::{
     constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256},
     init_db, AntiReplay, Cipher,
 };
-use neqo_transport::{Output, RandomConnectionIdGenerator, Version};
+use neqo_http3::{Http3OrWebTransportStream, StreamId};
+use neqo_transport::{server::ConnectionRef, Output, RandomConnectionIdGenerator, Version};
 use tokio::time::Sleep;
 
-use crate::SharedArgs;
+use crate::{SharedArgs, STREAM_IO_BUFFER_SIZE};
 
 const ANTI_REPLAY_WINDOW: Duration = Duration::from_secs(10);
 
@@ -118,7 +121,7 @@ pub struct Args {
     ech: bool,
 }
 
-#[cfg(feature = "bench")]
+#[cfg(any(test, feature = "bench"))]
 impl Default for Args {
     fn default() -> Self {
         use std::str::FromStr;
@@ -175,6 +178,11 @@ impl Args {
             Instant::now()
         }
     }
+
+    #[cfg(any(test, feature = "bench"))]
+    pub fn set_qlog_dir(&mut self, dir: PathBuf) {
+        self.shared.qlog_dir = Some(dir);
+    }
 }
 
 fn qns_read_response(filename: &str) -> Result<Vec<u8>, io::Error> {
@@ -390,3 +398,89 @@ pub async fn server(mut args: Args) -> Res<()> {
     .run()
     .await
 }
+
+#[derive(Debug)]
+struct ResponseData {
+    data: Cow<'static, [u8]>,
+    offset: usize,
+    remaining: usize,
+}
+
+impl From<&[u8]> for ResponseData {
+    fn from(data: &[u8]) -> Self {
+        Self::from(data.to_vec())
+    }
+}
+
+impl From<Vec<u8>> for ResponseData {
+    fn from(data: Vec<u8>) -> Self {
+        let remaining = data.len();
+        Self {
+            data: Cow::Owned(data),
+            offset: 0,
+            remaining,
+        }
+    }
+}
+
+impl From<&str> for ResponseData {
+    fn from(data: &str) -> Self {
+        Self::from(data.as_bytes())
+    }
+}
+
+impl ResponseData {
+    const fn zeroes(total: usize) -> Self {
+        const MESSAGE: &[u8] = &[0; STREAM_IO_BUFFER_SIZE];
+        Self {
+            data: Cow::Borrowed(MESSAGE),
+            offset: 0,
+            remaining: total,
+        }
+    }
+
+    fn slice(&self) -> &[u8] {
+        let end = min(self.data.len(), self.offset + self.remaining);
+        &self.data[self.offset..end]
+    }
+
+    fn send_h3(&mut self, stream: &Http3OrWebTransportStream) {
+        while self.remaining > 0 {
+            match stream.send_data(self.slice()) {
+                Ok(0) => {
+                    return;
+                }
+                Ok(sent) => {
+                    self.remaining -= sent;
+                    self.offset = (self.offset + sent) % self.data.len();
+                }
+                Err(e) => {
+                    qwarn!("Error writing to stream {}: {:?}", stream, e);
+                    return;
+                }
+            }
+        }
+    }
+
+    fn send_h09(&mut self, stream_id: StreamId, conn: &ConnectionRef) {
+        while self.remaining > 0 {
+            match conn
+                .borrow_mut()
+                .stream_send(stream_id, self.slice())
+                .unwrap()
+            {
+                0 => {
+                    return;
+                }
+                sent => {
+                    self.remaining -= sent;
+                    self.offset = (self.offset + sent) % self.data.len();
+                }
+            }
+        }
+    }
+
+    const fn done(&self) -> bool {
+        self.remaining == 0
+    }
+}
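
A note on the `ResponseData` rework above: `zeroes(total)` serves a body of `total` bytes out of a fixed `STREAM_IO_BUFFER_SIZE` block of zeroes, so only `offset` and `remaining` track progress and large responses never allocate their full size. A minimal standalone model of that cursor arithmetic (an assumption-labelled reduction of the patch, not the patch itself):

// Reduced model of ResponseData's two counters over a fixed backing block.
struct Cursor {
    len: usize,       // length of the backing block (e.g. 32 * 1024 zero bytes)
    offset: usize,    // where the next slice starts inside the block
    remaining: usize, // total bytes still owed to the peer
}

impl Cursor {
    /// Length of the next contiguous chunk handed to send_data()/stream_send().
    fn next_chunk(&self) -> usize {
        self.len.min(self.offset + self.remaining) - self.offset
    }

    /// Record that `sent` bytes were accepted by the stream.
    fn advance(&mut self, sent: usize) {
        self.remaining -= sent;
        self.offset = (self.offset + sent) % self.len; // wrap around the fixed block
    }
}

With `remaining` larger than `len`, `advance` wraps `offset` around the block, which is why `send_h3`/`send_h09` can loop until the stream stops accepting data (`Ok(0)` or `0`) or `remaining` reaches zero.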
third_party/rust/neqo-bin/src/udp.rs (vendored, 1 change)
@@ -9,6 +9,7 @@ use std::{io, net::SocketAddr};
 use neqo_common::Datagram;
 
 /// Ideally this would live in [`neqo-udp`]. [`neqo-udp`] is used in Firefox.
+///
 /// Firefox uses `cargo vet`. [`tokio`] the dependency of [`neqo-udp`] is not
 /// audited as `safe-to-deploy`. `cargo vet` will require `safe-to-deploy` for
 /// [`tokio`] even when behind a feature flag.
|
@ -1 +1 @@
|
|||||||
{"files":{"Cargo.toml":"3c2a56e78b593343b3d42f35bf87d0ea7cc628d2ab873ff6992c89336e0a44aa","build.rs":"306b2f909a25ae38daf5404a4e128d2a94e8975b70870864c2a71cafec9717c7","src/codec.rs":"549ee76e90898d37102bd4eabfce69a98aaec6862785eaeb4c9af57b7a36a655","src/datagram.rs":"2acecfcbecfbb767ea920e3b22388e67b31fcda776cae5b2d7ecbc67dd9febf7","src/event.rs":"106ca6c4afb107fa49a1bc72f5eb4ae95f4baa1ba19736aa38c8ba973774c160","src/fuzz.rs":"1ca74a34bdc97fedecf8a63c4a13cc487d1b2212398fb76f67792c822002138d","src/header.rs":"480a7848466249a78acddbf0bc0b4a096189abc14a89ad1a0943be571add2c2b","src/hrtime.rs":"93a544743f3994e5d4c494b313a9532ab5bd23541ff63a747cb377ad6d5edc72","src/incrdecoder.rs":"5c45034e61e75c76d2bca8b075c3e7a3cdd8af8c82b67c76283a2b08ab11846b","src/lib.rs":"2381fc00127a7eaf2265c3a13dc1e1d5843e048f3a8a1c97f1e6621c038de380","src/log.rs":"6ed99e15707c4256ae793011ed2f4b33aa81fed70205aaf5f8d3cd11ad451cf0","src/qlog.rs":"1cee4ff3bc9bf735a1bb913e1515ef240a70326a34c56a6ce89de02bc9f3459c","src/tos.rs":"28fd9acfce06f68ac6691efd2609618850182f77ef3717ce2db07bfac19a9396","tests/log.rs":"a11e21fb570258ca93bb40e3923817d381e1e605accbc3aed1df5a0a9918b41d"},"package":null}
{"files":{"Cargo.toml":"bbf1410d4b957a9cd30396819738865296b39a8c1b16aaaf1f81642039e1ff37","build.rs":"306b2f909a25ae38daf5404a4e128d2a94e8975b70870864c2a71cafec9717c7","src/codec.rs":"549ee76e90898d37102bd4eabfce69a98aaec6862785eaeb4c9af57b7a36a655","src/datagram.rs":"2acecfcbecfbb767ea920e3b22388e67b31fcda776cae5b2d7ecbc67dd9febf7","src/event.rs":"106ca6c4afb107fa49a1bc72f5eb4ae95f4baa1ba19736aa38c8ba973774c160","src/fuzz.rs":"1ca74a34bdc97fedecf8a63c4a13cc487d1b2212398fb76f67792c822002138d","src/header.rs":"480a7848466249a78acddbf0bc0b4a096189abc14a89ad1a0943be571add2c2b","src/hrtime.rs":"cbae4363ba64ff208d818d1a6ff0b42ec40a4e2b01b9cec224e57b4dc70c3830","src/incrdecoder.rs":"5c45034e61e75c76d2bca8b075c3e7a3cdd8af8c82b67c76283a2b08ab11846b","src/lib.rs":"2381fc00127a7eaf2265c3a13dc1e1d5843e048f3a8a1c97f1e6621c038de380","src/log.rs":"6ed99e15707c4256ae793011ed2f4b33aa81fed70205aaf5f8d3cd11ad451cf0","src/qlog.rs":"f53cb2a52dd7725c577d4e42065fb1c498ccc33dff0449b6889d9fbc1fdb96e2","src/tos.rs":"28fd9acfce06f68ac6691efd2609618850182f77ef3717ce2db07bfac19a9396","tests/log.rs":"a11e21fb570258ca93bb40e3923817d381e1e605accbc3aed1df5a0a9918b41d"},"package":null}
14 third_party/rust/neqo-common/Cargo.toml vendored
@@ -17,7 +17,7 @@ bench = []
 edition = "2021"
 rust-version = "1.76.0"
 name = "neqo-common"
-version = "0.8.2"
+version = "0.9.0"
 authors = ["The Neqo Authors <necko@mozilla.com>"]
 build = "build.rs"
 autobins = false
@@ -73,11 +73,6 @@ default-features = false
 version = "0.13"
 default-features = false
 
-[dependencies.time]
-version = "0.3"
-features = ["formatting"]
-default-features = false
-
 [dev-dependencies.test-fixture]
 path = "../test-fixture"
 
@@ -85,9 +80,10 @@ path = "../test-fixture"
 build-fuzzing-corpus = ["hex"]
 ci = []
 
-[target."cfg(windows)".dependencies.winapi]
-version = "0.3"
-features = ["timeapi"]
+[target."cfg(windows)".dependencies.windows]
+version = "0.58"
+features = ["Win32_Media"]
+default-features = false
 
 [lints.clippy]
 multiple_crate_versions = "allow"
12 third_party/rust/neqo-common/src/hrtime.rs vendored
@@ -11,9 +11,7 @@ use std::{
 };
 
 #[cfg(windows)]
-use winapi::shared::minwindef::UINT;
-#[cfg(windows)]
-use winapi::um::timeapi::{timeBeginPeriod, timeEndPeriod};
+use windows::Win32::Media::{timeBeginPeriod, timeEndPeriod};
 
 /// A quantized `Duration`. This currently just produces 16 discrete values
 /// corresponding to whole milliseconds. Future implementations might choose
@@ -26,8 +24,8 @@ impl Period {
     const MIN: Self = Self(1);
 
     #[cfg(windows)]
-    fn as_uint(self) -> UINT {
-        UINT::from(self.0)
+    fn as_u32(self) -> u32 {
+        u32::from(self.0)
     }
 
     #[cfg(target_os = "macos")]
@@ -299,7 +297,7 @@ impl Time {
     #[cfg(target_os = "windows")]
     fn start(&self) {
         if let Some(p) = self.active {
-            _ = unsafe { timeBeginPeriod(p.as_uint()) };
+            _ = unsafe { timeBeginPeriod(p.as_u32()) };
         }
     }
 
@@ -310,7 +308,7 @@ impl Time {
     #[cfg(windows)]
     fn stop(&self) {
         if let Some(p) = self.active {
-            _ = unsafe { timeEndPeriod(p.as_uint()) };
+            _ = unsafe { timeEndPeriod(p.as_u32()) };
         }
     }
 
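The hrtime changes above swap the old `winapi` crate for the `windows` crate: `timeBeginPeriod` and `timeEndPeriod` now come from `windows::Win32::Media` (feature `Win32_Media`) and take a plain `u32`. A hedged sketch of how a caller might bracket a timer-resolution request with the new binding; the RAII guard here is illustrative and not part of neqo, and the Cargo dependency line mirrors the manifest change above.

// Cargo.toml (assumed): windows = { version = "0.58", features = ["Win32_Media"], default-features = false }
#[cfg(windows)]
use windows::Win32::Media::{timeBeginPeriod, timeEndPeriod};

/// RAII guard that requests a finer timer resolution while it is alive.
#[cfg(windows)]
struct TimerPeriod(u32);

#[cfg(windows)]
impl TimerPeriod {
    fn new(ms: u32) -> Self {
        // The result is ignored with `_ =`, matching the diff above.
        _ = unsafe { timeBeginPeriod(ms) };
        Self(ms)
    }
}

#[cfg(windows)]
impl Drop for TimerPeriod {
    fn drop(&mut self) {
        _ = unsafe { timeEndPeriod(self.0) };
    }
}

fn main() {
    #[cfg(windows)]
    let _period = TimerPeriod::new(1);
    // ... time-sensitive work while the finer timer period is active ...
}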
62 third_party/rust/neqo-common/src/qlog.rs vendored
@@ -6,9 +6,12 @@
 
 use std::{
     cell::RefCell,
-    fmt,
-    path::{Path, PathBuf},
+    fmt::{self, Display},
+    fs::OpenOptions,
+    io::BufWriter,
+    path::PathBuf,
     rc::Rc,
+    time::SystemTime,
 };
 
 use qlog::{
@@ -29,21 +32,53 @@ pub struct NeqoQlogShared {
 }
 
 impl NeqoQlog {
+    /// Create an enabled `NeqoQlog` configuration backed by a file.
+    ///
+    /// # Errors
+    ///
+    /// Will return `qlog::Error` if it cannot write to the new file.
+    pub fn enabled_with_file(
+        mut qlog_path: PathBuf,
+        role: Role,
+        title: Option<String>,
+        description: Option<String>,
+        file_prefix: impl Display,
+    ) -> Result<Self, qlog::Error> {
+        qlog_path.push(format!("{file_prefix}.sqlog"));
+
+        let file = OpenOptions::new()
+            .write(true)
+            // As a server, the original DCID is chosen by the client. Using
+            // create_new() prevents attackers from overwriting existing logs.
+            .create_new(true)
+            .open(&qlog_path)
+            .map_err(qlog::Error::IoError)?;
+
+        let streamer = QlogStreamer::new(
+            qlog::QLOG_VERSION.to_string(),
+            title,
+            description,
+            None,
+            std::time::Instant::now(),
+            new_trace(role),
+            qlog::events::EventImportance::Base,
+            Box::new(BufWriter::new(file)),
+        );
+        Self::enabled(streamer, qlog_path)
+    }
+
     /// Create an enabled `NeqoQlog` configuration.
     ///
     /// # Errors
     ///
-    /// Will return `qlog::Error` if cannot write to the new log.
-    pub fn enabled(
-        mut streamer: QlogStreamer,
-        qlog_path: impl AsRef<Path>,
-    ) -> Result<Self, qlog::Error> {
+    /// Will return `qlog::Error` if it cannot write to the new log.
+    pub fn enabled(mut streamer: QlogStreamer, qlog_path: PathBuf) -> Result<Self, qlog::Error> {
         streamer.start_log()?;
 
         Ok(Self {
             inner: Rc::new(RefCell::new(Some(NeqoQlogShared {
+                qlog_path,
                 streamer,
-                qlog_path: qlog_path.as_ref().to_owned(),
             }))),
        })
    }
@@ -138,13 +173,10 @@ pub fn new_trace(role: Role) -> qlog::TraceSeq {
         common_fields: Some(CommonFields {
             group_id: None,
             protocol_type: None,
-            reference_time: {
-                // It is better to allow this than deal with a conversion from i64 to f64.
-                // We can't do the obvious two-step conversion with f64::from(i32::try_from(...)),
-                // because that overflows earlier than is ideal. This should be fine for a while.
-                #[allow(clippy::cast_precision_loss)]
-                Some(time::OffsetDateTime::now_utc().unix_timestamp() as f64)
-            },
+            reference_time: SystemTime::now()
+                .duration_since(SystemTime::UNIX_EPOCH)
+                .map(|d| d.as_secs_f64() * 1_000.0)
+                .ok(),
             time_format: Some("relative".to_string()),
         }),
     }
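The new `NeqoQlog::enabled_with_file` constructor above derives the `.sqlog` file name from a prefix, refuses to overwrite existing files via `create_new(true)`, and wraps the file in a `BufWriter` before handing it to `QlogStreamer`. A sketch of a possible call site follows; the directory, titles, and prefix are made-up values, and error handling is reduced to `expect()`.

use std::path::PathBuf;

use neqo_common::{qlog::NeqoQlog, Role};

fn qlog_for_connection(odcid: &str) -> NeqoQlog {
    NeqoQlog::enabled_with_file(
        PathBuf::from("/tmp/qlog"),                   // directory; the file name is derived below
        Role::Client,
        Some("Example qlog".to_string()),             // title
        Some("Example qlog description".to_string()), // description
        odcid,                                        // file_prefix: becomes "<odcid>.sqlog"
    )
    .expect("qlog file should be creatable")
}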
@@ -1 +1 @@
{"files":{"Cargo.toml":"a4b882fb4d24557b4d365d13d83d46bba448648c834ab5bb488feb369be18188","bindings/bindings.toml":"0e06a03035a90ec5f823b30c8b78ec010a332ae0e5ed0c953da2e4c406451793","bindings/nspr_err.h":"2d5205d017b536c2d838bcf9bc4ec79f96dd50e7bb9b73892328781f1ee6629d","bindings/nspr_error.h":"e41c03c77b8c22046f8618832c9569fbcc7b26d8b9bbc35eea7168f35e346889","bindings/nspr_io.h":"085b289849ef0e77f88512a27b4d9bdc28252bd4d39c6a17303204e46ef45f72","bindings/nspr_time.h":"2e637fd338a5cf0fd3fb0070a47f474a34c2a7f4447f31b6875f5a9928d0a261","bindings/nss_ciphers.h":"95ec6344a607558b3c5ba8510f463b6295f3a2fb3f538a01410531045a5f62d1","bindings/nss_init.h":"ef49045063782fb612aff459172cc6a89340f15005808608ade5320ca9974310","bindings/nss_p11.h":"0b81e64fe6db49b2ecff94edd850be111ef99ec11220e88ceb1c67be90143a78","bindings/nss_secerr.h":"713e8368bdae5159af7893cfa517dabfe5103cede051dee9c9557c850a2defc6","bindings/nss_ssl.h":"af222fb957b989e392e762fa2125c82608a0053aff4fb97e556691646c88c335","bindings/nss_sslerr.h":"24b97f092183d8486f774cdaef5030d0249221c78343570d83a4ee5b594210ae","bindings/nss_sslopt.h":"b7807eb7abdad14db6ad7bc51048a46b065a0ea65a4508c95a12ce90e59d1eea","build.rs":"51cfa35860a4c1a0f16e3fc2e2540b02cd9bdf1598f0ca65b74cf4c02fca5be3","min_version.txt":"94ebbba5fc5de230ca467b7e316e9202e4a86c603b3a629cffd647859f48b730","src/aead.rs":"6410bcbe717a6b9ea6f11209b0888033358113ebc05b8a95cec1980d1360be4d","src/aead_null.rs":"81163fafef59bd2800bd0a078d53d0f05ee114f0e22165717823a5ff1cb908af","src/agent.rs":"607f8a648b2099e81750d3d4076a8ca485c79603011d6b0fb2a515aac400c514","src/agentio.rs":"22e63d5efefbff41113cf002a75bb08f15228cb83e9e2cba65eb6da52dad0264","src/auth.rs":"ced1a18f691894984244088020ea25dc1ee678603317f0c7dfc8b8842fa750b4","src/cert.rs":"8e75e69ec3544474b21f8915a7559463889c2f608b201dee274a8d701880950e","src/constants.rs":"f5c779db128a8b0607841ca18c376971017eb327e102e5e6959a7d8effe4b3a6","src/ech.rs":"75dd192423e8996d9061da5e9c20d30bff5153b9344132eda4fe321c4c141870","src/err.rs":"2366501e0b48933a6a2e1c5b934aa55108c093729c84878b45e1e012e4e45d51","src/exp.rs":"d953873e87430b1c84d4a83c8eb3815041f5585b210bbaf59ae2c4d0057f5edd","src/ext.rs":"cbf7d9f5ecabf4b8c9efd6c334637ab1596ec5266d38ab8d2d6ceae305283deb","src/hkdf.rs":"8745ba761be821c1819cedf6dfd91f8b3148c6718053a4a74f33eb50c7d0cc40","src/hp.rs":"510a4a7f278203aa306ead05608f99397edc3806dc22b0af9e28c665b43ae56c","src/lib.rs":"db01ac68d002055bf12d940442c9b9195cc1331bb779571794eae6dc1223eef6","src/min_version.rs":"c6e1f98b9f56db0622ac38c1be131c55acf4a0f09ed0d6283f4d6308e2d1301a","src/p11.rs":"375397b18fcdf36dcdd22c164c8572dd83caf01b8d0065be3029444b197e1464","src/prio.rs":"5cf0105e78b1db43c65283208174abc3714a41dbb4d5cd80ac547a5a5a7c627c","src/replay.rs":"ad5be8e5d20cde477e7fa734000d880bc36d8288d4689e57332f212f65dde716","src/result.rs":"0587cbb6aace71a7f9765ef7c01dcd9f73a49dcc6331e1d8fe4de2aef6ca65b6","src/secrets.rs":"2c47935c5b8c42363897881eaa0c171e84cf031e57a6e1387b99327080e8dd60","src/selfencrypt.rs":"018c2dacabd3e463fdadd5707715b23c26c261c4c7d86e66c62f0acec986cad9","src/ssl.rs":"59bafcaed7caa66fe448339a1f75ce807ef92fc28247709df4f8058499b0787e","src/time.rs":"ade63a72ae90796d7fcccadbb15efc4594fcdb68913a914a657d4556fde88f62","tests/aead.rs":"e36ae77802df1ea6d17cfd1bd2178a3706089577d6fd1554ca86e748b8b235b9","tests/agent.rs":"cbd0011f1d33281883a45d433228221062424c94e86decade5697731c08a1c52","tests/ext.rs":"57af4e2df211fa8afdb73125d4344ef5c70c1ea4579107c3e6f5746308ee3e7b","tests/handshake.rs":"aa904736d36cc5d5cc0c4f6053b529987f33f944a73411bf08e01d30c48671
86","tests/hkdf.rs":"1d2098dc8398395864baf13e4886cfd1da6d36118727c3b264f457ee3da6b048","tests/hp.rs":"ccda23018dac70b3ff3742afcb0fbae0735be9aeb36644a4ae2b1d7c9126801c","tests/init.rs":"3e15150c4b324c06ca5e8935618e4008da53dc0ef4b69325d150831e87dc0b63","tests/selfencrypt.rs":"8d10840b41629bf449a6b3a551377315e8a05ca26c6b041548748196652c5909"},"package":null}
{"files":{"Cargo.toml":"01bffdf3b47044fe1916af7d766e224b535852433c16aae593731baf8baa20c3","bindings/bindings.toml":"0e06a03035a90ec5f823b30c8b78ec010a332ae0e5ed0c953da2e4c406451793","bindings/nspr_err.h":"2d5205d017b536c2d838bcf9bc4ec79f96dd50e7bb9b73892328781f1ee6629d","bindings/nspr_error.h":"e41c03c77b8c22046f8618832c9569fbcc7b26d8b9bbc35eea7168f35e346889","bindings/nspr_io.h":"085b289849ef0e77f88512a27b4d9bdc28252bd4d39c6a17303204e46ef45f72","bindings/nspr_time.h":"2e637fd338a5cf0fd3fb0070a47f474a34c2a7f4447f31b6875f5a9928d0a261","bindings/nss_ciphers.h":"95ec6344a607558b3c5ba8510f463b6295f3a2fb3f538a01410531045a5f62d1","bindings/nss_init.h":"ef49045063782fb612aff459172cc6a89340f15005808608ade5320ca9974310","bindings/nss_p11.h":"0b81e64fe6db49b2ecff94edd850be111ef99ec11220e88ceb1c67be90143a78","bindings/nss_secerr.h":"713e8368bdae5159af7893cfa517dabfe5103cede051dee9c9557c850a2defc6","bindings/nss_ssl.h":"af222fb957b989e392e762fa2125c82608a0053aff4fb97e556691646c88c335","bindings/nss_sslerr.h":"24b97f092183d8486f774cdaef5030d0249221c78343570d83a4ee5b594210ae","bindings/nss_sslopt.h":"b7807eb7abdad14db6ad7bc51048a46b065a0ea65a4508c95a12ce90e59d1eea","build.rs":"3618becbcf1d8d47fe681c13ff9fce070688c67db9d5203e6e8bc038e19a48fc","min_version.txt":"94ebbba5fc5de230ca467b7e316e9202e4a86c603b3a629cffd647859f48b730","src/aead.rs":"6410bcbe717a6b9ea6f11209b0888033358113ebc05b8a95cec1980d1360be4d","src/aead_null.rs":"81163fafef59bd2800bd0a078d53d0f05ee114f0e22165717823a5ff1cb908af","src/agent.rs":"d24f1a3df8300b93a1b606b2089bd758c9aa41c3a9e333089e6165b3449df94f","src/agentio.rs":"22e63d5efefbff41113cf002a75bb08f15228cb83e9e2cba65eb6da52dad0264","src/auth.rs":"ced1a18f691894984244088020ea25dc1ee678603317f0c7dfc8b8842fa750b4","src/cert.rs":"8e75e69ec3544474b21f8915a7559463889c2f608b201dee274a8d701880950e","src/constants.rs":"f5c779db128a8b0607841ca18c376971017eb327e102e5e6959a7d8effe4b3a6","src/ech.rs":"75dd192423e8996d9061da5e9c20d30bff5153b9344132eda4fe321c4c141870","src/err.rs":"2366501e0b48933a6a2e1c5b934aa55108c093729c84878b45e1e012e4e45d51","src/exp.rs":"d953873e87430b1c84d4a83c8eb3815041f5585b210bbaf59ae2c4d0057f5edd","src/ext.rs":"cbf7d9f5ecabf4b8c9efd6c334637ab1596ec5266d38ab8d2d6ceae305283deb","src/hkdf.rs":"8745ba761be821c1819cedf6dfd91f8b3148c6718053a4a74f33eb50c7d0cc40","src/hp.rs":"510a4a7f278203aa306ead05608f99397edc3806dc22b0af9e28c665b43ae56c","src/lib.rs":"c8bd48f9d1d2ebbccfa1224047de3cc47f8bbd0f9fbc4fe073454d0288c66856","src/min_version.rs":"c6e1f98b9f56db0622ac38c1be131c55acf4a0f09ed0d6283f4d6308e2d1301a","src/p11.rs":"375397b18fcdf36dcdd22c164c8572dd83caf01b8d0065be3029444b197e1464","src/prio.rs":"5cf0105e78b1db43c65283208174abc3714a41dbb4d5cd80ac547a5a5a7c627c","src/replay.rs":"5cda39bc8fa8a07c493b761b8dfb5cbc9f669f97a2df7832a028ab366b3426be","src/result.rs":"0587cbb6aace71a7f9765ef7c01dcd9f73a49dcc6331e1d8fe4de2aef6ca65b6","src/secrets.rs":"2c47935c5b8c42363897881eaa0c171e84cf031e57a6e1387b99327080e8dd60","src/selfencrypt.rs":"018c2dacabd3e463fdadd5707715b23c26c261c4c7d86e66c62f0acec986cad9","src/ssl.rs":"59bafcaed7caa66fe448339a1f75ce807ef92fc28247709df4f8058499b0787e","src/time.rs":"ade63a72ae90796d7fcccadbb15efc4594fcdb68913a914a657d4556fde88f62","tests/aead.rs":"e36ae77802df1ea6d17cfd1bd2178a3706089577d6fd1554ca86e748b8b235b9","tests/agent.rs":"cbd0011f1d33281883a45d433228221062424c94e86decade5697731c08a1c52","tests/ext.rs":"57af4e2df211fa8afdb73125d4344ef5c70c1ea4579107c3e6f5746308ee3e7b","tests/handshake.rs":"aa904736d36cc5d5cc0c4f6053b529987f33f944a73411bf08e01d30c48671
86","tests/hkdf.rs":"1d2098dc8398395864baf13e4886cfd1da6d36118727c3b264f457ee3da6b048","tests/hp.rs":"ccda23018dac70b3ff3742afcb0fbae0735be9aeb36644a4ae2b1d7c9126801c","tests/init.rs":"3e15150c4b324c06ca5e8935618e4008da53dc0ef4b69325d150831e87dc0b63","tests/selfencrypt.rs":"8d10840b41629bf449a6b3a551377315e8a05ca26c6b041548748196652c5909"},"package":null}
2 third_party/rust/neqo-crypto/Cargo.toml vendored
@@ -17,7 +17,7 @@ bench = []
 edition = "2021"
 rust-version = "1.76.0"
 name = "neqo-crypto"
-version = "0.8.2"
+version = "0.9.0"
 authors = ["The Neqo Authors <necko@mozilla.com>"]
 build = "build.rs"
 autobins = false
2 third_party/rust/neqo-crypto/build.rs vendored
@@ -66,7 +66,7 @@ fn is_debug() -> bool {
 // Rather than download the 400Mb+ files, like gecko does, let's just reuse their work.
 fn setup_clang() {
     // If this isn't Windows, or we're in CI, then we don't need to do anything.
-    if env::consts::OS != "windows" || env::var("GITHUB_WORKFLOW").unwrap() == "CI" {
+    if env::consts::OS != "windows" || env::var("GITHUB_WORKFLOW").unwrap_or_default() == "CI" {
         return;
     }
     println!("rerun-if-env-changed=LIBCLANG_PATH");
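The one-line build.rs change above avoids a panic when `GITHUB_WORKFLOW` is unset, which is the common case for local builds: `unwrap_or_default()` turns the missing variable into an empty string so the comparison is simply false. A quick illustration:

use std::env;

fn main() {
    // Old behaviour: a missing variable panics.
    // let wf = env::var("GITHUB_WORKFLOW").unwrap(); // panics outside CI
    // New behaviour: fall back to "" and the CI check just fails quietly.
    let wf = env::var("GITHUB_WORKFLOW").unwrap_or_default();
    let in_ci = wf == "CI";
    println!("GITHUB_WORKFLOW={wf:?}, in_ci={in_ci}");
}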
4 third_party/rust/neqo-crypto/src/agent.rs vendored
@@ -117,12 +117,12 @@ pub struct SecretAgentPreInfo {
 }
 
 macro_rules! preinfo_arg {
-    ($v:ident, $m:ident, $f:ident: $t:ident $(,)?) => {
+    ($v:ident, $m:ident, $f:ident: $t:ty $(,)?) => {
         #[must_use]
         pub fn $v(&self) -> Option<$t> {
             match self.info.valuesSet & ssl::$m {
                 0 => None,
-                _ => Some($t::try_from(self.info.$f).unwrap()),
+                _ => Some(<$t>::try_from(self.info.$f).unwrap()),
             }
         }
     };
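Switching the macro fragment above from `$t:ident` to `$t:ty` lets callers pass any type expression, but a `ty` fragment cannot sit directly in call position the way an identifier can, hence the `<$t>::try_from(...)` form. An illustrative reduction of the same pattern; the macro, struct, and field names here are not the neqo ones.

// `$t:ty` accepts any type expression, and `<$t>::try_from(...)` is required
// because a `ty` fragment cannot appear directly in call position.
macro_rules! field_getter {
    ($v:ident, $f:ident: $t:ty $(,)?) => {
        pub fn $v(&self) -> Option<$t> {
            self.$f.map(|raw| <$t>::try_from(raw).unwrap())
        }
    };
}

struct Info {
    cipher: Option<u32>,
    early_data: Option<u32>,
}

impl Info {
    field_getter!(cipher_suite, cipher: u16);
    field_getter!(early_data_limit, early_data: u64);
}

fn main() {
    let info = Info { cipher: Some(0x1301), early_data: Some(16384) };
    assert_eq!(info.cipher_suite(), Some(0x1301));
    assert_eq!(info.early_data_limit(), Some(16384));
}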
102 third_party/rust/neqo-crypto/src/lib.rs vendored
@@ -91,10 +91,6 @@ impl Drop for NssLoaded {
 
 static INITIALIZED: OnceLock<Res<NssLoaded>> = OnceLock::new();
 
-fn already_initialized() -> bool {
-    unsafe { nss::NSS_IsInitialized() != 0 }
-}
-
 fn version_check() -> Res<()> {
     let min_ver = CString::new(MINIMUM_NSS_VERSION)?;
     if unsafe { nss::NSS_VersionCheck(min_ver.as_ptr()) } == 0 {
@@ -104,36 +100,6 @@ fn version_check() -> Res<()> {
     Ok(())
 }
 
-/// Initialize NSS. This only executes the initialization routines once, so if there is any chance
-/// that
-///
-/// # Errors
-///
-/// When NSS initialization fails.
-pub fn init() -> Res<()> {
-    // Set time zero.
-    time::init();
-    let res = INITIALIZED.get_or_init(|| {
-        version_check()?;
-        if already_initialized() {
-            return Ok(NssLoaded::External);
-        }
-
-        secstatus_to_res(unsafe { nss::NSS_NoDB_Init(null()) })?;
-        secstatus_to_res(unsafe { nss::NSS_SetDomesticPolicy() })?;
-        secstatus_to_res(unsafe {
-            p11::NSS_SetAlgorithmPolicy(
-                p11::SECOidTag::SEC_OID_XYBER768D00,
-                p11::NSS_USE_ALG_IN_SSL_KX,
-                0,
-            )
-        })?;
-
-        Ok(NssLoaded::NoDb)
-    });
-    res.as_ref().map(|_| ()).map_err(Clone::clone)
-}
-
 /// This enables SSLTRACE by calling a simple, harmless function to trigger its
 /// side effects. SSLTRACE is not enabled in NSS until a socket is made or
 /// global options are accessed. Reading an option is the least impact approach.
@@ -145,20 +111,15 @@ fn enable_ssl_trace() -> Res<()> {
     secstatus_to_res(unsafe { ssl::SSL_OptionGetDefault(opt, &mut v) })
 }
 
-/// Initialize with a database.
-///
-/// # Errors
-///
-/// If NSS cannot be initialized.
-pub fn init_db<P: Into<PathBuf>>(dir: P) -> Res<()> {
+fn init_once(db: Option<PathBuf>) -> Res<NssLoaded> {
+    // Set time zero.
     time::init();
-    let res = INITIALIZED.get_or_init(|| {
-        version_check()?;
-        if already_initialized() {
-            return Ok(NssLoaded::External);
-        }
+    version_check()?;
+    if unsafe { nss::NSS_IsInitialized() != 0 } {
+        return Ok(NssLoaded::External);
+    }
 
-        let path = dir.into();
+    let state = if let Some(path) = db {
         if !path.is_dir() {
             return Err(Error::InternalError);
         }
@@ -175,23 +136,48 @@ pub fn init_db<P: Into<PathBuf>>(dir: P) -> Res<()> {
             )
         })?;
-
-        secstatus_to_res(unsafe { nss::NSS_SetDomesticPolicy() })?;
-        secstatus_to_res(unsafe {
-            p11::NSS_SetAlgorithmPolicy(
-                p11::SECOidTag::SEC_OID_XYBER768D00,
-                p11::NSS_USE_ALG_IN_SSL_KX,
-                0,
-            )
-        })?;
         secstatus_to_res(unsafe {
             ssl::SSL_ConfigServerSessionIDCache(1024, 0, 0, dircstr.as_ptr())
         })?;
+        NssLoaded::Db
+    } else {
+        secstatus_to_res(unsafe { nss::NSS_NoDB_Init(null()) })?;
+        NssLoaded::NoDb
+    };
 
-        #[cfg(debug_assertions)]
-        enable_ssl_trace()?;
+    secstatus_to_res(unsafe { nss::NSS_SetDomesticPolicy() })?;
+    secstatus_to_res(unsafe {
+        p11::NSS_SetAlgorithmPolicy(
+            p11::SECOidTag::SEC_OID_XYBER768D00,
+            p11::NSS_USE_ALG_IN_SSL_KX,
+            0,
+        )
+    })?;
 
-        Ok(NssLoaded::Db)
-    });
+    #[cfg(debug_assertions)]
+    enable_ssl_trace()?;
+
+    Ok(state)
+}
+
+/// Initialize NSS. This only executes the initialization routines once, so if there is any chance
+/// that this is invoked twice, that's OK.
+///
+/// # Errors
+///
+/// When NSS initialization fails.
+pub fn init() -> Res<()> {
+    let res = INITIALIZED.get_or_init(|| init_once(None));
+    res.as_ref().map(|_| ()).map_err(Clone::clone)
+}
+
+/// Initialize with a database.
+///
+/// # Errors
+///
+/// If NSS cannot be initialized.
+pub fn init_db<P: Into<PathBuf>>(dir: P) -> Res<()> {
+    let res = INITIALIZED.get_or_init(|| init_once(Some(dir.into())));
     res.as_ref().map(|_| ()).map_err(Clone::clone)
 }
 
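The refactor above merges `init()` and `init_db()` into a shared `init_once()` guarded by the `INITIALIZED` `OnceLock`: whichever entry point runs first performs the NSS setup, and later calls only re-check the stored result. A simplified, self-contained sketch of that shape; `Error`, `Loaded`, and the body of `init_once` are stand-ins for `Res`, `NssLoaded`, and the real NSS calls.

use std::path::PathBuf;
use std::sync::OnceLock;

#[derive(Clone, Debug)]
enum Error {
    Internal,
}

#[derive(Debug)]
enum Loaded {
    NoDb,
    Db,
}

type Res<T> = Result<T, Error>;

static INITIALIZED: OnceLock<Res<Loaded>> = OnceLock::new();

// Shared one-time initializer; `db` selects between the two entry points.
fn init_once(db: Option<PathBuf>) -> Res<Loaded> {
    match db {
        Some(dir) if !dir.is_dir() => Err(Error::Internal),
        Some(_) => Ok(Loaded::Db),
        None => Ok(Loaded::NoDb),
    }
}

pub fn init() -> Res<()> {
    let res = INITIALIZED.get_or_init(|| init_once(None));
    res.as_ref().map(|_| ()).map_err(Clone::clone)
}

pub fn init_db<P: Into<PathBuf>>(dir: P) -> Res<()> {
    let res = INITIALIZED.get_or_init(|| init_once(Some(dir.into())));
    res.as_ref().map(|_| ()).map_err(Clone::clone)
}

fn main() {
    init().expect("first call runs init_once(None)");
    // A later init_db() call does not re-initialize; it sees the stored result.
    init_db("/does/not/matter").expect("already initialized");
}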
1 third_party/rust/neqo-crypto/src/replay.rs vendored
@@ -40,6 +40,7 @@ scoped_ptr!(
 );
 
 /// `AntiReplay` is used by servers when processing 0-RTT handshakes.
+///
 /// It limits the exposure of servers to replay attack by rejecting 0-RTT
 /// if it appears to be a replay. There is a false-positive rate that can be
 /// managed by tuning the parameters used to create the context.
@@ -1 +1 @@
{"files":{"Cargo.toml":"db789a718ec09df778191371010b6530ac9ff3107454e88ef09300e02505adc9","src/buffered_send_stream.rs":"dfb248c66ea65418b0c7798c2ecaa3ed70ef1af818ef58d53ef742b3445077b7","src/client_events.rs":"77fedca72ce54956eaba3fb7103085d196a631b764662584ea2629224c5c234e","src/conn_params.rs":"7f0df52bceda1923aef2b7c5c64a532f49ea083ea45e3dcd5bd4b03031b89643","src/connection.rs":"0d7b2e529839fe6c6f7bcb6117dc8734f0dc5cce1dfb3e2541c9710488e1b753","src/connection_client.rs":"8d6d1518bee62519911dd2571e97d463d9e05cb13ec55bc1cf6f6712c920972e","src/connection_server.rs":"02fda7595a33c57d0b3ccede51a1e7a8c9073e1ec107ca1b56c56f1728db2318","src/control_stream_local.rs":"20917762c7e7c1112c56abf1cbaf0ad7f0eab97d8db9a3b10ff524315a235670","src/control_stream_remote.rs":"3729f67aa0681b1dbd4147063890f8440f27d82454776500ae964a17cda4d6b5","src/features/extended_connect/mod.rs":"cbeb2294eaf34f08a2c0d0fe4d3473aea9c65df6faaec9dc3ed29dcb577b1c3f","src/features/extended_connect/tests/mod.rs":"fd6aee37243713e80fc526552f21f0222338cec9890409b6575a2a637b17ec1f","src/features/extended_connect/tests/webtransport/datagrams.rs":"51d6f3828c44b438eb1776e8dcce531af520f28bc0d715807d3f53a0eaa071d1","src/features/extended_connect/tests/webtransport/mod.rs":"27f77213414089148e94067bfc54133945a971fd7ddd6936bbfeabb9badc7e67","src/features/extended_connect/tests/webtransport/negotiation.rs":"a22094dbaf0754d39ac8ac08fce1ae34ace108220b696c7d618567df56cddeec","src/features/extended_connect/tests/webtransport/sessions.rs":"cf8aa14087cc3ff42657d86ecacbd51bc182357fdcbd10f57d32784abb415a12","src/features/extended_connect/tests/webtransport/streams.rs":"4c136855292d5ba5169f41c18beea13e7f1e014a0acb13c565c872d3a80d6377","src/features/extended_connect/webtransport_session.rs":"da0b99092d8af8d4f7699c8d45e2e4057f4de38d6fa99e27e3a7feffa569374f","src/features/extended_connect/webtransport_streams.rs":"9855d77705acb7d21566333c4b297816e363be2ade14b8685fd1df4a4861cf74","src/features/mod.rs":"89056df3a868cb0037963c942fc27093cc16d84538ffca2d4759f9a6a6c74c7f","src/frames/hframe.rs":"72349bf4e9dd5c57dc5443bb9aa079887e2742dc08d77ea55567e3b09e0de4d8","src/frames/mod.rs":"0e6d49888d723b2c2c73df11020ceb88d9f062e9d4dc436eb38173e0b772d905","src/frames/reader.rs":"8c7ea836a466410bd3c98848b4852945ae30e1306f73290c401c686998bde16d","src/frames/tests/hframe.rs":"53941fd7656f5e424d499278e6d9ba93ce716f219e86fe6fa08c058ea92f8d7b","src/frames/tests/mod.rs":"c6bbf85fbc6cb9adf6115d315f0564317eefd83ff3177c93050844ad77f6e694","src/frames/tests/reader.rs":"9ee0d9cdd87b98da2b94e577bbcc2bfde6d72be5177bf02364188935f79cb36a","src/frames/tests/wtframe.rs":"c6598d24f5e12972f02de6e1394362671633982db637a07e1c0bb9b56d93ea2a","src/frames/wtframe.rs":"ad6dd63c54a0305c045cd983d5889ae86a5a1afe1e7c13e1c169de9af440759e","src/headers_checks.rs":"69964deb121721be01df7174c177543c161389295ce1450d348369279e312ba4","src/lib.rs":"3fb980eee46bee8dcb97ad9d55014555d8994a7a2d040ca223f2d28fe7d923ef","src/priority.rs":"946307329f31819d969093406ae5448f7923343ccc112221ea6eedf86cf447dc","src/push_controller.rs":"53f72e8043505f85cba0f9c16b4a5ce14d6668b030d773067bc88b2a10bdd25b","src/qlog.rs":"db5f2dd6566d44b4f0541f75266b417b558c09e62141f056885cb8c66478a932","src/qpack_decoder_receiver.rs":"eb06c4be59da567fef70c20daa2c0f165c768131165479a210e69659f168b88f","src/qpack_encoder_receiver.rs":"831f3da9ec17966286786ba3f2c723395a132e65d6a33b4ec341fe7640c1a53d","src/recv_message.rs":"c3acf0544680f88ccd3500e6bea949c1bb43e2fb0a8922edc8f837d0166c89f8","src/request_target.rs":"9720b9f87d66a7c2301bba7de5a5a9300f5476
13a63153a4d35c7a7506a59b31","src/send_message.rs":"be4e9f64db2c25eb7176b84695e608e768115d62e615d389a33d26f7cd5b0c6c","src/server.rs":"8d48376abf36d036f51a84cddcc3d5acd56786b181fba0e24449e1417b030d63","src/server_connection_events.rs":"1396baab265a814045ccfe63d637a4fdc32a667b5eb2925fa4951f5c3078fb20","src/server_events.rs":"02fc8c0711efd758fb1ddee27d257c12ed35e2a989e7bf3de44bd662dc8234e3","src/settings.rs":"d0f8c546e70161422a029a40564b9e9b953fe671c60835196b16f3364779eaf9","src/stream_type_reader.rs":"0bc91ee4c2a516053cd2b55a60f9bd8e62008cde94274e281224cdffe352a907","tests/httpconn.rs":"87c32197258711d916cace23ed850c5bf0198f5e32756c68a32d91206b6e6db8","tests/priority.rs":"364754507873298612ad12e8d1d106d26d993712142d0be4cbf056da5338854c","tests/send_message.rs":"cdf7028eb64f8f3778c3bbb2a10e9482c4e995e9e1813143ccd83ec96b2d4b6a","tests/webtransport.rs":"02b81be0a20252a8bb0796b5287e426c1af5ddaf5a47d68aa9165393cba83c45"},"package":null}
{"files":{"Cargo.toml":"54df05103756645fe2a74baa9aae7ddc59cc782afe3f021f74c05e0e532352b8","src/buffered_send_stream.rs":"dfb248c66ea65418b0c7798c2ecaa3ed70ef1af818ef58d53ef742b3445077b7","src/client_events.rs":"77fedca72ce54956eaba3fb7103085d196a631b764662584ea2629224c5c234e","src/conn_params.rs":"7f0df52bceda1923aef2b7c5c64a532f49ea083ea45e3dcd5bd4b03031b89643","src/connection.rs":"1bf52ac3f3714f5bb2b1237fdb7b026ee4a2183f8f173120661f46213f8c5daa","src/connection_client.rs":"ec979c1ed03002ec2095aab40986089d6c2b0eda541566e0023424a0a896b687","src/connection_server.rs":"cf4da2cdd823e31d2352e45de84d366c45bd3d8adf38c9151a84d808bda80209","src/control_stream_local.rs":"20917762c7e7c1112c56abf1cbaf0ad7f0eab97d8db9a3b10ff524315a235670","src/control_stream_remote.rs":"3729f67aa0681b1dbd4147063890f8440f27d82454776500ae964a17cda4d6b5","src/features/extended_connect/mod.rs":"cbeb2294eaf34f08a2c0d0fe4d3473aea9c65df6faaec9dc3ed29dcb577b1c3f","src/features/extended_connect/tests/mod.rs":"fd6aee37243713e80fc526552f21f0222338cec9890409b6575a2a637b17ec1f","src/features/extended_connect/tests/webtransport/datagrams.rs":"51d6f3828c44b438eb1776e8dcce531af520f28bc0d715807d3f53a0eaa071d1","src/features/extended_connect/tests/webtransport/mod.rs":"27f77213414089148e94067bfc54133945a971fd7ddd6936bbfeabb9badc7e67","src/features/extended_connect/tests/webtransport/negotiation.rs":"a22094dbaf0754d39ac8ac08fce1ae34ace108220b696c7d618567df56cddeec","src/features/extended_connect/tests/webtransport/sessions.rs":"cf8aa14087cc3ff42657d86ecacbd51bc182357fdcbd10f57d32784abb415a12","src/features/extended_connect/tests/webtransport/streams.rs":"4c136855292d5ba5169f41c18beea13e7f1e014a0acb13c565c872d3a80d6377","src/features/extended_connect/webtransport_session.rs":"a55876a7ba1de47950f4209cfaa0e04ddbc54fb4109d0133f8e6e6b150971563","src/features/extended_connect/webtransport_streams.rs":"9855d77705acb7d21566333c4b297816e363be2ade14b8685fd1df4a4861cf74","src/features/mod.rs":"89056df3a868cb0037963c942fc27093cc16d84538ffca2d4759f9a6a6c74c7f","src/frames/hframe.rs":"de2c3d1a9205b0459fe676d7d5e1c0e463d3c1dd9e5f518a07b2e4ebbe66e3ec","src/frames/mod.rs":"0e6d49888d723b2c2c73df11020ceb88d9f062e9d4dc436eb38173e0b772d905","src/frames/reader.rs":"36e113164995fbd202c5024b51230c12fa7134b1759170abfd4fc1b4e7f5a5da","src/frames/tests/hframe.rs":"53941fd7656f5e424d499278e6d9ba93ce716f219e86fe6fa08c058ea92f8d7b","src/frames/tests/mod.rs":"c6bbf85fbc6cb9adf6115d315f0564317eefd83ff3177c93050844ad77f6e694","src/frames/tests/reader.rs":"9ee0d9cdd87b98da2b94e577bbcc2bfde6d72be5177bf02364188935f79cb36a","src/frames/tests/wtframe.rs":"c6598d24f5e12972f02de6e1394362671633982db637a07e1c0bb9b56d93ea2a","src/frames/wtframe.rs":"0f0366e590f7409580459e8a8b86fc48308ca7585837dddd7c319581a9a5a972","src/headers_checks.rs":"69964deb121721be01df7174c177543c161389295ce1450d348369279e312ba4","src/lib.rs":"3fb980eee46bee8dcb97ad9d55014555d8994a7a2d040ca223f2d28fe7d923ef","src/priority.rs":"946307329f31819d969093406ae5448f7923343ccc112221ea6eedf86cf447dc","src/push_controller.rs":"53f72e8043505f85cba0f9c16b4a5ce14d6668b030d773067bc88b2a10bdd25b","src/qlog.rs":"db5f2dd6566d44b4f0541f75266b417b558c09e62141f056885cb8c66478a932","src/qpack_decoder_receiver.rs":"eb06c4be59da567fef70c20daa2c0f165c768131165479a210e69659f168b88f","src/qpack_encoder_receiver.rs":"831f3da9ec17966286786ba3f2c723395a132e65d6a33b4ec341fe7640c1a53d","src/recv_message.rs":"8b2fb49850560b32dcdd7a90933361ef7d61bc42daad3f2952462913d49e8787","src/request_target.rs":"9720b9f87d66a7c2301bba7de5a5a9300f5476
13a63153a4d35c7a7506a59b31","src/send_message.rs":"be4e9f64db2c25eb7176b84695e608e768115d62e615d389a33d26f7cd5b0c6c","src/server.rs":"8d48376abf36d036f51a84cddcc3d5acd56786b181fba0e24449e1417b030d63","src/server_connection_events.rs":"1396baab265a814045ccfe63d637a4fdc32a667b5eb2925fa4951f5c3078fb20","src/server_events.rs":"02fc8c0711efd758fb1ddee27d257c12ed35e2a989e7bf3de44bd662dc8234e3","src/settings.rs":"d0f8c546e70161422a029a40564b9e9b953fe671c60835196b16f3364779eaf9","src/stream_type_reader.rs":"4e79202e7f1415165fe4eb88b9af67cbb8f85a13d68a577249c397fd5a78dbfb","tests/httpconn.rs":"87c32197258711d916cace23ed850c5bf0198f5e32756c68a32d91206b6e6db8","tests/priority.rs":"364754507873298612ad12e8d1d106d26d993712142d0be4cbf056da5338854c","tests/send_message.rs":"cdf7028eb64f8f3778c3bbb2a10e9482c4e995e9e1813143ccd83ec96b2d4b6a","tests/webtransport.rs":"02b81be0a20252a8bb0796b5287e426c1af5ddaf5a47d68aa9165393cba83c45"},"package":null}
2 third_party/rust/neqo-http3/Cargo.toml vendored
@@ -17,7 +17,7 @@ bench = []
 edition = "2021"
 rust-version = "1.76.0"
 name = "neqo-http3"
-version = "0.8.2"
+version = "0.9.0"
 authors = ["The Neqo Authors <necko@mozilla.com>"]
 build = false
 autobins = false
14 third_party/rust/neqo-http3/src/connection.rs vendored
@@ -533,7 +533,9 @@ impl Http3Connection {
                 Ok(ReceiveOutput::ControlFrames(rest))
             }
             ReceiveOutput::NewStream(
-                NewStreamType::Push(_) | NewStreamType::Http | NewStreamType::WebTransportStream(_),
+                NewStreamType::Push(_)
+                | NewStreamType::Http(_)
+                | NewStreamType::WebTransportStream(_),
             ) => Ok(output),
             ReceiveOutput::NewStream(_) => {
                 unreachable!("NewStream should have been handled already")
@@ -723,7 +725,7 @@ impl Http3Connection {
                    )),
                );
            }
-            NewStreamType::Http => {
+            NewStreamType::Http(_) => {
                qinfo!([self], "A new http stream {}.", stream_id);
            }
            NewStreamType::WebTransportStream(session_id) => {
@@ -755,9 +757,9 @@ impl Http3Connection {
             NewStreamType::Control | NewStreamType::Decoder | NewStreamType::Encoder => {
                 self.stream_receive(conn, stream_id)
             }
-            NewStreamType::Push(_) | NewStreamType::Http | NewStreamType::WebTransportStream(_) => {
-                Ok(ReceiveOutput::NewStream(stream_type))
-            }
+            NewStreamType::Push(_)
+            | NewStreamType::Http(_)
+            | NewStreamType::WebTransportStream(_) => Ok(ReceiveOutput::NewStream(stream_type)),
             NewStreamType::Unknown => Ok(ReceiveOutput::NoOutput),
         }
     }
@@ -919,7 +921,7 @@ impl Http3Connection {
                 message_type: MessageType::Response,
                 stream_type,
                 stream_id,
-                header_frame_type_read: false,
+                first_frame_type: None,
             },
             Rc::clone(&self.qpack_decoder),
             recv_events,
@@ -1094,7 +1094,7 @@ impl Http3Client {
             ReceiveOutput::NewStream(NewStreamType::Push(push_id)) => {
                 self.handle_new_push_stream(stream_id, push_id)
             }
-            ReceiveOutput::NewStream(NewStreamType::Http) => Err(Error::HttpStreamCreation),
+            ReceiveOutput::NewStream(NewStreamType::Http(_)) => Err(Error::HttpStreamCreation),
             ReceiveOutput::NewStream(NewStreamType::WebTransportStream(session_id)) => {
                 self.base_handler.webtransport_create_stream_remote(
                     StreamId::from(session_id),
@@ -1162,7 +1162,7 @@ impl Http3Client {
                 message_type: MessageType::Response,
                 stream_type: Http3StreamType::Push,
                 stream_id,
-                header_frame_type_read: false,
+                first_frame_type: None,
             },
             Rc::clone(&self.base_handler.qpack_decoder),
             Box::new(RecvPushEvents::new(push_id, Rc::clone(&self.push_handler))),
@@ -318,7 +318,7 @@ impl Http3ServerHandler {
     fn handle_stream_readable(&mut self, conn: &mut Connection, stream_id: StreamId) -> Res<()> {
         match self.base_handler.handle_stream_readable(conn, stream_id)? {
             ReceiveOutput::NewStream(NewStreamType::Push(_)) => Err(Error::HttpStreamCreation),
-            ReceiveOutput::NewStream(NewStreamType::Http) => {
+            ReceiveOutput::NewStream(NewStreamType::Http(first_frame_type)) => {
                 self.base_handler.add_streams(
                     stream_id,
                     Box::new(SendMessage::new(
@@ -333,7 +333,7 @@ impl Http3ServerHandler {
                 message_type: MessageType::Request,
                 stream_type: Http3StreamType::Http,
                 stream_id,
-                header_frame_type_read: true,
+                first_frame_type: Some(first_frame_type),
             },
             Rc::clone(&self.base_handler.qpack_decoder),
             Box::new(self.events.clone()),
@@ -70,7 +70,7 @@ impl WebTransportSession {
                 message_type: MessageType::Response,
                 stream_type: Http3StreamType::ExtendedConnect,
                 stream_id: session_id,
-                header_frame_type_read: false,
+                first_frame_type: None,
             },
             qpack_decoder,
             Box::new(stream_event_listener.clone()),
44 third_party/rust/neqo-http3/src/frames/hframe.rs vendored
@@ -12,19 +12,31 @@ use neqo_transport::StreamId;
 
 use crate::{frames::reader::FrameDecoder, settings::HSettings, Error, Priority, Res};
 
-pub type HFrameType = u64;
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct HFrameType(pub u64);
 
-pub const H3_FRAME_TYPE_DATA: HFrameType = 0x0;
-pub const H3_FRAME_TYPE_HEADERS: HFrameType = 0x1;
-pub const H3_FRAME_TYPE_CANCEL_PUSH: HFrameType = 0x3;
-pub const H3_FRAME_TYPE_SETTINGS: HFrameType = 0x4;
-pub const H3_FRAME_TYPE_PUSH_PROMISE: HFrameType = 0x5;
-pub const H3_FRAME_TYPE_GOAWAY: HFrameType = 0x7;
-pub const H3_FRAME_TYPE_MAX_PUSH_ID: HFrameType = 0xd;
-pub const H3_FRAME_TYPE_PRIORITY_UPDATE_REQUEST: HFrameType = 0xf0700;
-pub const H3_FRAME_TYPE_PRIORITY_UPDATE_PUSH: HFrameType = 0xf0701;
+pub const H3_FRAME_TYPE_DATA: HFrameType = HFrameType(0x0);
+pub const H3_FRAME_TYPE_HEADERS: HFrameType = HFrameType(0x1);
+pub const H3_FRAME_TYPE_CANCEL_PUSH: HFrameType = HFrameType(0x3);
+pub const H3_FRAME_TYPE_SETTINGS: HFrameType = HFrameType(0x4);
+pub const H3_FRAME_TYPE_PUSH_PROMISE: HFrameType = HFrameType(0x5);
+pub const H3_FRAME_TYPE_GOAWAY: HFrameType = HFrameType(0x7);
+pub const H3_FRAME_TYPE_MAX_PUSH_ID: HFrameType = HFrameType(0xd);
+pub const H3_FRAME_TYPE_PRIORITY_UPDATE_REQUEST: HFrameType = HFrameType(0xf0700);
+pub const H3_FRAME_TYPE_PRIORITY_UPDATE_PUSH: HFrameType = HFrameType(0xf0701);
 
-pub const H3_RESERVED_FRAME_TYPES: &[HFrameType] = &[0x2, 0x6, 0x8, 0x9];
+pub const H3_RESERVED_FRAME_TYPES: &[HFrameType] = &[
+    HFrameType(0x2),
+    HFrameType(0x6),
+    HFrameType(0x8),
+    HFrameType(0x9),
+];
+
+impl From<HFrameType> for u64 {
+    fn from(t: HFrameType) -> Self {
+        t.0
+    }
+}
 
 // data for DATA frame is not read into HFrame::Data.
 #[derive(PartialEq, Eq, Debug)]
@@ -74,7 +86,9 @@ impl HFrame {
             Self::MaxPushId { .. } => H3_FRAME_TYPE_MAX_PUSH_ID,
             Self::PriorityUpdateRequest { .. } => H3_FRAME_TYPE_PRIORITY_UPDATE_REQUEST,
             Self::PriorityUpdatePush { .. } => H3_FRAME_TYPE_PRIORITY_UPDATE_PUSH,
-            Self::Grease => Decoder::from(&random::<7>()).decode_uint(7).unwrap() * 0x1f + 0x21,
+            Self::Grease => {
+                HFrameType(Decoder::from(&random::<7>()).decode_uint(7).unwrap() * 0x1f + 0x21)
+            }
         }
     }
 
@@ -143,14 +157,14 @@ impl HFrame {
 }
 
 impl FrameDecoder<Self> for HFrame {
-    fn frame_type_allowed(frame_type: u64) -> Res<()> {
+    fn frame_type_allowed(frame_type: HFrameType) -> Res<()> {
         if H3_RESERVED_FRAME_TYPES.contains(&frame_type) {
             return Err(Error::HttpFrameUnexpected);
         }
         Ok(())
     }
 
-    fn decode(frame_type: u64, frame_len: u64, data: Option<&[u8]>) -> Res<Option<Self>> {
+    fn decode(frame_type: HFrameType, frame_len: u64, data: Option<&[u8]>) -> Res<Option<Self>> {
         if frame_type == H3_FRAME_TYPE_DATA {
             Ok(Some(Self::Data { len: frame_len }))
         } else if let Some(payload) = data {
@@ -207,7 +221,7 @@ impl FrameDecoder<Self> for HFrame {
         }
     }
 
-    fn is_known_type(frame_type: u64) -> bool {
+    fn is_known_type(frame_type: HFrameType) -> bool {
         matches!(
             frame_type,
             H3_FRAME_TYPE_DATA
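The hframe changes above replace the `pub type HFrameType = u64` alias with a newtype, so frame types can no longer be confused with arbitrary integers, while `From<HFrameType> for u64` keeps an explicit escape hatch. A standalone illustration of the same newtype pattern; `FrameType` here is illustrative, not the neqo type.

// A wrapped u64 that only compares equal to other frame types, with an
// explicit conversion back to the raw integer.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct FrameType(pub u64);

pub const FRAME_TYPE_DATA: FrameType = FrameType(0x0);
pub const FRAME_TYPE_HEADERS: FrameType = FrameType(0x1);

impl From<FrameType> for u64 {
    fn from(t: FrameType) -> Self {
        t.0
    }
}

fn main() {
    let decoded = FrameType(0x1);
    assert_eq!(decoded, FRAME_TYPE_HEADERS);
    assert_ne!(decoded, FRAME_TYPE_DATA);
    // Converting back to a raw integer is now an explicit step.
    assert_eq!(u64::from(decoded), 0x1);
    // `decoded == 0x1` would no longer compile, which is the point of the change.
}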
22 third_party/rust/neqo-http3/src/frames/reader.rs vendored
@@ -14,23 +14,25 @@ use neqo_common::{
 };
 use neqo_transport::{Connection, StreamId};
+
+use super::hframe::HFrameType;
 use crate::{Error, RecvStream, Res};
 
 const MAX_READ_SIZE: usize = 4096;
 
 pub trait FrameDecoder<T> {
-    fn is_known_type(frame_type: u64) -> bool;
+    fn is_known_type(frame_type: HFrameType) -> bool;
 
     /// # Errors
     ///
    /// Returns `HttpFrameUnexpected` if frames is not alowed, i.e. is a `H3_RESERVED_FRAME_TYPES`.
-    fn frame_type_allowed(_frame_type: u64) -> Res<()> {
+    fn frame_type_allowed(_frame_type: HFrameType) -> Res<()> {
         Ok(())
     }
 
     /// # Errors
     ///
     /// If a frame cannot be properly decoded.
-    fn decode(frame_type: u64, frame_len: u64, data: Option<&[u8]>) -> Res<Option<T>>;
+    fn decode(frame_type: HFrameType, frame_len: u64, data: Option<&[u8]>) -> Res<Option<T>>;
 }
 
 pub trait StreamReader {
@@ -95,7 +97,7 @@ enum FrameReaderState {
 #[derive(Debug)]
 pub struct FrameReader {
     state: FrameReaderState,
-    frame_type: u64,
+    frame_type: HFrameType,
     frame_len: u64,
 }
 
@@ -112,13 +114,13 @@ impl FrameReader {
             state: FrameReaderState::GetType {
                 decoder: IncrementalDecoderUint::default(),
             },
-            frame_type: 0,
+            frame_type: HFrameType(u64::MAX),
             frame_len: 0,
         }
     }
 
     #[must_use]
-    pub fn new_with_type(frame_type: u64) -> Self {
+    pub fn new_with_type(frame_type: HFrameType) -> Self {
         Self {
             state: FrameReaderState::GetLength {
                 decoder: IncrementalDecoderUint::default(),
@@ -202,13 +204,13 @@ impl FrameReader {
             FrameReaderState::GetType { decoder } => {
                 if let Some(v) = decoder.consume(&mut input) {
                     qtrace!("FrameReader::receive: read frame type {}", v);
-                    self.frame_type_decoded::<T>(v)?;
+                    self.frame_type_decoded::<T>(HFrameType(v))?;
                 }
             }
             FrameReaderState::GetLength { decoder } => {
                 if let Some(len) = decoder.consume(&mut input) {
                     qtrace!(
-                        "FrameReader::receive: frame type {} length {}",
+                        "FrameReader::receive: frame type {:?} length {}",
                         self.frame_type,
                         len
                     );
@@ -218,7 +220,7 @@ impl FrameReader {
             FrameReaderState::GetData { decoder } => {
                 if let Some(data) = decoder.consume(&mut input) {
                     qtrace!(
-                        "received frame {}: {}",
+                        "received frame {:?}: {}",
                         self.frame_type,
                         hex_with_len(&data[..])
                     );
@@ -236,7 +238,7 @@ impl FrameReader {
 }
 
 impl FrameReader {
-    fn frame_type_decoded<T: FrameDecoder<T>>(&mut self, frame_type: u64) -> Res<()> {
+    fn frame_type_decoded<T: FrameDecoder<T>>(&mut self, frame_type: HFrameType) -> Res<()> {
         T::frame_type_allowed(frame_type)?;
         self.frame_type = frame_type;
         self.state = FrameReaderState::GetLength {
@@ -6,6 +6,7 @@
 
 use neqo_common::{Decoder, Encoder};
 
+use super::hframe::HFrameType;
 use crate::{frames::reader::FrameDecoder, Error, Res};
 
 pub type WebTransportFrameType = u64;
@@ -29,10 +30,10 @@ impl WebTransportFrame {
 }
 
 impl FrameDecoder<Self> for WebTransportFrame {
-    fn decode(frame_type: u64, frame_len: u64, data: Option<&[u8]>) -> Res<Option<Self>> {
+    fn decode(frame_type: HFrameType, frame_len: u64, data: Option<&[u8]>) -> Res<Option<Self>> {
         if let Some(payload) = data {
             let mut dec = Decoder::from(payload);
-            if frame_type == WT_FRAME_CLOSE_SESSION {
+            if frame_type == HFrameType(WT_FRAME_CLOSE_SESSION) {
                 if frame_len > WT_FRAME_CLOSE_MAX_MESSAGE_SIZE + 4 {
                     return Err(Error::HttpMessageError);
                 }
@@ -50,7 +51,7 @@ impl FrameDecoder<Self> for WebTransportFrame {
         }
     }
 
-    fn is_known_type(frame_type: u64) -> bool {
-        frame_type == WT_FRAME_CLOSE_SESSION
+    fn is_known_type(frame_type: HFrameType) -> bool {
+        frame_type == HFrameType(WT_FRAME_CLOSE_SESSION)
     }
 }
14
third_party/rust/neqo-http3/src/recv_message.rs
vendored
14
third_party/rust/neqo-http3/src/recv_message.rs
vendored
@ -11,7 +11,7 @@ use neqo_qpack::decoder::QPackDecoder;
|
|||||||
use neqo_transport::{Connection, StreamId};
|
use neqo_transport::{Connection, StreamId};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
frames::{FrameReader, HFrame, StreamReaderConnectionWrapper, H3_FRAME_TYPE_HEADERS},
|
frames::{hframe::HFrameType, FrameReader, HFrame, StreamReaderConnectionWrapper},
|
||||||
headers_checks::{headers_valid, is_interim},
|
headers_checks::{headers_valid, is_interim},
|
||||||
priority::PriorityHandler,
|
priority::PriorityHandler,
|
||||||
push_controller::PushController,
|
push_controller::PushController,
|
||||||
@ -24,7 +24,7 @@ pub struct RecvMessageInfo {
|
|||||||
pub message_type: MessageType,
|
pub message_type: MessageType,
|
||||||
pub stream_type: Http3StreamType,
|
pub stream_type: Http3StreamType,
|
||||||
pub stream_id: StreamId,
|
pub stream_id: StreamId,
|
||||||
pub header_frame_type_read: bool,
|
pub first_frame_type: Option<u64>,
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -94,11 +94,11 @@ impl RecvMessage {
|
|||||||
) -> Self {
|
) -> Self {
|
||||||
Self {
|
Self {
|
||||||
state: RecvMessageState::WaitingForResponseHeaders {
|
state: RecvMessageState::WaitingForResponseHeaders {
|
||||||
frame_reader: if message_info.header_frame_type_read {
|
frame_reader: message_info
|
||||||
FrameReader::new_with_type(H3_FRAME_TYPE_HEADERS)
|
.first_frame_type
|
||||||
} else {
|
.map_or_else(FrameReader::new, |frame_type| {
|
||||||
FrameReader::new()
|
FrameReader::new_with_type(HFrameType(frame_type))
|
||||||
},
|
}),
|
||||||
},
|
},
|
||||||
message_type: message_info.message_type,
|
message_type: message_info.message_type,
|
||||||
stream_type: message_info.stream_type,
|
stream_type: message_info.stream_type,
|
||||||
|
@ -9,8 +9,9 @@ use neqo_qpack::{decoder::QPACK_UNI_STREAM_TYPE_DECODER, encoder::QPACK_UNI_STRE
|
|||||||
use neqo_transport::{Connection, StreamId, StreamType};
|
use neqo_transport::{Connection, StreamId, StreamType};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
control_stream_local::HTTP3_UNI_STREAM_TYPE_CONTROL, frames::H3_FRAME_TYPE_HEADERS, CloseType,
|
control_stream_local::HTTP3_UNI_STREAM_TYPE_CONTROL,
|
||||||
Error, Http3StreamType, ReceiveOutput, RecvStream, Res, Stream,
|
frames::{hframe::HFrameType, reader::FrameDecoder, HFrame, H3_FRAME_TYPE_HEADERS},
|
||||||
|
CloseType, Error, Http3StreamType, ReceiveOutput, RecvStream, Res, Stream,
|
||||||
};
|
};
|
||||||
|
|
||||||
pub const HTTP3_UNI_STREAM_TYPE_PUSH: u64 = 0x1;
|
 pub const HTTP3_UNI_STREAM_TYPE_PUSH: u64 = 0x1;
@@ -24,7 +25,7 @@ pub enum NewStreamType {
     Encoder,
     Push(u64),
     WebTransportStream(u64),
-    Http,
+    Http(u64),
     Unknown,
 }
 
@@ -37,7 +38,7 @@ impl NewStreamType {
     ///
     /// Push streams received by the server are not allowed and this function will return
     /// `HttpStreamCreation` error.
-    const fn final_stream_type(
+    fn final_stream_type(
         stream_type: u64,
         trans_stream_type: StreamType,
         role: Role,
@@ -49,8 +50,18 @@ impl NewStreamType {
             (HTTP3_UNI_STREAM_TYPE_PUSH, StreamType::UniDi, Role::Client)
             | (WEBTRANSPORT_UNI_STREAM, StreamType::UniDi, _)
             | (WEBTRANSPORT_STREAM, StreamType::BiDi, _) => Ok(None),
-            (H3_FRAME_TYPE_HEADERS, StreamType::BiDi, Role::Server) => Ok(Some(Self::Http)),
-            (_, StreamType::BiDi, Role::Server) => Err(Error::HttpFrame),
+            (_, StreamType::BiDi, Role::Server) => {
+                // The "stream_type" for a bidirectional stream is a frame type. We accept
+                // WEBTRANSPORT_STREAM (above), and HEADERS, and we have to ignore unknown types,
+                // but any other frame type is bad if we know about it.
+                if <HFrame as FrameDecoder<HFrame>>::is_known_type(HFrameType(stream_type))
+                    && HFrameType(stream_type) != H3_FRAME_TYPE_HEADERS
+                {
+                    Err(Error::HttpFrame)
+                } else {
+                    Ok(Some(Self::Http(stream_type)))
+                }
+            }
             (HTTP3_UNI_STREAM_TYPE_PUSH, StreamType::UniDi, Role::Server)
             | (_, StreamType::BiDi, Role::Client) => Err(Error::HttpStreamCreation),
             _ => Ok(Some(Self::Unknown)),
@@ -190,7 +201,7 @@ impl NewStreamHeadReader {
                 Err(Error::HttpClosedCriticalStream)
             }
             None => Err(Error::HttpStreamCreation),
-            Some(NewStreamType::Http) => Err(Error::HttpFrame),
+            Some(NewStreamType::Http(_)) => Err(Error::HttpFrame),
             Some(NewStreamType::Unknown) => Ok(decoded),
             Some(NewStreamType::Push(_) | NewStreamType::WebTransportStream(_)) => {
                 unreachable!("PushStream and WebTransport are mapped to None at this stage.")
@@ -216,9 +227,9 @@ impl RecvStream for NewStreamHeadReader {
     }
 
     fn receive(&mut self, conn: &mut Connection) -> Res<(ReceiveOutput, bool)> {
+        let t = self.get_type(conn)?;
         Ok((
-            self.get_type(conn)?
-                .map_or(ReceiveOutput::NoOutput, ReceiveOutput::NewStream),
+            t.map_or(ReceiveOutput::NoOutput, ReceiveOutput::NewStream),
             self.done(),
         ))
     }
@@ -240,7 +251,8 @@ mod tests {
         WEBTRANSPORT_UNI_STREAM,
     };
     use crate::{
-        control_stream_local::HTTP3_UNI_STREAM_TYPE_CONTROL, frames::H3_FRAME_TYPE_HEADERS,
+        control_stream_local::HTTP3_UNI_STREAM_TYPE_CONTROL,
+        frames::{H3_FRAME_TYPE_HEADERS, H3_FRAME_TYPE_SETTINGS},
         CloseType, Error, NewStreamType, ReceiveOutput, RecvStream, Res,
     };
 
@@ -384,16 +396,20 @@ mod tests {
     fn decode_stream_http() {
         let mut t = Test::new(StreamType::BiDi, Role::Server);
         t.decode(
-            &[H3_FRAME_TYPE_HEADERS],
+            &[u64::from(H3_FRAME_TYPE_HEADERS)],
             false,
-            &Ok((ReceiveOutput::NewStream(NewStreamType::Http), true)),
+            &Ok((
+                ReceiveOutput::NewStream(NewStreamType::Http(u64::from(H3_FRAME_TYPE_HEADERS))),
+                true,
+            )),
             true,
         );
 
         let mut t = Test::new(StreamType::UniDi, Role::Server);
         t.decode(
-            &[H3_FRAME_TYPE_HEADERS], /* this is the same as a HTTP3_UNI_STREAM_TYPE_PUSH which
-                                       * is not aallowed on the server side. */
+            &[u64::from(H3_FRAME_TYPE_HEADERS)], /* this is the same as a
+                                                  * HTTP3_UNI_STREAM_TYPE_PUSH which
+                                                  * is not aallowed on the server side. */
             false,
             &Err(Error::HttpStreamCreation),
             true,
@@ -401,7 +417,7 @@ mod tests {
 
         let mut t = Test::new(StreamType::BiDi, Role::Client);
         t.decode(
-            &[H3_FRAME_TYPE_HEADERS],
+            &[u64::from(H3_FRAME_TYPE_HEADERS)],
             false,
             &Err(Error::HttpStreamCreation),
             true,
@@ -409,8 +425,8 @@ mod tests {
 
         let mut t = Test::new(StreamType::UniDi, Role::Client);
         t.decode(
-            &[H3_FRAME_TYPE_HEADERS, 0xaaaa_aaaa], /* this is the same as a
+            &[u64::from(H3_FRAME_TYPE_HEADERS), 0xaaaa_aaaa], /* this is the same as a
                                                     * HTTP3_UNI_STREAM_TYPE_PUSH */
             false,
             &Ok((
                 ReceiveOutput::NewStream(NewStreamType::Push(0xaaaa_aaaa)),
@@ -418,6 +434,14 @@ mod tests {
             )),
             true,
         );
 
+        let mut t = Test::new(StreamType::BiDi, Role::Server);
+        t.decode(
+            &[H3_FRAME_TYPE_SETTINGS.into()],
+            true,
+            &Err(Error::HttpFrame),
+            true,
+        );
     }
 
     #[test]
@@ -478,7 +502,8 @@ mod tests {
         t.decode(
             &[WEBTRANSPORT_UNI_STREAM],
             false,
-            &Err(Error::HttpFrame),
+            // WEBTRANSPORT_UNI_STREAM is treated as an unknown frame type here.
+            &Ok((ReceiveOutput::NewStream(NewStreamType::Http(84)), true)),
             true,
         );
 
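The hunks above widen `NewStreamType::Http` to carry the frame type that opened the bidirectional stream. As a rough illustration of what that extra `u64` gives a consumer, here is a small self-contained Rust sketch; the enum below only mirrors the variants visible in this diff, and the `describe` helper is an assumption for illustration, not part of neqo-http3's API.

// Toy mirror of the variants shown in the hunks above; the real enum lives inside neqo-http3.
#[derive(Debug)]
enum NewStreamType {
    Encoder,
    Push(u64),
    WebTransportStream(u64),
    Http(u64),
    Unknown,
}

fn describe(t: &NewStreamType) -> String {
    match t {
        // The frame type that opened the stream now rides along with the variant.
        NewStreamType::Http(frame_type) => format!("HTTP stream opened by frame type {frame_type:#x}"),
        NewStreamType::Push(push_id) => format!("push stream {push_id}"),
        NewStreamType::WebTransportStream(session) => format!("WebTransport stream for session {session}"),
        NewStreamType::Encoder => "QPACK encoder stream".to_string(),
        NewStreamType::Unknown => "stream of an unknown type".to_string(),
    }
}

fn main() {
    // 0x0 is used here purely as an example value for the opening frame type.
    println!("{}", describe(&NewStreamType::Http(0x0)));
}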
@@ -1 +1 @@
-{"files":{"Cargo.toml":"65733e28fe0e6be1fbffa77fea4ed32f38ffab469763a577434e003d05c74786","src/decoder.rs":"ed2d6fa29e8726429aabb84e65f5d8025b320c0219b442b47c38903728ba3b2d","src/decoder_instructions.rs":"7e23ad00bcc6a1f0ee9af6c3d7f5ec5fcf11e9bc6cd895e125e3392c34b309e0","src/encoder.rs":"ebc9e82e5ad6b31be46ab876965d0e9dc710c4c5db084a631f384185b56cab36","src/encoder_instructions.rs":"5afc60ecc5b65f5b1908cff7eb3b7394c5c36cebe8ebfcdefbf792c827799390","src/header_block.rs":"1ea71fe2f588a0f96e39fd3a3157c66cc0ed2794f14c6f01b4a3069a43f7997b","src/huffman.rs":"6976f1b4d3e5ef849a6b080cfb2e8804bf01cfe3b9bd9e3994a319d5405cd8f3","src/huffman_decode_helper.rs":"9ce470e318b3664f58aa109bed483ab15bfd9e0b17d261ea2b609668a42a9d80","src/huffman_table.rs":"06fea766a6276ac56c7ee0326faed800a742c15fda1f33bf2513e6cc6a5e6d27","src/lib.rs":"f9bad0fe7643c618d034c4941ebd30ad5f6015b8b87b484b0ea79681d13d8b49","src/prefix.rs":"d9ad12838d61b38dc2300948e3da01fd65371215edde1c370cf54ccd87d64d46","src/qlog.rs":"fbd96ef7d21db2bae19b8e379995544e8cf123e8e5129c1500ace2773acf5649","src/qpack_send_buf.rs":"48f8d0e011e0fb8e4bd0774279d3465e2be01fd9480eaf374ae2adada6be430d","src/reader.rs":"c23214ba190c7a59e416eaffac612ff8c2043c3a84e884fb10ae3bc112d884a5","src/static_table.rs":"6e5ec26e2b6bd63375d2d77e72748151d430d1629a8e497ec0d0ea21c078524a","src/stats.rs":"624dfa3b40858c304097bb0ce5b1be1bb4d7916b1abfc222f1aa705907009730","src/table.rs":"2d2c9e6070a1e90048a4ad7c8279f9e1ce7615b44d7d8145fb0f140e554f5ca2"},"package":null}
+{"files":{"Cargo.toml":"b7832ef93c7463abc8cf9a8eab836fea5c79502cd23a037bceee8805704571ca","src/decoder.rs":"ed2d6fa29e8726429aabb84e65f5d8025b320c0219b442b47c38903728ba3b2d","src/decoder_instructions.rs":"7e23ad00bcc6a1f0ee9af6c3d7f5ec5fcf11e9bc6cd895e125e3392c34b309e0","src/encoder.rs":"ebc9e82e5ad6b31be46ab876965d0e9dc710c4c5db084a631f384185b56cab36","src/encoder_instructions.rs":"5afc60ecc5b65f5b1908cff7eb3b7394c5c36cebe8ebfcdefbf792c827799390","src/header_block.rs":"1ea71fe2f588a0f96e39fd3a3157c66cc0ed2794f14c6f01b4a3069a43f7997b","src/huffman.rs":"6976f1b4d3e5ef849a6b080cfb2e8804bf01cfe3b9bd9e3994a319d5405cd8f3","src/huffman_decode_helper.rs":"9ce470e318b3664f58aa109bed483ab15bfd9e0b17d261ea2b609668a42a9d80","src/huffman_table.rs":"06fea766a6276ac56c7ee0326faed800a742c15fda1f33bf2513e6cc6a5e6d27","src/lib.rs":"f9bad0fe7643c618d034c4941ebd30ad5f6015b8b87b484b0ea79681d13d8b49","src/prefix.rs":"d9ad12838d61b38dc2300948e3da01fd65371215edde1c370cf54ccd87d64d46","src/qlog.rs":"fbd96ef7d21db2bae19b8e379995544e8cf123e8e5129c1500ace2773acf5649","src/qpack_send_buf.rs":"48f8d0e011e0fb8e4bd0774279d3465e2be01fd9480eaf374ae2adada6be430d","src/reader.rs":"c23214ba190c7a59e416eaffac612ff8c2043c3a84e884fb10ae3bc112d884a5","src/static_table.rs":"6e5ec26e2b6bd63375d2d77e72748151d430d1629a8e497ec0d0ea21c078524a","src/stats.rs":"624dfa3b40858c304097bb0ce5b1be1bb4d7916b1abfc222f1aa705907009730","src/table.rs":"2d2c9e6070a1e90048a4ad7c8279f9e1ce7615b44d7d8145fb0f140e554f5ca2"},"package":null}

third_party/rust/neqo-qpack/Cargo.toml (vendored)
@@ -18,7 +18,7 @@ bench = []
 edition = "2021"
 rust-version = "1.76.0"
 name = "neqo-qpack"
-version = "0.8.2"
+version = "0.9.0"
 authors = ["The Neqo Authors <necko@mozilla.com>"]
 build = false
 autobins = false

File diff suppressed because one or more lines are too long

third_party/rust/neqo-transport/Cargo.toml (vendored)
@@ -16,7 +16,7 @@ example = []
 edition = "2021"
 rust-version = "1.76.0"
 name = "neqo-transport"
-version = "0.8.2"
+version = "0.9.0"
 authors = ["The Neqo Authors <necko@mozilla.com>"]
 build = "build.rs"
 autobins = false
@@ -190,7 +190,7 @@ impl<T: WindowAdjustment> CongestionControl for ClassicCongestionControl<T> {
         let mut is_app_limited = true;
         let mut new_acked = 0;
         for pkt in acked_pkts {
-            qdebug!(
+            qtrace!(
                 "packet_acked this={:p}, pn={}, ps={}, ignored={}, lost={}, rtt_est={:?}",
                 self,
                 pkt.pn(),
@@ -966,6 +966,11 @@ impl Connection {
             return;
         }
 
+        if self.state.closing() {
+            qtrace!([self], "Closing, not processing other timers");
+            return;
+        }
+
         self.streams.cleanup_closed_streams();
 
         let res = self.crypto.states.check_key_update(now);
@@ -981,7 +986,10 @@ impl Connection {
             self.create_resumption_token(now);
         }
 
-        if !self.paths.process_timeout(now, pto) {
+        if !self
+            .paths
+            .process_timeout(now, pto, &mut self.stats.borrow_mut())
+        {
             qinfo!([self], "last available path failed");
             self.absorb_error::<Error>(now, Err(Error::NoAvailablePath));
         }
@@ -1455,7 +1463,9 @@ impl Connection {
     ) {
         let space = PacketNumberSpace::from(packet.packet_type());
         if let Some(space) = self.acks.get_mut(space) {
-            *space.ecn_marks() += d.tos().into();
+            let space_ecn_marks = space.ecn_marks();
+            *space_ecn_marks += d.tos().into();
+            self.stats.borrow_mut().ecn_rx = *space_ecn_marks;
         } else {
             qtrace!("Not tracking ECN for dropped packet number space");
         }
@@ -1547,17 +1557,29 @@ impl Connection {
 
         qlog::packet_received(&self.qlog, &packet, &payload);
         let space = PacketNumberSpace::from(payload.packet_type());
-        if self.acks.get_mut(space).unwrap().is_duplicate(payload.pn()) {
-            qdebug!([self], "Duplicate packet {}-{}", space, payload.pn());
-            self.stats.borrow_mut().dups_rx += 1;
-        } else {
-            match self.process_packet(path, &payload, now) {
-                Ok(migrate) => self.postprocess_packet(path, d, &packet, migrate, now),
-                Err(e) => {
-                    self.ensure_error_path(path, &packet, now);
-                    return Err(e);
+        if let Some(space) = self.acks.get_mut(space) {
+            if space.is_duplicate(payload.pn()) {
+                qdebug!("Duplicate packet {}-{}", space, payload.pn());
+                self.stats.borrow_mut().dups_rx += 1;
+            } else {
+                match self.process_packet(path, &payload, now) {
+                    Ok(migrate) => {
+                        self.postprocess_packet(path, d, &packet, migrate, now);
+                    }
+                    Err(e) => {
+                        self.ensure_error_path(path, &packet, now);
+                        return Err(e);
+                    }
                 }
             }
+        } else {
+            qdebug!(
+                [self],
+                "Received packet {} for untracked space {}",
+                space,
+                payload.pn()
+            );
+            return Err(Error::ProtocolViolation);
         }
     }
     Err(e) => {
@@ -1684,7 +1706,11 @@ impl Connection {
             self.paths.make_permanent(path, None, cid);
             Ok(())
         } else if let Some(primary) = self.paths.primary() {
-            if primary.borrow().remote_cid().is_empty() {
+            if primary
+                .borrow()
+                .remote_cid()
+                .map_or(true, |id| id.is_empty())
+            {
                 self.paths
                     .make_permanent(path, None, ConnectionIdEntry::empty_remote());
                 Ok(())
@@ -1729,12 +1755,12 @@ impl Connection {
         // Make a path on which to run the handshake.
         self.setup_handshake_path(path, now);
 
-        self.zero_rtt_state = match self.crypto.enable_0rtt(self.version, self.role) {
-            Ok(true) => {
-                qdebug!([self], "Accepted 0-RTT");
-                ZeroRttState::AcceptedServer
-            }
-            _ => ZeroRttState::Rejected,
+        self.zero_rtt_state = if self.crypto.enable_0rtt(self.version, self.role) == Ok(true) {
+            qdebug!([self], "Accepted 0-RTT");
+            ZeroRttState::AcceptedServer
+        } else {
+            qtrace!([self], "Rejected 0-RTT");
+            ZeroRttState::Rejected
         };
 
         // The server knows the final version if it has remote transport parameters.
@@ -1817,7 +1843,10 @@ impl Connection {
             path.borrow(),
             if force { "now" } else { "after" }
         );
-        if self.paths.migrate(&path, force, now) {
+        if self
+            .paths
+            .migrate(&path, force, now, &mut self.stats.borrow_mut())
+        {
             self.loss_recovery.migrate();
         }
         Ok(())
@@ -1878,7 +1907,8 @@ impl Connection {
         }
 
         if self.ensure_permanent(path).is_ok() {
-            self.paths.handle_migration(path, d.source(), now);
+            self.paths
+                .handle_migration(path, d.source(), now, &mut self.stats.borrow_mut());
         } else {
             qinfo!(
                 [self],
@@ -1913,7 +1943,7 @@ impl Connection {
         // a packet on a new path, we avoid sending (and the privacy risk) rather
         // than reuse a connection ID.
         let res = if path.borrow().is_temporary() {
-            assert!(!cfg!(test), "attempting to close with a temporary path");
+            qerror!([self], "Attempting to close with a temporary path");
             Err(Error::InternalError)
         } else {
             self.output_path(&path, now, &Some(details))
@@ -1937,16 +1967,15 @@ impl Connection {
     ) -> (PacketType, PacketBuilder) {
         let pt = PacketType::from(cspace);
         let mut builder = if pt == PacketType::Short {
-            qdebug!("Building Short dcid {}", path.remote_cid());
+            qdebug!("Building Short dcid {:?}", path.remote_cid());
             PacketBuilder::short(encoder, tx.key_phase(), path.remote_cid())
         } else {
             qdebug!(
-                "Building {:?} dcid {} scid {}",
+                "Building {:?} dcid {:?} scid {:?}",
                 pt,
                 path.remote_cid(),
                 path.local_cid(),
             );
-
             PacketBuilder::long(encoder, pt, version, path.remote_cid(), path.local_cid())
         };
         if builder.remaining() > 0 {
@@ -2220,7 +2249,7 @@ impl Connection {
             // Include an ACK frame with the CONNECTION_CLOSE.
             let limit = builder.limit();
             builder.set_limit(limit - ClosingFrame::MIN_LENGTH);
-            self.acks.immediate_ack(now);
+            self.acks.immediate_ack(space, now);
             self.acks.write_frame(
                 space,
                 now,
@@ -2407,7 +2436,10 @@ impl Connection {
             self.loss_recovery.on_packet_sent(path, initial);
         }
         path.borrow_mut().add_sent(packets.len());
-        Ok(SendOption::Yes(path.borrow_mut().datagram(packets)))
+        Ok(SendOption::Yes(
+            path.borrow_mut()
+                .datagram(packets, &mut self.stats.borrow_mut()),
+        ))
     }
 }
 
@@ -2783,10 +2815,8 @@ impl Connection {
                 // prepare to resend them.
                 self.stats.borrow_mut().frame_rx.ping += 1;
                 self.crypto.resend_unacked(space);
-                if space == PacketNumberSpace::ApplicationData {
-                    // Send an ACK immediately if we might not otherwise do so.
-                    self.acks.immediate_ack(now);
-                }
+                // Send an ACK immediately if we might not otherwise do so.
+                self.acks.immediate_ack(space, now);
             }
             Frame::Ack {
                 largest_acknowledged,
@@ -2864,7 +2894,10 @@ impl Connection {
             }
             Frame::PathResponse { data } => {
                 self.stats.borrow_mut().frame_rx.path_response += 1;
-                if self.paths.path_response(data, now) {
+                if self
+                    .paths
+                    .path_response(data, now, &mut self.stats.borrow_mut())
+                {
                     // This PATH_RESPONSE enabled migration; tell loss recovery.
                     self.loss_recovery.migrate();
                 }
@@ -2945,7 +2978,12 @@ impl Connection {
         for token in lost.tokens() {
             qdebug!([self], "Lost: {:?}", token);
             match token {
-                RecoveryToken::Ack(_) => {}
+                RecoveryToken::Ack(ack_token) => {
+                    // If we lost an ACK frame during the handshake, send another one.
+                    if ack_token.space() != PacketNumberSpace::ApplicationData {
+                        self.acks.immediate_ack(ack_token.space(), lost.time_sent());
+                    }
+                }
                 RecoveryToken::Crypto(ct) => self.crypto.lost(ct),
                 RecoveryToken::HandshakeDone => self.state_signaling.handshake_done(),
                 RecoveryToken::NewToken(seqno) => self.new_token.lost(*seqno),
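A pattern that recurs in the connection hunks above: operations on paths and ACK tracking now receive `&mut self.stats.borrow_mut()` so their effects are counted in the connection's shared statistics. A minimal self-contained sketch of that shape, using toy types that are assumptions for illustration rather than neqo's real signatures:

use std::cell::RefCell;

#[derive(Default)]
struct Stats {
    timeouts: u64,
}

struct Paths;

impl Paths {
    // The shared counters are handed in explicitly instead of being reached for internally.
    fn process_timeout(&mut self, stats: &mut Stats) -> bool {
        stats.timeouts += 1;
        true // pretend a path is still available
    }
}

struct Conn {
    paths: Paths,
    stats: RefCell<Stats>,
}

impl Conn {
    fn on_timeout(&mut self) {
        // Mirrors the diff: borrow the RefCell once and pass the &mut Stats down.
        if !self.paths.process_timeout(&mut self.stats.borrow_mut()) {
            // Last available path failed; close the connection (elided).
        }
    }
}

fn main() {
    let mut c = Conn { paths: Paths, stats: RefCell::new(Stats::default()) };
    c.on_timeout();
    assert_eq!(c.stats.borrow().timeouts, 1);
}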
@@ -76,6 +76,11 @@ impl State {
             None
         }
     }
+
+    #[must_use]
+    pub const fn closing(&self) -> bool {
+        matches!(self, Self::Closing { .. } | Self::Draining { .. })
+    }
 }
 
 // Implement `PartialOrd` so that we can enforce monotonic state progression.
@@ -599,7 +599,7 @@ fn datagram_fill() {
         let path = p.borrow();
         // Minimum overhead is connection ID length, 1 byte short header, 1 byte packet number,
         // 1 byte for the DATAGRAM frame type, and 16 bytes for the AEAD.
-        path.plpmtu() - path.remote_cid().len() - 19
+        path.plpmtu() - path.remote_cid().unwrap().len() - 19
     };
     assert!(space >= 64); // Unlikely, but this test depends on the datagram being this large.
 
@@ -12,13 +12,14 @@ use test_fixture::{
     fixture_init, now, DEFAULT_ADDR_V4,
 };
 
-use super::{send_something_with_modifier, DEFAULT_RTT};
 use crate::{
     connection::tests::{
         connect_force_idle, connect_force_idle_with_modifier, default_client, default_server,
-        handshake_with_modifier, migration::get_cid, new_client, new_server, send_something,
+        handshake_with_modifier, migration::get_cid, new_client, new_server, send_and_receive,
+        send_something, send_something_with_modifier, send_with_modifier_and_receive, DEFAULT_RTT,
     },
     ecn::ECN_TEST_COUNT,
+    path::MAX_PATH_PROBES,
     ConnectionId, ConnectionParameters, StreamType,
 };
 
@@ -91,6 +92,79 @@ fn handshake_delay_with_ecn_blackhole() {
     );
 }
 
+#[test]
+fn migration_delay_to_ecn_blackhole() {
+    let mut now = now();
+    let mut client = default_client();
+    let mut server = default_server();
+
+    // Do a handshake.
+    connect_force_idle(&mut client, &mut server);
+
+    // Migrate the client.
+    client
+        .migrate(Some(DEFAULT_ADDR_V4), Some(DEFAULT_ADDR_V4), false, now)
+        .unwrap();
+
+    // The client should send MAX_PATH_PROBES path challenges with ECN enabled, and then another
+    // MAX_PATH_PROBES without ECN.
+    let mut probes = 0;
+    while probes < MAX_PATH_PROBES * 2 {
+        match client.process_output(now) {
+            crate::Output::Callback(t) => {
+                now += t;
+            }
+            crate::Output::Datagram(d) => {
+                // The new path is IPv4.
+                if d.source().is_ipv4() {
+                    // This should be a PATH_CHALLENGE.
+                    probes += 1;
+                    assert_eq!(client.stats().frame_tx.path_challenge, probes);
+                    if probes <= MAX_PATH_PROBES {
+                        // The first probes should be sent with ECN.
+                        assert_ecn_enabled(d.tos());
+                    } else {
+                        // The next probes should be sent without ECN.
+                        assert_ecn_disabled(d.tos());
+                    }
+                }
+            }
+            crate::Output::None => panic!("unexpected output"),
+        }
+    }
+}
+
+#[test]
+fn stats() {
+    let now = now();
+    let mut client = default_client();
+    let mut server = default_server();
+    connect_force_idle(&mut client, &mut server);
+
+    for _ in 0..ECN_TEST_COUNT {
+        let ack = send_and_receive(&mut client, &mut server, now);
+        client.process_input(&ack.unwrap(), now);
+    }
+
+    for _ in 0..ECN_TEST_COUNT {
+        let ack = send_and_receive(&mut server, &mut client, now);
+        server.process_input(&ack.unwrap(), now);
+    }
+
+    for stats in [client.stats(), server.stats()] {
+        assert_eq!(stats.ecn_paths_capable, 1);
+        assert_eq!(stats.ecn_paths_not_capable, 0);
+
+        for codepoint in [IpTosEcn::Ect1, IpTosEcn::Ce] {
+            assert_eq!(stats.ecn_tx[codepoint], 0);
+            assert_eq!(stats.ecn_rx[codepoint], 0);
+        }
+    }
+
+    assert!(client.stats().ecn_tx[IpTosEcn::Ect0] <= server.stats().ecn_rx[IpTosEcn::Ect0]);
+    assert!(server.stats().ecn_tx[IpTosEcn::Ect0] <= client.stats().ecn_rx[IpTosEcn::Ect0]);
+}
+
 #[test]
 fn disables_on_loss() {
     let now = now();
@@ -111,6 +185,24 @@ fn disables_on_loss() {
     assert_ecn_disabled(client_pkt.tos());
 }
 
+#[test]
+fn disables_on_remark() {
+    let now = now();
+    let mut client = default_client();
+    let mut server = default_server();
+    connect_force_idle(&mut client, &mut server);
+
+    for _ in 0..ECN_TEST_COUNT {
+        if let Some(ack) = send_with_modifier_and_receive(&mut client, &mut server, now, remark()) {
+            client.process_input(&ack, now);
+        }
+    }
+
+    // ECN should now be disabled.
+    let client_pkt = send_something(&mut client, now);
+    assert_ecn_disabled(client_pkt.tos());
+}
+
 /// This function performs a handshake over a path that modifies packets via `orig_path_modifier`.
 /// It then sends `burst` packets on that path, and then migrates to a new path that
 /// modifies packets via `new_path_modifier`. It sends `burst` packets on the new path.
@@ -35,6 +35,7 @@ use crate::{
     },
     events::ConnectionEvent,
     server::ValidateAddress,
+    stats::FrameStats,
     tparams::{TransportParameter, MIN_ACK_DELAY},
     tracking::DEFAULT_ACK_DELAY,
     CloseReason, ConnectionParameters, EmptyConnectionIdGenerator, Error, Pmtud, StreamType,
@@ -1194,3 +1195,62 @@ fn emit_authentication_needed_once() {
     _ = client.process(server2.as_dgram_ref(), now());
     assert_eq!(0, authentication_needed_count(&mut client));
 }
+
+#[test]
+fn client_initial_retransmits_identical() {
+    let mut now = now();
+    let mut client = default_client();
+
+    // Force the client to retransmit its Initial packet a number of times and make sure the
+    // retranmissions are identical to the original. Also, verify the PTO durations.
+    for i in 1..=5 {
+        let ci = client.process(None, now).dgram().unwrap();
+        assert_eq!(ci.len(), client.plpmtu());
+        assert_eq!(
+            client.stats().frame_tx,
+            FrameStats {
+                crypto: i,
+                all: i,
+                ..Default::default()
+            }
+        );
+        let pto = client.process(None, now).callback();
+        assert_eq!(pto, DEFAULT_RTT * 3 * (1 << (i - 1)));
+        now += pto;
+    }
+}
+
+#[test]
+fn server_initial_retransmits_identical() {
+    let mut now = now();
+    let mut client = default_client();
+    let mut ci = client.process(None, now).dgram();
+
+    // Force the server to retransmit its Initial packet a number of times and make sure the
+    // retranmissions are identical to the original. Also, verify the PTO durations.
+    let mut server = default_server();
+    let mut total_ptos: Duration = Duration::from_secs(0);
+    for i in 1..=3 {
+        let si = server.process(ci.take().as_ref(), now).dgram().unwrap();
+        assert_eq!(si.len(), server.plpmtu());
+        assert_eq!(
+            server.stats().frame_tx,
+            FrameStats {
+                crypto: i * 2,
+                ack: i,
+                all: i * 3,
+                ..Default::default()
+            }
+        );
+
+        let pto = server.process(None, now).callback();
+        if i < 3 {
+            assert_eq!(pto, DEFAULT_RTT * 3 * (1 << (i - 1)));
+        } else {
+            // Server is amplification-limited after three (re)transmissions.
+            assert_eq!(pto, server.conn_params.get_idle_timeout() - total_ptos);
+        }
+        now += pto;
+        total_ptos += pto;
+    }
+}
@@ -287,7 +287,7 @@ fn idle_caching() {
     let mut client = default_client();
     let mut server = default_server();
     let start = now();
-    let mut builder = PacketBuilder::short(Encoder::new(), false, []);
+    let mut builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);
 
     // Perform the first round trip, but drop the Initial from the server.
     // The client then caches the Handshake packet.
@@ -297,18 +297,15 @@ fn idle_caching() {
     client.process_input(&handshake.unwrap(), start);
 
     // Perform an exchange and keep the connection alive.
-    // Only allow a packet containing a PING to pass.
     let middle = start + AT_LEAST_PTO;
-    mem::drop(client.process_output(middle));
+    // This is the RTX of the client Initial.
     let dgram = client.process_output(middle).dgram();
 
     // Get the server to send its first probe and throw that away.
     mem::drop(server.process_output(middle).dgram());
-    // Now let the server process the client PING. This causes the server
+    // Now let the server process the RTX'ed client Initial. This causes the server
     // to send CRYPTO frames again, so manually extract and discard those.
-    let ping_before_s = server.stats().frame_rx.ping;
     server.process_input(&dgram.unwrap(), middle);
-    assert_eq!(server.stats().frame_rx.ping, ping_before_s + 1);
     let mut tokens = Vec::new();
     server.crypto.streams.write_frame(
         PacketNumberSpace::Initial,
|
|||||||
// Now only allow the Initial packet from the server through;
|
// Now only allow the Initial packet from the server through;
|
||||||
// it shouldn't contain a CRYPTO frame.
|
// it shouldn't contain a CRYPTO frame.
|
||||||
let (initial, _) = split_datagram(&dgram.unwrap());
|
let (initial, _) = split_datagram(&dgram.unwrap());
|
||||||
let ping_before_c = client.stats().frame_rx.ping;
|
let crypto_before_c = client.stats().frame_rx.crypto;
|
||||||
let ack_before = client.stats().frame_rx.ack;
|
let ack_before = client.stats().frame_rx.ack;
|
||||||
client.process_input(&initial, middle);
|
client.process_input(&initial, middle);
|
||||||
assert_eq!(client.stats().frame_rx.ping, ping_before_c + 1);
|
assert_eq!(client.stats().frame_rx.crypto, crypto_before_c);
|
||||||
assert_eq!(client.stats().frame_rx.ack, ack_before + 1);
|
assert_eq!(client.stats().frame_rx.ack, ack_before + 1);
|
||||||
|
|
||||||
let end = start + default_timeout() + (AT_LEAST_PTO / 2);
|
let end = start + default_timeout() + (AT_LEAST_PTO / 2);
|
||||||
|
@@ -28,6 +28,7 @@ use crate::{
     connection::tests::send_something_paced,
     frame::FRAME_TYPE_NEW_CONNECTION_ID,
     packet::PacketBuilder,
+    path::MAX_PATH_PROBES,
     pmtud::Pmtud,
     tparams::{self, PreferredAddress, TransportParameter},
     CloseReason, ConnectionId, ConnectionIdDecoder, ConnectionIdGenerator, ConnectionIdRef,
@@ -236,7 +237,8 @@ fn migrate_immediate_fail() {
     let probe = client.process_output(now).dgram().unwrap();
     assert_v4_path(&probe, true); // Contains PATH_CHALLENGE.
 
-    for _ in 0..2 {
+    // -1 because first PATH_CHALLENGE already sent above
+    for _ in 0..MAX_PATH_PROBES * 2 - 1 {
         let cb = client.process_output(now).callback();
         assert_ne!(cb, Duration::new(0, 0));
         now += cb;
@@ -311,7 +313,8 @@ fn migrate_same_fail() {
     let probe = client.process_output(now).dgram().unwrap();
     assert_v6_path(&probe, true); // Contains PATH_CHALLENGE.
 
-    for _ in 0..2 {
+    // -1 because first PATH_CHALLENGE already sent above
+    for _ in 0..MAX_PATH_PROBES * 2 - 1 {
        let cb = client.process_output(now).callback();
        assert_ne!(cb, Duration::new(0, 0));
        now += cb;
@@ -946,7 +949,6 @@ impl crate::connection::test_internal::FrameWriter for GarbageWriter {
 /// Test the case that we run out of connection ID and receive an invalid frame
 /// from a new path.
 #[test]
-#[should_panic(expected = "attempting to close with a temporary path")]
 fn error_on_new_path_with_no_connection_id() {
     let mut client = default_client();
     let mut server = default_server();
@@ -967,5 +969,23 @@ fn error_on_new_path_with_no_connection_id() {
 
     // See issue #1697. We had a crash when the client had a temporary path and
     // process_output is called.
+    let closing_frames = client.stats().frame_tx.connection_close;
     mem::drop(client.process_output(now()));
+    assert!(matches!(
+        client.state(),
+        State::Closing {
+            error: CloseReason::Transport(Error::UnknownFrameType),
+            ..
+        }
+    ));
+    // Wait until the connection is closed.
+    let mut now = now();
+    now += client.process(None, now).callback();
+    _ = client.process_output(now);
+    // No closing frames should be sent, and the connection should be closed.
+    assert_eq!(client.stats().frame_tx.connection_close, closing_frames);
+    assert!(matches!(
+        client.state(),
+        State::Closed(CloseReason::Transport(Error::UnknownFrameType))
+    ));
 }
@@ -586,10 +586,10 @@ fn send_something_paced_with_modifier(
             .dgram()
             .expect("send_something: should have something to send")
         }
-        Output::Datagram(d) => modifier(d).unwrap(),
+        Output::Datagram(d) => d,
         Output::None => panic!("send_something: got Output::None"),
     };
-    (dgram, now)
+    (modifier(dgram).unwrap(), now)
 }
 
 fn send_something_paced(
@@ -614,6 +614,18 @@ fn send_something(sender: &mut Connection, now: Instant) -> Datagram {
     send_something_with_modifier(sender, now, Some)
 }
 
+/// Send something on a stream from `sender` through a modifier to `receiver`.
+/// Return any ACK that might result.
+fn send_with_modifier_and_receive(
+    sender: &mut Connection,
+    receiver: &mut Connection,
+    now: Instant,
+    modifier: fn(Datagram) -> Option<Datagram>,
+) -> Option<Datagram> {
+    let dgram = send_something_with_modifier(sender, now, modifier);
+    receiver.process(Some(&dgram), now).dgram()
+}
+
 /// Send something on a stream from `sender` to `receiver`.
 /// Return any ACK that might result.
 fn send_and_receive(
@@ -621,8 +633,7 @@ fn send_and_receive(
     receiver: &mut Connection,
     now: Instant,
 ) -> Option<Datagram> {
-    let dgram = send_something(sender, now);
-    receiver.process(Some(&dgram), now).dgram()
+    send_with_modifier_and_receive(sender, receiver, now, Some)
 }
 
 fn get_tokens(client: &mut Connection) -> Vec<ResumptionToken> {
@@ -264,6 +264,7 @@ fn pto_handshake_complete() {
     // We'll use that packet to force the server to acknowledge 1-RTT.
     let stream_id = client.stream_create(StreamType::UniDi).unwrap();
     client.stream_close_send(stream_id).unwrap();
+    now += HALF_RTT * 6;
     let pkt3 = client.process(None, now).dgram();
     assert_handshake(pkt3.as_ref().unwrap());
     let (pkt3_hs, pkt3_1rtt) = split_datagram(&pkt3.unwrap());
@@ -581,6 +582,9 @@ fn loss_time_past_largest_acked() {
     assert!(s_pto < RTT);
     let s_hs2 = server.process(None, now + s_pto).dgram();
     assert!(s_hs2.is_some());
+    let s_pto = server.process(None, now).callback();
+    assert_ne!(s_pto, Duration::from_secs(0));
+    assert!(s_pto < RTT);
     let s_hs3 = server.process(None, now + s_pto).dgram();
     assert!(s_hs3.is_some());
 
@@ -623,7 +627,9 @@ fn loss_time_past_largest_acked() {
 
     // Now the client should start its loss recovery timer based on the ACK.
     now += RTT / 2;
-    let c_ack = client.process(Some(&s_hs_ack), now).dgram();
+    let _c_ack = client.process(Some(&s_hs_ack), now).dgram();
+    // This ACK triggers an immediate ACK, due to an ACK loss during handshake.
+    let c_ack = client.process(None, now).dgram();
     assert!(c_ack.is_none());
     // The client should now have the loss recovery timer active.
     let lr_time = client.process(None, now).callback();
@@ -4,9 +4,9 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use std::{cell::RefCell, rc::Rc};
+use std::{cell::RefCell, rc::Rc, time::Duration};
 
-use neqo_common::event::Provider;
+use neqo_common::{event::Provider, qdebug};
 use neqo_crypto::{AllowZeroRtt, AntiReplay};
 use test_fixture::{assertions, now};
 
@@ -258,3 +258,65 @@ fn zero_rtt_update_flow_control() {
     assert!(client.stream_send_atomic(uni_stream, MESSAGE).unwrap());
     assert!(client.stream_send_atomic(bidi_stream, MESSAGE).unwrap());
 }
+
+#[test]
+fn zero_rtt_loss_accepted() {
+    // This test requires a wider anti-replay window than other tests
+    // because the dropped 0-RTT packets add a bunch of delay.
+    const WINDOW: Duration = Duration::from_secs(20);
+    for i in 0..5 {
+        let mut client = default_client();
+        let mut server = default_server();
+        connect(&mut client, &mut server);
+
+        let mut now = now();
+        let earlier = now;
+
+        let token = exchange_ticket(&mut client, &mut server, now);
+
+        now += WINDOW;
+        let mut client = default_client();
+        client.enable_resumption(now, token).unwrap();
+        let mut server = resumed_server(&client);
+        let anti_replay = AntiReplay::new(earlier, WINDOW, 1, 3).unwrap();
+        server
+            .server_enable_0rtt(&anti_replay, AllowZeroRtt {})
+            .unwrap();
+
+        // Make CI/0-RTT
+        let client_stream_id = client.stream_create(StreamType::UniDi).unwrap();
+        client.stream_send(client_stream_id, &[1, 2, 3]).unwrap();
+        let mut ci = client.process_output(now);
+        assert!(ci.as_dgram_ref().is_some());
+        assertions::assert_coalesced_0rtt(&ci.as_dgram_ref().unwrap()[..]);
+
+        // Drop CI/0-RTT a number of times
+        qdebug!("Drop CI/0-RTT {i} extra times");
+        for _ in 0..i {
+            now += client.process_output(now).callback();
+            ci = client.process_output(now);
+            assert!(ci.as_dgram_ref().is_some());
+        }
+
+        // Process CI/0-RTT
+        let si = server.process(ci.as_dgram_ref(), now);
+        assert!(si.as_dgram_ref().is_some());
+
+        let server_stream_id = server
+            .events()
+            .find_map(|evt| match evt {
+                ConnectionEvent::NewStream { stream_id } => Some(stream_id),
+                _ => None,
+            })
+            .expect("should have received a new stream event");
+        assert_eq!(client_stream_id, server_stream_id.as_u64());
+
+        // 0-RTT should be accepted
+        client.process_input(si.as_dgram_ref().unwrap(), now);
+        let recvd_0rtt_reject = |e| e == ConnectionEvent::ZeroRttRejected;
+        assert!(
+            !client.events().any(recvd_0rtt_reject),
+            "rejected 0-RTT after {i} extra dropped packets"
+        );
+    }
+}

third_party/rust/neqo-transport/src/ecn.rs (vendored)
@@ -12,6 +12,7 @@ use neqo_common::{qdebug, qinfo, qwarn, IpTosEcn};
 use crate::{
     packet::{PacketNumber, PacketType},
     recovery::SentPacket,
+    Stats,
 };
 
 /// The number of packets to use for testing a path for ECN capability.
@@ -25,7 +26,7 @@ const ECN_TEST_COUNT_INITIAL_PHASE: usize = 3;
 
 /// The state information related to testing a path for ECN capability.
 /// See RFC9000, Appendix A.4.
-#[derive(Debug, PartialEq, Clone)]
+#[derive(Debug, PartialEq, Clone, Copy)]
 enum EcnValidationState {
     /// The path is currently being tested for ECN capability, with the number of probes sent so
     /// far on the path during the ECN validation.
@@ -50,7 +51,32 @@ impl Default for EcnValidationState {
     }
 }
 
+impl EcnValidationState {
+    fn set(&mut self, new: Self, stats: &mut Stats) {
+        let old = std::mem::replace(self, new);
+
+        match old {
+            Self::Testing { .. } | Self::Unknown => {}
+            Self::Failed => debug_assert!(false, "Failed is a terminal state"),
+            Self::Capable => stats.ecn_paths_capable -= 1,
+        }
+        match new {
+            Self::Testing { .. } | Self::Unknown => {}
+            Self::Failed => stats.ecn_paths_not_capable += 1,
+            Self::Capable => stats.ecn_paths_capable += 1,
+        }
+    }
+}
+
 /// The counts for different ECN marks.
+///
+/// Note: [`EcnCount`] is used both for outgoing UDP datagrams, returned by
+/// remote through QUIC ACKs and for incoming UDP datagrams, read from IP TOS
+/// header. In the former case, given that QUIC ACKs only carry
+/// [`IpTosEcn::Ect0`], [`IpTosEcn::Ect1`] and [`IpTosEcn::Ce`], but never
+/// [`IpTosEcn::NotEct`], the [`IpTosEcn::NotEct`] value will always be 0.
+///
+/// See also <https://www.rfc-editor.org/rfc/rfc9000.html#section-19.3.2>.
 #[derive(PartialEq, Eq, Debug, Clone, Copy, Default)]
 pub struct EcnCount(EnumMap<IpTosEcn, u64>);
 
@@ -126,17 +152,22 @@ impl EcnInfo {
     /// Exit ECN validation if the number of packets sent exceeds `ECN_TEST_COUNT`.
     /// We do not implement the part of the RFC that says to exit ECN validation if the time since
     /// the start of ECN validation exceeds 3 * PTO, since this seems to happen much too quickly.
-    pub fn on_packet_sent(&mut self) {
+    pub fn on_packet_sent(&mut self, stats: &mut Stats) {
         if let EcnValidationState::Testing { probes_sent, .. } = &mut self.state {
             *probes_sent += 1;
             qdebug!("ECN probing: sent {} probes", probes_sent);
             if *probes_sent == ECN_TEST_COUNT {
                 qdebug!("ECN probing concluded with {} probes sent", probes_sent);
-                self.state = EcnValidationState::Unknown;
+                self.state.set(EcnValidationState::Unknown, stats);
             }
         }
     }
 
+    /// Disable ECN.
+    pub fn disable_ecn(&mut self, stats: &mut Stats) {
+        self.state.set(EcnValidationState::Failed, stats);
+    }
+
     /// Process ECN counts from an ACK frame.
     ///
     /// Returns whether ECN counts contain new valid ECN CE marks.
@@ -144,16 +175,17 @@ impl EcnInfo {
         &mut self,
         acked_packets: &[SentPacket],
         ack_ecn: Option<EcnCount>,
+        stats: &mut Stats,
     ) -> bool {
         let prev_baseline = self.baseline;
 
-        self.validate_ack_ecn_and_update(acked_packets, ack_ecn);
+        self.validate_ack_ecn_and_update(acked_packets, ack_ecn, stats);
 
         matches!(self.state, EcnValidationState::Capable)
             && (self.baseline - prev_baseline)[IpTosEcn::Ce] > 0
     }
 
-    pub fn on_packets_lost(&mut self, lost_packets: &[SentPacket]) {
+    pub fn on_packets_lost(&mut self, lost_packets: &[SentPacket], stats: &mut Stats) {
         if let EcnValidationState::Testing {
             probes_sent,
             initial_probes_lost: probes_lost,
@@ -170,7 +202,7 @@ impl EcnInfo {
                 "ECN validation failed, all {} initial marked packets were lost",
                 probes_lost
             );
-            self.state = EcnValidationState::Failed;
+            self.disable_ecn(stats);
         }
     }
 }
@@ -180,6 +212,7 @@ impl EcnInfo {
         &mut self,
         acked_packets: &[SentPacket],
         ack_ecn: Option<EcnCount>,
+        stats: &mut Stats,
     ) {
         // RFC 9000, Appendix A.4:
         //
@@ -212,7 +245,7 @@ impl EcnInfo {
         // > corresponding ECN counts are not present in the ACK frame.
         let Some(ack_ecn) = ack_ecn else {
             qwarn!("ECN validation failed, no ECN counts in ACK frame");
-            self.state = EcnValidationState::Failed;
+            self.disable_ecn(stats);
             return;
         };
 
@@ -229,7 +262,7 @@ impl EcnInfo {
             .unwrap();
         if newly_acked_sent_with_ect0 == 0 {
             qwarn!("ECN validation failed, no ECT(0) packets were newly acked");
-            self.state = EcnValidationState::Failed;
+            self.disable_ecn(stats);
             return;
         }
         let ecn_diff = ack_ecn - self.baseline;
@@ -240,15 +273,16 @@ impl EcnInfo {
                 sum_inc,
                 newly_acked_sent_with_ect0
             );
-            self.state = EcnValidationState::Failed;
+            self.disable_ecn(stats);
         } else if ecn_diff[IpTosEcn::Ect1] > 0 {
             qwarn!("ECN validation failed, ACK counted ECT(1) marks that were never sent");
-            self.state = EcnValidationState::Failed;
+            self.disable_ecn(stats);
         } else if self.state != EcnValidationState::Capable {
             qinfo!("ECN validation succeeded, path is capable");
-            self.state = EcnValidationState::Capable;
+            self.state.set(EcnValidationState::Capable, stats);
         }
         self.baseline = ack_ecn;
+        stats.ecn_tx = ack_ecn;
         self.largest_acked = largest_acked;
     }
 

third_party/rust/neqo-transport/src/fc.rs (vendored)
@@ -810,7 +810,7 @@ mod test {
     fc[StreamType::BiDi].add_retired(1);
     fc[StreamType::BiDi].send_flowc_update();
     // consume the frame
-    let mut builder = PacketBuilder::short(Encoder::new(), false, []);
+    let mut builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);
     let mut tokens = Vec::new();
    fc[StreamType::BiDi].write_frames(&mut builder, &mut tokens, &mut FrameStats::default());
     assert_eq!(tokens.len(), 1);
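In the `PacketBuilder` hunks that follow, the destination and source connection IDs become `Option<impl AsRef<[u8]>>`, and the header-size check accounts for a missing ID with `map_or(0, ...)`. A tiny self-contained sketch of that length-accounting idea (a toy function, not neqo code):

// Toy illustration of the optional-connection-ID length accounting used below.
fn short_header_overhead(dcid: Option<impl AsRef<[u8]>>) -> usize {
    // 5 = 1 (first byte) + 4 (packet number), as in PacketBuilder::short below.
    5 + dcid.as_ref().map_or(0, |d| d.as_ref().len())
}

fn main() {
    assert_eq!(short_header_overhead(None::<&[u8]>), 5);
    assert_eq!(short_header_overhead(Some([0u8; 8])), 13);
}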
@ -149,15 +149,19 @@ impl PacketBuilder {
|
|||||||
///
|
///
|
||||||
/// If, after calling this method, `remaining()` returns 0, then call `abort()` to get
|
/// If, after calling this method, `remaining()` returns 0, then call `abort()` to get
|
||||||
/// the encoder back.
|
/// the encoder back.
|
||||||
pub fn short(mut encoder: Encoder, key_phase: bool, dcid: impl AsRef<[u8]>) -> Self {
|
pub fn short(mut encoder: Encoder, key_phase: bool, dcid: Option<impl AsRef<[u8]>>) -> Self {
|
||||||
let mut limit = Self::infer_limit(&encoder);
|
let mut limit = Self::infer_limit(&encoder);
|
||||||
let header_start = encoder.len();
|
let header_start = encoder.len();
|
||||||
// Check that there is enough space for the header.
|
// Check that there is enough space for the header.
|
||||||
// 5 = 1 (first byte) + 4 (packet number)
|
// 5 = 1 (first byte) + 4 (packet number)
|
||||||
if limit > encoder.len() && 5 + dcid.as_ref().len() < limit - encoder.len() {
|
if limit > encoder.len()
|
||||||
|
&& 5 + dcid.as_ref().map_or(0, |d| d.as_ref().len()) < limit - encoder.len()
|
||||||
|
{
|
||||||
encoder
|
encoder
|
||||||
.encode_byte(PACKET_BIT_SHORT | PACKET_BIT_FIXED_QUIC | (u8::from(key_phase) << 2));
|
.encode_byte(PACKET_BIT_SHORT | PACKET_BIT_FIXED_QUIC | (u8::from(key_phase) << 2));
|
||||||
encoder.encode(dcid.as_ref());
|
if let Some(dcid) = dcid {
|
||||||
|
encoder.encode(dcid.as_ref());
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
limit = 0;
|
limit = 0;
|
||||||
}
|
}
|
||||||
@@ -185,20 +189,23 @@ impl PacketBuilder {
        mut encoder: Encoder,
        pt: PacketType,
        version: Version,
-       dcid: impl AsRef<[u8]>,
+       mut dcid: Option<impl AsRef<[u8]>>,
-       scid: impl AsRef<[u8]>,
+       mut scid: Option<impl AsRef<[u8]>>,
    ) -> Self {
        let mut limit = Self::infer_limit(&encoder);
        let header_start = encoder.len();
        // Check that there is enough space for the header.
        // 11 = 1 (first byte) + 4 (version) + 2 (dcid+scid length) + 4 (packet number)
        if limit > encoder.len()
-           && 11 + dcid.as_ref().len() + scid.as_ref().len() < limit - encoder.len()
+           && 11
+               + dcid.as_ref().map_or(0, |d| d.as_ref().len())
+               + scid.as_ref().map_or(0, |d| d.as_ref().len())
+               < limit - encoder.len()
        {
            encoder.encode_byte(PACKET_BIT_LONG | PACKET_BIT_FIXED_QUIC | pt.to_byte(version) << 4);
            encoder.encode_uint(4, version.wire_version());
-           encoder.encode_vec(1, dcid.as_ref());
+           encoder.encode_vec(1, dcid.take().as_ref().map_or(&[], AsRef::as_ref));
-           encoder.encode_vec(1, scid.as_ref());
+           encoder.encode_vec(1, scid.take().as_ref().map_or(&[], AsRef::as_ref));
        } else {
            limit = 0;
        }
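The two hunks above change the connection-ID parameters of `PacketBuilder::short` and `PacketBuilder::long` from `impl AsRef<[u8]>` to `Option<impl AsRef<[u8]>>`. A minimal, standalone sketch of the `Option<impl AsRef<[u8]>>` idiom those signatures rely on (the helper below is illustrative and not part of neqo):

// Illustrative only: shows why callers write `None::<&[u8]>` and how the
// `map_or(0, |d| d.as_ref().len())` length check in the hunks behaves.
fn encoded_len(cid: Option<impl AsRef<[u8]>>) -> usize {
    cid.as_ref().map_or(0, |c| c.as_ref().len())
}

fn main() {
    // `None` needs a turbofish so the generic parameter can be inferred.
    assert_eq!(encoded_len(None::<&[u8]>), 0);
    assert_eq!(encoded_len(Some([0x01u8, 0x02, 0x03])), 3);
}

The test hunks that follow show the same pattern at the call sites: `PacketBuilder::short(Encoder::new(), false, None::<&[u8]>)` where an empty slice or empty `ConnectionId` was previously passed.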
@ -994,8 +1001,8 @@ mod tests {
|
|||||||
Encoder::new(),
|
Encoder::new(),
|
||||||
PacketType::Initial,
|
PacketType::Initial,
|
||||||
Version::default(),
|
Version::default(),
|
||||||
ConnectionId::from(&[][..]),
|
None::<&[u8]>,
|
||||||
ConnectionId::from(SERVER_CID),
|
Some(ConnectionId::from(SERVER_CID)),
|
||||||
);
|
);
|
||||||
builder.initial_token(&[]);
|
builder.initial_token(&[]);
|
||||||
builder.pn(1, 2);
|
builder.pn(1, 2);
|
||||||
@ -1058,7 +1065,7 @@ mod tests {
|
|||||||
fn build_short() {
|
fn build_short() {
|
||||||
fixture_init();
|
fixture_init();
|
||||||
let mut builder =
|
let mut builder =
|
||||||
PacketBuilder::short(Encoder::new(), true, ConnectionId::from(SERVER_CID));
|
PacketBuilder::short(Encoder::new(), true, Some(ConnectionId::from(SERVER_CID)));
|
||||||
builder.pn(0, 1);
|
builder.pn(0, 1);
|
||||||
builder.encode(SAMPLE_SHORT_PAYLOAD); // Enough payload for sampling.
|
builder.encode(SAMPLE_SHORT_PAYLOAD); // Enough payload for sampling.
|
||||||
let packet = builder
|
let packet = builder
|
||||||
@ -1073,7 +1080,7 @@ mod tests {
|
|||||||
let mut firsts = Vec::new();
|
let mut firsts = Vec::new();
|
||||||
for _ in 0..64 {
|
for _ in 0..64 {
|
||||||
let mut builder =
|
let mut builder =
|
||||||
PacketBuilder::short(Encoder::new(), true, ConnectionId::from(SERVER_CID));
|
PacketBuilder::short(Encoder::new(), true, Some(ConnectionId::from(SERVER_CID)));
|
||||||
builder.scramble(true);
|
builder.scramble(true);
|
||||||
builder.pn(0, 1);
|
builder.pn(0, 1);
|
||||||
firsts.push(builder.as_ref()[0]);
|
firsts.push(builder.as_ref()[0]);
|
||||||
@ -1136,8 +1143,8 @@ mod tests {
|
|||||||
Encoder::new(),
|
Encoder::new(),
|
||||||
PacketType::Handshake,
|
PacketType::Handshake,
|
||||||
Version::default(),
|
Version::default(),
|
||||||
ConnectionId::from(SERVER_CID),
|
Some(ConnectionId::from(SERVER_CID)),
|
||||||
ConnectionId::from(CLIENT_CID),
|
Some(ConnectionId::from(CLIENT_CID)),
|
||||||
);
|
);
|
||||||
builder.pn(0, 1);
|
builder.pn(0, 1);
|
||||||
builder.encode(&[0; 3]);
|
builder.encode(&[0; 3]);
|
||||||
@ -1145,7 +1152,8 @@ mod tests {
|
|||||||
assert_eq!(encoder.len(), 45);
|
assert_eq!(encoder.len(), 45);
|
||||||
let first = encoder.clone();
|
let first = encoder.clone();
|
||||||
|
|
||||||
let mut builder = PacketBuilder::short(encoder, false, ConnectionId::from(SERVER_CID));
|
let mut builder =
|
||||||
|
PacketBuilder::short(encoder, false, Some(ConnectionId::from(SERVER_CID)));
|
||||||
builder.pn(1, 3);
|
builder.pn(1, 3);
|
||||||
builder.encode(&[0]); // Minimal size (packet number is big enough).
|
builder.encode(&[0]); // Minimal size (packet number is big enough).
|
||||||
let encoder = builder.build(&mut prot).expect("build");
|
let encoder = builder.build(&mut prot).expect("build");
|
||||||
@ -1170,8 +1178,8 @@ mod tests {
|
|||||||
Encoder::new(),
|
Encoder::new(),
|
||||||
PacketType::Handshake,
|
PacketType::Handshake,
|
||||||
Version::default(),
|
Version::default(),
|
||||||
ConnectionId::from(&[][..]),
|
None::<&[u8]>,
|
||||||
ConnectionId::from(&[][..]),
|
None::<&[u8]>,
|
||||||
);
|
);
|
||||||
builder.pn(0, 1);
|
builder.pn(0, 1);
|
||||||
builder.encode(&[1, 2, 3]);
|
builder.encode(&[1, 2, 3]);
|
||||||
@ -1189,8 +1197,8 @@ mod tests {
|
|||||||
Encoder::new(),
|
Encoder::new(),
|
||||||
PacketType::Handshake,
|
PacketType::Handshake,
|
||||||
Version::default(),
|
Version::default(),
|
||||||
ConnectionId::from(&[][..]),
|
None::<&[u8]>,
|
||||||
ConnectionId::from(&[][..]),
|
None::<&[u8]>,
|
||||||
);
|
);
|
||||||
builder.pn(0, 1);
|
builder.pn(0, 1);
|
||||||
builder.scramble(true);
|
builder.scramble(true);
|
||||||
@ -1210,8 +1218,8 @@ mod tests {
|
|||||||
Encoder::new(),
|
Encoder::new(),
|
||||||
PacketType::Initial,
|
PacketType::Initial,
|
||||||
Version::default(),
|
Version::default(),
|
||||||
ConnectionId::from(&[][..]),
|
None::<&[u8]>,
|
||||||
ConnectionId::from(SERVER_CID),
|
Some(ConnectionId::from(SERVER_CID)),
|
||||||
);
|
);
|
||||||
assert_ne!(builder.remaining(), 0);
|
assert_ne!(builder.remaining(), 0);
|
||||||
builder.initial_token(&[]);
|
builder.initial_token(&[]);
|
||||||
@ -1229,7 +1237,7 @@ mod tests {
|
|||||||
let mut builder = PacketBuilder::short(
|
let mut builder = PacketBuilder::short(
|
||||||
Encoder::with_capacity(100),
|
Encoder::with_capacity(100),
|
||||||
true,
|
true,
|
||||||
ConnectionId::from(SERVER_CID),
|
Some(ConnectionId::from(SERVER_CID)),
|
||||||
);
|
);
|
||||||
builder.pn(0, 1);
|
builder.pn(0, 1);
|
||||||
// Pad, but not up to the full capacity. Leave enough space for the
|
// Pad, but not up to the full capacity. Leave enough space for the
|
||||||
@ -1244,8 +1252,8 @@ mod tests {
|
|||||||
encoder,
|
encoder,
|
||||||
PacketType::Initial,
|
PacketType::Initial,
|
||||||
Version::default(),
|
Version::default(),
|
||||||
ConnectionId::from(SERVER_CID),
|
Some(ConnectionId::from(SERVER_CID)),
|
||||||
ConnectionId::from(SERVER_CID),
|
Some(ConnectionId::from(SERVER_CID)),
|
||||||
);
|
);
|
||||||
assert_eq!(builder.remaining(), 0);
|
assert_eq!(builder.remaining(), 0);
|
||||||
assert_eq!(builder.abort(), encoder_copy);
|
assert_eq!(builder.abort(), encoder_copy);
|
||||||
|
89  third_party/rust/neqo-transport/src/path.rs  vendored
@@ -15,7 +15,7 @@ use std::{
    time::{Duration, Instant},
};

-use neqo_common::{hex, qdebug, qinfo, qlog::NeqoQlog, qtrace, Datagram, Encoder, IpTos};
+use neqo_common::{hex, qdebug, qinfo, qlog::NeqoQlog, qtrace, Datagram, Encoder, IpTos, IpTosEcn};
use neqo_crypto::random;

use crate::{
@@ -35,7 +35,10 @@ use crate::{
};

/// The number of times that a path will be probed before it is considered failed.
-const MAX_PATH_PROBES: usize = 3;
+///
+/// Note that with [`crate::ecn`], a path is probed [`MAX_PATH_PROBES`] with ECN
+/// marks and [`MAX_PATH_PROBES`] without.
+pub const MAX_PATH_PROBES: usize = 3;
/// The maximum number of paths that `Paths` will track.
const MAX_PATHS: usize = 15;

@ -225,7 +228,13 @@ impl Paths {
|
|||||||
/// Otherwise, migration will occur after probing succeeds.
|
/// Otherwise, migration will occur after probing succeeds.
|
||||||
/// The path is always probed and will be abandoned if probing fails.
|
/// The path is always probed and will be abandoned if probing fails.
|
||||||
/// Returns `true` if the path was migrated.
|
/// Returns `true` if the path was migrated.
|
||||||
pub fn migrate(&mut self, path: &PathRef, force: bool, now: Instant) -> bool {
|
pub fn migrate(
|
||||||
|
&mut self,
|
||||||
|
path: &PathRef,
|
||||||
|
force: bool,
|
||||||
|
now: Instant,
|
||||||
|
stats: &mut Stats,
|
||||||
|
) -> bool {
|
||||||
debug_assert!(!self.is_temporary(path));
|
debug_assert!(!self.is_temporary(path));
|
||||||
let baseline = self.primary().map_or_else(
|
let baseline = self.primary().map_or_else(
|
||||||
|| EcnInfo::default().baseline(),
|
|| EcnInfo::default().baseline(),
|
||||||
@ -239,7 +248,7 @@ impl Paths {
|
|||||||
} else {
|
} else {
|
||||||
self.migration_target = Some(Rc::clone(path));
|
self.migration_target = Some(Rc::clone(path));
|
||||||
}
|
}
|
||||||
path.borrow_mut().probe();
|
path.borrow_mut().probe(stats);
|
||||||
self.migration_target.is_none()
|
self.migration_target.is_none()
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -248,11 +257,11 @@ impl Paths {
|
|||||||
///
|
///
|
||||||
/// TODO(mt) - the paths should own the RTT estimator, so they can find the PTO
|
/// TODO(mt) - the paths should own the RTT estimator, so they can find the PTO
|
||||||
/// for themselves.
|
/// for themselves.
|
||||||
pub fn process_timeout(&mut self, now: Instant, pto: Duration) -> bool {
|
pub fn process_timeout(&mut self, now: Instant, pto: Duration, stats: &mut Stats) -> bool {
|
||||||
let to_retire = &mut self.to_retire;
|
let to_retire = &mut self.to_retire;
|
||||||
let mut primary_failed = false;
|
let mut primary_failed = false;
|
||||||
self.paths.retain(|p| {
|
self.paths.retain(|p| {
|
||||||
if p.borrow_mut().process_timeout(now, pto) {
|
if p.borrow_mut().process_timeout(now, pto, stats) {
|
||||||
true
|
true
|
||||||
} else {
|
} else {
|
||||||
qdebug!([p.borrow()], "Retiring path");
|
qdebug!([p.borrow()], "Retiring path");
|
||||||
@ -301,7 +310,13 @@ impl Paths {
|
|||||||
|
|
||||||
/// Set the identified path to be primary.
|
/// Set the identified path to be primary.
|
||||||
/// This panics if `make_permanent` hasn't been called.
|
/// This panics if `make_permanent` hasn't been called.
|
||||||
pub fn handle_migration(&mut self, path: &PathRef, remote: SocketAddr, now: Instant) {
|
pub fn handle_migration(
|
||||||
|
&mut self,
|
||||||
|
path: &PathRef,
|
||||||
|
remote: SocketAddr,
|
||||||
|
now: Instant,
|
||||||
|
stats: &mut Stats,
|
||||||
|
) {
|
||||||
// The update here needs to match the checks in `Path::received_on`.
|
// The update here needs to match the checks in `Path::received_on`.
|
||||||
// Here, we update the remote port number to match the source port on the
|
// Here, we update the remote port number to match the source port on the
|
||||||
// datagram that was received. This ensures that we send subsequent
|
// datagram that was received. This ensures that we send subsequent
|
||||||
@ -316,7 +331,7 @@ impl Paths {
|
|||||||
|
|
||||||
if let Some(old_path) = self.select_primary(path) {
|
if let Some(old_path) = self.select_primary(path) {
|
||||||
// Need to probe the old path if the peer migrates.
|
// Need to probe the old path if the peer migrates.
|
||||||
old_path.borrow_mut().probe();
|
old_path.borrow_mut().probe(stats);
|
||||||
// TODO(mt) - suppress probing if the path was valid within 3PTO.
|
// TODO(mt) - suppress probing if the path was valid within 3PTO.
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -339,11 +354,11 @@ impl Paths {
|
|||||||
/// A `PATH_RESPONSE` was received.
|
/// A `PATH_RESPONSE` was received.
|
||||||
/// Returns `true` if migration occurred.
|
/// Returns `true` if migration occurred.
|
||||||
#[must_use]
|
#[must_use]
|
||||||
pub fn path_response(&mut self, response: [u8; 8], now: Instant) -> bool {
|
pub fn path_response(&mut self, response: [u8; 8], now: Instant, stats: &mut Stats) -> bool {
|
||||||
// TODO(mt) consider recording an RTT measurement here as we don't train
|
// TODO(mt) consider recording an RTT measurement here as we don't train
|
||||||
// RTT for non-primary paths.
|
// RTT for non-primary paths.
|
||||||
for p in &self.paths {
|
for p in &self.paths {
|
||||||
if p.borrow_mut().path_response(response, now) {
|
if p.borrow_mut().path_response(response, now, stats) {
|
||||||
// The response was accepted. If this path is one we intend
|
// The response was accepted. If this path is one we intend
|
||||||
// to migrate to, then migrate.
|
// to migrate to, then migrate.
|
||||||
if self
|
if self
|
||||||
@ -452,10 +467,10 @@ impl Paths {
|
|||||||
// make a new RTT esimate and interrogate that.
|
// make a new RTT esimate and interrogate that.
|
||||||
// That is more expensive, but it should be rare and breaking encapsulation
|
// That is more expensive, but it should be rare and breaking encapsulation
|
||||||
// is worse, especially as this is only used in tests.
|
// is worse, especially as this is only used in tests.
|
||||||
self.primary()
|
self.primary().map_or_else(
|
||||||
.map_or(RttEstimate::default().estimate(), |p| {
|
|| RttEstimate::default().estimate(),
|
||||||
p.borrow().rtt().estimate()
|
|p| p.borrow().rtt().estimate(),
|
||||||
})
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn set_qlog(&mut self, qlog: NeqoQlog) {
|
pub fn set_qlog(&mut self, qlog: NeqoQlog) {
|
||||||
@ -660,8 +675,8 @@ impl Path {
|
|||||||
|
|
||||||
/// Get the first local connection ID.
|
/// Get the first local connection ID.
|
||||||
/// Only do this for the primary path during the handshake.
|
/// Only do this for the primary path during the handshake.
|
||||||
pub fn local_cid(&self) -> &ConnectionId {
|
pub const fn local_cid(&self) -> Option<&ConnectionId> {
|
||||||
self.local_cid.as_ref().unwrap()
|
self.local_cid.as_ref()
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Set the remote connection ID based on the peer's choice.
|
/// Set the remote connection ID based on the peer's choice.
|
||||||
@ -674,8 +689,10 @@ impl Path {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Access the remote connection ID.
|
/// Access the remote connection ID.
|
||||||
pub fn remote_cid(&self) -> &ConnectionId {
|
pub fn remote_cid(&self) -> Option<&ConnectionId> {
|
||||||
self.remote_cid.as_ref().unwrap().connection_id()
|
self.remote_cid
|
||||||
|
.as_ref()
|
||||||
|
.map(super::cid::ConnectionIdEntry::connection_id)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Set the stateless reset token for the connection ID that is currently in use.
|
/// Set the stateless reset token for the connection ID that is currently in use.
|
||||||
@ -696,12 +713,12 @@ impl Path {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Make a datagram.
|
/// Make a datagram.
|
||||||
pub fn datagram<V: Into<Vec<u8>>>(&mut self, payload: V) -> Datagram {
|
pub fn datagram<V: Into<Vec<u8>>>(&mut self, payload: V, stats: &mut Stats) -> Datagram {
|
||||||
// Make sure to use the TOS value from before calling EcnInfo::on_packet_sent, which may
|
// Make sure to use the TOS value from before calling EcnInfo::on_packet_sent, which may
|
||||||
// update the ECN state and can hence change it - this packet should still be sent
|
// update the ECN state and can hence change it - this packet should still be sent
|
||||||
// with the current value.
|
// with the current value.
|
||||||
let tos = self.tos();
|
let tos = self.tos();
|
||||||
self.ecn_info.on_packet_sent();
|
self.ecn_info.on_packet_sent(stats);
|
||||||
Datagram::new(self.local, self.remote, tos, payload)
|
Datagram::new(self.local, self.remote, tos, payload)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -721,14 +738,14 @@ impl Path {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Handle a `PATH_RESPONSE` frame. Returns true if the response was accepted.
|
/// Handle a `PATH_RESPONSE` frame. Returns true if the response was accepted.
|
||||||
pub fn path_response(&mut self, response: [u8; 8], now: Instant) -> bool {
|
pub fn path_response(&mut self, response: [u8; 8], now: Instant, stats: &mut Stats) -> bool {
|
||||||
if let ProbeState::Probing { data, mtu, .. } = &mut self.state {
|
if let ProbeState::Probing { data, mtu, .. } = &mut self.state {
|
||||||
if response == *data {
|
if response == *data {
|
||||||
let need_full_probe = !*mtu;
|
let need_full_probe = !*mtu;
|
||||||
self.set_valid(now);
|
self.set_valid(now);
|
||||||
if need_full_probe {
|
if need_full_probe {
|
||||||
qdebug!([self], "Sub-MTU probe successful, reset probe count");
|
qdebug!([self], "Sub-MTU probe successful, reset probe count");
|
||||||
self.probe();
|
self.probe(stats);
|
||||||
}
|
}
|
||||||
true
|
true
|
||||||
} else {
|
} else {
|
||||||
@@ -747,15 +764,25 @@ impl Path {

    /// At the next opportunity, send a probe.
    /// If the probe count has been exhausted already, marks the path as failed.
-   fn probe(&mut self) {
+   fn probe(&mut self, stats: &mut Stats) {
        let probe_count = match &self.state {
            ProbeState::Probing { probe_count, .. } => *probe_count + 1,
            ProbeState::ProbeNeeded { probe_count, .. } => *probe_count,
            _ => 0,
        };
        self.state = if probe_count >= MAX_PATH_PROBES {
-           qinfo!([self], "Probing failed");
-           ProbeState::Failed
+           if self.ecn_info.ecn_mark() == IpTosEcn::Ect0 {
+               // The path validation failure may be due to ECN blackholing, try again without ECN.
+               qinfo!(
+                   [self],
+                   "Possible ECN blackhole, disabling ECN and re-probing path"
+               );
+               self.ecn_info.disable_ecn(stats);
+               ProbeState::ProbeNeeded { probe_count: 0 }
+           } else {
+               qinfo!([self], "Probing failed");
+               ProbeState::Failed
+           }
        } else {
            qdebug!([self], "Initiating probe");
            ProbeState::ProbeNeeded { probe_count }
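The hunk above adds an ECN fallback to path probing: when the probe budget is exhausted on a path that is still sending ECT(0)-marked packets, the failure may be an ECN blackhole, so ECN is disabled and probing restarts instead of the path being declared failed. A standalone sketch of that decision, using stand-in types rather than neqo's `ProbeState`/`EcnInfo`:

// Illustrative stand-ins; the real logic lives in `Path::probe` above.
#[derive(Debug, PartialEq)]
enum Marking { Ect0, NotEct }

#[derive(Debug, PartialEq)]
enum Probe { Needed { count: usize }, Failed }

const MAX_PATH_PROBES: usize = 3;

fn next_state(probe_count: usize, marking: &mut Marking) -> Probe {
    if probe_count >= MAX_PATH_PROBES {
        if *marking == Marking::Ect0 {
            // Suspected ECN blackhole: drop ECN marks and start a fresh probe round.
            *marking = Marking::NotEct;
            Probe::Needed { count: 0 }
        } else {
            Probe::Failed
        }
    } else {
        Probe::Needed { count: probe_count }
    }
}

fn main() {
    let mut marking = Marking::Ect0;
    // Probe budget spent while still marking ECT(0): re-probe without ECN.
    assert_eq!(next_state(3, &mut marking), Probe::Needed { count: 0 });
    // Budget spent again without ECN: now the path really fails.
    assert_eq!(next_state(3, &mut marking), Probe::Failed);
}

This also matches the updated comment in `process_timeout` further down, which now keeps a validated path around for 2 * `MAX_PATH_PROBES` PTOs, covering both the ECN and the non-ECN probe rounds.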
@ -839,10 +866,10 @@ impl Path {
|
|||||||
|
|
||||||
/// Process a timer for this path.
|
/// Process a timer for this path.
|
||||||
/// This returns true if the path is viable and can be kept alive.
|
/// This returns true if the path is viable and can be kept alive.
|
||||||
pub fn process_timeout(&mut self, now: Instant, pto: Duration) -> bool {
|
pub fn process_timeout(&mut self, now: Instant, pto: Duration, stats: &mut Stats) -> bool {
|
||||||
if let ProbeState::Probing { sent, .. } = &self.state {
|
if let ProbeState::Probing { sent, .. } = &self.state {
|
||||||
if now >= *sent + pto {
|
if now >= *sent + pto {
|
||||||
self.probe();
|
self.probe(stats);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if matches!(self.state, ProbeState::Failed) {
|
if matches!(self.state, ProbeState::Failed) {
|
||||||
@ -853,9 +880,9 @@ impl Path {
|
|||||||
true
|
true
|
||||||
} else if matches!(self.state, ProbeState::Valid) {
|
} else if matches!(self.state, ProbeState::Valid) {
|
||||||
// Retire validated, non-primary paths.
|
// Retire validated, non-primary paths.
|
||||||
// Allow more than `MAX_PATH_PROBES` times the PTO so that an old
|
// Allow more than 2* `MAX_PATH_PROBES` times the PTO so that an old
|
||||||
// path remains around until after a previous path fails.
|
// path remains around until after a previous path fails.
|
||||||
let count = u32::try_from(MAX_PATH_PROBES + 1).unwrap();
|
let count = u32::try_from(2 * MAX_PATH_PROBES + 1).unwrap();
|
||||||
self.validated.unwrap() + (pto * count) > now
|
self.validated.unwrap() + (pto * count) > now
|
||||||
} else {
|
} else {
|
||||||
// Keep paths that are being actively probed.
|
// Keep paths that are being actively probed.
|
||||||
@ -978,7 +1005,7 @@ impl Path {
|
|||||||
) {
|
) {
|
||||||
debug_assert!(self.is_primary());
|
debug_assert!(self.is_primary());
|
||||||
|
|
||||||
let ecn_ce_received = self.ecn_info.on_packets_acked(acked_pkts, ack_ecn);
|
let ecn_ce_received = self.ecn_info.on_packets_acked(acked_pkts, ack_ecn, stats);
|
||||||
if ecn_ce_received {
|
if ecn_ce_received {
|
||||||
let cwnd_reduced = self
|
let cwnd_reduced = self
|
||||||
.sender
|
.sender
|
||||||
@ -1002,7 +1029,7 @@ impl Path {
|
|||||||
now: Instant,
|
now: Instant,
|
||||||
) {
|
) {
|
||||||
debug_assert!(self.is_primary());
|
debug_assert!(self.is_primary());
|
||||||
self.ecn_info.on_packets_lost(lost_packets);
|
self.ecn_info.on_packets_lost(lost_packets, stats);
|
||||||
let cwnd_reduced = self.sender.on_packets_lost(
|
let cwnd_reduced = self.sender.on_packets_lost(
|
||||||
self.rtt.first_sample_time(),
|
self.rtt.first_sample_time(),
|
||||||
prev_largest_acked_sent,
|
prev_largest_acked_sent,
|
||||||
|
2  third_party/rust/neqo-transport/src/pmtud.rs  vendored
@@ -383,7 +383,7 @@ mod tests {
        let stats_before = stats.clone();

        // Fake a packet number, so the builder logic works.
-       let mut builder = PacketBuilder::short(Encoder::new(), false, []);
+       let mut builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);
        let pn = prot.next_pn();
        builder.pn(pn, 4);
        builder.set_initial_limit(&SendProfile::new_limited(pmtud.plpmtu()), 16, pmtud);
4  third_party/rust/neqo-transport/src/qlog.rs  vendored
@@ -104,8 +104,8 @@ fn connection_started(qlog: &NeqoQlog, path: &PathRef) {
            protocol: Some("QUIC".into()),
            src_port: p.local_address().port().into(),
            dst_port: p.remote_address().port().into(),
-           src_cid: Some(format!("{}", p.local_cid())),
+           src_cid: p.local_cid().map(ToString::to_string),
-           dst_cid: Some(format!("{}", p.remote_cid())),
+           dst_cid: p.remote_cid().map(ToString::to_string),
        });

        Some(ev_data)
|
@ -16,10 +16,10 @@ use std::{
|
|||||||
time::{Duration, Instant},
|
time::{Duration, Instant},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
use enum_map::{enum_map, EnumMap};
|
||||||
use neqo_common::{qdebug, qinfo, qlog::NeqoQlog, qtrace, qwarn};
|
use neqo_common::{qdebug, qinfo, qlog::NeqoQlog, qtrace, qwarn};
|
||||||
pub use sent::SentPacket;
|
pub use sent::SentPacket;
|
||||||
use sent::SentPackets;
|
use sent::SentPackets;
|
||||||
use smallvec::{smallvec, SmallVec};
|
|
||||||
pub use token::{RecoveryToken, StreamRecoveryToken};
|
pub use token::{RecoveryToken, StreamRecoveryToken};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
@ -361,20 +361,10 @@ impl LossRecoverySpace {
|
|||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub struct LossRecoverySpaces {
|
pub struct LossRecoverySpaces {
|
||||||
/// When we have all of the loss recovery spaces, this will use a separate
|
spaces: EnumMap<PacketNumberSpace, Option<LossRecoverySpace>>,
|
||||||
/// allocation, but this is reduced once the handshake is done.
|
|
||||||
spaces: SmallVec<[LossRecoverySpace; 1]>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl LossRecoverySpaces {
|
impl LossRecoverySpaces {
|
||||||
const fn idx(space: PacketNumberSpace) -> usize {
|
|
||||||
match space {
|
|
||||||
PacketNumberSpace::ApplicationData => 0,
|
|
||||||
PacketNumberSpace::Handshake => 1,
|
|
||||||
PacketNumberSpace::Initial => 2,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Drop a packet number space and return all the packets that were
|
/// Drop a packet number space and return all the packets that were
|
||||||
/// outstanding, so that those can be marked as lost.
|
/// outstanding, so that those can be marked as lost.
|
||||||
///
|
///
|
||||||
@ -382,45 +372,42 @@ impl LossRecoverySpaces {
|
|||||||
///
|
///
|
||||||
/// If the space has already been removed.
|
/// If the space has already been removed.
|
||||||
pub fn drop_space(&mut self, space: PacketNumberSpace) -> impl IntoIterator<Item = SentPacket> {
|
pub fn drop_space(&mut self, space: PacketNumberSpace) -> impl IntoIterator<Item = SentPacket> {
|
||||||
let sp = match space {
|
let sp = self.spaces[space].take();
|
||||||
PacketNumberSpace::Initial => self.spaces.pop(),
|
assert_ne!(
|
||||||
PacketNumberSpace::Handshake => {
|
space,
|
||||||
let sp = self.spaces.pop();
|
PacketNumberSpace::ApplicationData,
|
||||||
self.spaces.shrink_to_fit();
|
"discarding application space"
|
||||||
sp
|
);
|
||||||
}
|
sp.unwrap().remove_ignored()
|
||||||
PacketNumberSpace::ApplicationData => panic!("discarding application space"),
|
|
||||||
};
|
|
||||||
let mut sp = sp.unwrap();
|
|
||||||
assert_eq!(sp.space(), space, "dropping spaces out of order");
|
|
||||||
sp.remove_ignored()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get(&self, space: PacketNumberSpace) -> Option<&LossRecoverySpace> {
|
pub fn get(&self, space: PacketNumberSpace) -> Option<&LossRecoverySpace> {
|
||||||
self.spaces.get(Self::idx(space))
|
self.spaces[space].as_ref()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_mut(&mut self, space: PacketNumberSpace) -> Option<&mut LossRecoverySpace> {
|
pub fn get_mut(&mut self, space: PacketNumberSpace) -> Option<&mut LossRecoverySpace> {
|
||||||
self.spaces.get_mut(Self::idx(space))
|
self.spaces[space].as_mut()
|
||||||
}
|
}
|
||||||
|
|
||||||
fn iter(&self) -> impl Iterator<Item = &LossRecoverySpace> {
|
fn iter(&self) -> impl Iterator<Item = &LossRecoverySpace> {
|
||||||
self.spaces.iter()
|
self.spaces.iter().filter_map(|(_, recvd)| recvd.as_ref())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn iter_mut(&mut self) -> impl Iterator<Item = &mut LossRecoverySpace> {
|
fn iter_mut(&mut self) -> impl Iterator<Item = &mut LossRecoverySpace> {
|
||||||
self.spaces.iter_mut()
|
self.spaces
|
||||||
|
.iter_mut()
|
||||||
|
.filter_map(|(_, recvd)| recvd.as_mut())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Default for LossRecoverySpaces {
|
impl Default for LossRecoverySpaces {
|
||||||
fn default() -> Self {
|
fn default() -> Self {
|
||||||
Self {
|
Self {
|
||||||
spaces: smallvec![
|
spaces: enum_map! {
|
||||||
LossRecoverySpace::new(PacketNumberSpace::ApplicationData),
|
PacketNumberSpace::Initial => Some(LossRecoverySpace::new(PacketNumberSpace::Initial)),
|
||||||
LossRecoverySpace::new(PacketNumberSpace::Handshake),
|
PacketNumberSpace::Handshake => Some(LossRecoverySpace::new(PacketNumberSpace::Handshake)),
|
||||||
LossRecoverySpace::new(PacketNumberSpace::Initial),
|
PacketNumberSpace::ApplicationData =>Some(LossRecoverySpace::new(PacketNumberSpace::ApplicationData)),
|
||||||
],
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -439,32 +426,33 @@ struct PtoState {
|
|||||||
impl PtoState {
|
impl PtoState {
|
||||||
/// The number of packets we send on a PTO.
|
/// The number of packets we send on a PTO.
|
||||||
/// And the number to declare lost when the PTO timer is hit.
|
/// And the number to declare lost when the PTO timer is hit.
|
||||||
fn pto_packet_count(space: PacketNumberSpace, rx_count: usize) -> usize {
|
fn pto_packet_count(space: PacketNumberSpace) -> usize {
|
||||||
if space == PacketNumberSpace::Initial && rx_count == 0 {
|
if space == PacketNumberSpace::ApplicationData {
|
||||||
// For the Initial space, we only send one packet on PTO if we have not received any
|
|
||||||
// packets from the peer yet. This avoids sending useless PING-only packets
|
|
||||||
// when the Client Initial is deemed lost.
|
|
||||||
1
|
|
||||||
} else {
|
|
||||||
MAX_PTO_PACKET_COUNT
|
MAX_PTO_PACKET_COUNT
|
||||||
|
} else {
|
||||||
|
// For the Initial and Handshake spaces, we only send one packet on PTO. This avoids
|
||||||
|
// sending useless PING-only packets when only a single packet was lost, which is the
|
||||||
|
// common case. These PINGs use cwnd and amplification window space, and sending them
|
||||||
|
// hence makes the handshake more brittle.
|
||||||
|
1
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn new(space: PacketNumberSpace, probe: PacketNumberSpaceSet, rx_count: usize) -> Self {
|
pub fn new(space: PacketNumberSpace, probe: PacketNumberSpaceSet) -> Self {
|
||||||
debug_assert!(probe[space]);
|
debug_assert!(probe[space]);
|
||||||
Self {
|
Self {
|
||||||
space,
|
space,
|
||||||
count: 1,
|
count: 1,
|
||||||
packets: Self::pto_packet_count(space, rx_count),
|
packets: Self::pto_packet_count(space),
|
||||||
probe,
|
probe,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn pto(&mut self, space: PacketNumberSpace, probe: PacketNumberSpaceSet, rx_count: usize) {
|
pub fn pto(&mut self, space: PacketNumberSpace, probe: PacketNumberSpaceSet) {
|
||||||
debug_assert!(probe[space]);
|
debug_assert!(probe[space]);
|
||||||
self.space = space;
|
self.space = space;
|
||||||
self.count += 1;
|
self.count += 1;
|
||||||
self.packets = Self::pto_packet_count(space, rx_count);
|
self.packets = Self::pto_packet_count(space);
|
||||||
self.probe = probe;
|
self.probe = probe;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -546,7 +534,7 @@ impl LossRecovery {
|
|||||||
|
|
||||||
pub fn on_packet_sent(&mut self, path: &PathRef, mut sent_packet: SentPacket) {
|
pub fn on_packet_sent(&mut self, path: &PathRef, mut sent_packet: SentPacket) {
|
||||||
let pn_space = PacketNumberSpace::from(sent_packet.packet_type());
|
let pn_space = PacketNumberSpace::from(sent_packet.packet_type());
|
||||||
qdebug!([self], "packet {}-{} sent", pn_space, sent_packet.pn());
|
qtrace!([self], "packet {}-{} sent", pn_space, sent_packet.pn());
|
||||||
if let Some(space) = self.spaces.get_mut(pn_space) {
|
if let Some(space) = self.spaces.get_mut(pn_space) {
|
||||||
path.borrow_mut().packet_sent(&mut sent_packet);
|
path.borrow_mut().packet_sent(&mut sent_packet);
|
||||||
space.on_packet_sent(sent_packet);
|
space.on_packet_sent(sent_packet);
|
||||||
@ -816,11 +804,10 @@ impl LossRecovery {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn fire_pto(&mut self, pn_space: PacketNumberSpace, allow_probes: PacketNumberSpaceSet) {
|
fn fire_pto(&mut self, pn_space: PacketNumberSpace, allow_probes: PacketNumberSpaceSet) {
|
||||||
let rx_count = self.stats.borrow().packets_rx;
|
|
||||||
if let Some(st) = &mut self.pto_state {
|
if let Some(st) = &mut self.pto_state {
|
||||||
st.pto(pn_space, allow_probes, rx_count);
|
st.pto(pn_space, allow_probes);
|
||||||
} else {
|
} else {
|
||||||
self.pto_state = Some(PtoState::new(pn_space, allow_probes, rx_count));
|
self.pto_state = Some(PtoState::new(pn_space, allow_probes));
|
||||||
}
|
}
|
||||||
|
|
||||||
self.pto_state
|
self.pto_state
|
||||||
@ -852,10 +839,7 @@ impl LossRecovery {
|
|||||||
let space = self.spaces.get_mut(*pn_space).unwrap();
|
let space = self.spaces.get_mut(*pn_space).unwrap();
|
||||||
lost.extend(
|
lost.extend(
|
||||||
space
|
space
|
||||||
.pto_packets(PtoState::pto_packet_count(
|
.pto_packets(PtoState::pto_packet_count(*pn_space))
|
||||||
*pn_space,
|
|
||||||
self.stats.borrow().packets_rx,
|
|
||||||
))
|
|
||||||
.cloned(),
|
.cloned(),
|
||||||
);
|
);
|
||||||
|
|
||||||
@ -906,7 +890,7 @@ impl LossRecovery {
|
|||||||
/// what the current congestion window is, and what the pacer says.
|
/// what the current congestion window is, and what the pacer says.
|
||||||
#[allow(clippy::option_if_let_else)]
|
#[allow(clippy::option_if_let_else)]
|
||||||
pub fn send_profile(&mut self, path: &Path, now: Instant) -> SendProfile {
|
pub fn send_profile(&mut self, path: &Path, now: Instant) -> SendProfile {
|
||||||
qdebug!([self], "get send profile {:?}", now);
|
qtrace!([self], "get send profile {:?}", now);
|
||||||
let sender = path.sender();
|
let sender = path.sender();
|
||||||
let mtu = path.plpmtu();
|
let mtu = path.plpmtu();
|
||||||
if let Some(profile) = self
|
if let Some(profile) = self
|
||||||
@ -1382,13 +1366,6 @@ mod tests {
|
|||||||
lr.discard(PacketNumberSpace::ApplicationData, now());
|
lr.discard(PacketNumberSpace::ApplicationData, now());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
|
||||||
#[should_panic(expected = "dropping spaces out of order")]
|
|
||||||
fn drop_out_of_order() {
|
|
||||||
let mut lr = Fixture::default();
|
|
||||||
lr.discard(PacketNumberSpace::Handshake, now());
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn ack_after_drop() {
|
fn ack_after_drop() {
|
||||||
let mut lr = Fixture::default();
|
let mut lr = Fixture::default();
|
||||||
|
@ -1483,7 +1483,7 @@ mod tests {
|
|||||||
assert!(s.has_frames_to_write());
|
assert!(s.has_frames_to_write());
|
||||||
|
|
||||||
// consume it
|
// consume it
|
||||||
let mut builder = PacketBuilder::short(Encoder::new(), false, []);
|
let mut builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);
|
||||||
let mut token = Vec::new();
|
let mut token = Vec::new();
|
||||||
s.write_frame(&mut builder, &mut token, &mut FrameStats::default());
|
s.write_frame(&mut builder, &mut token, &mut FrameStats::default());
|
||||||
|
|
||||||
@ -1597,7 +1597,7 @@ mod tests {
|
|||||||
s.read(&mut buf).unwrap();
|
s.read(&mut buf).unwrap();
|
||||||
assert!(session_fc.borrow().frame_needed());
|
assert!(session_fc.borrow().frame_needed());
|
||||||
// consume it
|
// consume it
|
||||||
let mut builder = PacketBuilder::short(Encoder::new(), false, []);
|
let mut builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);
|
||||||
let mut token = Vec::new();
|
let mut token = Vec::new();
|
||||||
session_fc
|
session_fc
|
||||||
.borrow_mut()
|
.borrow_mut()
|
||||||
@ -1618,7 +1618,7 @@ mod tests {
|
|||||||
s.read(&mut buf).unwrap();
|
s.read(&mut buf).unwrap();
|
||||||
assert!(session_fc.borrow().frame_needed());
|
assert!(session_fc.borrow().frame_needed());
|
||||||
// consume it
|
// consume it
|
||||||
let mut builder = PacketBuilder::short(Encoder::new(), false, []);
|
let mut builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);
|
||||||
let mut token = Vec::new();
|
let mut token = Vec::new();
|
||||||
session_fc
|
session_fc
|
||||||
.borrow_mut()
|
.borrow_mut()
|
||||||
@ -1866,7 +1866,7 @@ mod tests {
|
|||||||
assert!(s.fc().unwrap().frame_needed());
|
assert!(s.fc().unwrap().frame_needed());
|
||||||
|
|
||||||
// Write the fc update frame
|
// Write the fc update frame
|
||||||
let mut builder = PacketBuilder::short(Encoder::new(), false, []);
|
let mut builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);
|
||||||
let mut token = Vec::new();
|
let mut token = Vec::new();
|
||||||
let mut stats = FrameStats::default();
|
let mut stats = FrameStats::default();
|
||||||
fc.borrow_mut()
|
fc.borrow_mut()
|
||||||
|
@ -2596,7 +2596,7 @@ mod tests {
|
|||||||
ss.insert(StreamId::from(0), s);
|
ss.insert(StreamId::from(0), s);
|
||||||
|
|
||||||
let mut tokens = Vec::new();
|
let mut tokens = Vec::new();
|
||||||
let mut builder = PacketBuilder::short(Encoder::new(), false, []);
|
let mut builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);
|
||||||
|
|
||||||
// Write a small frame: no fin.
|
// Write a small frame: no fin.
|
||||||
let written = builder.len();
|
let written = builder.len();
|
||||||
@ -2684,7 +2684,7 @@ mod tests {
|
|||||||
ss.insert(StreamId::from(0), s);
|
ss.insert(StreamId::from(0), s);
|
||||||
|
|
||||||
let mut tokens = Vec::new();
|
let mut tokens = Vec::new();
|
||||||
let mut builder = PacketBuilder::short(Encoder::new(), false, []);
|
let mut builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);
|
||||||
ss.write_frames(
|
ss.write_frames(
|
||||||
TransmissionPriority::default(),
|
TransmissionPriority::default(),
|
||||||
&mut builder,
|
&mut builder,
|
||||||
@ -2762,7 +2762,7 @@ mod tests {
|
|||||||
assert_eq!(s.next_bytes(false), Some((0, &b"ab"[..])));
|
assert_eq!(s.next_bytes(false), Some((0, &b"ab"[..])));
|
||||||
|
|
||||||
// This doesn't report blocking yet.
|
// This doesn't report blocking yet.
|
||||||
let mut builder = PacketBuilder::short(Encoder::new(), false, []);
|
let mut builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);
|
||||||
let mut tokens = Vec::new();
|
let mut tokens = Vec::new();
|
||||||
let mut stats = FrameStats::default();
|
let mut stats = FrameStats::default();
|
||||||
s.write_blocked_frame(
|
s.write_blocked_frame(
|
||||||
@ -2815,7 +2815,7 @@ mod tests {
|
|||||||
assert_eq!(s.send_atomic(b"abc").unwrap(), 0);
|
assert_eq!(s.send_atomic(b"abc").unwrap(), 0);
|
||||||
|
|
||||||
// Assert that STREAM_DATA_BLOCKED is sent.
|
// Assert that STREAM_DATA_BLOCKED is sent.
|
||||||
let mut builder = PacketBuilder::short(Encoder::new(), false, []);
|
let mut builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);
|
||||||
let mut tokens = Vec::new();
|
let mut tokens = Vec::new();
|
||||||
let mut stats = FrameStats::default();
|
let mut stats = FrameStats::default();
|
||||||
s.write_blocked_frame(
|
s.write_blocked_frame(
|
||||||
@ -2902,7 +2902,7 @@ mod tests {
|
|||||||
s.mark_as_lost(len_u64, 0, true);
|
s.mark_as_lost(len_u64, 0, true);
|
||||||
|
|
||||||
// No frame should be sent here.
|
// No frame should be sent here.
|
||||||
let mut builder = PacketBuilder::short(Encoder::new(), false, []);
|
let mut builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);
|
||||||
let mut tokens = Vec::new();
|
let mut tokens = Vec::new();
|
||||||
let mut stats = FrameStats::default();
|
let mut stats = FrameStats::default();
|
||||||
s.write_stream_frame(
|
s.write_stream_frame(
|
||||||
@ -2962,7 +2962,7 @@ mod tests {
|
|||||||
s.close();
|
s.close();
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut builder = PacketBuilder::short(Encoder::new(), false, []);
|
let mut builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);
|
||||||
let header_len = builder.len();
|
let header_len = builder.len();
|
||||||
builder.set_limit(header_len + space);
|
builder.set_limit(header_len + space);
|
||||||
|
|
||||||
@ -3063,7 +3063,7 @@ mod tests {
|
|||||||
s.send(data).unwrap();
|
s.send(data).unwrap();
|
||||||
s.close();
|
s.close();
|
||||||
|
|
||||||
let mut builder = PacketBuilder::short(Encoder::new(), false, []);
|
let mut builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);
|
||||||
let header_len = builder.len();
|
let header_len = builder.len();
|
||||||
// Add 2 for the frame type and stream ID, then add the extra.
|
// Add 2 for the frame type and stream ID, then add the extra.
|
||||||
builder.set_limit(header_len + data.len() + 2 + extra);
|
builder.set_limit(header_len + data.len() + 2 + extra);
|
||||||
|
59  third_party/rust/neqo-transport/src/server.rs  vendored
@ -10,7 +10,6 @@ use std::{
|
|||||||
cell::RefCell,
|
cell::RefCell,
|
||||||
cmp::min,
|
cmp::min,
|
||||||
collections::HashSet,
|
collections::HashSet,
|
||||||
fs::OpenOptions,
|
|
||||||
ops::{Deref, DerefMut},
|
ops::{Deref, DerefMut},
|
||||||
path::PathBuf,
|
path::PathBuf,
|
||||||
rc::Rc,
|
rc::Rc,
|
||||||
@ -18,14 +17,12 @@ use std::{
|
|||||||
};
|
};
|
||||||
|
|
||||||
use neqo_common::{
|
use neqo_common::{
|
||||||
self as common, event::Provider, hex, qdebug, qerror, qinfo, qlog::NeqoQlog, qtrace, qwarn,
|
event::Provider, hex, qdebug, qerror, qinfo, qlog::NeqoQlog, qtrace, qwarn, Datagram, Role,
|
||||||
Datagram, Role,
|
|
||||||
};
|
};
|
||||||
use neqo_crypto::{
|
use neqo_crypto::{
|
||||||
encode_ech_config, AntiReplay, Cipher, PrivateKey, PublicKey, ZeroRttCheckResult,
|
encode_ech_config, AntiReplay, Cipher, PrivateKey, PublicKey, ZeroRttCheckResult,
|
||||||
ZeroRttChecker,
|
ZeroRttChecker,
|
||||||
};
|
};
|
||||||
use qlog::streamer::QlogStreamer;
|
|
||||||
|
|
||||||
pub use crate::addr_valid::ValidateAddress;
|
pub use crate::addr_valid::ValidateAddress;
|
||||||
use crate::{
|
use crate::{
|
||||||
@ -258,49 +255,17 @@ impl Server {
|
|||||||
self.qlog_dir
|
self.qlog_dir
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.map_or_else(NeqoQlog::disabled, |qlog_dir| {
|
.map_or_else(NeqoQlog::disabled, |qlog_dir| {
|
||||||
let mut qlog_path = qlog_dir.clone();
|
NeqoQlog::enabled_with_file(
|
||||||
|
qlog_dir.clone(),
|
||||||
qlog_path.push(format!("{odcid}.qlog"));
|
Role::Server,
|
||||||
|
Some("Neqo server qlog".to_string()),
|
||||||
// The original DCID is chosen by the client. Using create_new()
|
Some("Neqo server qlog".to_string()),
|
||||||
// prevents attackers from overwriting existing logs.
|
odcid,
|
||||||
match OpenOptions::new()
|
)
|
||||||
.write(true)
|
.unwrap_or_else(|e| {
|
||||||
.create_new(true)
|
qerror!("failed to create NeqoQlog: {}", e);
|
||||||
.open(&qlog_path)
|
NeqoQlog::disabled()
|
||||||
{
|
})
|
||||||
Ok(f) => {
|
|
||||||
qinfo!("Qlog output to {}", qlog_path.display());
|
|
||||||
|
|
||||||
let streamer = QlogStreamer::new(
|
|
||||||
qlog::QLOG_VERSION.to_string(),
|
|
||||||
Some("Neqo server qlog".to_string()),
|
|
||||||
Some("Neqo server qlog".to_string()),
|
|
||||||
None,
|
|
||||||
std::time::Instant::now(),
|
|
||||||
common::qlog::new_trace(Role::Server),
|
|
||||||
qlog::events::EventImportance::Base,
|
|
||||||
Box::new(f),
|
|
||||||
);
|
|
||||||
let n_qlog = NeqoQlog::enabled(streamer, qlog_path);
|
|
||||||
match n_qlog {
|
|
||||||
Ok(nql) => nql,
|
|
||||||
Err(e) => {
|
|
||||||
// Keep going but w/o qlogging
|
|
||||||
qerror!("NeqoQlog error: {}", e);
|
|
||||||
NeqoQlog::disabled()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Err(e) => {
|
|
||||||
qerror!(
|
|
||||||
"Could not open file {} for qlog output: {}",
|
|
||||||
qlog_path.display(),
|
|
||||||
e
|
|
||||||
);
|
|
||||||
NeqoQlog::disabled()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
28  third_party/rust/neqo-transport/src/stats.rs  vendored
@@ -16,7 +16,7 @@ use std::{

use neqo_common::qwarn;

-use crate::packet::PacketNumber;
+use crate::{ecn::EcnCount, packet::PacketNumber};

pub const MAX_PTO_COUNTS: usize = 16;

@@ -166,6 +166,25 @@ pub struct Stats {
    pub incoming_datagram_dropped: usize,

    pub datagram_tx: DatagramStats,

+   /// Number of paths known to be ECN capable.
+   pub ecn_paths_capable: usize,
+   /// Number of paths known to be ECN incapable.
+   pub ecn_paths_not_capable: usize,
+   /// ECN counts for outgoing UDP datagrams, returned by remote through QUIC ACKs.
+   ///
+   /// Note: Given that QUIC ACKs only carry [`Ect0`], [`Ect1`] and [`Ce`], but
+   /// never [`NotEct`], the [`NotEct`] value will always be 0.
+   ///
+   /// See also <https://www.rfc-editor.org/rfc/rfc9000.html#section-19.3.2>.
+   ///
+   /// [`Ect0`]: neqo_common::tos::IpTosEcn::Ect0
+   /// [`Ect1`]: neqo_common::tos::IpTosEcn::Ect1
+   /// [`Ce`]: neqo_common::tos::IpTosEcn::Ce
+   /// [`NotEct`]: neqo_common::tos::IpTosEcn::NotEct
+   pub ecn_tx: EcnCount,
+   /// ECN counts for incoming UDP datagrams, read from IP TOS header.
+   pub ecn_rx: EcnCount,
}

impl Stats {
@@ -222,7 +241,12 @@ impl Debug for Stats {
        writeln!(f, " frames rx:")?;
        self.frame_rx.fmt(f)?;
        writeln!(f, " frames tx:")?;
-       self.frame_tx.fmt(f)
+       self.frame_tx.fmt(f)?;
+       writeln!(
+           f,
+           " ecn: {:?} for tx {:?} for rx {} capable paths {} not capable paths",
+           self.ecn_tx, self.ecn_rx, self.ecn_paths_capable, self.ecn_paths_not_capable
+       )
    }
}
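The stats hunks above expose the ECN outcome per connection: how many paths turned out ECN capable or incapable, plus transmit-side counts echoed in ACKs and receive-side counts read from the IP TOS byte. A standalone sketch of the summary line the new `Debug` arm prints, using stand-in types (the real `Stats` and `EcnCount` are the ones in the diff):

// Stand-ins only; field names mirror the new `Stats` fields above.
#[derive(Debug, Default)]
struct EcnCountStandIn { not_ect: u64, ect0: u64, ect1: u64, ce: u64 }

#[derive(Default)]
struct EcnStats {
    ecn_paths_capable: usize,
    ecn_paths_not_capable: usize,
    ecn_tx: EcnCountStandIn,
    ecn_rx: EcnCountStandIn,
}

fn main() {
    let s = EcnStats { ecn_paths_capable: 1, ..Default::default() };
    // Mirrors the format string added to `impl Debug for Stats`.
    println!(
        "  ecn: {:?} for tx {:?} for rx {} capable paths {} not capable paths",
        s.ecn_tx, s.ecn_rx, s.ecn_paths_capable, s.ecn_paths_not_capable
    );
}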
@@ -139,7 +139,7 @@ pub enum TransportParameter {

impl TransportParameter {
    fn encode(&self, enc: &mut Encoder, tp: TransportParameterId) {
-       qdebug!("TP encoded; type 0x{:02x} val {:?}", tp, self);
+       qtrace!("TP encoded; type 0x{:02x} val {:?}", tp, self);
        enc.encode_varint(tp);
        match self {
            Self::Bytes(a) => {
@@ -309,7 +309,7 @@ impl TransportParameter {
        if d.remaining() > 0 {
            return Err(Error::TooMuchData);
        }
-       qdebug!("TP decoded; type 0x{:02x} val {:?}", tp, value);
+       qtrace!("TP decoded; type 0x{:02x} val {:?}", tp, value);
        Ok(Some((tp, value)))
    }
}
|
183  third_party/rust/neqo-transport/src/tracking.rs  vendored
@ -13,10 +13,9 @@ use std::{
|
|||||||
time::{Duration, Instant},
|
time::{Duration, Instant},
|
||||||
};
|
};
|
||||||
|
|
||||||
use enum_map::Enum;
|
use enum_map::{enum_map, Enum, EnumMap};
|
||||||
use neqo_common::{qdebug, qinfo, qtrace, qwarn, IpTosEcn};
|
use neqo_common::{qdebug, qinfo, qtrace, qwarn, IpTosEcn};
|
||||||
use neqo_crypto::{Epoch, TLS_EPOCH_HANDSHAKE, TLS_EPOCH_INITIAL};
|
use neqo_crypto::{Epoch, TLS_EPOCH_HANDSHAKE, TLS_EPOCH_INITIAL};
|
||||||
use smallvec::{smallvec, SmallVec};
|
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
ecn::EcnCount,
|
ecn::EcnCount,
|
||||||
@ -26,7 +25,6 @@ use crate::{
|
|||||||
stats::FrameStats,
|
stats::FrameStats,
|
||||||
};
|
};
|
||||||
|
|
||||||
// TODO(mt) look at enabling EnumMap for this: https://stackoverflow.com/a/44905797/1375574
|
|
||||||
#[derive(Clone, Copy, Debug, PartialEq, PartialOrd, Ord, Eq, Enum)]
|
#[derive(Clone, Copy, Debug, PartialEq, PartialOrd, Ord, Eq, Enum)]
|
||||||
pub enum PacketNumberSpace {
|
pub enum PacketNumberSpace {
|
||||||
Initial,
|
Initial,
|
||||||
@ -70,17 +68,17 @@ impl From<PacketType> for PacketNumberSpace {
|
|||||||
|
|
||||||
#[derive(Clone, Copy, Default)]
|
#[derive(Clone, Copy, Default)]
|
||||||
pub struct PacketNumberSpaceSet {
|
pub struct PacketNumberSpaceSet {
|
||||||
initial: bool,
|
spaces: EnumMap<PacketNumberSpace, bool>,
|
||||||
handshake: bool,
|
|
||||||
application_data: bool,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl PacketNumberSpaceSet {
|
impl PacketNumberSpaceSet {
|
||||||
pub const fn all() -> Self {
|
pub fn all() -> Self {
|
||||||
Self {
|
Self {
|
||||||
initial: true,
|
spaces: enum_map! {
|
||||||
handshake: true,
|
PacketNumberSpace::Initial => true,
|
||||||
application_data: true,
|
PacketNumberSpace::Handshake => true,
|
||||||
|
PacketNumberSpace::ApplicationData => true,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -89,21 +87,13 @@ impl Index<PacketNumberSpace> for PacketNumberSpaceSet {
|
|||||||
type Output = bool;
|
type Output = bool;
|
||||||
|
|
||||||
fn index(&self, space: PacketNumberSpace) -> &Self::Output {
|
fn index(&self, space: PacketNumberSpace) -> &Self::Output {
|
||||||
match space {
|
&self.spaces[space]
|
||||||
PacketNumberSpace::Initial => &self.initial,
|
|
||||||
PacketNumberSpace::Handshake => &self.handshake,
|
|
||||||
PacketNumberSpace::ApplicationData => &self.application_data,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl IndexMut<PacketNumberSpace> for PacketNumberSpaceSet {
|
impl IndexMut<PacketNumberSpace> for PacketNumberSpaceSet {
|
||||||
fn index_mut(&mut self, space: PacketNumberSpace) -> &mut Self::Output {
|
fn index_mut(&mut self, space: PacketNumberSpace) -> &mut Self::Output {
|
||||||
match space {
|
&mut self.spaces[space]
|
||||||
PacketNumberSpace::Initial => &mut self.initial,
|
|
||||||
PacketNumberSpace::Handshake => &mut self.handshake,
|
|
||||||
PacketNumberSpace::ApplicationData => &mut self.application_data,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -245,6 +235,13 @@ pub struct AckToken {
|
|||||||
ranges: Vec<PacketRange>,
|
ranges: Vec<PacketRange>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl AckToken {
|
||||||
|
/// Get the space for this token.
|
||||||
|
pub const fn space(&self) -> PacketNumberSpace {
|
||||||
|
self.space
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// A structure that tracks what packets have been received,
|
/// A structure that tracks what packets have been received,
|
||||||
/// and what needs acknowledgement for a packet number space.
|
/// and what needs acknowledgement for a packet number space.
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
@ -290,7 +287,12 @@ impl RecvdPackets {
|
|||||||
ack_frequency_seqno: 0,
|
ack_frequency_seqno: 0,
|
||||||
ack_delay: DEFAULT_ACK_DELAY,
|
ack_delay: DEFAULT_ACK_DELAY,
|
||||||
unacknowledged_count: 0,
|
unacknowledged_count: 0,
|
||||||
unacknowledged_tolerance: DEFAULT_ACK_PACKET_TOLERANCE,
|
unacknowledged_tolerance: if space == PacketNumberSpace::ApplicationData {
|
||||||
|
DEFAULT_ACK_PACKET_TOLERANCE
|
||||||
|
} else {
|
||||||
|
// ACK more aggressively
|
||||||
|
0
|
||||||
|
},
|
||||||
ignore_order: false,
|
ignore_order: false,
|
||||||
ecn_count: EcnCount::default(),
|
ecn_count: EcnCount::default(),
|
||||||
}
|
}
|
||||||
@ -380,7 +382,7 @@ impl RecvdPackets {
|
|||||||
/// Return true if the packet was the largest received so far.
|
/// Return true if the packet was the largest received so far.
|
||||||
pub fn set_received(&mut self, now: Instant, pn: PacketNumber, ack_eliciting: bool) -> bool {
|
pub fn set_received(&mut self, now: Instant, pn: PacketNumber, ack_eliciting: bool) -> bool {
|
||||||
let next_in_order_pn = self.ranges.front().map_or(0, |r| r.largest + 1);
|
let next_in_order_pn = self.ranges.front().map_or(0, |r| r.largest + 1);
|
||||||
qdebug!([self], "received {}, next: {}", pn, next_in_order_pn);
|
qtrace!([self], "received {}, next: {}", pn, next_in_order_pn);
|
||||||
|
|
||||||
self.add(pn);
|
self.add(pn);
|
||||||
self.trim_ranges();
|
self.trim_ranges();
|
||||||
@ -497,6 +499,9 @@ impl RecvdPackets {
|
|||||||
.take(max_ranges)
|
.take(max_ranges)
|
||||||
.cloned()
|
.cloned()
|
||||||
.collect::<Vec<_>>();
|
.collect::<Vec<_>>();
|
||||||
|
if ranges.is_empty() {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
builder.encode_varint(if self.ecn_count.is_some() {
|
builder.encode_varint(if self.ecn_count.is_some() {
|
||||||
FRAME_TYPE_ACK_ECN
|
FRAME_TYPE_ACK_ECN
|
||||||
@ -550,34 +555,25 @@ impl ::std::fmt::Display for RecvdPackets {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct AckTracker {
|
pub struct AckTracker {
|
||||||
/// This stores information about received packets in *reverse* order
|
spaces: EnumMap<PacketNumberSpace, Option<RecvdPackets>>,
|
||||||
/// by spaces. Why reverse? Because we ultimately only want to keep
|
|
||||||
/// `ApplicationData` and this allows us to drop other spaces easily.
|
|
||||||
spaces: SmallVec<[RecvdPackets; 1]>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl AckTracker {
|
impl AckTracker {
|
||||||
pub fn drop_space(&mut self, space: PacketNumberSpace) {
|
pub fn drop_space(&mut self, space: PacketNumberSpace) {
|
||||||
let sp = match space {
|
assert_ne!(
|
||||||
PacketNumberSpace::Initial => self.spaces.pop(),
|
space,
|
||||||
PacketNumberSpace::Handshake => {
|
PacketNumberSpace::ApplicationData,
|
||||||
let sp = self.spaces.pop();
|
"discarding application space"
|
||||||
self.spaces.shrink_to_fit();
|
);
|
||||||
sp
|
if space == PacketNumberSpace::Handshake {
|
||||||
}
|
assert!(self.spaces[PacketNumberSpace::Initial].is_none());
|
||||||
PacketNumberSpace::ApplicationData => panic!("discarding application space"),
|
}
|
||||||
};
|
self.spaces[space].take();
|
||||||
assert_eq!(sp.unwrap().space, space, "dropping spaces out of order");
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_mut(&mut self, space: PacketNumberSpace) -> Option<&mut RecvdPackets> {
|
pub fn get_mut(&mut self, space: PacketNumberSpace) -> Option<&mut RecvdPackets> {
|
||||||
self.spaces.get_mut(match space {
|
self.spaces[space].as_mut()
|
||||||
PacketNumberSpace::ApplicationData => 0,
|
|
||||||
PacketNumberSpace::Handshake => 1,
|
|
||||||
PacketNumberSpace::Initial => 2,
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn ack_freq(
|
pub fn ack_freq(
|
||||||
@ -588,37 +584,45 @@ impl AckTracker {
|
|||||||
ignore_order: bool,
|
ignore_order: bool,
|
||||||
) {
|
) {
|
||||||
// Only ApplicationData ever delays ACK.
|
// Only ApplicationData ever delays ACK.
|
||||||
self.get_mut(PacketNumberSpace::ApplicationData)
|
if let Some(space) = self.get_mut(PacketNumberSpace::ApplicationData) {
|
||||||
.unwrap()
|
space.ack_freq(seqno, tolerance, delay, ignore_order);
|
||||||
.ack_freq(seqno, tolerance, delay, ignore_order);
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Force an ACK to be generated immediately (a PING was received).
|
/// Force an ACK to be generated immediately.
|
||||||
pub fn immediate_ack(&mut self, now: Instant) {
|
pub fn immediate_ack(&mut self, space: PacketNumberSpace, now: Instant) {
|
||||||
self.get_mut(PacketNumberSpace::ApplicationData)
|
if let Some(space) = self.get_mut(space) {
|
||||||
.unwrap()
|
space.immediate_ack(now);
|
||||||
.immediate_ack(now);
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Determine the earliest time that an ACK might be needed.
|
/// Determine the earliest time that an ACK might be needed.
|
||||||
pub fn ack_time(&self, now: Instant) -> Option<Instant> {
|
pub fn ack_time(&self, now: Instant) -> Option<Instant> {
|
||||||
for recvd in &self.spaces {
|
#[cfg(debug_assertions)]
|
||||||
qtrace!("ack_time for {} = {:?}", recvd.space, recvd.ack_time());
|
for (space, recvd) in &self.spaces {
|
||||||
|
if let Some(recvd) = recvd {
|
||||||
|
qtrace!("ack_time for {} = {:?}", space, recvd.ack_time());
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if self.spaces.len() == 1 {
|
if self.spaces[PacketNumberSpace::Initial].is_none()
|
||||||
self.spaces[0].ack_time()
|
&& self.spaces[PacketNumberSpace::Handshake].is_none()
|
||||||
} else {
|
{
|
||||||
// Ignore any time that is in the past relative to `now`.
|
if let Some(recvd) = &self.spaces[PacketNumberSpace::ApplicationData] {
|
||||||
// That is something of a hack, but there are cases where we can't send ACK
|
return recvd.ack_time();
|
||||||
// frames for all spaces, which can mean that one space is stuck in the past.
|
}
|
||||||
// That isn't a problem because we guarantee that earlier spaces will always
|
|
||||||
// be able to send ACK frames.
|
|
||||||
self.spaces
|
|
||||||
.iter()
|
|
||||||
.filter_map(|recvd| recvd.ack_time().filter(|t| *t > now))
|
|
||||||
.min()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Ignore any time that is in the past relative to `now`.
|
||||||
|
// That is something of a hack, but there are cases where we can't send ACK
|
||||||
|
// frames for all spaces, which can mean that one space is stuck in the past.
|
||||||
|
// That isn't a problem because we guarantee that earlier spaces will always
|
||||||
|
// be able to send ACK frames.
|
||||||
|
self.spaces
|
||||||
|
.values()
|
||||||
|
.flatten()
|
||||||
|
.filter_map(|recvd| recvd.ack_time().filter(|t| *t > now))
|
||||||
|
.min()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn acked(&mut self, token: &AckToken) {
|
pub fn acked(&mut self, token: &AckToken) {
|
||||||
@ -645,11 +649,11 @@ impl AckTracker {
|
|||||||
impl Default for AckTracker {
|
impl Default for AckTracker {
|
||||||
fn default() -> Self {
|
fn default() -> Self {
|
||||||
Self {
|
Self {
|
||||||
spaces: smallvec![
|
spaces: enum_map! {
|
||||||
RecvdPackets::new(PacketNumberSpace::ApplicationData),
|
PacketNumberSpace::Initial => Some(RecvdPackets::new(PacketNumberSpace::Initial)),
|
||||||
RecvdPackets::new(PacketNumberSpace::Handshake),
|
PacketNumberSpace::Handshake => Some(RecvdPackets::new(PacketNumberSpace::Handshake)),
|
||||||
RecvdPackets::new(PacketNumberSpace::Initial),
|
PacketNumberSpace::ApplicationData => Some(RecvdPackets::new(PacketNumberSpace::ApplicationData)),
|
||||||
],
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -667,7 +671,7 @@ mod tests {
|
|||||||
};
|
};
|
||||||
use crate::{
|
use crate::{
|
||||||
frame::Frame,
|
frame::Frame,
|
||||||
packet::{PacketBuilder, PacketNumber},
|
packet::{PacketBuilder, PacketNumber, PacketType},
|
||||||
stats::FrameStats,
|
stats::FrameStats,
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -797,7 +801,7 @@ mod tests {
     }
 
     fn write_frame_at(rp: &mut RecvdPackets, now: Instant) {
-        let mut builder = PacketBuilder::short(Encoder::new(), false, []);
+        let mut builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);
         let mut stats = FrameStats::default();
         let mut tokens = Vec::new();
         rp.write_frame(now, RTT, &mut builder, &mut tokens, &mut stats);
@@ -942,17 +946,10 @@ mod tests {
         tracker.drop_space(PacketNumberSpace::ApplicationData);
     }
 
-    #[test]
-    #[should_panic(expected = "dropping spaces out of order")]
-    fn drop_out_of_order() {
-        let mut tracker = AckTracker::default();
-        tracker.drop_space(PacketNumberSpace::Handshake);
-    }
-
     #[test]
     fn drop_spaces() {
         let mut tracker = AckTracker::default();
-        let mut builder = PacketBuilder::short(Encoder::new(), false, []);
+        let mut builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);
         tracker
             .get_mut(PacketNumberSpace::Initial)
             .unwrap()
@@ -1017,7 +1014,7 @@ mod tests {
             .ack_time(now().checked_sub(Duration::from_millis(1)).unwrap())
             .is_some());
 
-        let mut builder = PacketBuilder::short(Encoder::new(), false, []);
+        let mut builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);
         builder.set_limit(10);
 
         let mut stats = FrameStats::default();
@@ -1048,7 +1045,7 @@ mod tests {
             .ack_time(now().checked_sub(Duration::from_millis(1)).unwrap())
             .is_some());
 
-        let mut builder = PacketBuilder::short(Encoder::new(), false, []);
+        let mut builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);
         // The code pessimistically assumes that each range needs 16 bytes to express.
         // So this won't be enough for a second range.
         builder.set_limit(RecvdPackets::USEFUL_ACK_LEN + 8);
@@ -1136,4 +1133,28 @@ mod tests {
         assert!(copy[PacketNumberSpace::Handshake]);
         assert!(copy[PacketNumberSpace::ApplicationData]);
     }
+
+    #[test]
+    fn from_packet_type() {
+        assert_eq!(
+            PacketNumberSpace::from(PacketType::Initial),
+            PacketNumberSpace::Initial
+        );
+        assert_eq!(
+            PacketNumberSpace::from(PacketType::Handshake),
+            PacketNumberSpace::Handshake
+        );
+        assert_eq!(
+            PacketNumberSpace::from(PacketType::ZeroRtt),
+            PacketNumberSpace::ApplicationData
+        );
+        assert_eq!(
+            PacketNumberSpace::from(PacketType::Short),
+            PacketNumberSpace::ApplicationData
+        );
+        assert!(std::panic::catch_unwind(|| {
+            PacketNumberSpace::from(PacketType::VersionNegotiation)
+        })
+        .is_err());
+    }
 }
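The new `from_packet_type` test pins down how packet types map onto packet number spaces: Initial and Handshake to their own spaces, 0-RTT and short-header packets to ApplicationData, and a panic for Version Negotiation, which carries no packet number. The conversion impl itself is not part of this hunk; a sketch consistent with the test, using local stand-in enums rather than neqo's:

// Stand-ins mirroring the names used in the test; neqo's real enums have more
// variants and live in its packet and tracking modules.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
enum PacketType {
    Initial,
    ZeroRtt,
    Handshake,
    Short,
    VersionNegotiation,
}

#[derive(Debug, PartialEq, Eq, Clone, Copy)]
enum PacketNumberSpace {
    Initial,
    Handshake,
    ApplicationData,
}

impl From<PacketType> for PacketNumberSpace {
    fn from(pt: PacketType) -> Self {
        match pt {
            PacketType::Initial => Self::Initial,
            PacketType::Handshake => Self::Handshake,
            // 0-RTT and 1-RTT (short header) packets share the application data space.
            PacketType::ZeroRtt | PacketType::Short => Self::ApplicationData,
            // Version Negotiation packets have no packet number, so no space applies;
            // the test only requires that this conversion panics.
            PacketType::VersionNegotiation => panic!("no packet number space"),
        }
    }
}

fn main() {
    assert_eq!(PacketNumberSpace::from(PacketType::Short), PacketNumberSpace::ApplicationData);
    assert!(std::panic::catch_unwind(|| PacketNumberSpace::from(PacketType::VersionNegotiation)).is_err());
}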
@@ -1 +1 @@
-{"files":{"Cargo.toml":"2a0119d7971850169f74f1229c8cc2d9a0f69f6384ea4a1a0da4f1449574a5f2","src/lib.rs":"bf3bc79b1d799a42b73e64d2b203ce688cc0859d7afa6c66eec429ec36199ba6"},"package":null}
+{"files":{"Cargo.toml":"20aadbf0239bbfb94acf4b5a5abd606bcc0956126de4251102a4357b16e7b945","src/lib.rs":"bf3bc79b1d799a42b73e64d2b203ce688cc0859d7afa6c66eec429ec36199ba6"},"package":null}
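This hunk is a vendored crate's `.cargo-checksum.json`: only the `Cargo.toml` digest changes, which is consistent with a version-only bump while `src/lib.rs` stays byte-identical. Each entry is simply the SHA-256 of the file's contents; a small sketch for checking one by hand, assuming the sha2 crate and a path relative to the tree root:

use std::fs;

use sha2::{Digest, Sha256};

fn main() -> std::io::Result<()> {
    // Substitute whichever vendored file you want to verify.
    let bytes = fs::read("third_party/rust/neqo-udp/Cargo.toml")?;
    let digest = Sha256::digest(&bytes);
    // Compare against the hex string recorded in .cargo-checksum.json.
    println!("{:x}", digest);
    Ok(())
}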
2
third_party/rust/neqo-udp/Cargo.toml
vendored
@@ -18,7 +18,7 @@ bench = []
 edition = "2021"
 rust-version = "1.76.0"
 name = "neqo-udp"
-version = "0.8.2"
+version = "0.9.0"
 authors = ["The Neqo Authors <necko@mozilla.com>"]
 build = false
 autobins = false