Mirror of https://github.com/mozilla/gecko-dev.git (synced 2024-11-23 12:51:06 +00:00)

Bug 1916645 - update neqo to v0.9.0 r=kershaw,necko-reviewers

Differential Revision: https://phabricator.services.mozilla.com/D221019

parent: fd17a9b0c5
commit: bf284bbab1
@@ -90,9 +90,9 @@ git = "https://github.com/mozilla/mp4parse-rust"
 rev = "a138e40ec1c603615873e524b5b22e11c0ec4820"
 replace-with = "vendored-sources"
 
-[source."git+https://github.com/mozilla/neqo?tag=v0.8.2"]
+[source."git+https://github.com/mozilla/neqo?tag=v0.9.0"]
 git = "https://github.com/mozilla/neqo"
-tag = "v0.8.2"
+tag = "v0.9.0"
 replace-with = "vendored-sources"
 
 [source."git+https://github.com/servo/unicode-bidi?rev=ca612daf1c08c53abe07327cb3e6ef6e0a760f0c"]
Cargo.lock (generated): 31 changes

@@ -4045,8 +4045,8 @@ dependencies = [
 
 [[package]]
 name = "neqo-bin"
-version = "0.8.2"
-source = "git+https://github.com/mozilla/neqo?tag=v0.8.2#b7e17668eb8f2fb13c1d945a9a7f79bd31257eb8"
+version = "0.9.0"
+source = "git+https://github.com/mozilla/neqo?tag=v0.9.0#28f60bd0ba3209ecba4102eec123859a3a8afd45"
 dependencies = [
  "clap",
  "clap-verbosity-flag",
@@ -4067,21 +4067,20 @@ dependencies = [
 
 [[package]]
 name = "neqo-common"
-version = "0.8.2"
-source = "git+https://github.com/mozilla/neqo?tag=v0.8.2#b7e17668eb8f2fb13c1d945a9a7f79bd31257eb8"
+version = "0.9.0"
+source = "git+https://github.com/mozilla/neqo?tag=v0.9.0#28f60bd0ba3209ecba4102eec123859a3a8afd45"
 dependencies = [
  "enum-map",
  "env_logger",
  "log",
  "qlog",
- "time 0.3.36",
- "winapi",
+ "windows",
 ]
 
 [[package]]
 name = "neqo-crypto"
-version = "0.8.2"
-source = "git+https://github.com/mozilla/neqo?tag=v0.8.2#b7e17668eb8f2fb13c1d945a9a7f79bd31257eb8"
+version = "0.9.0"
+source = "git+https://github.com/mozilla/neqo?tag=v0.9.0#28f60bd0ba3209ecba4102eec123859a3a8afd45"
 dependencies = [
  "bindgen 0.69.4",
  "log",
@@ -4095,8 +4094,8 @@ dependencies = [
 
 [[package]]
 name = "neqo-http3"
-version = "0.8.2"
-source = "git+https://github.com/mozilla/neqo?tag=v0.8.2#b7e17668eb8f2fb13c1d945a9a7f79bd31257eb8"
+version = "0.9.0"
+source = "git+https://github.com/mozilla/neqo?tag=v0.9.0#28f60bd0ba3209ecba4102eec123859a3a8afd45"
 dependencies = [
  "enumset",
  "log",
@@ -4111,8 +4110,8 @@ dependencies = [
 
 [[package]]
 name = "neqo-qpack"
-version = "0.8.2"
-source = "git+https://github.com/mozilla/neqo?tag=v0.8.2#b7e17668eb8f2fb13c1d945a9a7f79bd31257eb8"
+version = "0.9.0"
+source = "git+https://github.com/mozilla/neqo?tag=v0.9.0#28f60bd0ba3209ecba4102eec123859a3a8afd45"
 dependencies = [
  "log",
  "neqo-common",
@@ -4123,8 +4122,8 @@ dependencies = [
 
 [[package]]
 name = "neqo-transport"
-version = "0.8.2"
-source = "git+https://github.com/mozilla/neqo?tag=v0.8.2#b7e17668eb8f2fb13c1d945a9a7f79bd31257eb8"
+version = "0.9.0"
+source = "git+https://github.com/mozilla/neqo?tag=v0.9.0#28f60bd0ba3209ecba4102eec123859a3a8afd45"
 dependencies = [
  "enum-map",
  "indexmap 2.2.6",
@@ -4138,8 +4137,8 @@ dependencies = [
 
 [[package]]
 name = "neqo-udp"
-version = "0.8.2"
-source = "git+https://github.com/mozilla/neqo?tag=v0.8.2#b7e17668eb8f2fb13c1d945a9a7f79bd31257eb8"
+version = "0.9.0"
+source = "git+https://github.com/mozilla/neqo?tag=v0.9.0#28f60bd0ba3209ecba4102eec123859a3a8afd45"
 dependencies = [
  "log",
  "neqo-common",
@@ -9,11 +9,11 @@ license = "MPL-2.0"
 name = "neqo_glue"
 
 [dependencies]
-neqo-udp = { tag = "v0.8.2", git = "https://github.com/mozilla/neqo" }
-neqo-http3 = { tag = "v0.8.2", git = "https://github.com/mozilla/neqo" }
-neqo-transport = { tag = "v0.8.2", git = "https://github.com/mozilla/neqo" }
-neqo-common = { tag = "v0.8.2", git = "https://github.com/mozilla/neqo" }
-neqo-qpack = { tag = "v0.8.2", git = "https://github.com/mozilla/neqo" }
+neqo-udp = { tag = "v0.9.0", git = "https://github.com/mozilla/neqo" }
+neqo-http3 = { tag = "v0.9.0", git = "https://github.com/mozilla/neqo" }
+neqo-transport = { tag = "v0.9.0", git = "https://github.com/mozilla/neqo" }
+neqo-common = { tag = "v0.9.0", git = "https://github.com/mozilla/neqo" }
+neqo-qpack = { tag = "v0.9.0", git = "https://github.com/mozilla/neqo" }
 nserror = { path = "../../../xpcom/rust/nserror" }
 nsstring = { path = "../../../xpcom/rust/nsstring" }
 xpcom = { path = "../../../xpcom/rust/xpcom" }
@@ -28,7 +28,7 @@ uuid = { version = "1.0", features = ["v4"] }
 winapi = {version = "0.3", features = ["ws2def"] }
 
 [dependencies.neqo-crypto]
-tag = "v0.8.2"
+tag = "v0.9.0"
 git = "https://github.com/mozilla/neqo"
 default-features = false
 features = ["gecko"]
@@ -5,9 +5,7 @@
 #[cfg(not(windows))]
 use libc::{AF_INET, AF_INET6};
 use neqo_common::event::Provider;
-use neqo_common::{
-    self as common, qdebug, qerror, qlog::NeqoQlog, qwarn, Datagram, Header, IpTos, Role,
-};
+use neqo_common::{qdebug, qerror, qlog::NeqoQlog, qwarn, Datagram, Header, IpTos, Role};
 use neqo_crypto::{init, PRErrorCode};
 use neqo_http3::{
     features::extended_connect::SessionCloseReason, Error as Http3Error, Http3Client,
@@ -19,14 +17,12 @@ use neqo_transport::{
 };
 use nserror::*;
 use nsstring::*;
-use qlog::streamer::QlogStreamer;
 use std::borrow::Cow;
 use std::cell::RefCell;
 use std::cmp::{max, min};
 use std::convert::TryFrom;
 use std::convert::TryInto;
 use std::ffi::c_void;
-use std::fs::OpenOptions;
 use std::net::SocketAddr;
 use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
 use std::path::PathBuf;
@@ -282,34 +278,24 @@ impl NeqoHttp3Conn {
 
         if !qlog_dir.is_empty() {
             let qlog_dir_conv = str::from_utf8(qlog_dir).map_err(|_| NS_ERROR_INVALID_ARG)?;
-            let mut qlog_path = PathBuf::from(qlog_dir_conv);
-            qlog_path.push(format!("{}_{}.qlog", origin, Uuid::new_v4()));
+            let qlog_path = PathBuf::from(qlog_dir_conv);
 
-            // Emit warnings but to not return an error if qlog initialization
-            // fails.
-            match OpenOptions::new()
-                .write(true)
-                .create(true)
-                .truncate(true)
-                .open(&qlog_path)
-            {
-                Err(_) => qwarn!("Could not open qlog path: {}", qlog_path.display()),
-                Ok(f) => {
-                    let streamer = QlogStreamer::new(
-                        qlog::QLOG_VERSION.to_string(),
-                        Some("Firefox Client qlog".to_string()),
-                        Some("Firefox Client qlog".to_string()),
-                        None,
-                        std::time::Instant::now(),
-                        common::qlog::new_trace(Role::Client),
-                        qlog::events::EventImportance::Base,
-                        Box::new(f),
-                    );
-
-                    match NeqoQlog::enabled(streamer, &qlog_path) {
-                        Err(_) => qwarn!("Could not write to qlog path: {}", qlog_path.display()),
-                        Ok(nq) => conn.set_qlog(nq),
-                    }
-                }
-            }
+            match NeqoQlog::enabled_with_file(
+                qlog_path.clone(),
+                Role::Client,
+                Some("Firefox Client qlog".to_string()),
+                Some("Firefox Client qlog".to_string()),
+                format!("{}_{}.qlog", origin, Uuid::new_v4()),
+            ) {
+                Ok(qlog) => conn.set_qlog(qlog),
+                Err(e) => {
+                    // Emit warnings but to not return an error if qlog initialization
+                    // fails.
+                    qwarn!(
+                        "failed to create NeqoQlog at {}: {}",
+                        qlog_path.display(),
+                        e
+                    );
+                }
+            }
         }
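The hunk above replaces neqo_glue's hand-rolled OpenOptions + QlogStreamer setup with the NeqoQlog::enabled_with_file helper that this same commit adds to neqo-common (see third_party/rust/neqo-common/src/qlog.rs below). For reference, a minimal caller-side sketch, not the committed Gecko code: the directory string and origin are illustrative placeholders, and the UUID suffix used by the real code is omitted.

use std::path::PathBuf;

use neqo_common::{qlog::NeqoQlog, qwarn, Role};

fn qlog_for_origin(qlog_dir: &str, origin: &str) -> NeqoQlog {
    if qlog_dir.is_empty() {
        return NeqoQlog::disabled();
    }
    let qlog_path = PathBuf::from(qlog_dir);
    match NeqoQlog::enabled_with_file(
        qlog_path.clone(),
        Role::Client,
        Some("Firefox Client qlog".to_string()),
        Some("Firefox Client qlog".to_string()),
        format!("{origin}_connection"), // file prefix; the real code appends a UUID here
    ) {
        Ok(qlog) => qlog,
        Err(e) => {
            // Log and fall back to a disabled qlog rather than failing the connection.
            qwarn!("failed to create NeqoQlog at {}: {}", qlog_path.display(), e);
            NeqoQlog::disabled()
        }
    }
}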
@@ -6,11 +6,11 @@ edition = "2018"
 license = "MPL-2.0"
 
 [dependencies]
-neqo-bin = { tag = "v0.8.2", git = "https://github.com/mozilla/neqo" }
-neqo-transport = { tag = "v0.8.2", git = "https://github.com/mozilla/neqo" }
-neqo-common = { tag = "v0.8.2", git = "https://github.com/mozilla/neqo" }
-neqo-http3 = { tag = "v0.8.2", git = "https://github.com/mozilla/neqo" }
-neqo-qpack = { tag = "v0.8.2", git = "https://github.com/mozilla/neqo" }
+neqo-bin = { tag = "v0.9.0", git = "https://github.com/mozilla/neqo" }
+neqo-transport = { tag = "v0.9.0", git = "https://github.com/mozilla/neqo" }
+neqo-common = { tag = "v0.9.0", git = "https://github.com/mozilla/neqo" }
+neqo-http3 = { tag = "v0.9.0", git = "https://github.com/mozilla/neqo" }
+neqo-qpack = { tag = "v0.9.0", git = "https://github.com/mozilla/neqo" }
 log = "0.4.0"
 base64 = "0.21"
 cfg-if = "1.0"
@@ -20,7 +20,7 @@ tokio = { version = "1", features = ["rt-multi-thread"] }
 mozilla-central-workspace-hack = { version = "0.1", features = ["http3server"], optional = true }
 
 [dependencies.neqo-crypto]
-tag = "v0.8.2"
+tag = "v0.9.0"
 git = "https://github.com/mozilla/neqo"
 default-features = false
 features = ["gecko"]
@ -1 +1 @@
|
||||
{"files":{"Cargo.toml":"213791380401f74b5f2407818759035833dbbdcda76e35d791cd352651400f96","benches/main.rs":"aa39bf1f08863e3bace034a991c60a4723f1a7d30b3fc1d1f8c4d7f73bc748c3","src/bin/client.rs":"db77efd75dc0745b6dd983ab8fa3bc8f5f9111967f0d90d23cb19140a940246d","src/bin/server.rs":"2f7ab3c7a98117bd162e6fd07abef1d21791d1bb240db3aae61afa6ff72df83a","src/client/http09.rs":"868a55062e864e7c290e345e3049afbd49796ec3655259a681457540efa3650f","src/client/http3.rs":"7ffba6396ab5875cda5f3ab092d4cc34ab16adad30277b017bc667086d374d18","src/client/mod.rs":"3bf40a6dcc5fde24c823f55ee9d34a2e7d96d2d19980b234d3ec22e33771c14c","src/lib.rs":"e41fe10d5f45b4472ca97a8be531a6b959ec47f094cf2fad3f4f50954ce09046","src/server/http09.rs":"7b0b0459d2b71ecb1d4c93177304a8b7dc0a74dc4cb0a9875df18295ab04b271","src/server/http3.rs":"9d5361a724be1d0e234bbc4b3893a8830825e5886a24a40b96e3f87f35c7b968","src/server/mod.rs":"91f8cd6278c42eef20b6e16f3d903705073d741093bcdf161b58c01914aca2de","src/udp.rs":"81391238621282fae1efc4e5b28be7226733e1bfef7e790f21fb23395cb738bc"},"package":null}
|
||||
{"files":{"Cargo.toml":"a11377f6773fd9dd49cbc5e434fa67a94b78556b8da45eb831bc3898fc388e8c","benches/main.rs":"aa39bf1f08863e3bace034a991c60a4723f1a7d30b3fc1d1f8c4d7f73bc748c3","src/bin/client.rs":"db77efd75dc0745b6dd983ab8fa3bc8f5f9111967f0d90d23cb19140a940246d","src/bin/server.rs":"2f7ab3c7a98117bd162e6fd07abef1d21791d1bb240db3aae61afa6ff72df83a","src/client/http09.rs":"1849b2ba103ad0e6b365aa63a272457d798d0635db2711e0a88496feb6336d5b","src/client/http3.rs":"de98fc88347b5216911c9536420e6557c50241267064c0f62b5b77789db62ffa","src/client/mod.rs":"6423e41fc351ae36868a165e9bca172aac9c08195f67ca91b692f0ca58979c95","src/lib.rs":"3264b53d5d9d99420dab92578572ac7c4b3ece747840c115d2a0db6a420d56e8","src/server/http09.rs":"9ffb0f62c6202a2914086b7e1d8ba77e016c1b4f4a9895b268a6312a04ad70e3","src/server/http3.rs":"0bdab101bffda37257360f9a968d32ff8884b40f292878f3dc27b055e0b5864b","src/server/mod.rs":"e1edfc71853f8b5be96287391919dc84d24191e865f7b9b4a38eebfda07ce453","src/udp.rs":"9042b73c20223e1c7b45d862dea9417fc367032db09dd05d48ca06ac33638435"},"package":null}
|
third_party/rust/neqo-bin/Cargo.toml (vendored): 2 changes

@@ -16,7 +16,7 @@ test = []
 edition = "2021"
 rust-version = "1.76.0"
 name = "neqo-bin"
-version = "0.8.2"
+version = "0.9.0"
 authors = ["The Neqo Authors <necko@mozilla.com>"]
 build = false
 autobins = false
39
third_party/rust/neqo-bin/src/client/http09.rs
vendored
39
third_party/rust/neqo-bin/src/client/http09.rs
vendored
@ -26,14 +26,27 @@ use neqo_transport::{
|
||||
use url::Url;
|
||||
|
||||
use super::{get_output_file, qlog_new, Args, CloseState, Res};
|
||||
use crate::STREAM_IO_BUFFER_SIZE;
|
||||
|
||||
pub struct Handler<'a> {
|
||||
streams: HashMap<StreamId, Option<BufWriter<File>>>,
|
||||
url_queue: VecDeque<Url>,
|
||||
handled_urls: Vec<Url>,
|
||||
all_paths: Vec<PathBuf>,
|
||||
args: &'a Args,
|
||||
token: Option<ResumptionToken>,
|
||||
needs_key_update: bool,
|
||||
read_buffer: Vec<u8>,
|
||||
}
|
||||
|
||||
impl<'a> Handler<'a> {
|
||||
fn reinit(&mut self) {
|
||||
for url in self.handled_urls.drain(..) {
|
||||
self.url_queue.push_front(url);
|
||||
}
|
||||
self.streams.clear();
|
||||
self.all_paths.clear();
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> super::Handler for Handler<'a> {
|
||||
@ -78,6 +91,12 @@ impl<'a> super::Handler for Handler<'a> {
|
||||
qdebug!("{event:?}");
|
||||
self.download_urls(client);
|
||||
}
|
||||
ConnectionEvent::ZeroRttRejected => {
|
||||
qdebug!("{event:?}");
|
||||
// All 0-RTT data was rejected. We need to retransmit it.
|
||||
self.reinit();
|
||||
self.download_urls(client);
|
||||
}
|
||||
ConnectionEvent::ResumptionToken(token) => {
|
||||
self.token = Some(token);
|
||||
}
|
||||
@ -92,10 +111,7 @@ impl<'a> super::Handler for Handler<'a> {
|
||||
}
|
||||
|
||||
if self.args.resume && self.token.is_none() {
|
||||
let Some(token) = client.take_resumption_token(Instant::now()) else {
|
||||
return Ok(false);
|
||||
};
|
||||
self.token = Some(token);
|
||||
self.token = client.take_resumption_token(Instant::now());
|
||||
}
|
||||
|
||||
Ok(true)
|
||||
@ -199,10 +215,12 @@ impl<'b> Handler<'b> {
|
||||
Self {
|
||||
streams: HashMap::new(),
|
||||
url_queue,
|
||||
handled_urls: Vec::new(),
|
||||
all_paths: Vec::new(),
|
||||
args,
|
||||
token: None,
|
||||
needs_key_update: args.key_update,
|
||||
read_buffer: vec![0; STREAM_IO_BUFFER_SIZE],
|
||||
}
|
||||
}
|
||||
|
||||
@ -239,6 +257,7 @@ impl<'b> Handler<'b> {
|
||||
client.stream_close_send(client_stream_id).unwrap();
|
||||
let out_file = get_output_file(&url, &self.args.output_dir, &mut self.all_paths);
|
||||
self.streams.insert(client_stream_id, out_file);
|
||||
self.handled_urls.push(url);
|
||||
true
|
||||
}
|
||||
Err(e @ (Error::StreamLimitError | Error::ConnectionState)) => {
|
||||
@ -257,25 +276,26 @@ impl<'b> Handler<'b> {
|
||||
fn read_from_stream(
|
||||
client: &mut Connection,
|
||||
stream_id: StreamId,
|
||||
read_buffer: &mut [u8],
|
||||
output_read_data: bool,
|
||||
maybe_out_file: &mut Option<BufWriter<File>>,
|
||||
) -> Res<bool> {
|
||||
let mut data = vec![0; 4096];
|
||||
loop {
|
||||
let (sz, fin) = client.stream_recv(stream_id, &mut data)?;
|
||||
let (sz, fin) = client.stream_recv(stream_id, read_buffer)?;
|
||||
if sz == 0 {
|
||||
return Ok(fin);
|
||||
}
|
||||
let read_buffer = &read_buffer[0..sz];
|
||||
|
||||
if let Some(out_file) = maybe_out_file {
|
||||
out_file.write_all(&data[..sz])?;
|
||||
out_file.write_all(read_buffer)?;
|
||||
} else if !output_read_data {
|
||||
qdebug!("READ[{stream_id}]: {sz} bytes");
|
||||
qdebug!("READ[{stream_id}]: {} bytes", read_buffer.len());
|
||||
} else {
|
||||
qdebug!(
|
||||
"READ[{}]: {}",
|
||||
stream_id,
|
||||
String::from_utf8(data.clone()).unwrap()
|
||||
std::str::from_utf8(read_buffer).unwrap()
|
||||
);
|
||||
}
|
||||
if fin {
|
||||
@ -294,6 +314,7 @@ impl<'b> Handler<'b> {
|
||||
let fin_recvd = Self::read_from_stream(
|
||||
client,
|
||||
stream_id,
|
||||
&mut self.read_buffer,
|
||||
self.args.output_read_data,
|
||||
maybe_out_file,
|
||||
)?;
|
||||
|
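The http09 client changes above add handled_urls bookkeeping and a reinit() helper so that, when the server rejects 0-RTT, every request already issued is pushed back onto the URL queue and retransmitted. A standalone sketch of just that bookkeeping follows; the types are simplified stand-ins for the handler's url_queue / handled_urls / streams fields, and only the requeue logic is illustrated.

use std::collections::{HashMap, VecDeque};

struct RequestTracker {
    url_queue: VecDeque<String>,
    handled_urls: Vec<String>,
    streams: HashMap<u64, String>, // stream id -> URL currently in flight
}

impl RequestTracker {
    // Take the next URL and record it as handled, as download_urls() does per stream.
    fn start_next(&mut self, stream_id: u64) -> Option<String> {
        let url = self.url_queue.pop_front()?;
        self.streams.insert(stream_id, url.clone());
        self.handled_urls.push(url.clone());
        Some(url)
    }

    // On ZeroRttRejected: everything sent in 0-RTT must be retransmitted, so the
    // already-issued URLs go back onto the queue and per-stream state is dropped.
    fn reinit(&mut self) {
        for url in self.handled_urls.drain(..) {
            self.url_queue.push_front(url);
        }
        self.streams.clear();
    }
}

fn main() {
    let mut t = RequestTracker {
        url_queue: VecDeque::from(["a".to_string(), "b".to_string()]),
        handled_urls: Vec::new(),
        streams: HashMap::new(),
    };
    let _ = t.start_next(0);
    let _ = t.start_next(4);
    t.reinit(); // pretend the server rejected 0-RTT
    assert_eq!(t.url_queue.len(), 2);
    assert!(t.streams.is_empty());
}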
54
third_party/rust/neqo-bin/src/client/http3.rs
vendored
54
third_party/rust/neqo-bin/src/client/http3.rs
vendored
@ -28,18 +28,21 @@ use neqo_transport::{
|
||||
use url::Url;
|
||||
|
||||
use super::{get_output_file, qlog_new, Args, CloseState, Res};
|
||||
use crate::STREAM_IO_BUFFER_SIZE;
|
||||
|
||||
pub struct Handler<'a> {
|
||||
#[allow(clippy::struct_field_names)]
|
||||
url_handler: UrlHandler<'a>,
|
||||
token: Option<ResumptionToken>,
|
||||
output_read_data: bool,
|
||||
read_buffer: Vec<u8>,
|
||||
}
|
||||
|
||||
impl<'a> Handler<'a> {
|
||||
pub(crate) fn new(url_queue: VecDeque<Url>, args: &'a Args) -> Self {
|
||||
let url_handler = UrlHandler {
|
||||
url_queue,
|
||||
handled_urls: Vec::new(),
|
||||
stream_handlers: HashMap::new(),
|
||||
all_paths: Vec::new(),
|
||||
handler_type: if args.test.is_some() {
|
||||
@ -54,6 +57,7 @@ impl<'a> Handler<'a> {
|
||||
url_handler,
|
||||
token: None,
|
||||
output_read_data: args.output_read_data,
|
||||
read_buffer: vec![0; STREAM_IO_BUFFER_SIZE],
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -151,6 +155,16 @@ impl super::Client for Http3Client {
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Handler<'a> {
|
||||
fn reinit(&mut self) {
|
||||
for url in self.url_handler.handled_urls.drain(..) {
|
||||
self.url_handler.url_queue.push_front(url);
|
||||
}
|
||||
self.url_handler.stream_handlers.clear();
|
||||
self.url_handler.all_paths.clear();
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> super::Handler for Handler<'a> {
|
||||
type Client = Http3Client;
|
||||
|
||||
@ -182,16 +196,14 @@ impl<'a> super::Handler for Handler<'a> {
|
||||
qwarn!("Data on unexpected stream: {stream_id}");
|
||||
}
|
||||
Some(handler) => loop {
|
||||
let mut data = vec![0; 4096];
|
||||
let (sz, fin) = client
|
||||
.read_data(Instant::now(), stream_id, &mut data)
|
||||
.read_data(Instant::now(), stream_id, &mut self.read_buffer)
|
||||
.expect("Read should succeed");
|
||||
|
||||
handler.process_data_readable(
|
||||
stream_id,
|
||||
fin,
|
||||
data,
|
||||
sz,
|
||||
&self.read_buffer[..sz],
|
||||
self.output_read_data,
|
||||
)?;
|
||||
|
||||
@ -222,6 +234,13 @@ impl<'a> super::Handler for Handler<'a> {
|
||||
}
|
||||
Http3ClientEvent::StateChange(Http3State::Connected)
|
||||
| Http3ClientEvent::RequestsCreatable => {
|
||||
qinfo!("{event:?}");
|
||||
self.url_handler.process_urls(client);
|
||||
}
|
||||
Http3ClientEvent::ZeroRttRejected => {
|
||||
qinfo!("{event:?}");
|
||||
// All 0-RTT data was rejected. We need to retransmit it.
|
||||
self.reinit();
|
||||
self.url_handler.process_urls(client);
|
||||
}
|
||||
Http3ClientEvent::ResumptionToken(t) => self.token = Some(t),
|
||||
@ -245,8 +264,7 @@ trait StreamHandler {
|
||||
&mut self,
|
||||
stream_id: StreamId,
|
||||
fin: bool,
|
||||
data: Vec<u8>,
|
||||
sz: usize,
|
||||
data: &[u8],
|
||||
output_read_data: bool,
|
||||
) -> Res<bool>;
|
||||
fn process_data_writable(&mut self, client: &mut Http3Client, stream_id: StreamId);
|
||||
@ -275,7 +293,7 @@ impl StreamHandlerType {
|
||||
Self::Upload => Box::new(UploadStreamHandler {
|
||||
data: vec![42; args.upload_size],
|
||||
offset: 0,
|
||||
chunk_size: 32768,
|
||||
chunk_size: STREAM_IO_BUFFER_SIZE,
|
||||
start: Instant::now(),
|
||||
}),
|
||||
}
|
||||
@ -297,21 +315,20 @@ impl StreamHandler for DownloadStreamHandler {
|
||||
&mut self,
|
||||
stream_id: StreamId,
|
||||
fin: bool,
|
||||
data: Vec<u8>,
|
||||
sz: usize,
|
||||
data: &[u8],
|
||||
output_read_data: bool,
|
||||
) -> Res<bool> {
|
||||
if let Some(out_file) = &mut self.out_file {
|
||||
if sz > 0 {
|
||||
out_file.write_all(&data[..sz])?;
|
||||
if !data.is_empty() {
|
||||
out_file.write_all(data)?;
|
||||
}
|
||||
return Ok(true);
|
||||
} else if !output_read_data {
|
||||
qdebug!("READ[{stream_id}]: {sz} bytes");
|
||||
} else if let Ok(txt) = String::from_utf8(data.clone()) {
|
||||
qdebug!("READ[{stream_id}]: {} bytes", data.len());
|
||||
} else if let Ok(txt) = std::str::from_utf8(data) {
|
||||
qdebug!("READ[{stream_id}]: {txt}");
|
||||
} else {
|
||||
qdebug!("READ[{}]: 0x{}", stream_id, hex(&data));
|
||||
qdebug!("READ[{}]: 0x{}", stream_id, hex(data));
|
||||
}
|
||||
|
||||
if fin {
|
||||
@ -344,11 +361,10 @@ impl StreamHandler for UploadStreamHandler {
|
||||
&mut self,
|
||||
stream_id: StreamId,
|
||||
_fin: bool,
|
||||
data: Vec<u8>,
|
||||
_sz: usize,
|
||||
data: &[u8],
|
||||
_output_read_data: bool,
|
||||
) -> Res<bool> {
|
||||
if let Ok(txt) = String::from_utf8(data.clone()) {
|
||||
if let Ok(txt) = std::str::from_utf8(data) {
|
||||
let trimmed_txt = txt.trim_end_matches(char::from(0));
|
||||
let parsed: usize = trimmed_txt.parse().unwrap();
|
||||
if parsed == self.data.len() {
|
||||
@ -356,7 +372,7 @@ impl StreamHandler for UploadStreamHandler {
|
||||
qinfo!("Stream ID: {stream_id:?}, Upload time: {upload_time:?}");
|
||||
}
|
||||
} else {
|
||||
panic!("Unexpected data [{}]: 0x{}", stream_id, hex(&data));
|
||||
panic!("Unexpected data [{}]: 0x{}", stream_id, hex(data));
|
||||
}
|
||||
Ok(true)
|
||||
}
|
||||
@ -383,6 +399,7 @@ impl StreamHandler for UploadStreamHandler {
|
||||
|
||||
struct UrlHandler<'a> {
|
||||
url_queue: VecDeque<Url>,
|
||||
handled_urls: Vec<Url>,
|
||||
stream_handlers: HashMap<StreamId, Box<dyn StreamHandler>>,
|
||||
all_paths: Vec<PathBuf>,
|
||||
handler_type: StreamHandlerType,
|
||||
@ -432,6 +449,7 @@ impl<'a> UrlHandler<'a> {
|
||||
client_stream_id,
|
||||
);
|
||||
self.stream_handlers.insert(client_stream_id, handler);
|
||||
self.handled_urls.push(url);
|
||||
true
|
||||
}
|
||||
Err(
|
||||
|
60
third_party/rust/neqo-bin/src/client/mod.rs
vendored
60
third_party/rust/neqo-bin/src/client/mod.rs
vendored
@ -23,14 +23,13 @@ use futures::{
|
||||
future::{select, Either},
|
||||
FutureExt, TryFutureExt,
|
||||
};
|
||||
use neqo_common::{self as common, qdebug, qerror, qinfo, qlog::NeqoQlog, qwarn, Datagram, Role};
|
||||
use neqo_common::{qdebug, qerror, qinfo, qlog::NeqoQlog, qwarn, Datagram, Role};
|
||||
use neqo_crypto::{
|
||||
constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256},
|
||||
init, Cipher, ResumptionToken,
|
||||
};
|
||||
use neqo_http3::Output;
|
||||
use neqo_transport::{AppError, CloseReason, ConnectionId, Version};
|
||||
use qlog::{events::EventImportance, streamer::QlogStreamer};
|
||||
use tokio::time::Sleep;
|
||||
use url::{Origin, Url};
|
||||
|
||||
@ -46,7 +45,7 @@ pub enum Error {
|
||||
ArgumentError(&'static str),
|
||||
Http3Error(neqo_http3::Error),
|
||||
IoError(io::Error),
|
||||
QlogError,
|
||||
QlogError(qlog::Error),
|
||||
TransportError(neqo_transport::Error),
|
||||
ApplicationError(neqo_transport::AppError),
|
||||
CryptoError(neqo_crypto::Error),
|
||||
@ -71,8 +70,8 @@ impl From<neqo_http3::Error> for Error {
|
||||
}
|
||||
|
||||
impl From<qlog::Error> for Error {
|
||||
fn from(_err: qlog::Error) -> Self {
|
||||
Self::QlogError
|
||||
fn from(err: qlog::Error) -> Self {
|
||||
Self::QlogError(err)
|
||||
}
|
||||
}
|
||||
|
||||
@ -174,7 +173,7 @@ pub struct Args {
|
||||
|
||||
impl Args {
|
||||
#[must_use]
|
||||
#[cfg(feature = "bench")]
|
||||
#[cfg(any(test, feature = "bench"))]
|
||||
#[allow(clippy::missing_panics_doc)]
|
||||
pub fn new(requests: &[u64]) -> Self {
|
||||
use std::str::FromStr;
|
||||
@ -253,6 +252,8 @@ impl Args {
|
||||
}
|
||||
self.shared.use_old_http = true;
|
||||
self.resume = true;
|
||||
// PMTUD probes inflate what we sent in 1-RTT, causing QNS to fail the test.
|
||||
self.shared.quic_parameters.no_pmtud = true;
|
||||
}
|
||||
"multiconnect" => {
|
||||
self.shared.use_old_http = true;
|
||||
@ -277,6 +278,11 @@ impl Args {
|
||||
_ => exit(127),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(any(test, feature = "bench"))]
|
||||
pub fn set_qlog_dir(&mut self, dir: PathBuf) {
|
||||
self.shared.qlog_dir = Some(dir);
|
||||
}
|
||||
}
|
||||
|
||||
fn get_output_file(
|
||||
@ -453,32 +459,26 @@ impl<'a, H: Handler> Runner<'a, H> {
|
||||
}
|
||||
|
||||
fn qlog_new(args: &Args, hostname: &str, cid: &ConnectionId) -> Res<NeqoQlog> {
|
||||
if let Some(qlog_dir) = &args.shared.qlog_dir {
|
||||
let mut qlog_path = qlog_dir.clone();
|
||||
let filename = format!("{hostname}-{cid}.sqlog");
|
||||
qlog_path.push(filename);
|
||||
let Some(qlog_dir) = args.shared.qlog_dir.clone() else {
|
||||
return Ok(NeqoQlog::disabled());
|
||||
};
|
||||
|
||||
let f = OpenOptions::new()
|
||||
.write(true)
|
||||
.create(true)
|
||||
.truncate(true)
|
||||
.open(&qlog_path)?;
|
||||
// hostname might be an IPv6 address, e.g. `[::1]`. `:` is an invalid
|
||||
// Windows file name character.
|
||||
#[cfg(windows)]
|
||||
let hostname: String = hostname
|
||||
.chars()
|
||||
.map(|c| if c == ':' { '_' } else { c })
|
||||
.collect();
|
||||
|
||||
let streamer = QlogStreamer::new(
|
||||
qlog::QLOG_VERSION.to_string(),
|
||||
Some("Example qlog".to_string()),
|
||||
Some("Example qlog description".to_string()),
|
||||
None,
|
||||
std::time::Instant::now(),
|
||||
common::qlog::new_trace(Role::Client),
|
||||
EventImportance::Base,
|
||||
Box::new(f),
|
||||
);
|
||||
|
||||
Ok(NeqoQlog::enabled(streamer, qlog_path)?)
|
||||
} else {
|
||||
Ok(NeqoQlog::disabled())
|
||||
}
|
||||
NeqoQlog::enabled_with_file(
|
||||
qlog_dir,
|
||||
Role::Client,
|
||||
Some("Example qlog".to_string()),
|
||||
Some("Example qlog description".to_string()),
|
||||
format!("{hostname}-{cid}"),
|
||||
)
|
||||
.map_err(Error::QlogError)
|
||||
}
|
||||
|
||||
pub async fn client(mut args: Args) -> Res<()> {
|
||||
|
79
third_party/rust/neqo-bin/src/lib.rs
vendored
79
third_party/rust/neqo-bin/src/lib.rs
vendored
@ -24,6 +24,11 @@ pub mod client;
|
||||
pub mod server;
|
||||
pub mod udp;
|
||||
|
||||
/// Firefox default value
|
||||
///
|
||||
/// See `network.buffer.cache.size` pref <https://searchfox.org/mozilla-central/rev/f6e3b81aac49e602f06c204f9278da30993cdc8a/modules/libpref/init/all.js#3212>
|
||||
const STREAM_IO_BUFFER_SIZE: usize = 32 * 1024;
|
||||
|
||||
#[derive(Debug, Parser)]
|
||||
pub struct SharedArgs {
|
||||
#[command(flatten)]
|
||||
@ -65,7 +70,7 @@ pub struct SharedArgs {
|
||||
pub quic_parameters: QuicParameters,
|
||||
}
|
||||
|
||||
#[cfg(feature = "bench")]
|
||||
#[cfg(any(test, feature = "bench"))]
|
||||
impl Default for SharedArgs {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
@ -132,7 +137,7 @@ pub struct QuicParameters {
|
||||
pub preferred_address_v6: Option<String>,
|
||||
}
|
||||
|
||||
#[cfg(feature = "bench")]
|
||||
#[cfg(any(test, feature = "bench"))]
|
||||
impl Default for QuicParameters {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
@ -252,3 +257,73 @@ impl Display for Error {
|
||||
}
|
||||
|
||||
impl std::error::Error for Error {}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::{fs, path::PathBuf, str::FromStr, time::SystemTime};
|
||||
|
||||
use crate::{client, server};
|
||||
|
||||
struct TempDir {
|
||||
path: PathBuf,
|
||||
}
|
||||
|
||||
impl TempDir {
|
||||
fn new() -> Self {
|
||||
let mut dir = std::env::temp_dir();
|
||||
dir.push(format!(
|
||||
"neqo-bin-test-{}",
|
||||
SystemTime::now()
|
||||
.duration_since(SystemTime::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_secs()
|
||||
));
|
||||
fs::create_dir(&dir).unwrap();
|
||||
Self { path: dir }
|
||||
}
|
||||
|
||||
fn path(&self) -> PathBuf {
|
||||
self.path.clone()
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for TempDir {
|
||||
fn drop(&mut self) {
|
||||
if self.path.exists() {
|
||||
fs::remove_dir_all(&self.path).unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn write_qlog_file() {
|
||||
neqo_crypto::init_db(PathBuf::from_str("../test-fixture/db").unwrap()).unwrap();
|
||||
|
||||
let temp_dir = TempDir::new();
|
||||
|
||||
let mut client_args = client::Args::new(&[1]);
|
||||
client_args.set_qlog_dir(temp_dir.path());
|
||||
let mut server_args = server::Args::default();
|
||||
server_args.set_qlog_dir(temp_dir.path());
|
||||
|
||||
let client = client::client(client_args);
|
||||
let server = Box::pin(server::server(server_args));
|
||||
tokio::select! {
|
||||
_ = client => {}
|
||||
res = server => panic!("expect server not to terminate: {res:?}"),
|
||||
};
|
||||
|
||||
// Verify that the directory contains two non-empty files
|
||||
let entries: Vec<_> = fs::read_dir(temp_dir.path())
|
||||
.unwrap()
|
||||
.filter_map(Result::ok)
|
||||
.collect();
|
||||
assert_eq!(entries.len(), 2, "expect 2 files in the directory");
|
||||
|
||||
for entry in entries {
|
||||
let metadata = entry.metadata().unwrap();
|
||||
assert!(metadata.is_file(), "expect a file, found something else");
|
||||
assert!(metadata.len() > 0, "expect file not be empty");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
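The new crate-level STREAM_IO_BUFFER_SIZE constant above (32 KiB, matching Firefox's network.buffer.cache.size default) replaces the per-read vec![0; 4096] and vec![0; 4000] allocations in the client and server handlers: each handler now owns one read buffer and passes &buf[..n] slices along. A minimal sketch of that buffer-reuse pattern, using std::io::Read as a stand-in for the QUIC stream read calls; the names here are illustrative, not neqo-bin's.

use std::io::Read;

const STREAM_IO_BUFFER_SIZE: usize = 32 * 1024; // Firefox's network.buffer.cache.size default

struct Reader {
    read_buffer: Vec<u8>,
}

impl Reader {
    fn new() -> Self {
        Self { read_buffer: vec![0; STREAM_IO_BUFFER_SIZE] }
    }

    // Reuse the same buffer for every read instead of allocating a fresh Vec per call.
    fn drain<R: Read>(&mut self, mut src: R) -> std::io::Result<usize> {
        let mut total = 0;
        loop {
            let n = src.read(&mut self.read_buffer)?;
            if n == 0 {
                return Ok(total);
            }
            let chunk = &self.read_buffer[..n];
            // Only `chunk` is meaningful; pass it on (here we just count it).
            total += chunk.len();
        }
    }
}

fn main() -> std::io::Result<()> {
    let data = vec![1u8; 100_000];
    let copied = Reader::new().drain(&data[..])?;
    assert_eq!(copied, 100_000);
    Ok(())
}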
159
third_party/rust/neqo-bin/src/server/http09.rs
vendored
159
third_party/rust/neqo-bin/src/server/http09.rs
vendored
@ -4,7 +4,7 @@
|
||||
// option. This file may not be copied, modified, or distributed
|
||||
// except according to those terms.
|
||||
|
||||
use std::{cell::RefCell, collections::HashMap, fmt::Display, rc::Rc, time::Instant};
|
||||
use std::{borrow::Cow, cell::RefCell, collections::HashMap, fmt::Display, rc::Rc, time::Instant};
|
||||
|
||||
use neqo_common::{event::Provider, hex, qdebug, qerror, qinfo, qwarn, Datagram};
|
||||
use neqo_crypto::{generate_ech_keys, random, AllowZeroRtt, AntiReplay};
|
||||
@ -15,12 +15,13 @@ use neqo_transport::{
|
||||
};
|
||||
use regex::Regex;
|
||||
|
||||
use super::{qns_read_response, Args};
|
||||
use super::{qns_read_response, Args, ResponseData};
|
||||
use crate::STREAM_IO_BUFFER_SIZE;
|
||||
|
||||
#[derive(Default)]
|
||||
struct HttpStreamState {
|
||||
writable: bool,
|
||||
data_to_send: Option<(Vec<u8>, usize)>,
|
||||
data_to_send: Option<ResponseData>,
|
||||
}
|
||||
|
||||
pub struct HttpServer {
|
||||
@ -29,6 +30,7 @@ pub struct HttpServer {
|
||||
read_state: HashMap<StreamId, Vec<u8>>,
|
||||
is_qns_test: bool,
|
||||
regex: Regex,
|
||||
read_buffer: Vec<u8>,
|
||||
}
|
||||
|
||||
impl HttpServer {
|
||||
@ -72,6 +74,7 @@ impl HttpServer {
|
||||
} else {
|
||||
Regex::new(r"GET +/(\d+)(?:\r)?\n").unwrap()
|
||||
},
|
||||
read_buffer: vec![0; STREAM_IO_BUFFER_SIZE],
|
||||
})
|
||||
}
|
||||
|
||||
@ -87,11 +90,63 @@ impl HttpServer {
|
||||
}
|
||||
}
|
||||
|
||||
fn write(&mut self, stream_id: StreamId, data: Option<Vec<u8>>, conn: &ConnectionRef) {
|
||||
let resp = data.unwrap_or_else(|| Vec::from(&b"404 That request was nonsense\r\n"[..]));
|
||||
fn stream_readable(&mut self, stream_id: StreamId, conn: &ConnectionRef) {
|
||||
if !stream_id.is_client_initiated() || !stream_id.is_bidi() {
|
||||
qdebug!("Stream {} not client-initiated bidi, ignoring", stream_id);
|
||||
return;
|
||||
}
|
||||
let (sz, fin) = conn
|
||||
.borrow_mut()
|
||||
.stream_recv(stream_id, &mut self.read_buffer)
|
||||
.expect("Read should succeed");
|
||||
|
||||
if sz == 0 {
|
||||
if !fin {
|
||||
qdebug!("size 0 but !fin");
|
||||
}
|
||||
return;
|
||||
}
|
||||
let read_buffer = &self.read_buffer[..sz];
|
||||
|
||||
let buf = self.read_state.remove(&stream_id).map_or(
|
||||
Cow::Borrowed(read_buffer),
|
||||
|mut existing| {
|
||||
existing.extend_from_slice(read_buffer);
|
||||
Cow::Owned(existing)
|
||||
},
|
||||
);
|
||||
|
||||
let Ok(msg) = std::str::from_utf8(&buf[..]) else {
|
||||
self.save_partial(stream_id, buf.to_vec(), conn);
|
||||
return;
|
||||
};
|
||||
|
||||
let m = self.regex.captures(msg);
|
||||
let Some(path) = m.and_then(|m| m.get(1)) else {
|
||||
self.save_partial(stream_id, buf.to_vec(), conn);
|
||||
return;
|
||||
};
|
||||
|
||||
let resp: ResponseData = {
|
||||
let path = path.as_str();
|
||||
qdebug!("Path = '{path}'");
|
||||
if self.is_qns_test {
|
||||
match qns_read_response(path) {
|
||||
Ok(data) => data.into(),
|
||||
Err(e) => {
|
||||
qerror!("Failed to read {path}: {e}");
|
||||
b"404".to_vec().into()
|
||||
}
|
||||
}
|
||||
} else {
|
||||
let count = path.parse().unwrap();
|
||||
ResponseData::zeroes(count)
|
||||
}
|
||||
};
|
||||
|
||||
if let Some(stream_state) = self.write_state.get_mut(&stream_id) {
|
||||
match stream_state.data_to_send {
|
||||
None => stream_state.data_to_send = Some((resp, 0)),
|
||||
None => stream_state.data_to_send = Some(resp),
|
||||
Some(_) => {
|
||||
qdebug!("Data already set, doing nothing");
|
||||
}
|
||||
@ -104,90 +159,26 @@ impl HttpServer {
|
||||
stream_id,
|
||||
HttpStreamState {
|
||||
writable: false,
|
||||
data_to_send: Some((resp, 0)),
|
||||
data_to_send: Some(resp),
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
fn stream_readable(&mut self, stream_id: StreamId, conn: &ConnectionRef) {
|
||||
if !stream_id.is_client_initiated() || !stream_id.is_bidi() {
|
||||
qdebug!("Stream {} not client-initiated bidi, ignoring", stream_id);
|
||||
return;
|
||||
}
|
||||
let mut data = vec![0; 4000];
|
||||
let (sz, fin) = conn
|
||||
.borrow_mut()
|
||||
.stream_recv(stream_id, &mut data)
|
||||
.expect("Read should succeed");
|
||||
|
||||
if sz == 0 {
|
||||
if !fin {
|
||||
qdebug!("size 0 but !fin");
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
data.truncate(sz);
|
||||
let buf = if let Some(mut existing) = self.read_state.remove(&stream_id) {
|
||||
existing.append(&mut data);
|
||||
existing
|
||||
} else {
|
||||
data
|
||||
};
|
||||
|
||||
let Ok(msg) = std::str::from_utf8(&buf[..]) else {
|
||||
self.save_partial(stream_id, buf, conn);
|
||||
return;
|
||||
};
|
||||
|
||||
let m = self.regex.captures(msg);
|
||||
let Some(path) = m.and_then(|m| m.get(1)) else {
|
||||
self.save_partial(stream_id, buf, conn);
|
||||
return;
|
||||
};
|
||||
|
||||
let resp = {
|
||||
let path = path.as_str();
|
||||
qdebug!("Path = '{path}'");
|
||||
if self.is_qns_test {
|
||||
match qns_read_response(path) {
|
||||
Ok(data) => Some(data),
|
||||
Err(e) => {
|
||||
qerror!("Failed to read {path}: {e}");
|
||||
Some(b"404".to_vec())
|
||||
}
|
||||
}
|
||||
} else {
|
||||
let count = path.parse().unwrap();
|
||||
Some(vec![b'a'; count])
|
||||
}
|
||||
};
|
||||
self.write(stream_id, resp, conn);
|
||||
}
|
||||
|
||||
fn stream_writable(&mut self, stream_id: StreamId, conn: &ConnectionRef) {
|
||||
match self.write_state.get_mut(&stream_id) {
|
||||
None => {
|
||||
qwarn!("Unknown stream {stream_id}, ignoring event");
|
||||
}
|
||||
Some(stream_state) => {
|
||||
stream_state.writable = true;
|
||||
if let Some((data, ref mut offset)) = &mut stream_state.data_to_send {
|
||||
let sent = conn
|
||||
.borrow_mut()
|
||||
.stream_send(stream_id, &data[*offset..])
|
||||
.unwrap();
|
||||
qdebug!("Wrote {}", sent);
|
||||
*offset += sent;
|
||||
if *offset == data.len() {
|
||||
qinfo!("Sent {sent} on {stream_id}, closing");
|
||||
conn.borrow_mut().stream_close_send(stream_id).unwrap();
|
||||
self.write_state.remove(&stream_id);
|
||||
} else {
|
||||
stream_state.writable = false;
|
||||
}
|
||||
}
|
||||
let Some(stream_state) = self.write_state.get_mut(&stream_id) else {
|
||||
qwarn!("Unknown stream {stream_id}, ignoring event");
|
||||
return;
|
||||
};
|
||||
|
||||
stream_state.writable = true;
|
||||
if let Some(resp) = &mut stream_state.data_to_send {
|
||||
resp.send_h09(stream_id, conn);
|
||||
if resp.done() {
|
||||
conn.borrow_mut().stream_close_send(stream_id).unwrap();
|
||||
self.write_state.remove(&stream_id);
|
||||
} else {
|
||||
stream_state.writable = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
73
third_party/rust/neqo-bin/src/server/http3.rs
vendored
73
third_party/rust/neqo-bin/src/server/http3.rs
vendored
@ -5,23 +5,21 @@
|
||||
// except according to those terms.
|
||||
|
||||
use std::{
|
||||
borrow::Cow,
|
||||
cell::RefCell,
|
||||
cmp::min,
|
||||
collections::HashMap,
|
||||
fmt::{self, Display},
|
||||
rc::Rc,
|
||||
time::Instant,
|
||||
};
|
||||
|
||||
use neqo_common::{hex, qdebug, qerror, qinfo, qwarn, Datagram, Header};
|
||||
use neqo_common::{hex, qdebug, qerror, qinfo, Datagram, Header};
|
||||
use neqo_crypto::{generate_ech_keys, random, AntiReplay};
|
||||
use neqo_http3::{
|
||||
Http3OrWebTransportStream, Http3Parameters, Http3Server, Http3ServerEvent, StreamId,
|
||||
};
|
||||
use neqo_transport::{server::ValidateAddress, ConnectionIdGenerator};
|
||||
|
||||
use super::{qns_read_response, Args};
|
||||
use super::{qns_read_response, Args, ResponseData};
|
||||
|
||||
pub struct HttpServer {
|
||||
server: Http3Server,
|
||||
@ -32,8 +30,6 @@ pub struct HttpServer {
|
||||
}
|
||||
|
||||
impl HttpServer {
|
||||
const MESSAGE: &'static [u8] = &[0; 4096];
|
||||
|
||||
pub fn new(
|
||||
args: &Args,
|
||||
anti_replay: AntiReplay,
|
||||
@ -127,9 +123,9 @@ impl super::HttpServer for HttpServer {
|
||||
} else if let Ok(count) =
|
||||
path.value().trim_matches(|p| p == '/').parse::<usize>()
|
||||
{
|
||||
ResponseData::repeat(Self::MESSAGE, count)
|
||||
ResponseData::zeroes(count)
|
||||
} else {
|
||||
ResponseData::from(Self::MESSAGE)
|
||||
ResponseData::from(path.value())
|
||||
};
|
||||
|
||||
stream
|
||||
@ -138,7 +134,7 @@ impl super::HttpServer for HttpServer {
|
||||
Header::new("content-length", response.remaining.to_string()),
|
||||
])
|
||||
.unwrap();
|
||||
response.send(&stream);
|
||||
response.send_h3(&stream);
|
||||
if response.done() {
|
||||
stream.stream_close_send().unwrap();
|
||||
} else {
|
||||
@ -148,7 +144,7 @@ impl super::HttpServer for HttpServer {
|
||||
Http3ServerEvent::DataWritable { stream } => {
|
||||
if self.posts.get_mut(&stream).is_none() {
|
||||
if let Some(remaining) = self.remaining_data.get_mut(&stream.stream_id()) {
|
||||
remaining.send(&stream);
|
||||
remaining.send_h3(&stream);
|
||||
if remaining.done() {
|
||||
self.remaining_data.remove(&stream.stream_id());
|
||||
stream.stream_close_send().unwrap();
|
||||
@ -181,60 +177,3 @@ impl super::HttpServer for HttpServer {
|
||||
self.server.has_events()
|
||||
}
|
||||
}
|
||||
|
||||
struct ResponseData {
|
||||
data: Cow<'static, [u8]>,
|
||||
offset: usize,
|
||||
remaining: usize,
|
||||
}
|
||||
|
||||
impl From<&[u8]> for ResponseData {
|
||||
fn from(data: &[u8]) -> Self {
|
||||
Self::from(data.to_vec())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Vec<u8>> for ResponseData {
|
||||
fn from(data: Vec<u8>) -> Self {
|
||||
let remaining = data.len();
|
||||
Self {
|
||||
data: Cow::Owned(data),
|
||||
offset: 0,
|
||||
remaining,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ResponseData {
|
||||
const fn repeat(buf: &'static [u8], total: usize) -> Self {
|
||||
Self {
|
||||
data: Cow::Borrowed(buf),
|
||||
offset: 0,
|
||||
remaining: total,
|
||||
}
|
||||
}
|
||||
|
||||
fn send(&mut self, stream: &Http3OrWebTransportStream) {
|
||||
while self.remaining > 0 {
|
||||
let end = min(self.data.len(), self.offset + self.remaining);
|
||||
let slice = &self.data[self.offset..end];
|
||||
match stream.send_data(slice) {
|
||||
Ok(0) => {
|
||||
return;
|
||||
}
|
||||
Ok(sent) => {
|
||||
self.remaining -= sent;
|
||||
self.offset = (self.offset + sent) % self.data.len();
|
||||
}
|
||||
Err(e) => {
|
||||
qwarn!("Error writing to stream {}: {:?}", stream, e);
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const fn done(&self) -> bool {
|
||||
self.remaining == 0
|
||||
}
|
||||
}
|
||||
|
100
third_party/rust/neqo-bin/src/server/mod.rs
vendored
100
third_party/rust/neqo-bin/src/server/mod.rs
vendored
@ -7,7 +7,9 @@
|
||||
#![allow(clippy::future_not_send)]
|
||||
|
||||
use std::{
|
||||
borrow::Cow,
|
||||
cell::RefCell,
|
||||
cmp::min,
|
||||
fmt::{self, Display},
|
||||
fs, io,
|
||||
net::{SocketAddr, ToSocketAddrs},
|
||||
@ -28,10 +30,11 @@ use neqo_crypto::{
|
||||
constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256},
|
||||
init_db, AntiReplay, Cipher,
|
||||
};
|
||||
use neqo_transport::{Output, RandomConnectionIdGenerator, Version};
|
||||
use neqo_http3::{Http3OrWebTransportStream, StreamId};
|
||||
use neqo_transport::{server::ConnectionRef, Output, RandomConnectionIdGenerator, Version};
|
||||
use tokio::time::Sleep;
|
||||
|
||||
use crate::SharedArgs;
|
||||
use crate::{SharedArgs, STREAM_IO_BUFFER_SIZE};
|
||||
|
||||
const ANTI_REPLAY_WINDOW: Duration = Duration::from_secs(10);
|
||||
|
||||
@ -118,7 +121,7 @@ pub struct Args {
|
||||
ech: bool,
|
||||
}
|
||||
|
||||
#[cfg(feature = "bench")]
|
||||
#[cfg(any(test, feature = "bench"))]
|
||||
impl Default for Args {
|
||||
fn default() -> Self {
|
||||
use std::str::FromStr;
|
||||
@ -175,6 +178,11 @@ impl Args {
|
||||
Instant::now()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(any(test, feature = "bench"))]
|
||||
pub fn set_qlog_dir(&mut self, dir: PathBuf) {
|
||||
self.shared.qlog_dir = Some(dir);
|
||||
}
|
||||
}
|
||||
|
||||
fn qns_read_response(filename: &str) -> Result<Vec<u8>, io::Error> {
|
||||
@ -390,3 +398,89 @@ pub async fn server(mut args: Args) -> Res<()> {
|
||||
.run()
|
||||
.await
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct ResponseData {
|
||||
data: Cow<'static, [u8]>,
|
||||
offset: usize,
|
||||
remaining: usize,
|
||||
}
|
||||
|
||||
impl From<&[u8]> for ResponseData {
|
||||
fn from(data: &[u8]) -> Self {
|
||||
Self::from(data.to_vec())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Vec<u8>> for ResponseData {
|
||||
fn from(data: Vec<u8>) -> Self {
|
||||
let remaining = data.len();
|
||||
Self {
|
||||
data: Cow::Owned(data),
|
||||
offset: 0,
|
||||
remaining,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&str> for ResponseData {
|
||||
fn from(data: &str) -> Self {
|
||||
Self::from(data.as_bytes())
|
||||
}
|
||||
}
|
||||
|
||||
impl ResponseData {
|
||||
const fn zeroes(total: usize) -> Self {
|
||||
const MESSAGE: &[u8] = &[0; STREAM_IO_BUFFER_SIZE];
|
||||
Self {
|
||||
data: Cow::Borrowed(MESSAGE),
|
||||
offset: 0,
|
||||
remaining: total,
|
||||
}
|
||||
}
|
||||
|
||||
fn slice(&self) -> &[u8] {
|
||||
let end = min(self.data.len(), self.offset + self.remaining);
|
||||
&self.data[self.offset..end]
|
||||
}
|
||||
|
||||
fn send_h3(&mut self, stream: &Http3OrWebTransportStream) {
|
||||
while self.remaining > 0 {
|
||||
match stream.send_data(self.slice()) {
|
||||
Ok(0) => {
|
||||
return;
|
||||
}
|
||||
Ok(sent) => {
|
||||
self.remaining -= sent;
|
||||
self.offset = (self.offset + sent) % self.data.len();
|
||||
}
|
||||
Err(e) => {
|
||||
qwarn!("Error writing to stream {}: {:?}", stream, e);
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn send_h09(&mut self, stream_id: StreamId, conn: &ConnectionRef) {
|
||||
while self.remaining > 0 {
|
||||
match conn
|
||||
.borrow_mut()
|
||||
.stream_send(stream_id, self.slice())
|
||||
.unwrap()
|
||||
{
|
||||
0 => {
|
||||
return;
|
||||
}
|
||||
sent => {
|
||||
self.remaining -= sent;
|
||||
self.offset = (self.offset + sent) % self.data.len();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const fn done(&self) -> bool {
|
||||
self.remaining == 0
|
||||
}
|
||||
}
|
||||
|
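ResponseData, now shared by the HTTP/0.9 and HTTP/3 servers above, serves `count` bytes out of a fixed 32 KiB zero buffer by tracking `remaining` and wrapping `offset` modulo the buffer length, so the response size no longer dictates allocation size; send_h3 and send_h09 loop until the stream stops accepting data. A standalone sketch of that logic, with a closure standing in for Http3OrWebTransportStream::send_data / Connection::stream_send.

use std::borrow::Cow;
use std::cmp::min;

struct ResponseData {
    data: Cow<'static, [u8]>,
    offset: usize,
    remaining: usize,
}

impl ResponseData {
    const fn zeroes(total: usize) -> Self {
        const MESSAGE: &[u8] = &[0; 32 * 1024];
        Self { data: Cow::Borrowed(MESSAGE), offset: 0, remaining: total }
    }

    fn slice(&self) -> &[u8] {
        let end = min(self.data.len(), self.offset + self.remaining);
        &self.data[self.offset..end]
    }

    fn send(&mut self, mut sink: impl FnMut(&[u8]) -> usize) {
        while self.remaining > 0 {
            match sink(self.slice()) {
                0 => return, // flow-control blocked; resume on the next writable event
                sent => {
                    self.remaining -= sent;
                    self.offset = (self.offset + sent) % self.data.len();
                }
            }
        }
    }

    const fn done(&self) -> bool {
        self.remaining == 0
    }
}

fn main() {
    let mut resp = ResponseData::zeroes(100_000);
    let mut written = 0usize;
    // A sink that accepts at most 10_000 bytes per call.
    resp.send(|chunk| {
        let n = chunk.len().min(10_000);
        written += n;
        n
    });
    assert!(resp.done());
    assert_eq!(written, 100_000);
}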
1
third_party/rust/neqo-bin/src/udp.rs
vendored
1
third_party/rust/neqo-bin/src/udp.rs
vendored
@ -9,6 +9,7 @@ use std::{io, net::SocketAddr};
|
||||
use neqo_common::Datagram;
|
||||
|
||||
/// Ideally this would live in [`neqo-udp`]. [`neqo-udp`] is used in Firefox.
|
||||
///
|
||||
/// Firefox uses `cargo vet`. [`tokio`] the dependency of [`neqo-udp`] is not
|
||||
/// audited as `safe-to-deploy`. `cargo vet` will require `safe-to-deploy` for
|
||||
/// [`tokio`] even when behind a feature flag.
|
||||
|
@ -1 +1 @@
|
||||
{"files":{"Cargo.toml":"3c2a56e78b593343b3d42f35bf87d0ea7cc628d2ab873ff6992c89336e0a44aa","build.rs":"306b2f909a25ae38daf5404a4e128d2a94e8975b70870864c2a71cafec9717c7","src/codec.rs":"549ee76e90898d37102bd4eabfce69a98aaec6862785eaeb4c9af57b7a36a655","src/datagram.rs":"2acecfcbecfbb767ea920e3b22388e67b31fcda776cae5b2d7ecbc67dd9febf7","src/event.rs":"106ca6c4afb107fa49a1bc72f5eb4ae95f4baa1ba19736aa38c8ba973774c160","src/fuzz.rs":"1ca74a34bdc97fedecf8a63c4a13cc487d1b2212398fb76f67792c822002138d","src/header.rs":"480a7848466249a78acddbf0bc0b4a096189abc14a89ad1a0943be571add2c2b","src/hrtime.rs":"93a544743f3994e5d4c494b313a9532ab5bd23541ff63a747cb377ad6d5edc72","src/incrdecoder.rs":"5c45034e61e75c76d2bca8b075c3e7a3cdd8af8c82b67c76283a2b08ab11846b","src/lib.rs":"2381fc00127a7eaf2265c3a13dc1e1d5843e048f3a8a1c97f1e6621c038de380","src/log.rs":"6ed99e15707c4256ae793011ed2f4b33aa81fed70205aaf5f8d3cd11ad451cf0","src/qlog.rs":"1cee4ff3bc9bf735a1bb913e1515ef240a70326a34c56a6ce89de02bc9f3459c","src/tos.rs":"28fd9acfce06f68ac6691efd2609618850182f77ef3717ce2db07bfac19a9396","tests/log.rs":"a11e21fb570258ca93bb40e3923817d381e1e605accbc3aed1df5a0a9918b41d"},"package":null}
|
||||
{"files":{"Cargo.toml":"bbf1410d4b957a9cd30396819738865296b39a8c1b16aaaf1f81642039e1ff37","build.rs":"306b2f909a25ae38daf5404a4e128d2a94e8975b70870864c2a71cafec9717c7","src/codec.rs":"549ee76e90898d37102bd4eabfce69a98aaec6862785eaeb4c9af57b7a36a655","src/datagram.rs":"2acecfcbecfbb767ea920e3b22388e67b31fcda776cae5b2d7ecbc67dd9febf7","src/event.rs":"106ca6c4afb107fa49a1bc72f5eb4ae95f4baa1ba19736aa38c8ba973774c160","src/fuzz.rs":"1ca74a34bdc97fedecf8a63c4a13cc487d1b2212398fb76f67792c822002138d","src/header.rs":"480a7848466249a78acddbf0bc0b4a096189abc14a89ad1a0943be571add2c2b","src/hrtime.rs":"cbae4363ba64ff208d818d1a6ff0b42ec40a4e2b01b9cec224e57b4dc70c3830","src/incrdecoder.rs":"5c45034e61e75c76d2bca8b075c3e7a3cdd8af8c82b67c76283a2b08ab11846b","src/lib.rs":"2381fc00127a7eaf2265c3a13dc1e1d5843e048f3a8a1c97f1e6621c038de380","src/log.rs":"6ed99e15707c4256ae793011ed2f4b33aa81fed70205aaf5f8d3cd11ad451cf0","src/qlog.rs":"f53cb2a52dd7725c577d4e42065fb1c498ccc33dff0449b6889d9fbc1fdb96e2","src/tos.rs":"28fd9acfce06f68ac6691efd2609618850182f77ef3717ce2db07bfac19a9396","tests/log.rs":"a11e21fb570258ca93bb40e3923817d381e1e605accbc3aed1df5a0a9918b41d"},"package":null}
|
third_party/rust/neqo-common/Cargo.toml (vendored): 14 changes

@@ -17,7 +17,7 @@ bench = []
 edition = "2021"
 rust-version = "1.76.0"
 name = "neqo-common"
-version = "0.8.2"
+version = "0.9.0"
 authors = ["The Neqo Authors <necko@mozilla.com>"]
 build = "build.rs"
 autobins = false
@@ -73,11 +73,6 @@ default-features = false
 version = "0.13"
 default-features = false
 
-[dependencies.time]
-version = "0.3"
-features = ["formatting"]
-default-features = false
-
 [dev-dependencies.test-fixture]
 path = "../test-fixture"
 
@@ -85,9 +80,10 @@ path = "../test-fixture"
 build-fuzzing-corpus = ["hex"]
 ci = []
 
-[target."cfg(windows)".dependencies.winapi]
-version = "0.3"
-features = ["timeapi"]
+[target."cfg(windows)".dependencies.windows]
+version = "0.58"
+features = ["Win32_Media"]
+default-features = false
 
 [lints.clippy]
 multiple_crate_versions = "allow"
third_party/rust/neqo-common/src/hrtime.rs (vendored): 12 changes

@@ -11,9 +11,7 @@ use std::{
 };
 
 #[cfg(windows)]
-use winapi::shared::minwindef::UINT;
-#[cfg(windows)]
-use winapi::um::timeapi::{timeBeginPeriod, timeEndPeriod};
+use windows::Win32::Media::{timeBeginPeriod, timeEndPeriod};
 
 /// A quantized `Duration`. This currently just produces 16 discrete values
 /// corresponding to whole milliseconds. Future implementations might choose
@@ -26,8 +24,8 @@ impl Period {
     const MIN: Self = Self(1);
 
     #[cfg(windows)]
-    fn as_uint(self) -> UINT {
-        UINT::from(self.0)
+    fn as_u32(self) -> u32 {
+        u32::from(self.0)
     }
 
     #[cfg(target_os = "macos")]
@@ -299,7 +297,7 @@ impl Time {
     #[cfg(target_os = "windows")]
     fn start(&self) {
         if let Some(p) = self.active {
-            _ = unsafe { timeBeginPeriod(p.as_uint()) };
+            _ = unsafe { timeBeginPeriod(p.as_u32()) };
         }
     }
 
@@ -310,7 +308,7 @@ impl Time {
     #[cfg(windows)]
     fn stop(&self) {
         if let Some(p) = self.active {
-            _ = unsafe { timeEndPeriod(p.as_uint()) };
+            _ = unsafe { timeEndPeriod(p.as_u32()) };
         }
     }
 
third_party/rust/neqo-common/src/qlog.rs (vendored): 62 changes

@@ -6,9 +6,12 @@
 
 use std::{
     cell::RefCell,
-    fmt,
-    path::{Path, PathBuf},
+    fmt::{self, Display},
+    fs::OpenOptions,
+    io::BufWriter,
+    path::PathBuf,
     rc::Rc,
+    time::SystemTime,
 };
 
 use qlog::{
@@ -29,21 +32,53 @@ pub struct NeqoQlogShared {
 }
 
 impl NeqoQlog {
+    /// Create an enabled `NeqoQlog` configuration backed by a file.
+    ///
+    /// # Errors
+    ///
+    /// Will return `qlog::Error` if it cannot write to the new file.
+    pub fn enabled_with_file(
+        mut qlog_path: PathBuf,
+        role: Role,
+        title: Option<String>,
+        description: Option<String>,
+        file_prefix: impl Display,
+    ) -> Result<Self, qlog::Error> {
+        qlog_path.push(format!("{file_prefix}.sqlog"));
+
+        let file = OpenOptions::new()
+            .write(true)
+            // As a server, the original DCID is chosen by the client. Using
+            // create_new() prevents attackers from overwriting existing logs.
+            .create_new(true)
+            .open(&qlog_path)
+            .map_err(qlog::Error::IoError)?;
+
+        let streamer = QlogStreamer::new(
+            qlog::QLOG_VERSION.to_string(),
+            title,
+            description,
+            None,
+            std::time::Instant::now(),
+            new_trace(role),
+            qlog::events::EventImportance::Base,
+            Box::new(BufWriter::new(file)),
+        );
+        Self::enabled(streamer, qlog_path)
+    }
+
     /// Create an enabled `NeqoQlog` configuration.
     ///
     /// # Errors
     ///
-    /// Will return `qlog::Error` if cannot write to the new log.
-    pub fn enabled(
-        mut streamer: QlogStreamer,
-        qlog_path: impl AsRef<Path>,
-    ) -> Result<Self, qlog::Error> {
+    /// Will return `qlog::Error` if it cannot write to the new log.
+    pub fn enabled(mut streamer: QlogStreamer, qlog_path: PathBuf) -> Result<Self, qlog::Error> {
         streamer.start_log()?;
 
         Ok(Self {
             inner: Rc::new(RefCell::new(Some(NeqoQlogShared {
+                qlog_path,
                 streamer,
-                qlog_path: qlog_path.as_ref().to_owned(),
             }))),
         })
     }
@@ -138,13 +173,10 @@ pub fn new_trace(role: Role) -> qlog::TraceSeq {
         common_fields: Some(CommonFields {
             group_id: None,
             protocol_type: None,
-            reference_time: {
-                // It is better to allow this than deal with a conversion from i64 to f64.
-                // We can't do the obvious two-step conversion with f64::from(i32::try_from(...)),
-                // because that overflows earlier than is ideal. This should be fine for a while.
-                #[allow(clippy::cast_precision_loss)]
-                Some(time::OffsetDateTime::now_utc().unix_timestamp() as f64)
-            },
+            reference_time: SystemTime::now()
+                .duration_since(SystemTime::UNIX_EPOCH)
+                .map(|d| d.as_secs_f64() * 1_000.0)
+                .ok(),
             time_format: Some("relative".to_string()),
         }),
     }
@ -1 +1 @@
|
||||
{"files":{"Cargo.toml":"a4b882fb4d24557b4d365d13d83d46bba448648c834ab5bb488feb369be18188","bindings/bindings.toml":"0e06a03035a90ec5f823b30c8b78ec010a332ae0e5ed0c953da2e4c406451793","bindings/nspr_err.h":"2d5205d017b536c2d838bcf9bc4ec79f96dd50e7bb9b73892328781f1ee6629d","bindings/nspr_error.h":"e41c03c77b8c22046f8618832c9569fbcc7b26d8b9bbc35eea7168f35e346889","bindings/nspr_io.h":"085b289849ef0e77f88512a27b4d9bdc28252bd4d39c6a17303204e46ef45f72","bindings/nspr_time.h":"2e637fd338a5cf0fd3fb0070a47f474a34c2a7f4447f31b6875f5a9928d0a261","bindings/nss_ciphers.h":"95ec6344a607558b3c5ba8510f463b6295f3a2fb3f538a01410531045a5f62d1","bindings/nss_init.h":"ef49045063782fb612aff459172cc6a89340f15005808608ade5320ca9974310","bindings/nss_p11.h":"0b81e64fe6db49b2ecff94edd850be111ef99ec11220e88ceb1c67be90143a78","bindings/nss_secerr.h":"713e8368bdae5159af7893cfa517dabfe5103cede051dee9c9557c850a2defc6","bindings/nss_ssl.h":"af222fb957b989e392e762fa2125c82608a0053aff4fb97e556691646c88c335","bindings/nss_sslerr.h":"24b97f092183d8486f774cdaef5030d0249221c78343570d83a4ee5b594210ae","bindings/nss_sslopt.h":"b7807eb7abdad14db6ad7bc51048a46b065a0ea65a4508c95a12ce90e59d1eea","build.rs":"51cfa35860a4c1a0f16e3fc2e2540b02cd9bdf1598f0ca65b74cf4c02fca5be3","min_version.txt":"94ebbba5fc5de230ca467b7e316e9202e4a86c603b3a629cffd647859f48b730","src/aead.rs":"6410bcbe717a6b9ea6f11209b0888033358113ebc05b8a95cec1980d1360be4d","src/aead_null.rs":"81163fafef59bd2800bd0a078d53d0f05ee114f0e22165717823a5ff1cb908af","src/agent.rs":"607f8a648b2099e81750d3d4076a8ca485c79603011d6b0fb2a515aac400c514","src/agentio.rs":"22e63d5efefbff41113cf002a75bb08f15228cb83e9e2cba65eb6da52dad0264","src/auth.rs":"ced1a18f691894984244088020ea25dc1ee678603317f0c7dfc8b8842fa750b4","src/cert.rs":"8e75e69ec3544474b21f8915a7559463889c2f608b201dee274a8d701880950e","src/constants.rs":"f5c779db128a8b0607841ca18c376971017eb327e102e5e6959a7d8effe4b3a6","src/ech.rs":"75dd192423e8996d9061da5e9c20d30bff5153b9344132eda4fe321c4c141870","src/err.rs":"2366501e0b48933a6a2e1c5b934aa55108c093729c84878b45e1e012e4e45d51","src/exp.rs":"d953873e87430b1c84d4a83c8eb3815041f5585b210bbaf59ae2c4d0057f5edd","src/ext.rs":"cbf7d9f5ecabf4b8c9efd6c334637ab1596ec5266d38ab8d2d6ceae305283deb","src/hkdf.rs":"8745ba761be821c1819cedf6dfd91f8b3148c6718053a4a74f33eb50c7d0cc40","src/hp.rs":"510a4a7f278203aa306ead05608f99397edc3806dc22b0af9e28c665b43ae56c","src/lib.rs":"db01ac68d002055bf12d940442c9b9195cc1331bb779571794eae6dc1223eef6","src/min_version.rs":"c6e1f98b9f56db0622ac38c1be131c55acf4a0f09ed0d6283f4d6308e2d1301a","src/p11.rs":"375397b18fcdf36dcdd22c164c8572dd83caf01b8d0065be3029444b197e1464","src/prio.rs":"5cf0105e78b1db43c65283208174abc3714a41dbb4d5cd80ac547a5a5a7c627c","src/replay.rs":"ad5be8e5d20cde477e7fa734000d880bc36d8288d4689e57332f212f65dde716","src/result.rs":"0587cbb6aace71a7f9765ef7c01dcd9f73a49dcc6331e1d8fe4de2aef6ca65b6","src/secrets.rs":"2c47935c5b8c42363897881eaa0c171e84cf031e57a6e1387b99327080e8dd60","src/selfencrypt.rs":"018c2dacabd3e463fdadd5707715b23c26c261c4c7d86e66c62f0acec986cad9","src/ssl.rs":"59bafcaed7caa66fe448339a1f75ce807ef92fc28247709df4f8058499b0787e","src/time.rs":"ade63a72ae90796d7fcccadbb15efc4594fcdb68913a914a657d4556fde88f62","tests/aead.rs":"e36ae77802df1ea6d17cfd1bd2178a3706089577d6fd1554ca86e748b8b235b9","tests/agent.rs":"cbd0011f1d33281883a45d433228221062424c94e86decade5697731c08a1c52","tests/ext.rs":"57af4e2df211fa8afdb73125d4344ef5c70c1ea4579107c3e6f5746308ee3e7b","tests/handshake.rs":"aa904736d36cc5d5cc0c4f6053b529987f33f944a73411bf08e01d30c48671
86","tests/hkdf.rs":"1d2098dc8398395864baf13e4886cfd1da6d36118727c3b264f457ee3da6b048","tests/hp.rs":"ccda23018dac70b3ff3742afcb0fbae0735be9aeb36644a4ae2b1d7c9126801c","tests/init.rs":"3e15150c4b324c06ca5e8935618e4008da53dc0ef4b69325d150831e87dc0b63","tests/selfencrypt.rs":"8d10840b41629bf449a6b3a551377315e8a05ca26c6b041548748196652c5909"},"package":null}
|
||||
{"files":{"Cargo.toml":"01bffdf3b47044fe1916af7d766e224b535852433c16aae593731baf8baa20c3","bindings/bindings.toml":"0e06a03035a90ec5f823b30c8b78ec010a332ae0e5ed0c953da2e4c406451793","bindings/nspr_err.h":"2d5205d017b536c2d838bcf9bc4ec79f96dd50e7bb9b73892328781f1ee6629d","bindings/nspr_error.h":"e41c03c77b8c22046f8618832c9569fbcc7b26d8b9bbc35eea7168f35e346889","bindings/nspr_io.h":"085b289849ef0e77f88512a27b4d9bdc28252bd4d39c6a17303204e46ef45f72","bindings/nspr_time.h":"2e637fd338a5cf0fd3fb0070a47f474a34c2a7f4447f31b6875f5a9928d0a261","bindings/nss_ciphers.h":"95ec6344a607558b3c5ba8510f463b6295f3a2fb3f538a01410531045a5f62d1","bindings/nss_init.h":"ef49045063782fb612aff459172cc6a89340f15005808608ade5320ca9974310","bindings/nss_p11.h":"0b81e64fe6db49b2ecff94edd850be111ef99ec11220e88ceb1c67be90143a78","bindings/nss_secerr.h":"713e8368bdae5159af7893cfa517dabfe5103cede051dee9c9557c850a2defc6","bindings/nss_ssl.h":"af222fb957b989e392e762fa2125c82608a0053aff4fb97e556691646c88c335","bindings/nss_sslerr.h":"24b97f092183d8486f774cdaef5030d0249221c78343570d83a4ee5b594210ae","bindings/nss_sslopt.h":"b7807eb7abdad14db6ad7bc51048a46b065a0ea65a4508c95a12ce90e59d1eea","build.rs":"3618becbcf1d8d47fe681c13ff9fce070688c67db9d5203e6e8bc038e19a48fc","min_version.txt":"94ebbba5fc5de230ca467b7e316e9202e4a86c603b3a629cffd647859f48b730","src/aead.rs":"6410bcbe717a6b9ea6f11209b0888033358113ebc05b8a95cec1980d1360be4d","src/aead_null.rs":"81163fafef59bd2800bd0a078d53d0f05ee114f0e22165717823a5ff1cb908af","src/agent.rs":"d24f1a3df8300b93a1b606b2089bd758c9aa41c3a9e333089e6165b3449df94f","src/agentio.rs":"22e63d5efefbff41113cf002a75bb08f15228cb83e9e2cba65eb6da52dad0264","src/auth.rs":"ced1a18f691894984244088020ea25dc1ee678603317f0c7dfc8b8842fa750b4","src/cert.rs":"8e75e69ec3544474b21f8915a7559463889c2f608b201dee274a8d701880950e","src/constants.rs":"f5c779db128a8b0607841ca18c376971017eb327e102e5e6959a7d8effe4b3a6","src/ech.rs":"75dd192423e8996d9061da5e9c20d30bff5153b9344132eda4fe321c4c141870","src/err.rs":"2366501e0b48933a6a2e1c5b934aa55108c093729c84878b45e1e012e4e45d51","src/exp.rs":"d953873e87430b1c84d4a83c8eb3815041f5585b210bbaf59ae2c4d0057f5edd","src/ext.rs":"cbf7d9f5ecabf4b8c9efd6c334637ab1596ec5266d38ab8d2d6ceae305283deb","src/hkdf.rs":"8745ba761be821c1819cedf6dfd91f8b3148c6718053a4a74f33eb50c7d0cc40","src/hp.rs":"510a4a7f278203aa306ead05608f99397edc3806dc22b0af9e28c665b43ae56c","src/lib.rs":"c8bd48f9d1d2ebbccfa1224047de3cc47f8bbd0f9fbc4fe073454d0288c66856","src/min_version.rs":"c6e1f98b9f56db0622ac38c1be131c55acf4a0f09ed0d6283f4d6308e2d1301a","src/p11.rs":"375397b18fcdf36dcdd22c164c8572dd83caf01b8d0065be3029444b197e1464","src/prio.rs":"5cf0105e78b1db43c65283208174abc3714a41dbb4d5cd80ac547a5a5a7c627c","src/replay.rs":"5cda39bc8fa8a07c493b761b8dfb5cbc9f669f97a2df7832a028ab366b3426be","src/result.rs":"0587cbb6aace71a7f9765ef7c01dcd9f73a49dcc6331e1d8fe4de2aef6ca65b6","src/secrets.rs":"2c47935c5b8c42363897881eaa0c171e84cf031e57a6e1387b99327080e8dd60","src/selfencrypt.rs":"018c2dacabd3e463fdadd5707715b23c26c261c4c7d86e66c62f0acec986cad9","src/ssl.rs":"59bafcaed7caa66fe448339a1f75ce807ef92fc28247709df4f8058499b0787e","src/time.rs":"ade63a72ae90796d7fcccadbb15efc4594fcdb68913a914a657d4556fde88f62","tests/aead.rs":"e36ae77802df1ea6d17cfd1bd2178a3706089577d6fd1554ca86e748b8b235b9","tests/agent.rs":"cbd0011f1d33281883a45d433228221062424c94e86decade5697731c08a1c52","tests/ext.rs":"57af4e2df211fa8afdb73125d4344ef5c70c1ea4579107c3e6f5746308ee3e7b","tests/handshake.rs":"aa904736d36cc5d5cc0c4f6053b529987f33f944a73411bf08e01d30c48671
86","tests/hkdf.rs":"1d2098dc8398395864baf13e4886cfd1da6d36118727c3b264f457ee3da6b048","tests/hp.rs":"ccda23018dac70b3ff3742afcb0fbae0735be9aeb36644a4ae2b1d7c9126801c","tests/init.rs":"3e15150c4b324c06ca5e8935618e4008da53dc0ef4b69325d150831e87dc0b63","tests/selfencrypt.rs":"8d10840b41629bf449a6b3a551377315e8a05ca26c6b041548748196652c5909"},"package":null}
2
third_party/rust/neqo-crypto/Cargo.toml
vendored
@ -17,7 +17,7 @@ bench = []
edition = "2021"
rust-version = "1.76.0"
name = "neqo-crypto"
version = "0.8.2"
version = "0.9.0"
authors = ["The Neqo Authors <necko@mozilla.com>"]
build = "build.rs"
autobins = false

2
third_party/rust/neqo-crypto/build.rs
vendored
@ -66,7 +66,7 @@ fn is_debug() -> bool {
// Rather than download the 400Mb+ files, like gecko does, let's just reuse their work.
fn setup_clang() {
// If this isn't Windows, or we're in CI, then we don't need to do anything.
if env::consts::OS != "windows" || env::var("GITHUB_WORKFLOW").unwrap() == "CI" {
if env::consts::OS != "windows" || env::var("GITHUB_WORKFLOW").unwrap_or_default() == "CI" {
return;
}
println!("rerun-if-env-changed=LIBCLANG_PATH");

4
third_party/rust/neqo-crypto/src/agent.rs
vendored
@ -117,12 +117,12 @@ pub struct SecretAgentPreInfo {
}

macro_rules! preinfo_arg {
($v:ident, $m:ident, $f:ident: $t:ident $(,)?) => {
($v:ident, $m:ident, $f:ident: $t:ty $(,)?) => {
#[must_use]
pub fn $v(&self) -> Option<$t> {
match self.info.valuesSet & ssl::$m {
0 => None,
_ => Some($t::try_from(self.info.$f).unwrap()),
_ => Some(<$t>::try_from(self.info.$f).unwrap()),
}
}
};

102
third_party/rust/neqo-crypto/src/lib.rs
vendored
@ -91,10 +91,6 @@ impl Drop for NssLoaded {

static INITIALIZED: OnceLock<Res<NssLoaded>> = OnceLock::new();

fn already_initialized() -> bool {
unsafe { nss::NSS_IsInitialized() != 0 }
}

fn version_check() -> Res<()> {
let min_ver = CString::new(MINIMUM_NSS_VERSION)?;
if unsafe { nss::NSS_VersionCheck(min_ver.as_ptr()) } == 0 {
@ -104,36 +100,6 @@ fn version_check() -> Res<()> {
Ok(())
}

/// Initialize NSS. This only executes the initialization routines once, so if there is any chance
/// that
///
/// # Errors
///
/// When NSS initialization fails.
pub fn init() -> Res<()> {
// Set time zero.
time::init();
let res = INITIALIZED.get_or_init(|| {
version_check()?;
if already_initialized() {
return Ok(NssLoaded::External);
}

secstatus_to_res(unsafe { nss::NSS_NoDB_Init(null()) })?;
secstatus_to_res(unsafe { nss::NSS_SetDomesticPolicy() })?;
secstatus_to_res(unsafe {
p11::NSS_SetAlgorithmPolicy(
p11::SECOidTag::SEC_OID_XYBER768D00,
p11::NSS_USE_ALG_IN_SSL_KX,
0,
)
})?;

Ok(NssLoaded::NoDb)
});
res.as_ref().map(|_| ()).map_err(Clone::clone)
}

/// This enables SSLTRACE by calling a simple, harmless function to trigger its
/// side effects. SSLTRACE is not enabled in NSS until a socket is made or
/// global options are accessed. Reading an option is the least impact approach.
@ -145,20 +111,15 @@ fn enable_ssl_trace() -> Res<()> {
secstatus_to_res(unsafe { ssl::SSL_OptionGetDefault(opt, &mut v) })
}

/// Initialize with a database.
///
/// # Errors
///
/// If NSS cannot be initialized.
pub fn init_db<P: Into<PathBuf>>(dir: P) -> Res<()> {
fn init_once(db: Option<PathBuf>) -> Res<NssLoaded> {
// Set time zero.
time::init();
let res = INITIALIZED.get_or_init(|| {
version_check()?;
if already_initialized() {
return Ok(NssLoaded::External);
}
version_check()?;
if unsafe { nss::NSS_IsInitialized() != 0 } {
return Ok(NssLoaded::External);
}

let path = dir.into();
let state = if let Some(path) = db {
if !path.is_dir() {
return Err(Error::InternalError);
}
@ -175,23 +136,48 @@ pub fn init_db<P: Into<PathBuf>>(dir: P) -> Res<()> {
)
})?;

secstatus_to_res(unsafe { nss::NSS_SetDomesticPolicy() })?;
secstatus_to_res(unsafe {
p11::NSS_SetAlgorithmPolicy(
p11::SECOidTag::SEC_OID_XYBER768D00,
p11::NSS_USE_ALG_IN_SSL_KX,
0,
)
})?;
secstatus_to_res(unsafe {
ssl::SSL_ConfigServerSessionIDCache(1024, 0, 0, dircstr.as_ptr())
})?;
NssLoaded::Db
} else {
secstatus_to_res(unsafe { nss::NSS_NoDB_Init(null()) })?;
NssLoaded::NoDb
};

#[cfg(debug_assertions)]
enable_ssl_trace()?;
secstatus_to_res(unsafe { nss::NSS_SetDomesticPolicy() })?;
secstatus_to_res(unsafe {
p11::NSS_SetAlgorithmPolicy(
p11::SECOidTag::SEC_OID_XYBER768D00,
p11::NSS_USE_ALG_IN_SSL_KX,
0,
)
})?;

Ok(NssLoaded::Db)
});
#[cfg(debug_assertions)]
enable_ssl_trace()?;

Ok(state)
}

/// Initialize NSS. This only executes the initialization routines once, so if there is any chance
/// that this is invoked twice, that's OK.
///
/// # Errors
///
/// When NSS initialization fails.
pub fn init() -> Res<()> {
let res = INITIALIZED.get_or_init(|| init_once(None));
res.as_ref().map(|_| ()).map_err(Clone::clone)
}

/// Initialize with a database.
///
/// # Errors
///
/// If NSS cannot be initialized.
pub fn init_db<P: Into<PathBuf>>(dir: P) -> Res<()> {
let res = INITIALIZED.get_or_init(|| init_once(Some(dir.into())));
res.as_ref().map(|_| ()).map_err(Clone::clone)
}

1
third_party/rust/neqo-crypto/src/replay.rs
vendored
@ -40,6 +40,7 @@ scoped_ptr!(
);

/// `AntiReplay` is used by servers when processing 0-RTT handshakes.
///
/// It limits the exposure of servers to replay attack by rejecting 0-RTT
/// if it appears to be a replay. There is a false-positive rate that can be
/// managed by tuning the parameters used to create the context.

@ -1 +1 @@
{"files":{"Cargo.toml":"db789a718ec09df778191371010b6530ac9ff3107454e88ef09300e02505adc9","src/buffered_send_stream.rs":"dfb248c66ea65418b0c7798c2ecaa3ed70ef1af818ef58d53ef742b3445077b7","src/client_events.rs":"77fedca72ce54956eaba3fb7103085d196a631b764662584ea2629224c5c234e","src/conn_params.rs":"7f0df52bceda1923aef2b7c5c64a532f49ea083ea45e3dcd5bd4b03031b89643","src/connection.rs":"0d7b2e529839fe6c6f7bcb6117dc8734f0dc5cce1dfb3e2541c9710488e1b753","src/connection_client.rs":"8d6d1518bee62519911dd2571e97d463d9e05cb13ec55bc1cf6f6712c920972e","src/connection_server.rs":"02fda7595a33c57d0b3ccede51a1e7a8c9073e1ec107ca1b56c56f1728db2318","src/control_stream_local.rs":"20917762c7e7c1112c56abf1cbaf0ad7f0eab97d8db9a3b10ff524315a235670","src/control_stream_remote.rs":"3729f67aa0681b1dbd4147063890f8440f27d82454776500ae964a17cda4d6b5","src/features/extended_connect/mod.rs":"cbeb2294eaf34f08a2c0d0fe4d3473aea9c65df6faaec9dc3ed29dcb577b1c3f","src/features/extended_connect/tests/mod.rs":"fd6aee37243713e80fc526552f21f0222338cec9890409b6575a2a637b17ec1f","src/features/extended_connect/tests/webtransport/datagrams.rs":"51d6f3828c44b438eb1776e8dcce531af520f28bc0d715807d3f53a0eaa071d1","src/features/extended_connect/tests/webtransport/mod.rs":"27f77213414089148e94067bfc54133945a971fd7ddd6936bbfeabb9badc7e67","src/features/extended_connect/tests/webtransport/negotiation.rs":"a22094dbaf0754d39ac8ac08fce1ae34ace108220b696c7d618567df56cddeec","src/features/extended_connect/tests/webtransport/sessions.rs":"cf8aa14087cc3ff42657d86ecacbd51bc182357fdcbd10f57d32784abb415a12","src/features/extended_connect/tests/webtransport/streams.rs":"4c136855292d5ba5169f41c18beea13e7f1e014a0acb13c565c872d3a80d6377","src/features/extended_connect/webtransport_session.rs":"da0b99092d8af8d4f7699c8d45e2e4057f4de38d6fa99e27e3a7feffa569374f","src/features/extended_connect/webtransport_streams.rs":"9855d77705acb7d21566333c4b297816e363be2ade14b8685fd1df4a4861cf74","src/features/mod.rs":"89056df3a868cb0037963c942fc27093cc16d84538ffca2d4759f9a6a6c74c7f","src/frames/hframe.rs":"72349bf4e9dd5c57dc5443bb9aa079887e2742dc08d77ea55567e3b09e0de4d8","src/frames/mod.rs":"0e6d49888d723b2c2c73df11020ceb88d9f062e9d4dc436eb38173e0b772d905","src/frames/reader.rs":"8c7ea836a466410bd3c98848b4852945ae30e1306f73290c401c686998bde16d","src/frames/tests/hframe.rs":"53941fd7656f5e424d499278e6d9ba93ce716f219e86fe6fa08c058ea92f8d7b","src/frames/tests/mod.rs":"c6bbf85fbc6cb9adf6115d315f0564317eefd83ff3177c93050844ad77f6e694","src/frames/tests/reader.rs":"9ee0d9cdd87b98da2b94e577bbcc2bfde6d72be5177bf02364188935f79cb36a","src/frames/tests/wtframe.rs":"c6598d24f5e12972f02de6e1394362671633982db637a07e1c0bb9b56d93ea2a","src/frames/wtframe.rs":"ad6dd63c54a0305c045cd983d5889ae86a5a1afe1e7c13e1c169de9af440759e","src/headers_checks.rs":"69964deb121721be01df7174c177543c161389295ce1450d348369279e312ba4","src/lib.rs":"3fb980eee46bee8dcb97ad9d55014555d8994a7a2d040ca223f2d28fe7d923ef","src/priority.rs":"946307329f31819d969093406ae5448f7923343ccc112221ea6eedf86cf447dc","src/push_controller.rs":"53f72e8043505f85cba0f9c16b4a5ce14d6668b030d773067bc88b2a10bdd25b","src/qlog.rs":"db5f2dd6566d44b4f0541f75266b417b558c09e62141f056885cb8c66478a932","src/qpack_decoder_receiver.rs":"eb06c4be59da567fef70c20daa2c0f165c768131165479a210e69659f168b88f","src/qpack_encoder_receiver.rs":"831f3da9ec17966286786ba3f2c723395a132e65d6a33b4ec341fe7640c1a53d","src/recv_message.rs":"c3acf0544680f88ccd3500e6bea949c1bb43e2fb0a8922edc8f837d0166c89f8","src/request_target.rs":"9720b9f87d66a7c2301bba7de5a5a9300f5476
13a63153a4d35c7a7506a59b31","src/send_message.rs":"be4e9f64db2c25eb7176b84695e608e768115d62e615d389a33d26f7cd5b0c6c","src/server.rs":"8d48376abf36d036f51a84cddcc3d5acd56786b181fba0e24449e1417b030d63","src/server_connection_events.rs":"1396baab265a814045ccfe63d637a4fdc32a667b5eb2925fa4951f5c3078fb20","src/server_events.rs":"02fc8c0711efd758fb1ddee27d257c12ed35e2a989e7bf3de44bd662dc8234e3","src/settings.rs":"d0f8c546e70161422a029a40564b9e9b953fe671c60835196b16f3364779eaf9","src/stream_type_reader.rs":"0bc91ee4c2a516053cd2b55a60f9bd8e62008cde94274e281224cdffe352a907","tests/httpconn.rs":"87c32197258711d916cace23ed850c5bf0198f5e32756c68a32d91206b6e6db8","tests/priority.rs":"364754507873298612ad12e8d1d106d26d993712142d0be4cbf056da5338854c","tests/send_message.rs":"cdf7028eb64f8f3778c3bbb2a10e9482c4e995e9e1813143ccd83ec96b2d4b6a","tests/webtransport.rs":"02b81be0a20252a8bb0796b5287e426c1af5ddaf5a47d68aa9165393cba83c45"},"package":null}
{"files":{"Cargo.toml":"54df05103756645fe2a74baa9aae7ddc59cc782afe3f021f74c05e0e532352b8","src/buffered_send_stream.rs":"dfb248c66ea65418b0c7798c2ecaa3ed70ef1af818ef58d53ef742b3445077b7","src/client_events.rs":"77fedca72ce54956eaba3fb7103085d196a631b764662584ea2629224c5c234e","src/conn_params.rs":"7f0df52bceda1923aef2b7c5c64a532f49ea083ea45e3dcd5bd4b03031b89643","src/connection.rs":"1bf52ac3f3714f5bb2b1237fdb7b026ee4a2183f8f173120661f46213f8c5daa","src/connection_client.rs":"ec979c1ed03002ec2095aab40986089d6c2b0eda541566e0023424a0a896b687","src/connection_server.rs":"cf4da2cdd823e31d2352e45de84d366c45bd3d8adf38c9151a84d808bda80209","src/control_stream_local.rs":"20917762c7e7c1112c56abf1cbaf0ad7f0eab97d8db9a3b10ff524315a235670","src/control_stream_remote.rs":"3729f67aa0681b1dbd4147063890f8440f27d82454776500ae964a17cda4d6b5","src/features/extended_connect/mod.rs":"cbeb2294eaf34f08a2c0d0fe4d3473aea9c65df6faaec9dc3ed29dcb577b1c3f","src/features/extended_connect/tests/mod.rs":"fd6aee37243713e80fc526552f21f0222338cec9890409b6575a2a637b17ec1f","src/features/extended_connect/tests/webtransport/datagrams.rs":"51d6f3828c44b438eb1776e8dcce531af520f28bc0d715807d3f53a0eaa071d1","src/features/extended_connect/tests/webtransport/mod.rs":"27f77213414089148e94067bfc54133945a971fd7ddd6936bbfeabb9badc7e67","src/features/extended_connect/tests/webtransport/negotiation.rs":"a22094dbaf0754d39ac8ac08fce1ae34ace108220b696c7d618567df56cddeec","src/features/extended_connect/tests/webtransport/sessions.rs":"cf8aa14087cc3ff42657d86ecacbd51bc182357fdcbd10f57d32784abb415a12","src/features/extended_connect/tests/webtransport/streams.rs":"4c136855292d5ba5169f41c18beea13e7f1e014a0acb13c565c872d3a80d6377","src/features/extended_connect/webtransport_session.rs":"a55876a7ba1de47950f4209cfaa0e04ddbc54fb4109d0133f8e6e6b150971563","src/features/extended_connect/webtransport_streams.rs":"9855d77705acb7d21566333c4b297816e363be2ade14b8685fd1df4a4861cf74","src/features/mod.rs":"89056df3a868cb0037963c942fc27093cc16d84538ffca2d4759f9a6a6c74c7f","src/frames/hframe.rs":"de2c3d1a9205b0459fe676d7d5e1c0e463d3c1dd9e5f518a07b2e4ebbe66e3ec","src/frames/mod.rs":"0e6d49888d723b2c2c73df11020ceb88d9f062e9d4dc436eb38173e0b772d905","src/frames/reader.rs":"36e113164995fbd202c5024b51230c12fa7134b1759170abfd4fc1b4e7f5a5da","src/frames/tests/hframe.rs":"53941fd7656f5e424d499278e6d9ba93ce716f219e86fe6fa08c058ea92f8d7b","src/frames/tests/mod.rs":"c6bbf85fbc6cb9adf6115d315f0564317eefd83ff3177c93050844ad77f6e694","src/frames/tests/reader.rs":"9ee0d9cdd87b98da2b94e577bbcc2bfde6d72be5177bf02364188935f79cb36a","src/frames/tests/wtframe.rs":"c6598d24f5e12972f02de6e1394362671633982db637a07e1c0bb9b56d93ea2a","src/frames/wtframe.rs":"0f0366e590f7409580459e8a8b86fc48308ca7585837dddd7c319581a9a5a972","src/headers_checks.rs":"69964deb121721be01df7174c177543c161389295ce1450d348369279e312ba4","src/lib.rs":"3fb980eee46bee8dcb97ad9d55014555d8994a7a2d040ca223f2d28fe7d923ef","src/priority.rs":"946307329f31819d969093406ae5448f7923343ccc112221ea6eedf86cf447dc","src/push_controller.rs":"53f72e8043505f85cba0f9c16b4a5ce14d6668b030d773067bc88b2a10bdd25b","src/qlog.rs":"db5f2dd6566d44b4f0541f75266b417b558c09e62141f056885cb8c66478a932","src/qpack_decoder_receiver.rs":"eb06c4be59da567fef70c20daa2c0f165c768131165479a210e69659f168b88f","src/qpack_encoder_receiver.rs":"831f3da9ec17966286786ba3f2c723395a132e65d6a33b4ec341fe7640c1a53d","src/recv_message.rs":"8b2fb49850560b32dcdd7a90933361ef7d61bc42daad3f2952462913d49e8787","src/request_target.rs":"9720b9f87d66a7c2301bba7de5a5a9300f5476
13a63153a4d35c7a7506a59b31","src/send_message.rs":"be4e9f64db2c25eb7176b84695e608e768115d62e615d389a33d26f7cd5b0c6c","src/server.rs":"8d48376abf36d036f51a84cddcc3d5acd56786b181fba0e24449e1417b030d63","src/server_connection_events.rs":"1396baab265a814045ccfe63d637a4fdc32a667b5eb2925fa4951f5c3078fb20","src/server_events.rs":"02fc8c0711efd758fb1ddee27d257c12ed35e2a989e7bf3de44bd662dc8234e3","src/settings.rs":"d0f8c546e70161422a029a40564b9e9b953fe671c60835196b16f3364779eaf9","src/stream_type_reader.rs":"4e79202e7f1415165fe4eb88b9af67cbb8f85a13d68a577249c397fd5a78dbfb","tests/httpconn.rs":"87c32197258711d916cace23ed850c5bf0198f5e32756c68a32d91206b6e6db8","tests/priority.rs":"364754507873298612ad12e8d1d106d26d993712142d0be4cbf056da5338854c","tests/send_message.rs":"cdf7028eb64f8f3778c3bbb2a10e9482c4e995e9e1813143ccd83ec96b2d4b6a","tests/webtransport.rs":"02b81be0a20252a8bb0796b5287e426c1af5ddaf5a47d68aa9165393cba83c45"},"package":null}
2
third_party/rust/neqo-http3/Cargo.toml
vendored
@ -17,7 +17,7 @@ bench = []
edition = "2021"
rust-version = "1.76.0"
name = "neqo-http3"
version = "0.8.2"
version = "0.9.0"
authors = ["The Neqo Authors <necko@mozilla.com>"]
build = false
autobins = false

14
third_party/rust/neqo-http3/src/connection.rs
vendored
@ -533,7 +533,9 @@ impl Http3Connection {
Ok(ReceiveOutput::ControlFrames(rest))
}
ReceiveOutput::NewStream(
NewStreamType::Push(_) | NewStreamType::Http | NewStreamType::WebTransportStream(_),
NewStreamType::Push(_)
| NewStreamType::Http(_)
| NewStreamType::WebTransportStream(_),
) => Ok(output),
ReceiveOutput::NewStream(_) => {
unreachable!("NewStream should have been handled already")
@ -723,7 +725,7 @@ impl Http3Connection {
)),
);
}
NewStreamType::Http => {
NewStreamType::Http(_) => {
qinfo!([self], "A new http stream {}.", stream_id);
}
NewStreamType::WebTransportStream(session_id) => {
@ -755,9 +757,9 @@ impl Http3Connection {
NewStreamType::Control | NewStreamType::Decoder | NewStreamType::Encoder => {
self.stream_receive(conn, stream_id)
}
NewStreamType::Push(_) | NewStreamType::Http | NewStreamType::WebTransportStream(_) => {
Ok(ReceiveOutput::NewStream(stream_type))
}
NewStreamType::Push(_)
| NewStreamType::Http(_)
| NewStreamType::WebTransportStream(_) => Ok(ReceiveOutput::NewStream(stream_type)),
NewStreamType::Unknown => Ok(ReceiveOutput::NoOutput),
}
}
@ -919,7 +921,7 @@ impl Http3Connection {
message_type: MessageType::Response,
stream_type,
stream_id,
header_frame_type_read: false,
first_frame_type: None,
},
Rc::clone(&self.qpack_decoder),
recv_events,

@ -1094,7 +1094,7 @@ impl Http3Client {
ReceiveOutput::NewStream(NewStreamType::Push(push_id)) => {
self.handle_new_push_stream(stream_id, push_id)
}
ReceiveOutput::NewStream(NewStreamType::Http) => Err(Error::HttpStreamCreation),
ReceiveOutput::NewStream(NewStreamType::Http(_)) => Err(Error::HttpStreamCreation),
ReceiveOutput::NewStream(NewStreamType::WebTransportStream(session_id)) => {
self.base_handler.webtransport_create_stream_remote(
StreamId::from(session_id),
@ -1162,7 +1162,7 @@ impl Http3Client {
message_type: MessageType::Response,
stream_type: Http3StreamType::Push,
stream_id,
header_frame_type_read: false,
first_frame_type: None,
},
Rc::clone(&self.base_handler.qpack_decoder),
Box::new(RecvPushEvents::new(push_id, Rc::clone(&self.push_handler))),

@ -318,7 +318,7 @@ impl Http3ServerHandler {
fn handle_stream_readable(&mut self, conn: &mut Connection, stream_id: StreamId) -> Res<()> {
match self.base_handler.handle_stream_readable(conn, stream_id)? {
ReceiveOutput::NewStream(NewStreamType::Push(_)) => Err(Error::HttpStreamCreation),
ReceiveOutput::NewStream(NewStreamType::Http) => {
ReceiveOutput::NewStream(NewStreamType::Http(first_frame_type)) => {
self.base_handler.add_streams(
stream_id,
Box::new(SendMessage::new(
@ -333,7 +333,7 @@ impl Http3ServerHandler {
message_type: MessageType::Request,
stream_type: Http3StreamType::Http,
stream_id,
header_frame_type_read: true,
first_frame_type: Some(first_frame_type),
},
Rc::clone(&self.base_handler.qpack_decoder),
Box::new(self.events.clone()),

@ -70,7 +70,7 @@ impl WebTransportSession {
message_type: MessageType::Response,
stream_type: Http3StreamType::ExtendedConnect,
stream_id: session_id,
header_frame_type_read: false,
first_frame_type: None,
},
qpack_decoder,
Box::new(stream_event_listener.clone()),

44
third_party/rust/neqo-http3/src/frames/hframe.rs
vendored
@ -12,19 +12,31 @@ use neqo_transport::StreamId;

use crate::{frames::reader::FrameDecoder, settings::HSettings, Error, Priority, Res};

pub type HFrameType = u64;
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct HFrameType(pub u64);

pub const H3_FRAME_TYPE_DATA: HFrameType = 0x0;
pub const H3_FRAME_TYPE_HEADERS: HFrameType = 0x1;
pub const H3_FRAME_TYPE_CANCEL_PUSH: HFrameType = 0x3;
pub const H3_FRAME_TYPE_SETTINGS: HFrameType = 0x4;
pub const H3_FRAME_TYPE_PUSH_PROMISE: HFrameType = 0x5;
pub const H3_FRAME_TYPE_GOAWAY: HFrameType = 0x7;
pub const H3_FRAME_TYPE_MAX_PUSH_ID: HFrameType = 0xd;
pub const H3_FRAME_TYPE_PRIORITY_UPDATE_REQUEST: HFrameType = 0xf0700;
pub const H3_FRAME_TYPE_PRIORITY_UPDATE_PUSH: HFrameType = 0xf0701;
pub const H3_FRAME_TYPE_DATA: HFrameType = HFrameType(0x0);
pub const H3_FRAME_TYPE_HEADERS: HFrameType = HFrameType(0x1);
pub const H3_FRAME_TYPE_CANCEL_PUSH: HFrameType = HFrameType(0x3);
pub const H3_FRAME_TYPE_SETTINGS: HFrameType = HFrameType(0x4);
pub const H3_FRAME_TYPE_PUSH_PROMISE: HFrameType = HFrameType(0x5);
pub const H3_FRAME_TYPE_GOAWAY: HFrameType = HFrameType(0x7);
pub const H3_FRAME_TYPE_MAX_PUSH_ID: HFrameType = HFrameType(0xd);
pub const H3_FRAME_TYPE_PRIORITY_UPDATE_REQUEST: HFrameType = HFrameType(0xf0700);
pub const H3_FRAME_TYPE_PRIORITY_UPDATE_PUSH: HFrameType = HFrameType(0xf0701);

pub const H3_RESERVED_FRAME_TYPES: &[HFrameType] = &[0x2, 0x6, 0x8, 0x9];
pub const H3_RESERVED_FRAME_TYPES: &[HFrameType] = &[
HFrameType(0x2),
HFrameType(0x6),
HFrameType(0x8),
HFrameType(0x9),
];

impl From<HFrameType> for u64 {
fn from(t: HFrameType) -> Self {
t.0
}
}

// data for DATA frame is not read into HFrame::Data.
#[derive(PartialEq, Eq, Debug)]
@ -74,7 +86,9 @@ impl HFrame {
Self::MaxPushId { .. } => H3_FRAME_TYPE_MAX_PUSH_ID,
Self::PriorityUpdateRequest { .. } => H3_FRAME_TYPE_PRIORITY_UPDATE_REQUEST,
Self::PriorityUpdatePush { .. } => H3_FRAME_TYPE_PRIORITY_UPDATE_PUSH,
Self::Grease => Decoder::from(&random::<7>()).decode_uint(7).unwrap() * 0x1f + 0x21,
Self::Grease => {
HFrameType(Decoder::from(&random::<7>()).decode_uint(7).unwrap() * 0x1f + 0x21)
}
}
}

@ -143,14 +157,14 @@ impl HFrame {
}

impl FrameDecoder<Self> for HFrame {
fn frame_type_allowed(frame_type: u64) -> Res<()> {
fn frame_type_allowed(frame_type: HFrameType) -> Res<()> {
if H3_RESERVED_FRAME_TYPES.contains(&frame_type) {
return Err(Error::HttpFrameUnexpected);
}
Ok(())
}

fn decode(frame_type: u64, frame_len: u64, data: Option<&[u8]>) -> Res<Option<Self>> {
fn decode(frame_type: HFrameType, frame_len: u64, data: Option<&[u8]>) -> Res<Option<Self>> {
if frame_type == H3_FRAME_TYPE_DATA {
Ok(Some(Self::Data { len: frame_len }))
} else if let Some(payload) = data {
@ -207,7 +221,7 @@ impl FrameDecoder<Self> for HFrame {
}
}

fn is_known_type(frame_type: u64) -> bool {
fn is_known_type(frame_type: HFrameType) -> bool {
matches!(
frame_type,
H3_FRAME_TYPE_DATA

22
third_party/rust/neqo-http3/src/frames/reader.rs
vendored
@ -14,23 +14,25 @@ use neqo_common::{
};
use neqo_transport::{Connection, StreamId};

use super::hframe::HFrameType;
use crate::{Error, RecvStream, Res};

const MAX_READ_SIZE: usize = 4096;

pub trait FrameDecoder<T> {
fn is_known_type(frame_type: u64) -> bool;
fn is_known_type(frame_type: HFrameType) -> bool;

/// # Errors
///
/// Returns `HttpFrameUnexpected` if frames is not alowed, i.e. is a `H3_RESERVED_FRAME_TYPES`.
fn frame_type_allowed(_frame_type: u64) -> Res<()> {
fn frame_type_allowed(_frame_type: HFrameType) -> Res<()> {
Ok(())
}

/// # Errors
///
/// If a frame cannot be properly decoded.
fn decode(frame_type: u64, frame_len: u64, data: Option<&[u8]>) -> Res<Option<T>>;
fn decode(frame_type: HFrameType, frame_len: u64, data: Option<&[u8]>) -> Res<Option<T>>;
}

pub trait StreamReader {
@ -95,7 +97,7 @@ enum FrameReaderState {
#[derive(Debug)]
pub struct FrameReader {
state: FrameReaderState,
frame_type: u64,
frame_type: HFrameType,
frame_len: u64,
}

@ -112,13 +114,13 @@ impl FrameReader {
state: FrameReaderState::GetType {
decoder: IncrementalDecoderUint::default(),
},
frame_type: 0,
frame_type: HFrameType(u64::MAX),
frame_len: 0,
}
}

#[must_use]
pub fn new_with_type(frame_type: u64) -> Self {
pub fn new_with_type(frame_type: HFrameType) -> Self {
Self {
state: FrameReaderState::GetLength {
decoder: IncrementalDecoderUint::default(),
@ -202,13 +204,13 @@ impl FrameReader {
FrameReaderState::GetType { decoder } => {
if let Some(v) = decoder.consume(&mut input) {
qtrace!("FrameReader::receive: read frame type {}", v);
self.frame_type_decoded::<T>(v)?;
self.frame_type_decoded::<T>(HFrameType(v))?;
}
}
FrameReaderState::GetLength { decoder } => {
if let Some(len) = decoder.consume(&mut input) {
qtrace!(
"FrameReader::receive: frame type {} length {}",
"FrameReader::receive: frame type {:?} length {}",
self.frame_type,
len
);
@ -218,7 +220,7 @@ impl FrameReader {
FrameReaderState::GetData { decoder } => {
if let Some(data) = decoder.consume(&mut input) {
qtrace!(
"received frame {}: {}",
"received frame {:?}: {}",
self.frame_type,
hex_with_len(&data[..])
);
@ -236,7 +238,7 @@ impl FrameReader {
}

impl FrameReader {
fn frame_type_decoded<T: FrameDecoder<T>>(&mut self, frame_type: u64) -> Res<()> {
fn frame_type_decoded<T: FrameDecoder<T>>(&mut self, frame_type: HFrameType) -> Res<()> {
T::frame_type_allowed(frame_type)?;
self.frame_type = frame_type;
self.state = FrameReaderState::GetLength {

@ -6,6 +6,7 @@

use neqo_common::{Decoder, Encoder};

use super::hframe::HFrameType;
use crate::{frames::reader::FrameDecoder, Error, Res};

pub type WebTransportFrameType = u64;
@ -29,10 +30,10 @@ impl WebTransportFrame {
}

impl FrameDecoder<Self> for WebTransportFrame {
fn decode(frame_type: u64, frame_len: u64, data: Option<&[u8]>) -> Res<Option<Self>> {
fn decode(frame_type: HFrameType, frame_len: u64, data: Option<&[u8]>) -> Res<Option<Self>> {
if let Some(payload) = data {
let mut dec = Decoder::from(payload);
if frame_type == WT_FRAME_CLOSE_SESSION {
if frame_type == HFrameType(WT_FRAME_CLOSE_SESSION) {
if frame_len > WT_FRAME_CLOSE_MAX_MESSAGE_SIZE + 4 {
return Err(Error::HttpMessageError);
}
@ -50,7 +51,7 @@ impl FrameDecoder<Self> for WebTransportFrame {
}
}

fn is_known_type(frame_type: u64) -> bool {
frame_type == WT_FRAME_CLOSE_SESSION
fn is_known_type(frame_type: HFrameType) -> bool {
frame_type == HFrameType(WT_FRAME_CLOSE_SESSION)
}
}

14
third_party/rust/neqo-http3/src/recv_message.rs
vendored
@ -11,7 +11,7 @@ use neqo_qpack::decoder::QPackDecoder;
use neqo_transport::{Connection, StreamId};

use crate::{
frames::{FrameReader, HFrame, StreamReaderConnectionWrapper, H3_FRAME_TYPE_HEADERS},
frames::{hframe::HFrameType, FrameReader, HFrame, StreamReaderConnectionWrapper},
headers_checks::{headers_valid, is_interim},
priority::PriorityHandler,
push_controller::PushController,
@ -24,7 +24,7 @@ pub struct RecvMessageInfo {
pub message_type: MessageType,
pub stream_type: Http3StreamType,
pub stream_id: StreamId,
pub header_frame_type_read: bool,
pub first_frame_type: Option<u64>,
}

/*
@ -94,11 +94,11 @@ impl RecvMessage {
) -> Self {
Self {
state: RecvMessageState::WaitingForResponseHeaders {
frame_reader: if message_info.header_frame_type_read {
FrameReader::new_with_type(H3_FRAME_TYPE_HEADERS)
} else {
FrameReader::new()
},
frame_reader: message_info
.first_frame_type
.map_or_else(FrameReader::new, |frame_type| {
FrameReader::new_with_type(HFrameType(frame_type))
}),
},
message_type: message_info.message_type,
stream_type: message_info.stream_type,

@ -9,8 +9,9 @@ use neqo_qpack::{decoder::QPACK_UNI_STREAM_TYPE_DECODER, encoder::QPACK_UNI_STRE
use neqo_transport::{Connection, StreamId, StreamType};

use crate::{
control_stream_local::HTTP3_UNI_STREAM_TYPE_CONTROL, frames::H3_FRAME_TYPE_HEADERS, CloseType,
Error, Http3StreamType, ReceiveOutput, RecvStream, Res, Stream,
control_stream_local::HTTP3_UNI_STREAM_TYPE_CONTROL,
frames::{hframe::HFrameType, reader::FrameDecoder, HFrame, H3_FRAME_TYPE_HEADERS},
CloseType, Error, Http3StreamType, ReceiveOutput, RecvStream, Res, Stream,
};

pub const HTTP3_UNI_STREAM_TYPE_PUSH: u64 = 0x1;
@ -24,7 +25,7 @@ pub enum NewStreamType {
Encoder,
Push(u64),
WebTransportStream(u64),
Http,
Http(u64),
Unknown,
}

@ -37,7 +38,7 @@ impl NewStreamType {
///
/// Push streams received by the server are not allowed and this function will return
/// `HttpStreamCreation` error.
const fn final_stream_type(
fn final_stream_type(
stream_type: u64,
trans_stream_type: StreamType,
role: Role,
@ -49,8 +50,18 @@ impl NewStreamType {
(HTTP3_UNI_STREAM_TYPE_PUSH, StreamType::UniDi, Role::Client)
| (WEBTRANSPORT_UNI_STREAM, StreamType::UniDi, _)
| (WEBTRANSPORT_STREAM, StreamType::BiDi, _) => Ok(None),
(H3_FRAME_TYPE_HEADERS, StreamType::BiDi, Role::Server) => Ok(Some(Self::Http)),
(_, StreamType::BiDi, Role::Server) => Err(Error::HttpFrame),
(_, StreamType::BiDi, Role::Server) => {
// The "stream_type" for a bidirectional stream is a frame type. We accept
// WEBTRANSPORT_STREAM (above), and HEADERS, and we have to ignore unknown types,
// but any other frame type is bad if we know about it.
if <HFrame as FrameDecoder<HFrame>>::is_known_type(HFrameType(stream_type))
&& HFrameType(stream_type) != H3_FRAME_TYPE_HEADERS
{
Err(Error::HttpFrame)
} else {
Ok(Some(Self::Http(stream_type)))
}
}
(HTTP3_UNI_STREAM_TYPE_PUSH, StreamType::UniDi, Role::Server)
| (_, StreamType::BiDi, Role::Client) => Err(Error::HttpStreamCreation),
_ => Ok(Some(Self::Unknown)),
@ -190,7 +201,7 @@ impl NewStreamHeadReader {
Err(Error::HttpClosedCriticalStream)
}
None => Err(Error::HttpStreamCreation),
Some(NewStreamType::Http) => Err(Error::HttpFrame),
Some(NewStreamType::Http(_)) => Err(Error::HttpFrame),
Some(NewStreamType::Unknown) => Ok(decoded),
Some(NewStreamType::Push(_) | NewStreamType::WebTransportStream(_)) => {
unreachable!("PushStream and WebTransport are mapped to None at this stage.")
@ -216,9 +227,9 @@ impl RecvStream for NewStreamHeadReader {
}

fn receive(&mut self, conn: &mut Connection) -> Res<(ReceiveOutput, bool)> {
let t = self.get_type(conn)?;
Ok((
self.get_type(conn)?
.map_or(ReceiveOutput::NoOutput, ReceiveOutput::NewStream),
t.map_or(ReceiveOutput::NoOutput, ReceiveOutput::NewStream),
self.done(),
))
}
@ -240,7 +251,8 @@ mod tests {
WEBTRANSPORT_UNI_STREAM,
};
use crate::{
control_stream_local::HTTP3_UNI_STREAM_TYPE_CONTROL, frames::H3_FRAME_TYPE_HEADERS,
control_stream_local::HTTP3_UNI_STREAM_TYPE_CONTROL,
frames::{H3_FRAME_TYPE_HEADERS, H3_FRAME_TYPE_SETTINGS},
CloseType, Error, NewStreamType, ReceiveOutput, RecvStream, Res,
};

@ -384,16 +396,20 @@ mod tests {
fn decode_stream_http() {
let mut t = Test::new(StreamType::BiDi, Role::Server);
t.decode(
&[H3_FRAME_TYPE_HEADERS],
&[u64::from(H3_FRAME_TYPE_HEADERS)],
false,
&Ok((ReceiveOutput::NewStream(NewStreamType::Http), true)),
&Ok((
ReceiveOutput::NewStream(NewStreamType::Http(u64::from(H3_FRAME_TYPE_HEADERS))),
true,
)),
true,
);

let mut t = Test::new(StreamType::UniDi, Role::Server);
t.decode(
&[H3_FRAME_TYPE_HEADERS], /* this is the same as a HTTP3_UNI_STREAM_TYPE_PUSH which
* is not aallowed on the server side. */
&[u64::from(H3_FRAME_TYPE_HEADERS)], /* this is the same as a
* HTTP3_UNI_STREAM_TYPE_PUSH which
* is not aallowed on the server side. */
false,
&Err(Error::HttpStreamCreation),
true,
@ -401,7 +417,7 @@ mod tests {

let mut t = Test::new(StreamType::BiDi, Role::Client);
t.decode(
&[H3_FRAME_TYPE_HEADERS],
&[u64::from(H3_FRAME_TYPE_HEADERS)],
false,
&Err(Error::HttpStreamCreation),
true,
@ -409,8 +425,8 @@ mod tests {

let mut t = Test::new(StreamType::UniDi, Role::Client);
t.decode(
&[H3_FRAME_TYPE_HEADERS, 0xaaaa_aaaa], /* this is the same as a
* HTTP3_UNI_STREAM_TYPE_PUSH */
&[u64::from(H3_FRAME_TYPE_HEADERS), 0xaaaa_aaaa], /* this is the same as a
* HTTP3_UNI_STREAM_TYPE_PUSH */
false,
&Ok((
ReceiveOutput::NewStream(NewStreamType::Push(0xaaaa_aaaa)),
@ -418,6 +434,14 @@ mod tests {
)),
true,
);

let mut t = Test::new(StreamType::BiDi, Role::Server);
t.decode(
&[H3_FRAME_TYPE_SETTINGS.into()],
true,
&Err(Error::HttpFrame),
true,
);
}

#[test]
@ -478,7 +502,8 @@ mod tests {
t.decode(
&[WEBTRANSPORT_UNI_STREAM],
false,
&Err(Error::HttpFrame),
// WEBTRANSPORT_UNI_STREAM is treated as an unknown frame type here.
&Ok((ReceiveOutput::NewStream(NewStreamType::Http(84)), true)),
true,
);

@ -1 +1 @@
{"files":{"Cargo.toml":"65733e28fe0e6be1fbffa77fea4ed32f38ffab469763a577434e003d05c74786","src/decoder.rs":"ed2d6fa29e8726429aabb84e65f5d8025b320c0219b442b47c38903728ba3b2d","src/decoder_instructions.rs":"7e23ad00bcc6a1f0ee9af6c3d7f5ec5fcf11e9bc6cd895e125e3392c34b309e0","src/encoder.rs":"ebc9e82e5ad6b31be46ab876965d0e9dc710c4c5db084a631f384185b56cab36","src/encoder_instructions.rs":"5afc60ecc5b65f5b1908cff7eb3b7394c5c36cebe8ebfcdefbf792c827799390","src/header_block.rs":"1ea71fe2f588a0f96e39fd3a3157c66cc0ed2794f14c6f01b4a3069a43f7997b","src/huffman.rs":"6976f1b4d3e5ef849a6b080cfb2e8804bf01cfe3b9bd9e3994a319d5405cd8f3","src/huffman_decode_helper.rs":"9ce470e318b3664f58aa109bed483ab15bfd9e0b17d261ea2b609668a42a9d80","src/huffman_table.rs":"06fea766a6276ac56c7ee0326faed800a742c15fda1f33bf2513e6cc6a5e6d27","src/lib.rs":"f9bad0fe7643c618d034c4941ebd30ad5f6015b8b87b484b0ea79681d13d8b49","src/prefix.rs":"d9ad12838d61b38dc2300948e3da01fd65371215edde1c370cf54ccd87d64d46","src/qlog.rs":"fbd96ef7d21db2bae19b8e379995544e8cf123e8e5129c1500ace2773acf5649","src/qpack_send_buf.rs":"48f8d0e011e0fb8e4bd0774279d3465e2be01fd9480eaf374ae2adada6be430d","src/reader.rs":"c23214ba190c7a59e416eaffac612ff8c2043c3a84e884fb10ae3bc112d884a5","src/static_table.rs":"6e5ec26e2b6bd63375d2d77e72748151d430d1629a8e497ec0d0ea21c078524a","src/stats.rs":"624dfa3b40858c304097bb0ce5b1be1bb4d7916b1abfc222f1aa705907009730","src/table.rs":"2d2c9e6070a1e90048a4ad7c8279f9e1ce7615b44d7d8145fb0f140e554f5ca2"},"package":null}
{"files":{"Cargo.toml":"b7832ef93c7463abc8cf9a8eab836fea5c79502cd23a037bceee8805704571ca","src/decoder.rs":"ed2d6fa29e8726429aabb84e65f5d8025b320c0219b442b47c38903728ba3b2d","src/decoder_instructions.rs":"7e23ad00bcc6a1f0ee9af6c3d7f5ec5fcf11e9bc6cd895e125e3392c34b309e0","src/encoder.rs":"ebc9e82e5ad6b31be46ab876965d0e9dc710c4c5db084a631f384185b56cab36","src/encoder_instructions.rs":"5afc60ecc5b65f5b1908cff7eb3b7394c5c36cebe8ebfcdefbf792c827799390","src/header_block.rs":"1ea71fe2f588a0f96e39fd3a3157c66cc0ed2794f14c6f01b4a3069a43f7997b","src/huffman.rs":"6976f1b4d3e5ef849a6b080cfb2e8804bf01cfe3b9bd9e3994a319d5405cd8f3","src/huffman_decode_helper.rs":"9ce470e318b3664f58aa109bed483ab15bfd9e0b17d261ea2b609668a42a9d80","src/huffman_table.rs":"06fea766a6276ac56c7ee0326faed800a742c15fda1f33bf2513e6cc6a5e6d27","src/lib.rs":"f9bad0fe7643c618d034c4941ebd30ad5f6015b8b87b484b0ea79681d13d8b49","src/prefix.rs":"d9ad12838d61b38dc2300948e3da01fd65371215edde1c370cf54ccd87d64d46","src/qlog.rs":"fbd96ef7d21db2bae19b8e379995544e8cf123e8e5129c1500ace2773acf5649","src/qpack_send_buf.rs":"48f8d0e011e0fb8e4bd0774279d3465e2be01fd9480eaf374ae2adada6be430d","src/reader.rs":"c23214ba190c7a59e416eaffac612ff8c2043c3a84e884fb10ae3bc112d884a5","src/static_table.rs":"6e5ec26e2b6bd63375d2d77e72748151d430d1629a8e497ec0d0ea21c078524a","src/stats.rs":"624dfa3b40858c304097bb0ce5b1be1bb4d7916b1abfc222f1aa705907009730","src/table.rs":"2d2c9e6070a1e90048a4ad7c8279f9e1ce7615b44d7d8145fb0f140e554f5ca2"},"package":null}
2
third_party/rust/neqo-qpack/Cargo.toml
vendored
@ -18,7 +18,7 @@ bench = []
edition = "2021"
rust-version = "1.76.0"
name = "neqo-qpack"
version = "0.8.2"
version = "0.9.0"
authors = ["The Neqo Authors <necko@mozilla.com>"]
build = false
autobins = false

File diff suppressed because one or more lines are too long

2
third_party/rust/neqo-transport/Cargo.toml
vendored
@ -16,7 +16,7 @@ example = []
edition = "2021"
rust-version = "1.76.0"
name = "neqo-transport"
version = "0.8.2"
version = "0.9.0"
authors = ["The Neqo Authors <necko@mozilla.com>"]
build = "build.rs"
autobins = false
@ -190,7 +190,7 @@ impl<T: WindowAdjustment> CongestionControl for ClassicCongestionControl<T> {
let mut is_app_limited = true;
let mut new_acked = 0;
for pkt in acked_pkts {
qdebug!(
qtrace!(
"packet_acked this={:p}, pn={}, ps={}, ignored={}, lost={}, rtt_est={:?}",
self,
pkt.pn(),
@ -966,6 +966,11 @@ impl Connection {
return;
}

if self.state.closing() {
qtrace!([self], "Closing, not processing other timers");
return;
}

self.streams.cleanup_closed_streams();

let res = self.crypto.states.check_key_update(now);
@ -981,7 +986,10 @@ impl Connection {
self.create_resumption_token(now);
}

if !self.paths.process_timeout(now, pto) {
if !self
.paths
.process_timeout(now, pto, &mut self.stats.borrow_mut())
{
qinfo!([self], "last available path failed");
self.absorb_error::<Error>(now, Err(Error::NoAvailablePath));
}
@ -1455,7 +1463,9 @@ impl Connection {
) {
let space = PacketNumberSpace::from(packet.packet_type());
if let Some(space) = self.acks.get_mut(space) {
*space.ecn_marks() += d.tos().into();
let space_ecn_marks = space.ecn_marks();
*space_ecn_marks += d.tos().into();
self.stats.borrow_mut().ecn_rx = *space_ecn_marks;
} else {
qtrace!("Not tracking ECN for dropped packet number space");
}
@ -1547,17 +1557,29 @@ impl Connection {

qlog::packet_received(&self.qlog, &packet, &payload);
let space = PacketNumberSpace::from(payload.packet_type());
if self.acks.get_mut(space).unwrap().is_duplicate(payload.pn()) {
qdebug!([self], "Duplicate packet {}-{}", space, payload.pn());
self.stats.borrow_mut().dups_rx += 1;
} else {
match self.process_packet(path, &payload, now) {
Ok(migrate) => self.postprocess_packet(path, d, &packet, migrate, now),
Err(e) => {
self.ensure_error_path(path, &packet, now);
return Err(e);
if let Some(space) = self.acks.get_mut(space) {
if space.is_duplicate(payload.pn()) {
qdebug!("Duplicate packet {}-{}", space, payload.pn());
self.stats.borrow_mut().dups_rx += 1;
} else {
match self.process_packet(path, &payload, now) {
Ok(migrate) => {
self.postprocess_packet(path, d, &packet, migrate, now);
}
Err(e) => {
self.ensure_error_path(path, &packet, now);
return Err(e);
}
}
}
} else {
qdebug!(
[self],
"Received packet {} for untracked space {}",
space,
payload.pn()
);
return Err(Error::ProtocolViolation);
}
}
Err(e) => {
@ -1684,7 +1706,11 @@ impl Connection {
self.paths.make_permanent(path, None, cid);
Ok(())
} else if let Some(primary) = self.paths.primary() {
if primary.borrow().remote_cid().is_empty() {
if primary
.borrow()
.remote_cid()
.map_or(true, |id| id.is_empty())
{
self.paths
.make_permanent(path, None, ConnectionIdEntry::empty_remote());
Ok(())
@ -1729,12 +1755,12 @@ impl Connection {
// Make a path on which to run the handshake.
self.setup_handshake_path(path, now);

self.zero_rtt_state = match self.crypto.enable_0rtt(self.version, self.role) {
Ok(true) => {
qdebug!([self], "Accepted 0-RTT");
ZeroRttState::AcceptedServer
}
_ => ZeroRttState::Rejected,
self.zero_rtt_state = if self.crypto.enable_0rtt(self.version, self.role) == Ok(true) {
qdebug!([self], "Accepted 0-RTT");
ZeroRttState::AcceptedServer
} else {
qtrace!([self], "Rejected 0-RTT");
ZeroRttState::Rejected
};

// The server knows the final version if it has remote transport parameters.
@ -1817,7 +1843,10 @@ impl Connection {
path.borrow(),
if force { "now" } else { "after" }
);
if self.paths.migrate(&path, force, now) {
if self
.paths
.migrate(&path, force, now, &mut self.stats.borrow_mut())
{
self.loss_recovery.migrate();
}
Ok(())
@ -1878,7 +1907,8 @@ impl Connection {
}

if self.ensure_permanent(path).is_ok() {
self.paths.handle_migration(path, d.source(), now);
self.paths
.handle_migration(path, d.source(), now, &mut self.stats.borrow_mut());
} else {
qinfo!(
[self],
@ -1913,7 +1943,7 @@ impl Connection {
// a packet on a new path, we avoid sending (and the privacy risk) rather
// than reuse a connection ID.
let res = if path.borrow().is_temporary() {
assert!(!cfg!(test), "attempting to close with a temporary path");
qerror!([self], "Attempting to close with a temporary path");
Err(Error::InternalError)
} else {
self.output_path(&path, now, &Some(details))
@ -1937,16 +1967,15 @@ impl Connection {
) -> (PacketType, PacketBuilder) {
let pt = PacketType::from(cspace);
let mut builder = if pt == PacketType::Short {
qdebug!("Building Short dcid {}", path.remote_cid());
qdebug!("Building Short dcid {:?}", path.remote_cid());
PacketBuilder::short(encoder, tx.key_phase(), path.remote_cid())
} else {
qdebug!(
"Building {:?} dcid {} scid {}",
"Building {:?} dcid {:?} scid {:?}",
pt,
path.remote_cid(),
path.local_cid(),
);

PacketBuilder::long(encoder, pt, version, path.remote_cid(), path.local_cid())
};
if builder.remaining() > 0 {
@ -2220,7 +2249,7 @@ impl Connection {
// Include an ACK frame with the CONNECTION_CLOSE.
let limit = builder.limit();
builder.set_limit(limit - ClosingFrame::MIN_LENGTH);
self.acks.immediate_ack(now);
self.acks.immediate_ack(space, now);
self.acks.write_frame(
space,
now,
@ -2407,7 +2436,10 @@ impl Connection {
self.loss_recovery.on_packet_sent(path, initial);
}
path.borrow_mut().add_sent(packets.len());
Ok(SendOption::Yes(path.borrow_mut().datagram(packets)))
Ok(SendOption::Yes(
path.borrow_mut()
.datagram(packets, &mut self.stats.borrow_mut()),
))
}
}

@ -2783,10 +2815,8 @@ impl Connection {
// prepare to resend them.
self.stats.borrow_mut().frame_rx.ping += 1;
self.crypto.resend_unacked(space);
if space == PacketNumberSpace::ApplicationData {
// Send an ACK immediately if we might not otherwise do so.
self.acks.immediate_ack(now);
}
// Send an ACK immediately if we might not otherwise do so.
self.acks.immediate_ack(space, now);
}
Frame::Ack {
largest_acknowledged,
@ -2864,7 +2894,10 @@ impl Connection {
}
Frame::PathResponse { data } => {
self.stats.borrow_mut().frame_rx.path_response += 1;
if self.paths.path_response(data, now) {
if self
.paths
.path_response(data, now, &mut self.stats.borrow_mut())
{
// This PATH_RESPONSE enabled migration; tell loss recovery.
self.loss_recovery.migrate();
}
@ -2945,7 +2978,12 @@ impl Connection {
for token in lost.tokens() {
qdebug!([self], "Lost: {:?}", token);
match token {
RecoveryToken::Ack(_) => {}
RecoveryToken::Ack(ack_token) => {
// If we lost an ACK frame during the handshake, send another one.
if ack_token.space() != PacketNumberSpace::ApplicationData {
self.acks.immediate_ack(ack_token.space(), lost.time_sent());
}
}
RecoveryToken::Crypto(ct) => self.crypto.lost(ct),
RecoveryToken::HandshakeDone => self.state_signaling.handshake_done(),
RecoveryToken::NewToken(seqno) => self.new_token.lost(*seqno),

@ -76,6 +76,11 @@ impl State {
None
}
}

#[must_use]
pub const fn closing(&self) -> bool {
matches!(self, Self::Closing { .. } | Self::Draining { .. })
}
}

// Implement `PartialOrd` so that we can enforce monotonic state progression.

@ -599,7 +599,7 @@ fn datagram_fill() {
let path = p.borrow();
// Minimum overhead is connection ID length, 1 byte short header, 1 byte packet number,
// 1 byte for the DATAGRAM frame type, and 16 bytes for the AEAD.
path.plpmtu() - path.remote_cid().len() - 19
path.plpmtu() - path.remote_cid().unwrap().len() - 19
};
assert!(space >= 64); // Unlikely, but this test depends on the datagram being this large.

@ -12,13 +12,14 @@ use test_fixture::{
fixture_init, now, DEFAULT_ADDR_V4,
};

use super::{send_something_with_modifier, DEFAULT_RTT};
use crate::{
connection::tests::{
connect_force_idle, connect_force_idle_with_modifier, default_client, default_server,
handshake_with_modifier, migration::get_cid, new_client, new_server, send_something,
handshake_with_modifier, migration::get_cid, new_client, new_server, send_and_receive,
send_something, send_something_with_modifier, send_with_modifier_and_receive, DEFAULT_RTT,
},
ecn::ECN_TEST_COUNT,
path::MAX_PATH_PROBES,
ConnectionId, ConnectionParameters, StreamType,
};

@ -91,6 +92,79 @@ fn handshake_delay_with_ecn_blackhole() {
);
}

#[test]
fn migration_delay_to_ecn_blackhole() {
let mut now = now();
let mut client = default_client();
let mut server = default_server();

// Do a handshake.
connect_force_idle(&mut client, &mut server);

// Migrate the client.
client
.migrate(Some(DEFAULT_ADDR_V4), Some(DEFAULT_ADDR_V4), false, now)
.unwrap();

// The client should send MAX_PATH_PROBES path challenges with ECN enabled, and then another
// MAX_PATH_PROBES without ECN.
let mut probes = 0;
while probes < MAX_PATH_PROBES * 2 {
match client.process_output(now) {
crate::Output::Callback(t) => {
now += t;
}
crate::Output::Datagram(d) => {
// The new path is IPv4.
if d.source().is_ipv4() {
// This should be a PATH_CHALLENGE.
probes += 1;
assert_eq!(client.stats().frame_tx.path_challenge, probes);
if probes <= MAX_PATH_PROBES {
// The first probes should be sent with ECN.
assert_ecn_enabled(d.tos());
} else {
// The next probes should be sent without ECN.
assert_ecn_disabled(d.tos());
}
}
}
crate::Output::None => panic!("unexpected output"),
}
}
}

#[test]
fn stats() {
let now = now();
let mut client = default_client();
let mut server = default_server();
connect_force_idle(&mut client, &mut server);

for _ in 0..ECN_TEST_COUNT {
let ack = send_and_receive(&mut client, &mut server, now);
client.process_input(&ack.unwrap(), now);
}

for _ in 0..ECN_TEST_COUNT {
let ack = send_and_receive(&mut server, &mut client, now);
server.process_input(&ack.unwrap(), now);
}

for stats in [client.stats(), server.stats()] {
assert_eq!(stats.ecn_paths_capable, 1);
assert_eq!(stats.ecn_paths_not_capable, 0);

for codepoint in [IpTosEcn::Ect1, IpTosEcn::Ce] {
assert_eq!(stats.ecn_tx[codepoint], 0);
assert_eq!(stats.ecn_rx[codepoint], 0);
}
}

assert!(client.stats().ecn_tx[IpTosEcn::Ect0] <= server.stats().ecn_rx[IpTosEcn::Ect0]);
assert!(server.stats().ecn_tx[IpTosEcn::Ect0] <= client.stats().ecn_rx[IpTosEcn::Ect0]);
}

#[test]
fn disables_on_loss() {
let now = now();
@ -111,6 +185,24 @@ fn disables_on_loss() {
assert_ecn_disabled(client_pkt.tos());
}

#[test]
fn disables_on_remark() {
let now = now();
let mut client = default_client();
let mut server = default_server();
connect_force_idle(&mut client, &mut server);

for _ in 0..ECN_TEST_COUNT {
if let Some(ack) = send_with_modifier_and_receive(&mut client, &mut server, now, remark()) {
client.process_input(&ack, now);
}
}

// ECN should now be disabled.
let client_pkt = send_something(&mut client, now);
assert_ecn_disabled(client_pkt.tos());
}

/// This function performs a handshake over a path that modifies packets via `orig_path_modifier`.
/// It then sends `burst` packets on that path, and then migrates to a new path that
/// modifies packets via `new_path_modifier`. It sends `burst` packets on the new path.

@ -35,6 +35,7 @@ use crate::{
},
events::ConnectionEvent,
server::ValidateAddress,
stats::FrameStats,
tparams::{TransportParameter, MIN_ACK_DELAY},
tracking::DEFAULT_ACK_DELAY,
CloseReason, ConnectionParameters, EmptyConnectionIdGenerator, Error, Pmtud, StreamType,
@ -1194,3 +1195,62 @@ fn emit_authentication_needed_once() {
_ = client.process(server2.as_dgram_ref(), now());
assert_eq!(0, authentication_needed_count(&mut client));
}

#[test]
fn client_initial_retransmits_identical() {
let mut now = now();
let mut client = default_client();

// Force the client to retransmit its Initial packet a number of times and make sure the
// retranmissions are identical to the original. Also, verify the PTO durations.
for i in 1..=5 {
let ci = client.process(None, now).dgram().unwrap();
assert_eq!(ci.len(), client.plpmtu());
assert_eq!(
client.stats().frame_tx,
FrameStats {
crypto: i,
all: i,
..Default::default()
}
);
let pto = client.process(None, now).callback();
assert_eq!(pto, DEFAULT_RTT * 3 * (1 << (i - 1)));
now += pto;
}
}

#[test]
fn server_initial_retransmits_identical() {
let mut now = now();
let mut client = default_client();
let mut ci = client.process(None, now).dgram();

// Force the server to retransmit its Initial packet a number of times and make sure the
// retranmissions are identical to the original. Also, verify the PTO durations.
let mut server = default_server();
let mut total_ptos: Duration = Duration::from_secs(0);
for i in 1..=3 {
let si = server.process(ci.take().as_ref(), now).dgram().unwrap();
assert_eq!(si.len(), server.plpmtu());
assert_eq!(
server.stats().frame_tx,
FrameStats {
crypto: i * 2,
ack: i,
all: i * 3,
..Default::default()
}
);

let pto = server.process(None, now).callback();
if i < 3 {
assert_eq!(pto, DEFAULT_RTT * 3 * (1 << (i - 1)));
} else {
// Server is amplification-limited after three (re)transmissions.
assert_eq!(pto, server.conn_params.get_idle_timeout() - total_ptos);
}
now += pto;
total_ptos += pto;
}
}

@ -287,7 +287,7 @@ fn idle_caching() {
let mut client = default_client();
let mut server = default_server();
let start = now();
let mut builder = PacketBuilder::short(Encoder::new(), false, []);
let mut builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);

// Perform the first round trip, but drop the Initial from the server.
// The client then caches the Handshake packet.
@ -297,18 +297,15 @@ fn idle_caching() {
client.process_input(&handshake.unwrap(), start);

// Perform an exchange and keep the connection alive.
// Only allow a packet containing a PING to pass.
let middle = start + AT_LEAST_PTO;
mem::drop(client.process_output(middle));
// This is the RTX of the client Initial.
let dgram = client.process_output(middle).dgram();

// Get the server to send its first probe and throw that away.
mem::drop(server.process_output(middle).dgram());
// Now let the server process the client PING. This causes the server
// Now let the server process the RTX'ed client Initial. This causes the server
// to send CRYPTO frames again, so manually extract and discard those.
let ping_before_s = server.stats().frame_rx.ping;
server.process_input(&dgram.unwrap(), middle);
assert_eq!(server.stats().frame_rx.ping, ping_before_s + 1);
let mut tokens = Vec::new();
server.crypto.streams.write_frame(
PacketNumberSpace::Initial,
@ -330,10 +327,10 @@ fn idle_caching() {
// Now only allow the Initial packet from the server through;
// it shouldn't contain a CRYPTO frame.
let (initial, _) = split_datagram(&dgram.unwrap());
let ping_before_c = client.stats().frame_rx.ping;
let crypto_before_c = client.stats().frame_rx.crypto;
let ack_before = client.stats().frame_rx.ack;
client.process_input(&initial, middle);
assert_eq!(client.stats().frame_rx.ping, ping_before_c + 1);
assert_eq!(client.stats().frame_rx.crypto, crypto_before_c);
assert_eq!(client.stats().frame_rx.ack, ack_before + 1);

let end = start + default_timeout() + (AT_LEAST_PTO / 2);

@ -28,6 +28,7 @@ use crate::{
|
||||
connection::tests::send_something_paced,
|
||||
frame::FRAME_TYPE_NEW_CONNECTION_ID,
|
||||
packet::PacketBuilder,
|
||||
path::MAX_PATH_PROBES,
|
||||
pmtud::Pmtud,
|
||||
tparams::{self, PreferredAddress, TransportParameter},
|
||||
CloseReason, ConnectionId, ConnectionIdDecoder, ConnectionIdGenerator, ConnectionIdRef,
|
||||
@ -236,7 +237,8 @@ fn migrate_immediate_fail() {
|
||||
let probe = client.process_output(now).dgram().unwrap();
|
||||
assert_v4_path(&probe, true); // Contains PATH_CHALLENGE.
|
||||
|
||||
for _ in 0..2 {
|
||||
// -1 because first PATH_CHALLENGE already sent above
|
||||
for _ in 0..MAX_PATH_PROBES * 2 - 1 {
|
||||
let cb = client.process_output(now).callback();
|
||||
assert_ne!(cb, Duration::new(0, 0));
|
||||
now += cb;
|
||||
@ -311,7 +313,8 @@ fn migrate_same_fail() {
|
||||
let probe = client.process_output(now).dgram().unwrap();
|
||||
assert_v6_path(&probe, true); // Contains PATH_CHALLENGE.
|
||||
|
||||
for _ in 0..2 {
|
||||
// -1 because first PATH_CHALLENGE already sent above
|
||||
for _ in 0..MAX_PATH_PROBES * 2 - 1 {
|
||||
let cb = client.process_output(now).callback();
|
||||
assert_ne!(cb, Duration::new(0, 0));
|
||||
now += cb;
|
||||
@ -946,7 +949,6 @@ impl crate::connection::test_internal::FrameWriter for GarbageWriter {
|
||||
/// Test the case that we run out of connection ID and receive an invalid frame
|
||||
/// from a new path.
|
||||
#[test]
|
||||
#[should_panic(expected = "attempting to close with a temporary path")]
|
||||
fn error_on_new_path_with_no_connection_id() {
|
||||
let mut client = default_client();
|
||||
let mut server = default_server();
|
||||
@ -967,5 +969,23 @@ fn error_on_new_path_with_no_connection_id() {
|
||||
|
||||
// See issue #1697. We had a crash when the client had a temporary path and
|
||||
// process_output is called.
|
||||
let closing_frames = client.stats().frame_tx.connection_close;
|
||||
mem::drop(client.process_output(now()));
|
||||
assert!(matches!(
|
||||
client.state(),
|
||||
State::Closing {
|
||||
error: CloseReason::Transport(Error::UnknownFrameType),
|
||||
..
|
||||
}
|
||||
));
|
||||
// Wait until the connection is closed.
|
||||
let mut now = now();
|
||||
now += client.process(None, now).callback();
|
||||
_ = client.process_output(now);
|
||||
// No closing frames should be sent, and the connection should be closed.
|
||||
assert_eq!(client.stats().frame_tx.connection_close, closing_frames);
|
||||
assert!(matches!(
|
||||
client.state(),
|
||||
State::Closed(CloseReason::Transport(Error::UnknownFrameType))
|
||||
));
|
||||
}
|
||||
|
@ -586,10 +586,10 @@ fn send_something_paced_with_modifier(
|
||||
.dgram()
|
||||
.expect("send_something: should have something to send")
|
||||
}
|
||||
Output::Datagram(d) => modifier(d).unwrap(),
|
||||
Output::Datagram(d) => d,
|
||||
Output::None => panic!("send_something: got Output::None"),
|
||||
};
|
||||
(dgram, now)
|
||||
(modifier(dgram).unwrap(), now)
|
||||
}
|
||||
|
||||
fn send_something_paced(
|
||||
@ -614,6 +614,18 @@ fn send_something(sender: &mut Connection, now: Instant) -> Datagram {
|
||||
send_something_with_modifier(sender, now, Some)
|
||||
}
|
||||
|
||||
/// Send something on a stream from `sender` through a modifier to `receiver`.
|
||||
/// Return any ACK that might result.
|
||||
fn send_with_modifier_and_receive(
|
||||
sender: &mut Connection,
|
||||
receiver: &mut Connection,
|
||||
now: Instant,
|
||||
modifier: fn(Datagram) -> Option<Datagram>,
|
||||
) -> Option<Datagram> {
|
||||
let dgram = send_something_with_modifier(sender, now, modifier);
|
||||
receiver.process(Some(&dgram), now).dgram()
|
||||
}
|
||||
|
||||
/// Send something on a stream from `sender` to `receiver`.
|
||||
/// Return any ACK that might result.
|
||||
fn send_and_receive(
|
||||
@ -621,8 +633,7 @@ fn send_and_receive(
|
||||
receiver: &mut Connection,
|
||||
now: Instant,
|
||||
) -> Option<Datagram> {
|
||||
let dgram = send_something(sender, now);
|
||||
receiver.process(Some(&dgram), now).dgram()
|
||||
send_with_modifier_and_receive(sender, receiver, now, Some)
|
||||
}
|
||||
|
||||
fn get_tokens(client: &mut Connection) -> Vec<ResumptionToken> {
|
||||
|
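The refactor above routes `send_and_receive` through the new `send_with_modifier_and_receive`, which takes a plain `fn(Datagram) -> Option<Datagram>`. Passing `Some` keeps the datagram unchanged, while a custom modifier can rewrite it before the receiver processes it. A sketch of a call site (the `keep` function below is hypothetical; the helper names come from the change above):

// Identity modifier: hand the datagram to the receiver unchanged.
fn keep(d: Datagram) -> Option<Datagram> {
    Some(d)
}

// Equivalent to send_and_receive(&mut client, &mut server, now):
let ack = send_with_modifier_and_receive(&mut client, &mut server, now, keep);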
@ -264,6 +264,7 @@ fn pto_handshake_complete() {
|
||||
// We'll use that packet to force the server to acknowledge 1-RTT.
|
||||
let stream_id = client.stream_create(StreamType::UniDi).unwrap();
|
||||
client.stream_close_send(stream_id).unwrap();
|
||||
now += HALF_RTT * 6;
|
||||
let pkt3 = client.process(None, now).dgram();
|
||||
assert_handshake(pkt3.as_ref().unwrap());
|
||||
let (pkt3_hs, pkt3_1rtt) = split_datagram(&pkt3.unwrap());
|
||||
@ -581,6 +582,9 @@ fn loss_time_past_largest_acked() {
|
||||
assert!(s_pto < RTT);
|
||||
let s_hs2 = server.process(None, now + s_pto).dgram();
|
||||
assert!(s_hs2.is_some());
|
||||
let s_pto = server.process(None, now).callback();
|
||||
assert_ne!(s_pto, Duration::from_secs(0));
|
||||
assert!(s_pto < RTT);
|
||||
let s_hs3 = server.process(None, now + s_pto).dgram();
|
||||
assert!(s_hs3.is_some());
|
||||
|
||||
@ -623,7 +627,9 @@ fn loss_time_past_largest_acked() {
|
||||
|
||||
// Now the client should start its loss recovery timer based on the ACK.
|
||||
now += RTT / 2;
|
||||
let c_ack = client.process(Some(&s_hs_ack), now).dgram();
|
||||
let _c_ack = client.process(Some(&s_hs_ack), now).dgram();
|
||||
// This ACK triggers an immediate ACK, due to an ACK loss during handshake.
|
||||
let c_ack = client.process(None, now).dgram();
|
||||
assert!(c_ack.is_none());
|
||||
// The client should now have the loss recovery timer active.
|
||||
let lr_time = client.process(None, now).callback();
|
||||
|
@ -4,9 +4,9 @@
|
||||
// option. This file may not be copied, modified, or distributed
|
||||
// except according to those terms.
|
||||
|
||||
use std::{cell::RefCell, rc::Rc};
|
||||
use std::{cell::RefCell, rc::Rc, time::Duration};
|
||||
|
||||
use neqo_common::event::Provider;
|
||||
use neqo_common::{event::Provider, qdebug};
|
||||
use neqo_crypto::{AllowZeroRtt, AntiReplay};
|
||||
use test_fixture::{assertions, now};
|
||||
|
||||
@ -258,3 +258,65 @@ fn zero_rtt_update_flow_control() {
|
||||
assert!(client.stream_send_atomic(uni_stream, MESSAGE).unwrap());
|
||||
assert!(client.stream_send_atomic(bidi_stream, MESSAGE).unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn zero_rtt_loss_accepted() {
|
||||
// This test requires a wider anti-replay window than other tests
|
||||
// because the dropped 0-RTT packets add a bunch of delay.
|
||||
const WINDOW: Duration = Duration::from_secs(20);
|
||||
for i in 0..5 {
|
||||
let mut client = default_client();
|
||||
let mut server = default_server();
|
||||
connect(&mut client, &mut server);
|
||||
|
||||
let mut now = now();
|
||||
let earlier = now;
|
||||
|
||||
let token = exchange_ticket(&mut client, &mut server, now);
|
||||
|
||||
now += WINDOW;
|
||||
let mut client = default_client();
|
||||
client.enable_resumption(now, token).unwrap();
|
||||
let mut server = resumed_server(&client);
|
||||
let anti_replay = AntiReplay::new(earlier, WINDOW, 1, 3).unwrap();
|
||||
server
|
||||
.server_enable_0rtt(&anti_replay, AllowZeroRtt {})
|
||||
.unwrap();
|
||||
|
||||
// Make CI/0-RTT
|
||||
let client_stream_id = client.stream_create(StreamType::UniDi).unwrap();
|
||||
client.stream_send(client_stream_id, &[1, 2, 3]).unwrap();
|
||||
let mut ci = client.process_output(now);
|
||||
assert!(ci.as_dgram_ref().is_some());
|
||||
assertions::assert_coalesced_0rtt(&ci.as_dgram_ref().unwrap()[..]);
|
||||
|
||||
// Drop CI/0-RTT a number of times
|
||||
qdebug!("Drop CI/0-RTT {i} extra times");
|
||||
for _ in 0..i {
|
||||
now += client.process_output(now).callback();
|
||||
ci = client.process_output(now);
|
||||
assert!(ci.as_dgram_ref().is_some());
|
||||
}
|
||||
|
||||
// Process CI/0-RTT
|
||||
let si = server.process(ci.as_dgram_ref(), now);
|
||||
assert!(si.as_dgram_ref().is_some());
|
||||
|
||||
let server_stream_id = server
|
||||
.events()
|
||||
.find_map(|evt| match evt {
|
||||
ConnectionEvent::NewStream { stream_id } => Some(stream_id),
|
||||
_ => None,
|
||||
})
|
||||
.expect("should have received a new stream event");
|
||||
assert_eq!(client_stream_id, server_stream_id.as_u64());
|
||||
|
||||
// 0-RTT should be accepted
|
||||
client.process_input(si.as_dgram_ref().unwrap(), now);
|
||||
let recvd_0rtt_reject = |e| e == ConnectionEvent::ZeroRttRejected;
|
||||
assert!(
|
||||
!client.events().any(recvd_0rtt_reject),
|
||||
"rejected 0-RTT after {i} extra dropped packets"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
56
third_party/rust/neqo-transport/src/ecn.rs
vendored
@ -12,6 +12,7 @@ use neqo_common::{qdebug, qinfo, qwarn, IpTosEcn};
|
||||
use crate::{
|
||||
packet::{PacketNumber, PacketType},
|
||||
recovery::SentPacket,
|
||||
Stats,
|
||||
};
|
||||
|
||||
/// The number of packets to use for testing a path for ECN capability.
|
||||
@ -25,7 +26,7 @@ const ECN_TEST_COUNT_INITIAL_PHASE: usize = 3;
|
||||
|
||||
/// The state information related to testing a path for ECN capability.
|
||||
/// See RFC9000, Appendix A.4.
|
||||
#[derive(Debug, PartialEq, Clone)]
|
||||
#[derive(Debug, PartialEq, Clone, Copy)]
|
||||
enum EcnValidationState {
|
||||
/// The path is currently being tested for ECN capability, with the number of probes sent so
|
||||
/// far on the path during the ECN validation.
|
||||
@ -50,7 +51,32 @@ impl Default for EcnValidationState {
|
||||
}
|
||||
}
|
||||
|
||||
impl EcnValidationState {
|
||||
fn set(&mut self, new: Self, stats: &mut Stats) {
|
||||
let old = std::mem::replace(self, new);
|
||||
|
||||
match old {
|
||||
Self::Testing { .. } | Self::Unknown => {}
|
||||
Self::Failed => debug_assert!(false, "Failed is a terminal state"),
|
||||
Self::Capable => stats.ecn_paths_capable -= 1,
|
||||
}
|
||||
match new {
|
||||
Self::Testing { .. } | Self::Unknown => {}
|
||||
Self::Failed => stats.ecn_paths_not_capable += 1,
|
||||
Self::Capable => stats.ecn_paths_capable += 1,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The counts for different ECN marks.
|
||||
///
|
||||
/// Note: [`EcnCount`] is used both for outgoing UDP datagrams, returned by
|
||||
/// remote through QUIC ACKs and for incoming UDP datagrams, read from IP TOS
|
||||
/// header. In the former case, given that QUIC ACKs only carry
|
||||
/// [`IpTosEcn::Ect0`], [`IpTosEcn::Ect1`] and [`IpTosEcn::Ce`], but never
|
||||
/// [`IpTosEcn::NotEct`], the [`IpTosEcn::NotEct`] value will always be 0.
|
||||
///
|
||||
/// See also <https://www.rfc-editor.org/rfc/rfc9000.html#section-19.3.2>.
|
||||
#[derive(PartialEq, Eq, Debug, Clone, Copy, Default)]
|
||||
pub struct EcnCount(EnumMap<IpTosEcn, u64>);
|
||||
|
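The new `EcnValidationState::set` funnels every state transition through one place so the `ecn_paths_capable` and `ecn_paths_not_capable` counters in `Stats` cannot drift from the state machine. A reduced sketch of that bookkeeping pattern, using stand-in types rather than the crate's own:

#[derive(PartialEq, Clone, Copy)]
enum State { Testing, Unknown, Capable, Failed }

#[derive(Default)]
struct Counters { capable: usize, not_capable: usize }

fn set_state(state: &mut State, new: State, counters: &mut Counters) {
    // Undo the contribution of the state being left...
    if *state == State::Capable {
        counters.capable -= 1;
    }
    *state = new;
    // ...and record the one being entered.
    match *state {
        State::Capable => counters.capable += 1,
        State::Failed => counters.not_capable += 1,
        State::Testing | State::Unknown => {}
    }
}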
||||
@ -126,17 +152,22 @@ impl EcnInfo {
|
||||
/// Exit ECN validation if the number of packets sent exceeds `ECN_TEST_COUNT`.
|
||||
/// We do not implement the part of the RFC that says to exit ECN validation if the time since
|
||||
/// the start of ECN validation exceeds 3 * PTO, since this seems to happen much too quickly.
|
||||
pub fn on_packet_sent(&mut self) {
|
||||
pub fn on_packet_sent(&mut self, stats: &mut Stats) {
|
||||
if let EcnValidationState::Testing { probes_sent, .. } = &mut self.state {
|
||||
*probes_sent += 1;
|
||||
qdebug!("ECN probing: sent {} probes", probes_sent);
|
||||
if *probes_sent == ECN_TEST_COUNT {
|
||||
qdebug!("ECN probing concluded with {} probes sent", probes_sent);
|
||||
self.state = EcnValidationState::Unknown;
|
||||
self.state.set(EcnValidationState::Unknown, stats);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Disable ECN.
|
||||
pub fn disable_ecn(&mut self, stats: &mut Stats) {
|
||||
self.state.set(EcnValidationState::Failed, stats);
|
||||
}
|
||||
|
||||
/// Process ECN counts from an ACK frame.
|
||||
///
|
||||
/// Returns whether ECN counts contain new valid ECN CE marks.
|
||||
@ -144,16 +175,17 @@ impl EcnInfo {
|
||||
&mut self,
|
||||
acked_packets: &[SentPacket],
|
||||
ack_ecn: Option<EcnCount>,
|
||||
stats: &mut Stats,
|
||||
) -> bool {
|
||||
let prev_baseline = self.baseline;
|
||||
|
||||
self.validate_ack_ecn_and_update(acked_packets, ack_ecn);
|
||||
self.validate_ack_ecn_and_update(acked_packets, ack_ecn, stats);
|
||||
|
||||
matches!(self.state, EcnValidationState::Capable)
|
||||
&& (self.baseline - prev_baseline)[IpTosEcn::Ce] > 0
|
||||
}
|
||||
|
||||
pub fn on_packets_lost(&mut self, lost_packets: &[SentPacket]) {
|
||||
pub fn on_packets_lost(&mut self, lost_packets: &[SentPacket], stats: &mut Stats) {
|
||||
if let EcnValidationState::Testing {
|
||||
probes_sent,
|
||||
initial_probes_lost: probes_lost,
|
||||
@ -170,7 +202,7 @@ impl EcnInfo {
|
||||
"ECN validation failed, all {} initial marked packets were lost",
|
||||
probes_lost
|
||||
);
|
||||
self.state = EcnValidationState::Failed;
|
||||
self.disable_ecn(stats);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -180,6 +212,7 @@ impl EcnInfo {
|
||||
&mut self,
|
||||
acked_packets: &[SentPacket],
|
||||
ack_ecn: Option<EcnCount>,
|
||||
stats: &mut Stats,
|
||||
) {
|
||||
// RFC 9000, Appendix A.4:
|
||||
//
|
||||
@ -212,7 +245,7 @@ impl EcnInfo {
|
||||
// > corresponding ECN counts are not present in the ACK frame.
|
||||
let Some(ack_ecn) = ack_ecn else {
|
||||
qwarn!("ECN validation failed, no ECN counts in ACK frame");
|
||||
self.state = EcnValidationState::Failed;
|
||||
self.disable_ecn(stats);
|
||||
return;
|
||||
};
|
||||
|
||||
@ -229,7 +262,7 @@ impl EcnInfo {
|
||||
.unwrap();
|
||||
if newly_acked_sent_with_ect0 == 0 {
|
||||
qwarn!("ECN validation failed, no ECT(0) packets were newly acked");
|
||||
self.state = EcnValidationState::Failed;
|
||||
self.disable_ecn(stats);
|
||||
return;
|
||||
}
|
||||
let ecn_diff = ack_ecn - self.baseline;
|
||||
@ -240,15 +273,16 @@ impl EcnInfo {
|
||||
sum_inc,
|
||||
newly_acked_sent_with_ect0
|
||||
);
|
||||
self.state = EcnValidationState::Failed;
|
||||
self.disable_ecn(stats);
|
||||
} else if ecn_diff[IpTosEcn::Ect1] > 0 {
|
||||
qwarn!("ECN validation failed, ACK counted ECT(1) marks that were never sent");
|
||||
self.state = EcnValidationState::Failed;
|
||||
self.disable_ecn(stats);
|
||||
} else if self.state != EcnValidationState::Capable {
|
||||
qinfo!("ECN validation succeeded, path is capable");
|
||||
self.state = EcnValidationState::Capable;
|
||||
self.state.set(EcnValidationState::Capable, stats);
|
||||
}
|
||||
self.baseline = ack_ecn;
|
||||
stats.ecn_tx = ack_ecn;
|
||||
self.largest_acked = largest_acked;
|
||||
}
|
||||
|
||||
|
2
third_party/rust/neqo-transport/src/fc.rs
vendored
@ -810,7 +810,7 @@ mod test {
|
||||
fc[StreamType::BiDi].add_retired(1);
|
||||
fc[StreamType::BiDi].send_flowc_update();
|
||||
// consume the frame
|
||||
let mut builder = PacketBuilder::short(Encoder::new(), false, []);
|
||||
let mut builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);
|
||||
let mut tokens = Vec::new();
|
||||
fc[StreamType::BiDi].write_frames(&mut builder, &mut tokens, &mut FrameStats::default());
|
||||
assert_eq!(tokens.len(), 1);
|
||||
|
@ -149,15 +149,19 @@ impl PacketBuilder {
|
||||
///
|
||||
/// If, after calling this method, `remaining()` returns 0, then call `abort()` to get
|
||||
/// the encoder back.
|
||||
pub fn short(mut encoder: Encoder, key_phase: bool, dcid: impl AsRef<[u8]>) -> Self {
|
||||
pub fn short(mut encoder: Encoder, key_phase: bool, dcid: Option<impl AsRef<[u8]>>) -> Self {
|
||||
let mut limit = Self::infer_limit(&encoder);
|
||||
let header_start = encoder.len();
|
||||
// Check that there is enough space for the header.
|
||||
// 5 = 1 (first byte) + 4 (packet number)
|
||||
if limit > encoder.len() && 5 + dcid.as_ref().len() < limit - encoder.len() {
|
||||
if limit > encoder.len()
|
||||
&& 5 + dcid.as_ref().map_or(0, |d| d.as_ref().len()) < limit - encoder.len()
|
||||
{
|
||||
encoder
|
||||
.encode_byte(PACKET_BIT_SHORT | PACKET_BIT_FIXED_QUIC | (u8::from(key_phase) << 2));
|
||||
encoder.encode(dcid.as_ref());
|
||||
if let Some(dcid) = dcid {
|
||||
encoder.encode(dcid.as_ref());
|
||||
}
|
||||
} else {
|
||||
limit = 0;
|
||||
}
|
||||
@ -185,20 +189,23 @@ impl PacketBuilder {
|
||||
mut encoder: Encoder,
|
||||
pt: PacketType,
|
||||
version: Version,
|
||||
dcid: impl AsRef<[u8]>,
|
||||
scid: impl AsRef<[u8]>,
|
||||
mut dcid: Option<impl AsRef<[u8]>>,
|
||||
mut scid: Option<impl AsRef<[u8]>>,
|
||||
) -> Self {
|
||||
let mut limit = Self::infer_limit(&encoder);
|
||||
let header_start = encoder.len();
|
||||
// Check that there is enough space for the header.
|
||||
// 11 = 1 (first byte) + 4 (version) + 2 (dcid+scid length) + 4 (packet number)
|
||||
if limit > encoder.len()
|
||||
&& 11 + dcid.as_ref().len() + scid.as_ref().len() < limit - encoder.len()
|
||||
&& 11
|
||||
+ dcid.as_ref().map_or(0, |d| d.as_ref().len())
|
||||
+ scid.as_ref().map_or(0, |d| d.as_ref().len())
|
||||
< limit - encoder.len()
|
||||
{
|
||||
encoder.encode_byte(PACKET_BIT_LONG | PACKET_BIT_FIXED_QUIC | pt.to_byte(version) << 4);
|
||||
encoder.encode_uint(4, version.wire_version());
|
||||
encoder.encode_vec(1, dcid.as_ref());
|
||||
encoder.encode_vec(1, scid.as_ref());
|
||||
encoder.encode_vec(1, dcid.take().as_ref().map_or(&[], AsRef::as_ref));
|
||||
encoder.encode_vec(1, scid.take().as_ref().map_or(&[], AsRef::as_ref));
|
||||
} else {
|
||||
limit = 0;
|
||||
}
|
||||
@ -994,8 +1001,8 @@ mod tests {
|
||||
Encoder::new(),
|
||||
PacketType::Initial,
|
||||
Version::default(),
|
||||
ConnectionId::from(&[][..]),
|
||||
ConnectionId::from(SERVER_CID),
|
||||
None::<&[u8]>,
|
||||
Some(ConnectionId::from(SERVER_CID)),
|
||||
);
|
||||
builder.initial_token(&[]);
|
||||
builder.pn(1, 2);
|
||||
@ -1058,7 +1065,7 @@ mod tests {
|
||||
fn build_short() {
|
||||
fixture_init();
|
||||
let mut builder =
|
||||
PacketBuilder::short(Encoder::new(), true, ConnectionId::from(SERVER_CID));
|
||||
PacketBuilder::short(Encoder::new(), true, Some(ConnectionId::from(SERVER_CID)));
|
||||
builder.pn(0, 1);
|
||||
builder.encode(SAMPLE_SHORT_PAYLOAD); // Enough payload for sampling.
|
||||
let packet = builder
|
||||
@ -1073,7 +1080,7 @@ mod tests {
|
||||
let mut firsts = Vec::new();
|
||||
for _ in 0..64 {
|
||||
let mut builder =
|
||||
PacketBuilder::short(Encoder::new(), true, ConnectionId::from(SERVER_CID));
|
||||
PacketBuilder::short(Encoder::new(), true, Some(ConnectionId::from(SERVER_CID)));
|
||||
builder.scramble(true);
|
||||
builder.pn(0, 1);
|
||||
firsts.push(builder.as_ref()[0]);
|
||||
@ -1136,8 +1143,8 @@ mod tests {
|
||||
Encoder::new(),
|
||||
PacketType::Handshake,
|
||||
Version::default(),
|
||||
ConnectionId::from(SERVER_CID),
|
||||
ConnectionId::from(CLIENT_CID),
|
||||
Some(ConnectionId::from(SERVER_CID)),
|
||||
Some(ConnectionId::from(CLIENT_CID)),
|
||||
);
|
||||
builder.pn(0, 1);
|
||||
builder.encode(&[0; 3]);
|
||||
@ -1145,7 +1152,8 @@ mod tests {
|
||||
assert_eq!(encoder.len(), 45);
|
||||
let first = encoder.clone();
|
||||
|
||||
let mut builder = PacketBuilder::short(encoder, false, ConnectionId::from(SERVER_CID));
|
||||
let mut builder =
|
||||
PacketBuilder::short(encoder, false, Some(ConnectionId::from(SERVER_CID)));
|
||||
builder.pn(1, 3);
|
||||
builder.encode(&[0]); // Minimal size (packet number is big enough).
|
||||
let encoder = builder.build(&mut prot).expect("build");
|
||||
@ -1170,8 +1178,8 @@ mod tests {
|
||||
Encoder::new(),
|
||||
PacketType::Handshake,
|
||||
Version::default(),
|
||||
ConnectionId::from(&[][..]),
|
||||
ConnectionId::from(&[][..]),
|
||||
None::<&[u8]>,
|
||||
None::<&[u8]>,
|
||||
);
|
||||
builder.pn(0, 1);
|
||||
builder.encode(&[1, 2, 3]);
|
||||
@ -1189,8 +1197,8 @@ mod tests {
|
||||
Encoder::new(),
|
||||
PacketType::Handshake,
|
||||
Version::default(),
|
||||
ConnectionId::from(&[][..]),
|
||||
ConnectionId::from(&[][..]),
|
||||
None::<&[u8]>,
|
||||
None::<&[u8]>,
|
||||
);
|
||||
builder.pn(0, 1);
|
||||
builder.scramble(true);
|
||||
@ -1210,8 +1218,8 @@ mod tests {
|
||||
Encoder::new(),
|
||||
PacketType::Initial,
|
||||
Version::default(),
|
||||
ConnectionId::from(&[][..]),
|
||||
ConnectionId::from(SERVER_CID),
|
||||
None::<&[u8]>,
|
||||
Some(ConnectionId::from(SERVER_CID)),
|
||||
);
|
||||
assert_ne!(builder.remaining(), 0);
|
||||
builder.initial_token(&[]);
|
||||
@ -1229,7 +1237,7 @@ mod tests {
|
||||
let mut builder = PacketBuilder::short(
|
||||
Encoder::with_capacity(100),
|
||||
true,
|
||||
ConnectionId::from(SERVER_CID),
|
||||
Some(ConnectionId::from(SERVER_CID)),
|
||||
);
|
||||
builder.pn(0, 1);
|
||||
// Pad, but not up to the full capacity. Leave enough space for the
|
||||
@ -1244,8 +1252,8 @@ mod tests {
|
||||
encoder,
|
||||
PacketType::Initial,
|
||||
Version::default(),
|
||||
ConnectionId::from(SERVER_CID),
|
||||
ConnectionId::from(SERVER_CID),
|
||||
Some(ConnectionId::from(SERVER_CID)),
|
||||
Some(ConnectionId::from(SERVER_CID)),
|
||||
);
|
||||
assert_eq!(builder.remaining(), 0);
|
||||
assert_eq!(builder.abort(), encoder_copy);
|
||||
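The `PacketBuilder::short` and `PacketBuilder::long` signatures change throughout this diff: the destination and source connection IDs are now `Option<impl AsRef<[u8]>>` instead of a possibly-empty slice. Call sites that used to pass `[]` now pass `None` and must name a concrete type for the otherwise-unused generic parameter, which is why `None::<&[u8]>` appears in the updated tests. Mirroring the call sites above:

// No connection ID on the short header: the generic still needs a concrete type.
let builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);

// With a connection ID, wrap it in Some.
let builder = PacketBuilder::short(Encoder::new(), true, Some(ConnectionId::from(SERVER_CID)));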
|
89
third_party/rust/neqo-transport/src/path.rs
vendored
@ -15,7 +15,7 @@ use std::{
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
use neqo_common::{hex, qdebug, qinfo, qlog::NeqoQlog, qtrace, Datagram, Encoder, IpTos};
|
||||
use neqo_common::{hex, qdebug, qinfo, qlog::NeqoQlog, qtrace, Datagram, Encoder, IpTos, IpTosEcn};
|
||||
use neqo_crypto::random;
|
||||
|
||||
use crate::{
|
||||
@ -35,7 +35,10 @@ use crate::{
|
||||
};
|
||||
|
||||
/// The number of times that a path will be probed before it is considered failed.
|
||||
const MAX_PATH_PROBES: usize = 3;
|
||||
///
|
||||
/// Note that with [`crate::ecn`], a path is probed [`MAX_PATH_PROBES`] times with ECN
|
||||
/// marks and [`MAX_PATH_PROBES`] without.
|
||||
pub const MAX_PATH_PROBES: usize = 3;
|
||||
/// The maximum number of paths that `Paths` will track.
|
||||
const MAX_PATHS: usize = 15;
|
||||
|
||||
@ -225,7 +228,13 @@ impl Paths {
|
||||
/// Otherwise, migration will occur after probing succeeds.
|
||||
/// The path is always probed and will be abandoned if probing fails.
|
||||
/// Returns `true` if the path was migrated.
|
||||
pub fn migrate(&mut self, path: &PathRef, force: bool, now: Instant) -> bool {
|
||||
pub fn migrate(
|
||||
&mut self,
|
||||
path: &PathRef,
|
||||
force: bool,
|
||||
now: Instant,
|
||||
stats: &mut Stats,
|
||||
) -> bool {
|
||||
debug_assert!(!self.is_temporary(path));
|
||||
let baseline = self.primary().map_or_else(
|
||||
|| EcnInfo::default().baseline(),
|
||||
@ -239,7 +248,7 @@ impl Paths {
|
||||
} else {
|
||||
self.migration_target = Some(Rc::clone(path));
|
||||
}
|
||||
path.borrow_mut().probe();
|
||||
path.borrow_mut().probe(stats);
|
||||
self.migration_target.is_none()
|
||||
}
|
||||
|
||||
@ -248,11 +257,11 @@ impl Paths {
|
||||
///
|
||||
/// TODO(mt) - the paths should own the RTT estimator, so they can find the PTO
|
||||
/// for themselves.
|
||||
pub fn process_timeout(&mut self, now: Instant, pto: Duration) -> bool {
|
||||
pub fn process_timeout(&mut self, now: Instant, pto: Duration, stats: &mut Stats) -> bool {
|
||||
let to_retire = &mut self.to_retire;
|
||||
let mut primary_failed = false;
|
||||
self.paths.retain(|p| {
|
||||
if p.borrow_mut().process_timeout(now, pto) {
|
||||
if p.borrow_mut().process_timeout(now, pto, stats) {
|
||||
true
|
||||
} else {
|
||||
qdebug!([p.borrow()], "Retiring path");
|
||||
@ -301,7 +310,13 @@ impl Paths {
|
||||
|
||||
/// Set the identified path to be primary.
|
||||
/// This panics if `make_permanent` hasn't been called.
|
||||
pub fn handle_migration(&mut self, path: &PathRef, remote: SocketAddr, now: Instant) {
|
||||
pub fn handle_migration(
|
||||
&mut self,
|
||||
path: &PathRef,
|
||||
remote: SocketAddr,
|
||||
now: Instant,
|
||||
stats: &mut Stats,
|
||||
) {
|
||||
// The update here needs to match the checks in `Path::received_on`.
|
||||
// Here, we update the remote port number to match the source port on the
|
||||
// datagram that was received. This ensures that we send subsequent
|
||||
@ -316,7 +331,7 @@ impl Paths {
|
||||
|
||||
if let Some(old_path) = self.select_primary(path) {
|
||||
// Need to probe the old path if the peer migrates.
|
||||
old_path.borrow_mut().probe();
|
||||
old_path.borrow_mut().probe(stats);
|
||||
// TODO(mt) - suppress probing if the path was valid within 3PTO.
|
||||
}
|
||||
}
|
||||
@ -339,11 +354,11 @@ impl Paths {
|
||||
/// A `PATH_RESPONSE` was received.
|
||||
/// Returns `true` if migration occurred.
|
||||
#[must_use]
|
||||
pub fn path_response(&mut self, response: [u8; 8], now: Instant) -> bool {
|
||||
pub fn path_response(&mut self, response: [u8; 8], now: Instant, stats: &mut Stats) -> bool {
|
||||
// TODO(mt) consider recording an RTT measurement here as we don't train
|
||||
// RTT for non-primary paths.
|
||||
for p in &self.paths {
|
||||
if p.borrow_mut().path_response(response, now) {
|
||||
if p.borrow_mut().path_response(response, now, stats) {
|
||||
// The response was accepted. If this path is one we intend
|
||||
// to migrate to, then migrate.
|
||||
if self
|
||||
@ -452,10 +467,10 @@ impl Paths {
|
||||
// make a new RTT esimate and interrogate that.
|
||||
// That is more expensive, but it should be rare and breaking encapsulation
|
||||
// is worse, especially as this is only used in tests.
|
||||
self.primary()
|
||||
.map_or(RttEstimate::default().estimate(), |p| {
|
||||
p.borrow().rtt().estimate()
|
||||
})
|
||||
self.primary().map_or_else(
|
||||
|| RttEstimate::default().estimate(),
|
||||
|p| p.borrow().rtt().estimate(),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn set_qlog(&mut self, qlog: NeqoQlog) {
|
||||
@ -660,8 +675,8 @@ impl Path {
|
||||
|
||||
/// Get the first local connection ID.
|
||||
/// Only do this for the primary path during the handshake.
|
||||
pub fn local_cid(&self) -> &ConnectionId {
|
||||
self.local_cid.as_ref().unwrap()
|
||||
pub const fn local_cid(&self) -> Option<&ConnectionId> {
|
||||
self.local_cid.as_ref()
|
||||
}
|
||||
|
||||
/// Set the remote connection ID based on the peer's choice.
|
||||
@ -674,8 +689,10 @@ impl Path {
|
||||
}
|
||||
|
||||
/// Access the remote connection ID.
|
||||
pub fn remote_cid(&self) -> &ConnectionId {
|
||||
self.remote_cid.as_ref().unwrap().connection_id()
|
||||
pub fn remote_cid(&self) -> Option<&ConnectionId> {
|
||||
self.remote_cid
|
||||
.as_ref()
|
||||
.map(super::cid::ConnectionIdEntry::connection_id)
|
||||
}
|
||||
|
||||
/// Set the stateless reset token for the connection ID that is currently in use.
|
||||
@ -696,12 +713,12 @@ impl Path {
|
||||
}
|
||||
|
||||
/// Make a datagram.
|
||||
pub fn datagram<V: Into<Vec<u8>>>(&mut self, payload: V) -> Datagram {
|
||||
pub fn datagram<V: Into<Vec<u8>>>(&mut self, payload: V, stats: &mut Stats) -> Datagram {
|
||||
// Make sure to use the TOS value from before calling EcnInfo::on_packet_sent, which may
|
||||
// update the ECN state and can hence change it - this packet should still be sent
|
||||
// with the current value.
|
||||
let tos = self.tos();
|
||||
self.ecn_info.on_packet_sent();
|
||||
self.ecn_info.on_packet_sent(stats);
|
||||
Datagram::new(self.local, self.remote, tos, payload)
|
||||
}
|
||||
|
||||
@ -721,14 +738,14 @@ impl Path {
|
||||
}
|
||||
|
||||
/// Handle a `PATH_RESPONSE` frame. Returns true if the response was accepted.
|
||||
pub fn path_response(&mut self, response: [u8; 8], now: Instant) -> bool {
|
||||
pub fn path_response(&mut self, response: [u8; 8], now: Instant, stats: &mut Stats) -> bool {
|
||||
if let ProbeState::Probing { data, mtu, .. } = &mut self.state {
|
||||
if response == *data {
|
||||
let need_full_probe = !*mtu;
|
||||
self.set_valid(now);
|
||||
if need_full_probe {
|
||||
qdebug!([self], "Sub-MTU probe successful, reset probe count");
|
||||
self.probe();
|
||||
self.probe(stats);
|
||||
}
|
||||
true
|
||||
} else {
|
||||
@ -747,15 +764,25 @@ impl Path {
|
||||
|
||||
/// At the next opportunity, send a probe.
|
||||
/// If the probe count has been exhausted already, marks the path as failed.
|
||||
fn probe(&mut self) {
|
||||
fn probe(&mut self, stats: &mut Stats) {
|
||||
let probe_count = match &self.state {
|
||||
ProbeState::Probing { probe_count, .. } => *probe_count + 1,
|
||||
ProbeState::ProbeNeeded { probe_count, .. } => *probe_count,
|
||||
_ => 0,
|
||||
};
|
||||
self.state = if probe_count >= MAX_PATH_PROBES {
|
||||
qinfo!([self], "Probing failed");
|
||||
ProbeState::Failed
|
||||
if self.ecn_info.ecn_mark() == IpTosEcn::Ect0 {
|
||||
// The path validation failure may be due to ECN blackholing, try again without ECN.
|
||||
qinfo!(
|
||||
[self],
|
||||
"Possible ECN blackhole, disabling ECN and re-probing path"
|
||||
);
|
||||
self.ecn_info.disable_ecn(stats);
|
||||
ProbeState::ProbeNeeded { probe_count: 0 }
|
||||
} else {
|
||||
qinfo!([self], "Probing failed");
|
||||
ProbeState::Failed
|
||||
}
|
||||
} else {
|
||||
qdebug!([self], "Initiating probe");
|
||||
ProbeState::ProbeNeeded { probe_count }
|
||||
@ -839,10 +866,10 @@ impl Path {
|
||||
|
||||
/// Process a timer for this path.
|
||||
/// This returns true if the path is viable and can be kept alive.
|
||||
pub fn process_timeout(&mut self, now: Instant, pto: Duration) -> bool {
|
||||
pub fn process_timeout(&mut self, now: Instant, pto: Duration, stats: &mut Stats) -> bool {
|
||||
if let ProbeState::Probing { sent, .. } = &self.state {
|
||||
if now >= *sent + pto {
|
||||
self.probe();
|
||||
self.probe(stats);
|
||||
}
|
||||
}
|
||||
if matches!(self.state, ProbeState::Failed) {
|
||||
@ -853,9 +880,9 @@ impl Path {
|
||||
true
|
||||
} else if matches!(self.state, ProbeState::Valid) {
|
||||
// Retire validated, non-primary paths.
|
||||
// Allow more than `MAX_PATH_PROBES` times the PTO so that an old
|
||||
// Allow more than 2* `MAX_PATH_PROBES` times the PTO so that an old
|
||||
// path remains around until after a previous path fails.
|
||||
let count = u32::try_from(MAX_PATH_PROBES + 1).unwrap();
|
||||
let count = u32::try_from(2 * MAX_PATH_PROBES + 1).unwrap();
|
||||
self.validated.unwrap() + (pto * count) > now
|
||||
} else {
|
||||
// Keep paths that are being actively probed.
|
||||
@ -978,7 +1005,7 @@ impl Path {
|
||||
) {
|
||||
debug_assert!(self.is_primary());
|
||||
|
||||
let ecn_ce_received = self.ecn_info.on_packets_acked(acked_pkts, ack_ecn);
|
||||
let ecn_ce_received = self.ecn_info.on_packets_acked(acked_pkts, ack_ecn, stats);
|
||||
if ecn_ce_received {
|
||||
let cwnd_reduced = self
|
||||
.sender
|
||||
@ -1002,7 +1029,7 @@ impl Path {
|
||||
now: Instant,
|
||||
) {
|
||||
debug_assert!(self.is_primary());
|
||||
self.ecn_info.on_packets_lost(lost_packets);
|
||||
self.ecn_info.on_packets_lost(lost_packets, stats);
|
||||
let cwnd_reduced = self.sender.on_packets_lost(
|
||||
self.rtt.first_sample_time(),
|
||||
prev_largest_acked_sent,
|
||||
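`Path::probe` no longer gives up as soon as the probe budget is exhausted: if the path was still being probed with ECN-marked packets, it disables ECN (a possible ECN blackhole) and restarts probing, so a path is only declared failed after `MAX_PATH_PROBES` attempts with ECN and `MAX_PATH_PROBES` without. A simplified, standalone sketch of that decision (not the crate's code):

enum Next { ProbeAgain, DisableEcnAndReprobe, Failed }

const MAX_PATH_PROBES: usize = 3;

fn next_probe_step(probe_count: usize, ecn_marked: bool) -> Next {
    if probe_count < MAX_PATH_PROBES {
        Next::ProbeAgain
    } else if ecn_marked {
        // Validation may have failed because a middlebox drops ECN-marked
        // packets; retry the whole probe sequence without ECN first.
        Next::DisableEcnAndReprobe
    } else {
        Next::Failed
    }
}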
|
2
third_party/rust/neqo-transport/src/pmtud.rs
vendored
@ -383,7 +383,7 @@ mod tests {
|
||||
let stats_before = stats.clone();
|
||||
|
||||
// Fake a packet number, so the builder logic works.
|
||||
let mut builder = PacketBuilder::short(Encoder::new(), false, []);
|
||||
let mut builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);
|
||||
let pn = prot.next_pn();
|
||||
builder.pn(pn, 4);
|
||||
builder.set_initial_limit(&SendProfile::new_limited(pmtud.plpmtu()), 16, pmtud);
|
||||
|
4
third_party/rust/neqo-transport/src/qlog.rs
vendored
@ -104,8 +104,8 @@ fn connection_started(qlog: &NeqoQlog, path: &PathRef) {
|
||||
protocol: Some("QUIC".into()),
|
||||
src_port: p.local_address().port().into(),
|
||||
dst_port: p.remote_address().port().into(),
|
||||
src_cid: Some(format!("{}", p.local_cid())),
|
||||
dst_cid: Some(format!("{}", p.remote_cid())),
|
||||
src_cid: p.local_cid().map(ToString::to_string),
|
||||
dst_cid: p.remote_cid().map(ToString::to_string),
|
||||
});
|
||||
|
||||
Some(ev_data)
|
||||
|
@ -16,10 +16,10 @@ use std::{
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
use enum_map::{enum_map, EnumMap};
|
||||
use neqo_common::{qdebug, qinfo, qlog::NeqoQlog, qtrace, qwarn};
|
||||
pub use sent::SentPacket;
|
||||
use sent::SentPackets;
|
||||
use smallvec::{smallvec, SmallVec};
|
||||
pub use token::{RecoveryToken, StreamRecoveryToken};
|
||||
|
||||
use crate::{
|
||||
@ -361,20 +361,10 @@ impl LossRecoverySpace {
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct LossRecoverySpaces {
|
||||
/// When we have all of the loss recovery spaces, this will use a separate
|
||||
/// allocation, but this is reduced once the handshake is done.
|
||||
spaces: SmallVec<[LossRecoverySpace; 1]>,
|
||||
spaces: EnumMap<PacketNumberSpace, Option<LossRecoverySpace>>,
|
||||
}
|
||||
|
||||
impl LossRecoverySpaces {
|
||||
const fn idx(space: PacketNumberSpace) -> usize {
|
||||
match space {
|
||||
PacketNumberSpace::ApplicationData => 0,
|
||||
PacketNumberSpace::Handshake => 1,
|
||||
PacketNumberSpace::Initial => 2,
|
||||
}
|
||||
}
|
||||
|
||||
/// Drop a packet number space and return all the packets that were
|
||||
/// outstanding, so that those can be marked as lost.
|
||||
///
|
||||
@ -382,45 +372,42 @@ impl LossRecoverySpaces {
|
||||
///
|
||||
/// If the space has already been removed.
|
||||
pub fn drop_space(&mut self, space: PacketNumberSpace) -> impl IntoIterator<Item = SentPacket> {
|
||||
let sp = match space {
|
||||
PacketNumberSpace::Initial => self.spaces.pop(),
|
||||
PacketNumberSpace::Handshake => {
|
||||
let sp = self.spaces.pop();
|
||||
self.spaces.shrink_to_fit();
|
||||
sp
|
||||
}
|
||||
PacketNumberSpace::ApplicationData => panic!("discarding application space"),
|
||||
};
|
||||
let mut sp = sp.unwrap();
|
||||
assert_eq!(sp.space(), space, "dropping spaces out of order");
|
||||
sp.remove_ignored()
|
||||
let sp = self.spaces[space].take();
|
||||
assert_ne!(
|
||||
space,
|
||||
PacketNumberSpace::ApplicationData,
|
||||
"discarding application space"
|
||||
);
|
||||
sp.unwrap().remove_ignored()
|
||||
}
|
||||
|
||||
pub fn get(&self, space: PacketNumberSpace) -> Option<&LossRecoverySpace> {
|
||||
self.spaces.get(Self::idx(space))
|
||||
self.spaces[space].as_ref()
|
||||
}
|
||||
|
||||
pub fn get_mut(&mut self, space: PacketNumberSpace) -> Option<&mut LossRecoverySpace> {
|
||||
self.spaces.get_mut(Self::idx(space))
|
||||
self.spaces[space].as_mut()
|
||||
}
|
||||
|
||||
fn iter(&self) -> impl Iterator<Item = &LossRecoverySpace> {
|
||||
self.spaces.iter()
|
||||
self.spaces.iter().filter_map(|(_, recvd)| recvd.as_ref())
|
||||
}
|
||||
|
||||
fn iter_mut(&mut self) -> impl Iterator<Item = &mut LossRecoverySpace> {
|
||||
self.spaces.iter_mut()
|
||||
self.spaces
|
||||
.iter_mut()
|
||||
.filter_map(|(_, recvd)| recvd.as_mut())
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for LossRecoverySpaces {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
spaces: smallvec![
|
||||
LossRecoverySpace::new(PacketNumberSpace::ApplicationData),
|
||||
LossRecoverySpace::new(PacketNumberSpace::Handshake),
|
||||
LossRecoverySpace::new(PacketNumberSpace::Initial),
|
||||
],
|
||||
spaces: enum_map! {
|
||||
PacketNumberSpace::Initial => Some(LossRecoverySpace::new(PacketNumberSpace::Initial)),
|
||||
PacketNumberSpace::Handshake => Some(LossRecoverySpace::new(PacketNumberSpace::Handshake)),
|
||||
PacketNumberSpace::ApplicationData => Some(LossRecoverySpace::new(PacketNumberSpace::ApplicationData)),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -439,32 +426,33 @@ struct PtoState {
|
||||
impl PtoState {
|
||||
/// The number of packets we send on a PTO.
|
||||
/// And the number to declare lost when the PTO timer is hit.
|
||||
fn pto_packet_count(space: PacketNumberSpace, rx_count: usize) -> usize {
|
||||
if space == PacketNumberSpace::Initial && rx_count == 0 {
|
||||
// For the Initial space, we only send one packet on PTO if we have not received any
|
||||
// packets from the peer yet. This avoids sending useless PING-only packets
|
||||
// when the Client Initial is deemed lost.
|
||||
1
|
||||
} else {
|
||||
fn pto_packet_count(space: PacketNumberSpace) -> usize {
|
||||
if space == PacketNumberSpace::ApplicationData {
|
||||
MAX_PTO_PACKET_COUNT
|
||||
} else {
|
||||
// For the Initial and Handshake spaces, we only send one packet on PTO. This avoids
|
||||
// sending useless PING-only packets when only a single packet was lost, which is the
|
||||
// common case. These PINGs use cwnd and amplification window space, and sending them
|
||||
// hence makes the handshake more brittle.
|
||||
1
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new(space: PacketNumberSpace, probe: PacketNumberSpaceSet, rx_count: usize) -> Self {
|
||||
pub fn new(space: PacketNumberSpace, probe: PacketNumberSpaceSet) -> Self {
|
||||
debug_assert!(probe[space]);
|
||||
Self {
|
||||
space,
|
||||
count: 1,
|
||||
packets: Self::pto_packet_count(space, rx_count),
|
||||
packets: Self::pto_packet_count(space),
|
||||
probe,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn pto(&mut self, space: PacketNumberSpace, probe: PacketNumberSpaceSet, rx_count: usize) {
|
||||
pub fn pto(&mut self, space: PacketNumberSpace, probe: PacketNumberSpaceSet) {
|
||||
debug_assert!(probe[space]);
|
||||
self.space = space;
|
||||
self.count += 1;
|
||||
self.packets = Self::pto_packet_count(space, rx_count);
|
||||
self.packets = Self::pto_packet_count(space);
|
||||
self.probe = probe;
|
||||
}
|
||||
|
||||
@ -546,7 +534,7 @@ impl LossRecovery {
|
||||
|
||||
pub fn on_packet_sent(&mut self, path: &PathRef, mut sent_packet: SentPacket) {
|
||||
let pn_space = PacketNumberSpace::from(sent_packet.packet_type());
|
||||
qdebug!([self], "packet {}-{} sent", pn_space, sent_packet.pn());
|
||||
qtrace!([self], "packet {}-{} sent", pn_space, sent_packet.pn());
|
||||
if let Some(space) = self.spaces.get_mut(pn_space) {
|
||||
path.borrow_mut().packet_sent(&mut sent_packet);
|
||||
space.on_packet_sent(sent_packet);
|
||||
@ -816,11 +804,10 @@ impl LossRecovery {
|
||||
}
|
||||
|
||||
fn fire_pto(&mut self, pn_space: PacketNumberSpace, allow_probes: PacketNumberSpaceSet) {
|
||||
let rx_count = self.stats.borrow().packets_rx;
|
||||
if let Some(st) = &mut self.pto_state {
|
||||
st.pto(pn_space, allow_probes, rx_count);
|
||||
st.pto(pn_space, allow_probes);
|
||||
} else {
|
||||
self.pto_state = Some(PtoState::new(pn_space, allow_probes, rx_count));
|
||||
self.pto_state = Some(PtoState::new(pn_space, allow_probes));
|
||||
}
|
||||
|
||||
self.pto_state
|
||||
@ -852,10 +839,7 @@ impl LossRecovery {
|
||||
let space = self.spaces.get_mut(*pn_space).unwrap();
|
||||
lost.extend(
|
||||
space
|
||||
.pto_packets(PtoState::pto_packet_count(
|
||||
*pn_space,
|
||||
self.stats.borrow().packets_rx,
|
||||
))
|
||||
.pto_packets(PtoState::pto_packet_count(*pn_space))
|
||||
.cloned(),
|
||||
);
|
||||
|
||||
@ -906,7 +890,7 @@ impl LossRecovery {
|
||||
/// what the current congestion window is, and what the pacer says.
|
||||
#[allow(clippy::option_if_let_else)]
|
||||
pub fn send_profile(&mut self, path: &Path, now: Instant) -> SendProfile {
|
||||
qdebug!([self], "get send profile {:?}", now);
|
||||
qtrace!([self], "get send profile {:?}", now);
|
||||
let sender = path.sender();
|
||||
let mtu = path.plpmtu();
|
||||
if let Some(profile) = self
|
||||
@ -1382,13 +1366,6 @@ mod tests {
|
||||
lr.discard(PacketNumberSpace::ApplicationData, now());
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic(expected = "dropping spaces out of order")]
|
||||
fn drop_out_of_order() {
|
||||
let mut lr = Fixture::default();
|
||||
lr.discard(PacketNumberSpace::Handshake, now());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn ack_after_drop() {
|
||||
let mut lr = Fixture::default();
|
||||
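Both `LossRecoverySpaces` (above) and `AckTracker` (later in this diff) move from a `SmallVec` kept in reverse space order to an `EnumMap` of `Option`s, so a packet number space can be dropped in place with `take()` rather than by popping in a fixed order. A reduced sketch of the pattern, using a stand-in payload type instead of the real per-space state:

use enum_map::{enum_map, Enum, EnumMap};

#[derive(Enum, Clone, Copy)]
enum Space { Initial, Handshake, ApplicationData }

fn demo() {
    let mut spaces: EnumMap<Space, Option<u32>> = enum_map! {
        Space::Initial => Some(0),
        Space::Handshake => Some(0),
        Space::ApplicationData => Some(0),
    };

    // drop_space(Initial): no ordering constraint, just take the slot.
    spaces[Space::Initial].take();
    assert!(spaces[Space::Initial].is_none());

    // Iterate only the spaces that are still alive.
    assert_eq!(spaces.values().flatten().count(), 2);
}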
|
@ -1483,7 +1483,7 @@ mod tests {
|
||||
assert!(s.has_frames_to_write());
|
||||
|
||||
// consume it
|
||||
let mut builder = PacketBuilder::short(Encoder::new(), false, []);
|
||||
let mut builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);
|
||||
let mut token = Vec::new();
|
||||
s.write_frame(&mut builder, &mut token, &mut FrameStats::default());
|
||||
|
||||
@ -1597,7 +1597,7 @@ mod tests {
|
||||
s.read(&mut buf).unwrap();
|
||||
assert!(session_fc.borrow().frame_needed());
|
||||
// consume it
|
||||
let mut builder = PacketBuilder::short(Encoder::new(), false, []);
|
||||
let mut builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);
|
||||
let mut token = Vec::new();
|
||||
session_fc
|
||||
.borrow_mut()
|
||||
@ -1618,7 +1618,7 @@ mod tests {
|
||||
s.read(&mut buf).unwrap();
|
||||
assert!(session_fc.borrow().frame_needed());
|
||||
// consume it
|
||||
let mut builder = PacketBuilder::short(Encoder::new(), false, []);
|
||||
let mut builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);
|
||||
let mut token = Vec::new();
|
||||
session_fc
|
||||
.borrow_mut()
|
||||
@ -1866,7 +1866,7 @@ mod tests {
|
||||
assert!(s.fc().unwrap().frame_needed());
|
||||
|
||||
// Write the fc update frame
|
||||
let mut builder = PacketBuilder::short(Encoder::new(), false, []);
|
||||
let mut builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);
|
||||
let mut token = Vec::new();
|
||||
let mut stats = FrameStats::default();
|
||||
fc.borrow_mut()
|
||||
|
@ -2596,7 +2596,7 @@ mod tests {
|
||||
ss.insert(StreamId::from(0), s);
|
||||
|
||||
let mut tokens = Vec::new();
|
||||
let mut builder = PacketBuilder::short(Encoder::new(), false, []);
|
||||
let mut builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);
|
||||
|
||||
// Write a small frame: no fin.
|
||||
let written = builder.len();
|
||||
@ -2684,7 +2684,7 @@ mod tests {
|
||||
ss.insert(StreamId::from(0), s);
|
||||
|
||||
let mut tokens = Vec::new();
|
||||
let mut builder = PacketBuilder::short(Encoder::new(), false, []);
|
||||
let mut builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);
|
||||
ss.write_frames(
|
||||
TransmissionPriority::default(),
|
||||
&mut builder,
|
||||
@ -2762,7 +2762,7 @@ mod tests {
|
||||
assert_eq!(s.next_bytes(false), Some((0, &b"ab"[..])));
|
||||
|
||||
// This doesn't report blocking yet.
|
||||
let mut builder = PacketBuilder::short(Encoder::new(), false, []);
|
||||
let mut builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);
|
||||
let mut tokens = Vec::new();
|
||||
let mut stats = FrameStats::default();
|
||||
s.write_blocked_frame(
|
||||
@ -2815,7 +2815,7 @@ mod tests {
|
||||
assert_eq!(s.send_atomic(b"abc").unwrap(), 0);
|
||||
|
||||
// Assert that STREAM_DATA_BLOCKED is sent.
|
||||
let mut builder = PacketBuilder::short(Encoder::new(), false, []);
|
||||
let mut builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);
|
||||
let mut tokens = Vec::new();
|
||||
let mut stats = FrameStats::default();
|
||||
s.write_blocked_frame(
|
||||
@ -2902,7 +2902,7 @@ mod tests {
|
||||
s.mark_as_lost(len_u64, 0, true);
|
||||
|
||||
// No frame should be sent here.
|
||||
let mut builder = PacketBuilder::short(Encoder::new(), false, []);
|
||||
let mut builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);
|
||||
let mut tokens = Vec::new();
|
||||
let mut stats = FrameStats::default();
|
||||
s.write_stream_frame(
|
||||
@ -2962,7 +2962,7 @@ mod tests {
|
||||
s.close();
|
||||
}
|
||||
|
||||
let mut builder = PacketBuilder::short(Encoder::new(), false, []);
|
||||
let mut builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);
|
||||
let header_len = builder.len();
|
||||
builder.set_limit(header_len + space);
|
||||
|
||||
@ -3063,7 +3063,7 @@ mod tests {
|
||||
s.send(data).unwrap();
|
||||
s.close();
|
||||
|
||||
let mut builder = PacketBuilder::short(Encoder::new(), false, []);
|
||||
let mut builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);
|
||||
let header_len = builder.len();
|
||||
// Add 2 for the frame type and stream ID, then add the extra.
|
||||
builder.set_limit(header_len + data.len() + 2 + extra);
|
||||
|
59
third_party/rust/neqo-transport/src/server.rs
vendored
@ -10,7 +10,6 @@ use std::{
|
||||
cell::RefCell,
|
||||
cmp::min,
|
||||
collections::HashSet,
|
||||
fs::OpenOptions,
|
||||
ops::{Deref, DerefMut},
|
||||
path::PathBuf,
|
||||
rc::Rc,
|
||||
@ -18,14 +17,12 @@ use std::{
|
||||
};
|
||||
|
||||
use neqo_common::{
|
||||
self as common, event::Provider, hex, qdebug, qerror, qinfo, qlog::NeqoQlog, qtrace, qwarn,
|
||||
Datagram, Role,
|
||||
event::Provider, hex, qdebug, qerror, qinfo, qlog::NeqoQlog, qtrace, qwarn, Datagram, Role,
|
||||
};
|
||||
use neqo_crypto::{
|
||||
encode_ech_config, AntiReplay, Cipher, PrivateKey, PublicKey, ZeroRttCheckResult,
|
||||
ZeroRttChecker,
|
||||
};
|
||||
use qlog::streamer::QlogStreamer;
|
||||
|
||||
pub use crate::addr_valid::ValidateAddress;
|
||||
use crate::{
|
||||
@ -258,49 +255,17 @@ impl Server {
|
||||
self.qlog_dir
|
||||
.as_ref()
|
||||
.map_or_else(NeqoQlog::disabled, |qlog_dir| {
|
||||
let mut qlog_path = qlog_dir.clone();
|
||||
|
||||
qlog_path.push(format!("{odcid}.qlog"));
|
||||
|
||||
// The original DCID is chosen by the client. Using create_new()
|
||||
// prevents attackers from overwriting existing logs.
|
||||
match OpenOptions::new()
|
||||
.write(true)
|
||||
.create_new(true)
|
||||
.open(&qlog_path)
|
||||
{
|
||||
Ok(f) => {
|
||||
qinfo!("Qlog output to {}", qlog_path.display());
|
||||
|
||||
let streamer = QlogStreamer::new(
|
||||
qlog::QLOG_VERSION.to_string(),
|
||||
Some("Neqo server qlog".to_string()),
|
||||
Some("Neqo server qlog".to_string()),
|
||||
None,
|
||||
std::time::Instant::now(),
|
||||
common::qlog::new_trace(Role::Server),
|
||||
qlog::events::EventImportance::Base,
|
||||
Box::new(f),
|
||||
);
|
||||
let n_qlog = NeqoQlog::enabled(streamer, qlog_path);
|
||||
match n_qlog {
|
||||
Ok(nql) => nql,
|
||||
Err(e) => {
|
||||
// Keep going but w/o qlogging
|
||||
qerror!("NeqoQlog error: {}", e);
|
||||
NeqoQlog::disabled()
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
qerror!(
|
||||
"Could not open file {} for qlog output: {}",
|
||||
qlog_path.display(),
|
||||
e
|
||||
);
|
||||
NeqoQlog::disabled()
|
||||
}
|
||||
}
|
||||
NeqoQlog::enabled_with_file(
|
||||
qlog_dir.clone(),
|
||||
Role::Server,
|
||||
Some("Neqo server qlog".to_string()),
|
||||
Some("Neqo server qlog".to_string()),
|
||||
odcid,
|
||||
)
|
||||
.unwrap_or_else(|e| {
|
||||
qerror!("failed to create NeqoQlog: {}", e);
|
||||
NeqoQlog::disabled()
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
|
28
third_party/rust/neqo-transport/src/stats.rs
vendored
@ -16,7 +16,7 @@ use std::{
|
||||
|
||||
use neqo_common::qwarn;
|
||||
|
||||
use crate::packet::PacketNumber;
|
||||
use crate::{ecn::EcnCount, packet::PacketNumber};
|
||||
|
||||
pub const MAX_PTO_COUNTS: usize = 16;
|
||||
|
||||
@ -166,6 +166,25 @@ pub struct Stats {
|
||||
pub incoming_datagram_dropped: usize,
|
||||
|
||||
pub datagram_tx: DatagramStats,
|
||||
|
||||
/// Number of paths known to be ECN capable.
|
||||
pub ecn_paths_capable: usize,
|
||||
/// Number of paths known to be ECN incapable.
|
||||
pub ecn_paths_not_capable: usize,
|
||||
/// ECN counts for outgoing UDP datagrams, returned by remote through QUIC ACKs.
|
||||
///
|
||||
/// Note: Given that QUIC ACKs only carry [`Ect0`], [`Ect1`] and [`Ce`], but
|
||||
/// never [`NotEct`], the [`NotEct`] value will always be 0.
|
||||
///
|
||||
/// See also <https://www.rfc-editor.org/rfc/rfc9000.html#section-19.3.2>.
|
||||
///
|
||||
/// [`Ect0`]: neqo_common::tos::IpTosEcn::Ect0
|
||||
/// [`Ect1`]: neqo_common::tos::IpTosEcn::Ect1
|
||||
/// [`Ce`]: neqo_common::tos::IpTosEcn::Ce
|
||||
/// [`NotEct`]: neqo_common::tos::IpTosEcn::NotEct
|
||||
pub ecn_tx: EcnCount,
|
||||
/// ECN counts for incoming UDP datagrams, read from IP TOS header.
|
||||
pub ecn_rx: EcnCount,
|
||||
}
|
||||
|
||||
impl Stats {
|
||||
@ -222,7 +241,12 @@ impl Debug for Stats {
|
||||
writeln!(f, " frames rx:")?;
|
||||
self.frame_rx.fmt(f)?;
|
||||
writeln!(f, " frames tx:")?;
|
||||
self.frame_tx.fmt(f)
|
||||
self.frame_tx.fmt(f)?;
|
||||
writeln!(
|
||||
f,
|
||||
" ecn: {:?} for tx {:?} for rx {} capable paths {} not capable paths",
|
||||
self.ecn_tx, self.ecn_rx, self.ecn_paths_capable, self.ecn_paths_not_capable
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
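With the fields added above, the ECN outcome of a connection can be read straight off `Stats`, for example after a handshake in a test. A hypothetical read-out (field names come from this diff; the `client.stats()` accessor is assumed to behave as in the connection tests elsewhere in this change):

let stats = client.stats();
println!(
    "ECN: {} capable path(s), {} not capable, tx counts {:?}, rx counts {:?}",
    stats.ecn_paths_capable, stats.ecn_paths_not_capable, stats.ecn_tx, stats.ecn_rx,
);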
|
@ -139,7 +139,7 @@ pub enum TransportParameter {
|
||||
|
||||
impl TransportParameter {
|
||||
fn encode(&self, enc: &mut Encoder, tp: TransportParameterId) {
|
||||
qdebug!("TP encoded; type 0x{:02x} val {:?}", tp, self);
|
||||
qtrace!("TP encoded; type 0x{:02x} val {:?}", tp, self);
|
||||
enc.encode_varint(tp);
|
||||
match self {
|
||||
Self::Bytes(a) => {
|
||||
@ -309,7 +309,7 @@ impl TransportParameter {
|
||||
if d.remaining() > 0 {
|
||||
return Err(Error::TooMuchData);
|
||||
}
|
||||
qdebug!("TP decoded; type 0x{:02x} val {:?}", tp, value);
|
||||
qtrace!("TP decoded; type 0x{:02x} val {:?}", tp, value);
|
||||
Ok(Some((tp, value)))
|
||||
}
|
||||
}
|
||||
|
183
third_party/rust/neqo-transport/src/tracking.rs
vendored
@ -13,10 +13,9 @@ use std::{
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
use enum_map::Enum;
|
||||
use enum_map::{enum_map, Enum, EnumMap};
|
||||
use neqo_common::{qdebug, qinfo, qtrace, qwarn, IpTosEcn};
|
||||
use neqo_crypto::{Epoch, TLS_EPOCH_HANDSHAKE, TLS_EPOCH_INITIAL};
|
||||
use smallvec::{smallvec, SmallVec};
|
||||
|
||||
use crate::{
|
||||
ecn::EcnCount,
|
||||
@ -26,7 +25,6 @@ use crate::{
|
||||
stats::FrameStats,
|
||||
};
|
||||
|
||||
// TODO(mt) look at enabling EnumMap for this: https://stackoverflow.com/a/44905797/1375574
|
||||
#[derive(Clone, Copy, Debug, PartialEq, PartialOrd, Ord, Eq, Enum)]
|
||||
pub enum PacketNumberSpace {
|
||||
Initial,
|
||||
@ -70,17 +68,17 @@ impl From<PacketType> for PacketNumberSpace {
|
||||
|
||||
#[derive(Clone, Copy, Default)]
|
||||
pub struct PacketNumberSpaceSet {
|
||||
initial: bool,
|
||||
handshake: bool,
|
||||
application_data: bool,
|
||||
spaces: EnumMap<PacketNumberSpace, bool>,
|
||||
}
|
||||
|
||||
impl PacketNumberSpaceSet {
|
||||
pub const fn all() -> Self {
|
||||
pub fn all() -> Self {
|
||||
Self {
|
||||
initial: true,
|
||||
handshake: true,
|
||||
application_data: true,
|
||||
spaces: enum_map! {
|
||||
PacketNumberSpace::Initial => true,
|
||||
PacketNumberSpace::Handshake => true,
|
||||
PacketNumberSpace::ApplicationData => true,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -89,21 +87,13 @@ impl Index<PacketNumberSpace> for PacketNumberSpaceSet {
|
||||
type Output = bool;
|
||||
|
||||
fn index(&self, space: PacketNumberSpace) -> &Self::Output {
|
||||
match space {
|
||||
PacketNumberSpace::Initial => &self.initial,
|
||||
PacketNumberSpace::Handshake => &self.handshake,
|
||||
PacketNumberSpace::ApplicationData => &self.application_data,
|
||||
}
|
||||
&self.spaces[space]
|
||||
}
|
||||
}
|
||||
|
||||
impl IndexMut<PacketNumberSpace> for PacketNumberSpaceSet {
|
||||
fn index_mut(&mut self, space: PacketNumberSpace) -> &mut Self::Output {
|
||||
match space {
|
||||
PacketNumberSpace::Initial => &mut self.initial,
|
||||
PacketNumberSpace::Handshake => &mut self.handshake,
|
||||
PacketNumberSpace::ApplicationData => &mut self.application_data,
|
||||
}
|
||||
&mut self.spaces[space]
|
||||
}
|
||||
}
|
||||
|
||||
@ -245,6 +235,13 @@ pub struct AckToken {
|
||||
ranges: Vec<PacketRange>,
|
||||
}
|
||||
|
||||
impl AckToken {
|
||||
/// Get the space for this token.
|
||||
pub const fn space(&self) -> PacketNumberSpace {
|
||||
self.space
|
||||
}
|
||||
}
|
||||
|
||||
/// A structure that tracks what packets have been received,
|
||||
/// and what needs acknowledgement for a packet number space.
|
||||
#[derive(Debug)]
|
||||
@ -290,7 +287,12 @@ impl RecvdPackets {
|
||||
ack_frequency_seqno: 0,
|
||||
ack_delay: DEFAULT_ACK_DELAY,
|
||||
unacknowledged_count: 0,
|
||||
unacknowledged_tolerance: DEFAULT_ACK_PACKET_TOLERANCE,
|
||||
unacknowledged_tolerance: if space == PacketNumberSpace::ApplicationData {
|
||||
DEFAULT_ACK_PACKET_TOLERANCE
|
||||
} else {
|
||||
// ACK more aggressively
|
||||
0
|
||||
},
|
||||
ignore_order: false,
|
||||
ecn_count: EcnCount::default(),
|
||||
}
|
||||
@ -380,7 +382,7 @@ impl RecvdPackets {
|
||||
/// Return true if the packet was the largest received so far.
|
||||
pub fn set_received(&mut self, now: Instant, pn: PacketNumber, ack_eliciting: bool) -> bool {
|
||||
let next_in_order_pn = self.ranges.front().map_or(0, |r| r.largest + 1);
|
||||
qdebug!([self], "received {}, next: {}", pn, next_in_order_pn);
|
||||
qtrace!([self], "received {}, next: {}", pn, next_in_order_pn);
|
||||
|
||||
self.add(pn);
|
||||
self.trim_ranges();
|
||||
@ -497,6 +499,9 @@ impl RecvdPackets {
|
||||
.take(max_ranges)
|
||||
.cloned()
|
||||
.collect::<Vec<_>>();
|
||||
if ranges.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
builder.encode_varint(if self.ecn_count.is_some() {
|
||||
FRAME_TYPE_ACK_ECN
|
||||
@ -550,34 +555,25 @@ impl ::std::fmt::Display for RecvdPackets {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct AckTracker {
|
||||
/// This stores information about received packets in *reverse* order
|
||||
/// by spaces. Why reverse? Because we ultimately only want to keep
|
||||
/// `ApplicationData` and this allows us to drop other spaces easily.
|
||||
spaces: SmallVec<[RecvdPackets; 1]>,
|
||||
spaces: EnumMap<PacketNumberSpace, Option<RecvdPackets>>,
|
||||
}
|
||||
|
||||
impl AckTracker {
|
||||
pub fn drop_space(&mut self, space: PacketNumberSpace) {
|
||||
let sp = match space {
|
||||
PacketNumberSpace::Initial => self.spaces.pop(),
|
||||
PacketNumberSpace::Handshake => {
|
||||
let sp = self.spaces.pop();
|
||||
self.spaces.shrink_to_fit();
|
||||
sp
|
||||
}
|
||||
PacketNumberSpace::ApplicationData => panic!("discarding application space"),
|
||||
};
|
||||
assert_eq!(sp.unwrap().space, space, "dropping spaces out of order");
|
||||
assert_ne!(
|
||||
space,
|
||||
PacketNumberSpace::ApplicationData,
|
||||
"discarding application space"
|
||||
);
|
||||
if space == PacketNumberSpace::Handshake {
|
||||
assert!(self.spaces[PacketNumberSpace::Initial].is_none());
|
||||
}
|
||||
self.spaces[space].take();
|
||||
}
|
||||
|
||||
pub fn get_mut(&mut self, space: PacketNumberSpace) -> Option<&mut RecvdPackets> {
|
||||
self.spaces.get_mut(match space {
|
||||
PacketNumberSpace::ApplicationData => 0,
|
||||
PacketNumberSpace::Handshake => 1,
|
||||
PacketNumberSpace::Initial => 2,
|
||||
})
|
||||
self.spaces[space].as_mut()
|
||||
}
|
||||
|
||||
pub fn ack_freq(
|
||||
@ -588,37 +584,45 @@ impl AckTracker {
|
||||
ignore_order: bool,
|
||||
) {
|
||||
// Only ApplicationData ever delays ACK.
|
||||
self.get_mut(PacketNumberSpace::ApplicationData)
|
||||
.unwrap()
|
||||
.ack_freq(seqno, tolerance, delay, ignore_order);
|
||||
if let Some(space) = self.get_mut(PacketNumberSpace::ApplicationData) {
|
||||
space.ack_freq(seqno, tolerance, delay, ignore_order);
|
||||
}
|
||||
}
|
||||
|
||||
// Force an ACK to be generated immediately (a PING was received).
|
||||
pub fn immediate_ack(&mut self, now: Instant) {
|
||||
self.get_mut(PacketNumberSpace::ApplicationData)
|
||||
.unwrap()
|
||||
.immediate_ack(now);
|
||||
/// Force an ACK to be generated immediately.
|
||||
pub fn immediate_ack(&mut self, space: PacketNumberSpace, now: Instant) {
|
||||
if let Some(space) = self.get_mut(space) {
|
||||
space.immediate_ack(now);
|
||||
}
|
||||
}

    /// Determine the earliest time that an ACK might be needed.
    pub fn ack_time(&self, now: Instant) -> Option<Instant> {
        for recvd in &self.spaces {
            qtrace!("ack_time for {} = {:?}", recvd.space, recvd.ack_time());
        #[cfg(debug_assertions)]
        for (space, recvd) in &self.spaces {
            if let Some(recvd) = recvd {
                qtrace!("ack_time for {} = {:?}", space, recvd.ack_time());
            }
        }

        if self.spaces.len() == 1 {
            self.spaces[0].ack_time()
        } else {
            // Ignore any time that is in the past relative to `now`.
            // That is something of a hack, but there are cases where we can't send ACK
            // frames for all spaces, which can mean that one space is stuck in the past.
            // That isn't a problem because we guarantee that earlier spaces will always
            // be able to send ACK frames.
            self.spaces
                .iter()
                .filter_map(|recvd| recvd.ack_time().filter(|t| *t > now))
                .min()
        if self.spaces[PacketNumberSpace::Initial].is_none()
            && self.spaces[PacketNumberSpace::Handshake].is_none()
        {
            if let Some(recvd) = &self.spaces[PacketNumberSpace::ApplicationData] {
                return recvd.ack_time();
            }
        }

        // Ignore any time that is in the past relative to `now`.
        // That is something of a hack, but there are cases where we can't send ACK
        // frames for all spaces, which can mean that one space is stuck in the past.
        // That isn't a problem because we guarantee that earlier spaces will always
        // be able to send ACK frames.
        self.spaces
            .values()
            .flatten()
            .filter_map(|recvd| recvd.ack_time().filter(|t| *t > now))
            .min()
    }
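The reworked `ack_time` reduces to: if only the application-data space remains, report its deadline directly; otherwise take the earliest per-space deadline that is still in the future. A small std-only sketch of that selection over a slice of optional deadlines (a hypothetical helper, not the neqo API):

```rust
use std::time::{Duration, Instant};

/// Earliest deadline that is strictly after `now`, if any.
/// Mirrors the `.values().flatten().filter_map(..).min()` chain above,
/// but over a plain slice of optional per-space deadlines.
fn earliest_future_ack(deadlines: &[Option<Instant>], now: Instant) -> Option<Instant> {
    deadlines
        .iter()
        .flatten()             // drop spaces with no pending ACK
        .copied()
        .filter(|t| *t > now)  // ignore deadlines stuck in the past
        .min()
}

fn main() {
    let now = Instant::now();
    let deadlines = [
        None,                                  // e.g. Initial space already dropped
        Some(now),                             // not strictly in the future, ignored
        Some(now + Duration::from_millis(20)), // this one wins
        Some(now + Duration::from_millis(50)),
    ];
    assert_eq!(
        earliest_future_ack(&deadlines, now),
        Some(now + Duration::from_millis(20))
    );
}
```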

    pub fn acked(&mut self, token: &AckToken) {
@@ -645,11 +649,11 @@ impl AckTracker {
impl Default for AckTracker {
    fn default() -> Self {
        Self {
            spaces: smallvec![
                RecvdPackets::new(PacketNumberSpace::ApplicationData),
                RecvdPackets::new(PacketNumberSpace::Handshake),
                RecvdPackets::new(PacketNumberSpace::Initial),
            ],
            spaces: enum_map! {
                PacketNumberSpace::Initial => Some(RecvdPackets::new(PacketNumberSpace::Initial)),
                PacketNumberSpace::Handshake => Some(RecvdPackets::new(PacketNumberSpace::Handshake)),
                PacketNumberSpace::ApplicationData => Some(RecvdPackets::new(PacketNumberSpace::ApplicationData)),
            },
        }
    }
}
@@ -667,7 +671,7 @@ mod tests {
    };
    use crate::{
        frame::Frame,
        packet::{PacketBuilder, PacketNumber},
        packet::{PacketBuilder, PacketNumber, PacketType},
        stats::FrameStats,
    };

@@ -797,7 +801,7 @@ mod tests {
    }

    fn write_frame_at(rp: &mut RecvdPackets, now: Instant) {
        let mut builder = PacketBuilder::short(Encoder::new(), false, []);
        let mut builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);
        let mut stats = FrameStats::default();
        let mut tokens = Vec::new();
        rp.write_frame(now, RTT, &mut builder, &mut tokens, &mut stats);
@@ -942,17 +946,10 @@ mod tests {
        tracker.drop_space(PacketNumberSpace::ApplicationData);
    }

    #[test]
    #[should_panic(expected = "dropping spaces out of order")]
    fn drop_out_of_order() {
        let mut tracker = AckTracker::default();
        tracker.drop_space(PacketNumberSpace::Handshake);
    }

    #[test]
    fn drop_spaces() {
        let mut tracker = AckTracker::default();
        let mut builder = PacketBuilder::short(Encoder::new(), false, []);
        let mut builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);
        tracker
            .get_mut(PacketNumberSpace::Initial)
            .unwrap()
@@ -1017,7 +1014,7 @@ mod tests {
            .ack_time(now().checked_sub(Duration::from_millis(1)).unwrap())
            .is_some());

        let mut builder = PacketBuilder::short(Encoder::new(), false, []);
        let mut builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);
        builder.set_limit(10);

        let mut stats = FrameStats::default();
@@ -1048,7 +1045,7 @@ mod tests {
            .ack_time(now().checked_sub(Duration::from_millis(1)).unwrap())
            .is_some());

        let mut builder = PacketBuilder::short(Encoder::new(), false, []);
        let mut builder = PacketBuilder::short(Encoder::new(), false, None::<&[u8]>);
        // The code pessimistically assumes that each range needs 16 bytes to express.
        // So this won't be enough for a second range.
        builder.set_limit(RecvdPackets::USEFUL_ACK_LEN + 8);
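On the 16-bytes-per-range pessimism in the comment above: an ACK range is encoded as two QUIC variable-length integers (gap and range length), and a varint occupies at most 8 bytes (RFC 9000, Section 16), so 16 bytes is the worst case per range. An illustrative helper (not part of neqo's API) that computes varint sizes:

```rust
/// Encoded size of a QUIC variable-length integer (RFC 9000, Section 16).
/// Values up to 2^62 - 1 are representable; larger values are invalid.
fn varint_len(v: u64) -> usize {
    match v {
        0..=0x3f => 1,
        0x40..=0x3fff => 2,
        0x4000..=0x3fff_ffff => 4,
        0x4000_0000..=0x3fff_ffff_ffff_ffff => 8,
        _ => panic!("value too large for a QUIC varint"),
    }
}

fn main() {
    // Worst case for one ACK range: an 8-byte gap plus an 8-byte range length.
    let worst_case = 2 * varint_len(u64::MAX >> 2);
    assert_eq!(worst_case, 16);
    // Small ranges are much cheaper, but the writer budgets for the worst case.
    assert_eq!(varint_len(5) + varint_len(3), 2);
}
```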
@@ -1136,4 +1133,28 @@ mod tests {
        assert!(copy[PacketNumberSpace::Handshake]);
        assert!(copy[PacketNumberSpace::ApplicationData]);
    }

    #[test]
    fn from_packet_type() {
        assert_eq!(
            PacketNumberSpace::from(PacketType::Initial),
            PacketNumberSpace::Initial
        );
        assert_eq!(
            PacketNumberSpace::from(PacketType::Handshake),
            PacketNumberSpace::Handshake
        );
        assert_eq!(
            PacketNumberSpace::from(PacketType::ZeroRtt),
            PacketNumberSpace::ApplicationData
        );
        assert_eq!(
            PacketNumberSpace::from(PacketType::Short),
            PacketNumberSpace::ApplicationData
        );
        assert!(std::panic::catch_unwind(|| {
            PacketNumberSpace::from(PacketType::VersionNegotiation)
        })
        .is_err());
    }
}
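The new `from_packet_type` test fixes the mapping from packet type to packet number space: Initial and Handshake map to their own spaces, 0-RTT and 1-RTT (short header) packets share the application-data space, and types that carry no packet number (such as Version Negotiation) panic. A sketch of a `From` impl consistent with those assertions, using stand-in enums rather than neqo's actual definitions:

```rust
// Stand-in enums; the real neqo types live in neqo-transport's packet and tracking modules.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum PacketType {
    Initial,
    Handshake,
    ZeroRtt,
    Short,
    VersionNegotiation,
    Retry,
}

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum PacketNumberSpace {
    Initial,
    Handshake,
    ApplicationData,
}

impl From<PacketType> for PacketNumberSpace {
    fn from(pt: PacketType) -> Self {
        match pt {
            PacketType::Initial => Self::Initial,
            PacketType::Handshake => Self::Handshake,
            // 0-RTT and 1-RTT share the application-data packet number space.
            PacketType::ZeroRtt | PacketType::Short => Self::ApplicationData,
            // Version Negotiation and Retry carry no packet number.
            other => panic!("{other:?} has no packet number space"),
        }
    }
}

fn main() {
    assert_eq!(
        PacketNumberSpace::from(PacketType::ZeroRtt),
        PacketNumberSpace::ApplicationData
    );
}
```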
@@ -1 +1 @@
{"files":{"Cargo.toml":"2a0119d7971850169f74f1229c8cc2d9a0f69f6384ea4a1a0da4f1449574a5f2","src/lib.rs":"bf3bc79b1d799a42b73e64d2b203ce688cc0859d7afa6c66eec429ec36199ba6"},"package":null}
{"files":{"Cargo.toml":"20aadbf0239bbfb94acf4b5a5abd606bcc0956126de4251102a4357b16e7b945","src/lib.rs":"bf3bc79b1d799a42b73e64d2b203ce688cc0859d7afa6c66eec429ec36199ba6"},"package":null}
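The hunk above updates the vendored crate's `.cargo-checksum.json`; only the `Cargo.toml` digest changes because only that file changed with the version bump. Each entry is, as produced by `cargo vendor`, the hex-encoded SHA-256 of the file's contents. A small sketch of recomputing such a digest with the `sha2` crate (the path is illustrative):

```rust
use sha2::{Digest, Sha256};
use std::fs;

fn main() -> std::io::Result<()> {
    // Illustrative path; substitute the vendored file you want to check.
    let bytes = fs::read("third_party/rust/neqo-udp/Cargo.toml")?;
    let digest = Sha256::digest(&bytes);
    // Hex-encode the digest the way .cargo-checksum.json stores it.
    let hex: String = digest.iter().map(|b| format!("{b:02x}")).collect();
    println!("{hex}");
    Ok(())
}
```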
2
third_party/rust/neqo-udp/Cargo.toml
vendored
@@ -18,7 +18,7 @@ bench = []
edition = "2021"
rust-version = "1.76.0"
name = "neqo-udp"
version = "0.8.2"
version = "0.9.0"
authors = ["The Neqo Authors <necko@mozilla.com>"]
build = false
autobins = false