Bug 1895319 - vendor neqo-bin, r=necko-reviewers,supply-chain-reviewers,valentin

Differential Revision: https://phabricator.services.mozilla.com/D210624
Kershaw Chang 2024-05-21 09:34:48 +00:00
parent 53ac4125c5
commit fbfdcf7dcb
46 changed files with 7906 additions and 0 deletions

Cargo.lock (generated)

@@ -792,6 +792,16 @@ dependencies = [
"clap_derive",
]
[[package]]
name = "clap-verbosity-flag"
version = "2.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bb9b20c0dd58e4c2e991c8d203bbeb76c11304d1011659686b5b644bc29aa478"
dependencies = [
"clap",
"log",
]
[[package]]
name = "clap_builder"
version = "4.4.5"
@@ -2727,6 +2737,7 @@ dependencies = [
"mio 0.6.23",
"mio-extras",
"mozilla-central-workspace-hack",
"neqo-bin",
"neqo-common",
"neqo-crypto",
"neqo-http3",
@@ -4016,6 +4027,28 @@ dependencies = [
"unicode-xid",
]
[[package]]
name = "neqo-bin"
version = "0.7.8"
source = "git+https://github.com/mozilla/neqo?tag=v0.7.8#a71e43dacf8fae41e5aa30cf95b2e826f63a7466"
dependencies = [
"clap",
"clap-verbosity-flag",
"futures",
"hex",
"log",
"neqo-common",
"neqo-crypto",
"neqo-http3",
"neqo-qpack",
"neqo-transport",
"qlog",
"quinn-udp",
"regex",
"tokio",
"url",
]
[[package]]
name = "neqo-common"
version = "0.7.8"
@@ -4737,6 +4770,19 @@ version = "1.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0"
[[package]]
name = "quinn-udp"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb7ad7bc932e4968523fa7d9c320ee135ff779de720e9350fee8728838551764"
dependencies = [
"libc",
"once_cell",
"socket2 0.5.7",
"tracing",
"windows-sys 0.52.0",
]
[[package]]
name = "quote"
version = "1.0.35"
@@ -5867,9 +5913,21 @@ dependencies = [
"num_cpus",
"pin-project-lite",
"socket2 0.4.999",
"tokio-macros",
"windows-sys 0.48.999",
]
[[package]]
name = "tokio-macros"
version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "tokio-stream"
version = "0.1.12"


@@ -6,6 +6,7 @@ edition = "2018"
license = "MPL-2.0"
[dependencies]
neqo-bin = { tag = "v0.7.8", git = "https://github.com/mozilla/neqo" }
neqo-transport = { tag = "v0.7.8", git = "https://github.com/mozilla/neqo" }
neqo-common = { tag = "v0.7.8", git = "https://github.com/mozilla/neqo" }
neqo-http3 = { tag = "v0.7.8", git = "https://github.com/mozilla/neqo" }


@@ -968,6 +968,11 @@ who = "Mike Hommey <mh+mozilla@glandium.org>"
criteria = "safe-to-deploy"
delta = "1.4.0 -> 1.6.0"
[[audits.clap-verbosity-flag]]
who = "Kershaw Chang <kershaw@mozilla.com>"
criteria = "safe-to-run"
version = "2.2.0"
[[audits.clap_lex]]
who = "Mike Hommey <mh+mozilla@glandium.org>"
criteria = "safe-to-deploy"
@@ -3313,6 +3318,11 @@ who = "Kershaw Chang <kershaw@mozilla.com>"
criteria = "safe-to-deploy"
delta = "0.12.0 -> 0.13.0"
[[audits.quinn-udp]]
who = "Kershaw Chang <kershaw@mozilla.com>"
criteria = "safe-to-run"
version = "0.5.0"
[[audits.quote]]
who = "Nika Layzell <nika@thelayzells.com>"
criteria = "safe-to-deploy"


@@ -1343,6 +1343,12 @@ criteria = "safe-to-run"
version = "1.29.1"
aggregated-from = "https://chromium.googlesource.com/chromiumos/third_party/rust_crates/+/refs/heads/main/cargo-vet/audits.toml?format=TEXT"
[[audits.google.audits.tokio-macros]]
who = "Vovo Yang <vovoy@google.com>"
criteria = "safe-to-run"
version = "2.1.0"
aggregated-from = "https://chromium.googlesource.com/chromiumos/third_party/rust_crates/+/refs/heads/main/cargo-vet/audits.toml?format=TEXT"
[[audits.google.audits.tokio-stream]]
who = "David Koloski <dkoloski@google.com>"
criteria = "safe-to-deploy"


@@ -0,0 +1 @@
{"files":{"Cargo.lock":"7b363beb66d859673c11fa16d372c6941760520c0ade16ff068830fb956212ff","Cargo.toml":"3115b30070610abe249bc22470d2a4cf423c9d1e234b522f54b42cbaeecc69e3","LICENSE-APACHE":"c6596eb7be8581c18be736c846fb9173b69eccf6ef94c5135893ec56bd92ba08","LICENSE-MIT":"6efb0476a1cc085077ed49357026d8c173bf33017278ef440f222fb9cbcb66e6","README.md":"24afa7b618ebb5067ac10cece94ece67ecaadb4d756158b46a7931f97b815e63","examples/log.rs":"c1983f539e9b81ef5cacc8983f94816a6b0db935fca651d189af7308fe7f144e","examples/tracing.rs":"cfdb7f630ff9056360d542c4f8dc2135af4db072bb1cb97dc21902517d35dcf4","src/lib.rs":"bfd69fddaca38c5577705048a4ba95efdd314a9fbda66dc5531e6bd2f9d2c11d"},"package":"bb9b20c0dd58e4c2e991c8d203bbeb76c11304d1011659686b5b644bc29aa478"}

third_party/rust/clap-verbosity-flag/Cargo.lock (generated, vendored)

@@ -0,0 +1,455 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "aho-corasick"
version = "0.7.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f"
dependencies = [
"memchr",
]
[[package]]
name = "ansi_term"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2"
dependencies = [
"winapi",
]
[[package]]
name = "anstream"
version = "0.6.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e2e1ebcb11de5c03c67de28a7df593d32191b44939c482e97702baaaa6ab6a5"
dependencies = [
"anstyle",
"anstyle-parse",
"anstyle-query",
"anstyle-wincon",
"colorchoice",
"utf8parse",
]
[[package]]
name = "anstyle"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87"
[[package]]
name = "anstyle-parse"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c"
dependencies = [
"utf8parse",
]
[[package]]
name = "anstyle-query"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648"
dependencies = [
"windows-sys",
]
[[package]]
name = "anstyle-wincon"
version = "3.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7"
dependencies = [
"anstyle",
"windows-sys",
]
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "clap"
version = "4.4.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e578d6ec4194633722ccf9544794b71b1385c3c027efe0c55db226fc880865c"
dependencies = [
"clap_builder",
"clap_derive",
]
[[package]]
name = "clap-verbosity-flag"
version = "2.2.0"
dependencies = [
"clap",
"env_logger",
"log",
"tracing",
"tracing-log 0.2.0",
"tracing-subscriber",
]
[[package]]
name = "clap_builder"
version = "4.4.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4df4df40ec50c46000231c914968278b1eb05098cf8f1b3a518a95030e71d1c7"
dependencies = [
"anstyle",
"clap_lex",
]
[[package]]
name = "clap_derive"
version = "4.4.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf9804afaaf59a91e75b022a30fb7229a7901f60c755489cc61c9b423b836442"
dependencies = [
"heck",
"proc-macro2",
"quote",
"syn 2.0.44",
]
[[package]]
name = "clap_lex"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1"
[[package]]
name = "colorchoice"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7"
[[package]]
name = "env_filter"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a009aa4810eb158359dda09d0c87378e4bbb89b5a801f016885a4707ba24f7ea"
dependencies = [
"log",
"regex",
]
[[package]]
name = "env_logger"
version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05e7cf40684ae96ade6232ed84582f40ce0a66efcd43a5117aef610534f8e0b8"
dependencies = [
"anstream",
"anstyle",
"env_filter",
"humantime",
"log",
]
[[package]]
name = "heck"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9"
[[package]]
name = "humantime"
version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
[[package]]
name = "lazy_static"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "log"
version = "0.4.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
dependencies = [
"cfg-if",
]
[[package]]
name = "memchr"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
[[package]]
name = "once_cell"
version = "1.18.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d"
[[package]]
name = "pin-project-lite"
version = "0.2.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116"
[[package]]
name = "proc-macro2"
version = "1.0.73"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2dd5e8a1f1029c43224ad5898e50140c2aebb1705f19e67c918ebf5b9e797fe1"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.34"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "22a37c9326af5ed140c86a46655b5278de879853be5573c01df185b6f49a580a"
dependencies = [
"proc-macro2",
]
[[package]]
name = "regex"
version = "1.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d83f127d94bdbcda4c8cc2e50f6f84f4b611f69c902699ca385a39c3a75f9ff1"
dependencies = [
"aho-corasick",
"memchr",
"regex-syntax",
]
[[package]]
name = "regex-syntax"
version = "0.6.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49b3de9ec5dc0a3417da371aab17d729997c15010e7fd24ff707773a33bddb64"
[[package]]
name = "sharded-slab"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31"
dependencies = [
"lazy_static",
]
[[package]]
name = "smallvec"
version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83"
[[package]]
name = "syn"
version = "1.0.96"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0748dd251e24453cb8717f0354206b91557e4ec8703673a4b30208f2abaf1ebf"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "syn"
version = "2.0.44"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "92d27c2c202598d05175a6dd3af46824b7f747f8d8e9b14c623f19fa5069735d"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "thread_local"
version = "1.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180"
dependencies = [
"once_cell",
]
[[package]]
name = "tracing"
version = "0.1.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a400e31aa60b9d44a52a8ee0343b5b18566b03a8321e0d321f695cf56e940160"
dependencies = [
"cfg-if",
"pin-project-lite",
"tracing-attributes",
"tracing-core",
]
[[package]]
name = "tracing-attributes"
version = "0.1.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cc6b8ad3567499f98a1db7a752b07a7c8c7c7c34c332ec00effb2b0027974b7c"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.96",
]
[[package]]
name = "tracing-core"
version = "0.1.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54"
dependencies = [
"once_cell",
"valuable",
]
[[package]]
name = "tracing-log"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922"
dependencies = [
"lazy_static",
"log",
"tracing-core",
]
[[package]]
name = "tracing-log"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3"
dependencies = [
"log",
"once_cell",
"tracing-core",
]
[[package]]
name = "tracing-subscriber"
version = "0.3.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4bc28f93baff38037f64e6f43d34cfa1605f27a49c34e8a04c5e78b0babf2596"
dependencies = [
"ansi_term",
"sharded-slab",
"smallvec",
"thread_local",
"tracing-core",
"tracing-log 0.1.3",
]
[[package]]
name = "unicode-ident"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d22af068fba1eb5edcb4aea19d382b2a3deb4c8f9d475c589b6ada9e0fd493ee"
[[package]]
name = "utf8parse"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a"
[[package]]
name = "valuable"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d"
[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
"winapi-i686-pc-windows-gnu",
"winapi-x86_64-pc-windows-gnu",
]
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
[[package]]
name = "windows-sys"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
dependencies = [
"windows-targets",
]
[[package]]
name = "windows-targets"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd"
dependencies = [
"windows_aarch64_gnullvm",
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_gnullvm",
"windows_x86_64_msvc",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea"
[[package]]
name = "windows_aarch64_msvc"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef"
[[package]]
name = "windows_i686_gnu"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313"
[[package]]
name = "windows_i686_msvc"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a"
[[package]]
name = "windows_x86_64_gnu"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e"
[[package]]
name = "windows_x86_64_msvc"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04"


@@ -0,0 +1,107 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2021"
rust-version = "1.73"
name = "clap-verbosity-flag"
version = "2.2.0"
authors = ["Pascal Hertleif <killercup@gmail.com>"]
include = [
"build.rs",
"src/**/*",
"Cargo.toml",
"Cargo.lock",
"LICENSE*",
"README.md",
"benches/**/*",
"examples/**/*",
]
description = "Easily add a `--verbose` flag to CLIs using Clap"
readme = "README.md"
license = "MIT OR Apache-2.0"
repository = "https://github.com/clap-rs/clap-verbosity-flag"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = [
"--cfg",
"docsrs",
]
[[package.metadata.release.pre-release-replacements]]
file = "CHANGELOG.md"
min = 1
replace = "{{version}}"
search = "Unreleased"
[[package.metadata.release.pre-release-replacements]]
exactly = 1
file = "CHANGELOG.md"
replace = "...{{tag_name}}"
search = '\.\.\.HEAD'
[[package.metadata.release.pre-release-replacements]]
file = "CHANGELOG.md"
min = 1
replace = "{{date}}"
search = "ReleaseDate"
[[package.metadata.release.pre-release-replacements]]
exactly = 1
file = "CHANGELOG.md"
replace = """
<!-- next-header -->
## [Unreleased] - ReleaseDate
"""
search = "<!-- next-header -->"
[[package.metadata.release.pre-release-replacements]]
exactly = 1
file = "CHANGELOG.md"
replace = """
<!-- next-url -->
[Unreleased]: https://github.com/clap-rs/clap-verbosity-flag/compare/{{tag_name}}...HEAD"""
search = "<!-- next-url -->"
[dependencies.clap]
version = "4.0.0"
features = [
"std",
"derive",
]
default-features = false
[dependencies.log]
version = "0.4.1"
[dev-dependencies.clap]
version = "4.4.18"
features = [
"help",
"usage",
]
default-features = false
[dev-dependencies.env_logger]
version = "0.11.1"
[dev-dependencies.tracing]
version = "0.1"
[dev-dependencies.tracing-log]
version = "0.2"
[dev-dependencies.tracing-subscriber]
version = "0.3"
[badges.codecov]
repository = "clap-rs/clap-verbosity-flag"


@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -0,0 +1,19 @@
Copyright (c) Individual contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@@ -0,0 +1,46 @@
# clap-verbosity-flag for `log`
[![Documentation](https://img.shields.io/badge/docs-master-blue.svg)][Documentation]
![License](https://img.shields.io/crates/l/clap-verbosity-flag.svg)
[![crates.io](https://img.shields.io/crates/v/clap-verbosity-flag.svg)][Crates.io]
[Crates.io]: https://crates.io/crates/clap-verbosity-flag
[Documentation]: https://docs.rs/clap-verbosity-flag/
Easily add a `--verbose` flag to CLIs using Clap
## Examples
```rust
use clap::Parser;
// ...
#[derive(Debug, Parser)]
struct Cli {
#[command(flatten)]
verbose: clap_verbosity_flag::Verbosity,
}
```
By default, it'll only report errors.
- `-q` silences output
- `-v` shows warnings
- `-vv` shows info
- `-vvv` shows debug
- `-vvvv` shows trace
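A minimal sketch of wiring these flags into a logger, mirroring the bundled
`examples/log.rs` (which uses the crate's `env_logger` dev-dependency):

```rust
use clap::Parser;
use clap_verbosity_flag::Verbosity;

#[derive(Debug, Parser)]
struct Cli {
    #[command(flatten)]
    verbose: Verbosity,
}

fn main() {
    let cli = Cli::parse();
    // -v / -q move the filter up or down from the default (errors only).
    env_logger::Builder::new()
        .filter_level(cli.verbose.log_level_filter())
        .init();
    log::warn!("printed only with -v or higher");
}
```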
## License
Licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or <http://www.apache.org/licenses/LICENSE-2.0>)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or <http://opensource.org/licenses/MIT>)
at your option.
### Contribution
Unless you explicitly state otherwise, any contribution intentionally
submitted for inclusion in the work by you, as defined in the Apache-2.0
license, shall be dual licensed as above, without any additional terms or
conditions.


@@ -0,0 +1,23 @@
use clap::Parser;
use clap_verbosity_flag::Verbosity;
/// Foo
#[derive(Debug, Parser)]
struct Cli {
#[command(flatten)]
verbose: Verbosity,
}
fn main() {
let cli = Cli::parse();
env_logger::Builder::new()
.filter_level(cli.verbose.log_level_filter())
.init();
log::error!("Engines exploded");
log::warn!("Engines smoking");
log::info!("Engines exist");
log::debug!("Engine temperature is 200 degrees");
log::trace!("Engine subsection is 300 degrees");
}


@@ -0,0 +1,24 @@
use clap::Parser;
use clap_verbosity_flag::Verbosity;
use tracing_log::AsTrace;
/// Foo
#[derive(Debug, Parser)]
struct Cli {
#[command(flatten)]
verbose: Verbosity,
}
fn main() {
let cli = Cli::parse();
tracing_subscriber::fmt()
.with_max_level(cli.verbose.log_level_filter().as_trace())
.init();
tracing::error!("Engines exploded");
tracing::warn!("Engines smoking");
tracing::info!("Engines exist");
tracing::debug!("Engine temperature is 200 degrees");
tracing::trace!("Engine subsection is 300 degrees");
}


@@ -0,0 +1,228 @@
//! Control `log` level with a `--verbose` flag for your CLI
//!
//! # Examples
//!
//! To get `--quiet` and `--verbose` flags through your entire program, just `flatten`
//! [`Verbosity`]:
//! ```rust,no_run
//! # use clap::Parser;
//! # use clap_verbosity_flag::Verbosity;
//! #
//! # /// Le CLI
//! # #[derive(Debug, Parser)]
//! # struct Cli {
//! #[command(flatten)]
//! verbose: Verbosity,
//! # }
//! ```
//!
//! You can then use this to configure your logger:
//! ```rust,no_run
//! # use clap::Parser;
//! # use clap_verbosity_flag::Verbosity;
//! #
//! # /// Le CLI
//! # #[derive(Debug, Parser)]
//! # struct Cli {
//! # #[command(flatten)]
//! # verbose: Verbosity,
//! # }
//! let cli = Cli::parse();
//! env_logger::Builder::new()
//! .filter_level(cli.verbose.log_level_filter())
//! .init();
//! ```
//!
//! By default, this will only report errors.
//! - `-q` silences output
//! - `-v` shows warnings
//! - `-vv` shows info
//! - `-vvv` shows debug
//! - `-vvvv` shows trace
//!
//! You can also customize the default logging level:
//! ```rust,no_run
//! # use clap::Parser;
//! use clap_verbosity_flag::{Verbosity, InfoLevel};
//!
//! /// Le CLI
//! #[derive(Debug, Parser)]
//! struct Cli {
//! #[command(flatten)]
//! verbose: Verbosity<InfoLevel>,
//! }
//! ```
//!
//! Or implement our [`LogLevel`] trait to customize the default log level and help output.
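//!
//! For instance, a minimal sketch of a custom level similar to the built-in
//! `WarnLevel`, but with custom `--verbose` help text (illustrative only;
//! `QuietByDefault` is not part of this crate):
//! ```rust,no_run
//! # use clap::Parser;
//! use clap_verbosity_flag::{Level, LogLevel, Verbosity};
//!
//! #[derive(Copy, Clone, Debug, Default)]
//! struct QuietByDefault;
//!
//! impl LogLevel for QuietByDefault {
//!     // Default to warnings; `-v` adds detail, `-q` silences output.
//!     fn default() -> Option<Level> {
//!         Some(Level::Warn)
//!     }
//!
//!     fn verbose_help() -> Option<&'static str> {
//!         Some("Print additional detail (repeat for more)")
//!     }
//! }
//!
//! #[derive(Debug, Parser)]
//! struct Cli {
//!     #[command(flatten)]
//!     verbose: Verbosity<QuietByDefault>,
//! }
//! ```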
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
pub use log::Level;
pub use log::LevelFilter;
/// Logging flags to `#[command(flatten)]` into your CLI
#[derive(clap::Args, Debug, Clone, Default)]
#[command(about = None, long_about = None)]
pub struct Verbosity<L: LogLevel = ErrorLevel> {
#[arg(
long,
short = 'v',
action = clap::ArgAction::Count,
global = true,
help = L::verbose_help(),
long_help = L::verbose_long_help(),
)]
verbose: u8,
#[arg(
long,
short = 'q',
action = clap::ArgAction::Count,
global = true,
help = L::quiet_help(),
long_help = L::quiet_long_help(),
conflicts_with = "verbose",
)]
quiet: u8,
#[arg(skip)]
phantom: std::marker::PhantomData<L>,
}
impl<L: LogLevel> Verbosity<L> {
/// Create a new verbosity instance by explicitly setting the values
pub fn new(verbose: u8, quiet: u8) -> Self {
Verbosity {
verbose,
quiet,
phantom: std::marker::PhantomData,
}
}
/// Whether any verbosity flags (either `--verbose` or `--quiet`)
/// are present on the command line.
pub fn is_present(&self) -> bool {
self.verbose != 0 || self.quiet != 0
}
/// Get the log level.
///
/// `None` means all output is disabled.
pub fn log_level(&self) -> Option<log::Level> {
level_enum(self.verbosity())
}
/// Get the log level filter.
pub fn log_level_filter(&self) -> log::LevelFilter {
level_enum(self.verbosity())
.map(|l| l.to_level_filter())
.unwrap_or(log::LevelFilter::Off)
}
/// If the user requested complete silence (i.e. not just no-logging).
pub fn is_silent(&self) -> bool {
self.log_level().is_none()
}
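// Start from the default level's numeric value, add one step per `-v`
// and subtract one per `-q`; negative values map to `None` (output off).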
fn verbosity(&self) -> i8 {
level_value(L::default()) - (self.quiet as i8) + (self.verbose as i8)
}
}
fn level_value(level: Option<log::Level>) -> i8 {
match level {
None => -1,
Some(log::Level::Error) => 0,
Some(log::Level::Warn) => 1,
Some(log::Level::Info) => 2,
Some(log::Level::Debug) => 3,
Some(log::Level::Trace) => 4,
}
}
fn level_enum(verbosity: i8) -> Option<log::Level> {
match verbosity {
std::i8::MIN..=-1 => None,
0 => Some(log::Level::Error),
1 => Some(log::Level::Warn),
2 => Some(log::Level::Info),
3 => Some(log::Level::Debug),
4..=std::i8::MAX => Some(log::Level::Trace),
}
}
use std::fmt;
impl<L: LogLevel> fmt::Display for Verbosity<L> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.verbosity())
}
}
/// Customize the default log-level and associated help
pub trait LogLevel {
fn default() -> Option<log::Level>;
fn verbose_help() -> Option<&'static str> {
Some("Increase logging verbosity")
}
fn verbose_long_help() -> Option<&'static str> {
None
}
fn quiet_help() -> Option<&'static str> {
Some("Decrease logging verbosity")
}
fn quiet_long_help() -> Option<&'static str> {
None
}
}
/// Default to [`log::Level::Error`]
#[derive(Copy, Clone, Debug, Default)]
pub struct ErrorLevel;
impl LogLevel for ErrorLevel {
fn default() -> Option<log::Level> {
Some(log::Level::Error)
}
}
/// Default to [`log::Level::Warn`]
#[derive(Copy, Clone, Debug, Default)]
pub struct WarnLevel;
impl LogLevel for WarnLevel {
fn default() -> Option<log::Level> {
Some(log::Level::Warn)
}
}
/// Default to [`log::Level::Info`]
#[derive(Copy, Clone, Debug, Default)]
pub struct InfoLevel;
impl LogLevel for InfoLevel {
fn default() -> Option<log::Level> {
Some(log::Level::Info)
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn verify_app() {
#[derive(Debug, clap::Parser)]
struct Cli {
#[command(flatten)]
verbose: Verbosity,
}
use clap::CommandFactory;
Cli::command().debug_assert()
}
}


@@ -0,0 +1 @@
{"files":{"Cargo.toml":"a5c6890e9bcc19edef2f6722a17a0e8a88d102de83ffc0878f19054e730d6061","benches/main.rs":"0835db58b265c529dcde6b208942b60e932ac0665e7f303cf60406ff9994dd45","src/bin/client.rs":"db77efd75dc0745b6dd983ab8fa3bc8f5f9111967f0d90d23cb19140a940246d","src/bin/server.rs":"2f7ab3c7a98117bd162e6fd07abef1d21791d1bb240db3aae61afa6ff72df83a","src/client/http09.rs":"b13eda497821c932f60a45af304e4d3769d76588edfe6f940ba6b8f87dc7f96c","src/client/http3.rs":"66a15d176b98528277c2158b4e6df2e52715312180222ad279809868b7aa0d2a","src/client/mod.rs":"bef0d7874dd6de84e3320d877ab098889037da1fe3610d82365f9e66c73d939c","src/lib.rs":"77d01388ea8bf076dca5b67c8003622868a0d37af5bbf9deb50d41c971eb579b","src/server/http09.rs":"64a318ee47ea9c2f8454e0ef809f50aaffadf5c3505086b0aa2d42c6957ef637","src/server/http3.rs":"dc9f71f964574fae7e83c49184df8cd76f9c1c019ca8592941acf093b9393efc","src/server/mod.rs":"eeabaed415b7c9eebc9b3bae255963c1c3407ae5ef30abefc29e7d26f649219a","src/udp.rs":"d914b6cf1dda149202c0499182f646f38244376903ee109c829de58c1a12132c"},"package":null}

third_party/rust/neqo-bin/Cargo.toml (vendored)

@@ -0,0 +1,132 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2021"
rust-version = "1.76.0"
name = "neqo-bin"
version = "0.7.8"
authors = ["The Neqo Authors <necko@mozilla.com>"]
description = "A basic QUIC HTTP/0.9 and HTTP/3 client and server."
homepage = "https://github.com/mozilla/neqo/"
license = "MIT OR Apache-2.0"
repository = "https://github.com/mozilla/neqo/"
[lib]
bench = false
[[bin]]
name = "neqo-client"
path = "src/bin/client.rs"
bench = false
[[bin]]
name = "neqo-server"
path = "src/bin/server.rs"
bench = false
[[bench]]
name = "main"
harness = false
required-features = ["bench"]
[dependencies.clap]
version = "4.4"
features = [
"std",
"help",
"usage",
"error-context",
"suggestions",
"derive",
]
default-features = false
[dependencies.clap-verbosity-flag]
version = "2.2"
default-features = false
[dependencies.futures]
version = "0.3"
features = ["alloc"]
default-features = false
[dependencies.hex]
version = "0.4"
features = ["std"]
default-features = false
[dependencies.log]
version = "0.4"
default-features = false
[dependencies.neqo-common]
path = "./../neqo-common"
[dependencies.neqo-crypto]
path = "./../neqo-crypto"
[dependencies.neqo-http3]
path = "./../neqo-http3"
[dependencies.neqo-qpack]
path = "./../neqo-qpack"
[dependencies.neqo-transport]
path = "./../neqo-transport"
[dependencies.qlog]
version = "0.13"
default-features = false
[dependencies.quinn-udp]
version = "0.5.0"
default-features = false
[dependencies.regex]
version = "1.9"
features = ["unicode-perl"]
default-features = false
[dependencies.tokio]
version = "1"
features = [
"net",
"time",
"macros",
"rt",
"rt-multi-thread",
]
default-features = false
[dependencies.url]
version = "2.5"
default-features = false
[dev-dependencies.criterion]
version = "0.5"
features = [
"html_reports",
"async_tokio",
]
default-features = false
[dev-dependencies.tokio]
version = "1"
features = ["sync"]
default-features = false
[features]
bench = []
[lints.clippy.pedantic]
level = "warn"
priority = -1


@@ -0,0 +1,86 @@
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::{path::PathBuf, str::FromStr};
use criterion::{criterion_group, criterion_main, BatchSize, Criterion, Throughput};
use neqo_bin::{client, server};
use tokio::runtime::Runtime;
struct Benchmark {
name: String,
requests: Vec<u64>,
sample_size: Option<usize>,
}
fn transfer(c: &mut Criterion) {
neqo_common::log::init(Some(log::LevelFilter::Off));
neqo_crypto::init_db(PathBuf::from_str("../test-fixture/db").unwrap()).unwrap();
let done_sender = spawn_server();
for Benchmark {
name,
requests,
sample_size,
} in [
Benchmark {
name: "1-conn/1-100mb-resp (aka. Download)".to_string(),
requests: vec![100 * 1024 * 1024],
sample_size: Some(10),
},
Benchmark {
name: "1-conn/10_000-parallel-1b-resp (aka. RPS)".to_string(),
requests: vec![1; 10_000],
sample_size: None,
},
Benchmark {
name: "1-conn/1-1b-resp (aka. HPS)".to_string(),
requests: vec![1; 1],
sample_size: None,
},
] {
let mut group = c.benchmark_group(name);
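// A single large request is measured as bytes transferred; batches of
// 1-byte requests are measured as elements (requests) per iteration.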
group.throughput(if requests[0] > 1 {
assert_eq!(requests.len(), 1);
Throughput::Bytes(requests[0])
} else {
Throughput::Elements(requests.len() as u64)
});
if let Some(size) = sample_size {
group.sample_size(size);
}
group.bench_function("client", |b| {
b.to_async(Runtime::new().unwrap()).iter_batched(
|| client::client(client::Args::new(&requests)),
|client| async move {
client.await.unwrap();
},
BatchSize::PerIteration,
);
});
group.finish();
}
done_sender.send(()).unwrap();
}
fn spawn_server() -> tokio::sync::oneshot::Sender<()> {
let (done_sender, mut done_receiver) = tokio::sync::oneshot::channel();
std::thread::spawn(move || {
Runtime::new().unwrap().block_on(async {
let mut server = Box::pin(server::server(server::Args::default()));
tokio::select! {
_ = &mut done_receiver => {}
res = &mut server => panic!("expect server not to terminate: {res:?}"),
}
});
});
done_sender
}
criterion_group!(benches, transfer);
criterion_main!(benches);


@@ -0,0 +1,14 @@
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use clap::Parser;
#[tokio::main]
async fn main() -> Result<(), neqo_bin::client::Error> {
let args = neqo_bin::client::Args::parse();
neqo_bin::client::client(args).await
}


@@ -0,0 +1,14 @@
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use clap::Parser;
#[tokio::main]
async fn main() -> Result<(), neqo_bin::server::Error> {
let args = neqo_bin::server::Args::parse();
neqo_bin::server::server(args).await
}


@@ -0,0 +1,316 @@
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! An [HTTP 0.9](https://www.w3.org/Protocols/HTTP/AsImplemented.html) client implementation.
use std::{
cell::RefCell,
collections::{HashMap, VecDeque},
fs::File,
io::{BufWriter, Write},
net::SocketAddr,
path::PathBuf,
rc::Rc,
time::Instant,
};
use neqo_common::{event::Provider, qdebug, qinfo, qwarn, Datagram};
use neqo_crypto::{AuthenticationStatus, ResumptionToken};
use neqo_transport::{
CloseReason, Connection, ConnectionEvent, EmptyConnectionIdGenerator, Error, Output, State,
StreamId, StreamType,
};
use url::Url;
use super::{get_output_file, qlog_new, Args, CloseState, Res};
pub struct Handler<'a> {
streams: HashMap<StreamId, Option<BufWriter<File>>>,
url_queue: VecDeque<Url>,
all_paths: Vec<PathBuf>,
args: &'a Args,
token: Option<ResumptionToken>,
needs_key_update: bool,
}
impl<'a> super::Handler for Handler<'a> {
type Client = Connection;
fn handle(&mut self, client: &mut Self::Client) -> Res<bool> {
while let Some(event) = client.next_event() {
if self.needs_key_update {
match client.initiate_key_update() {
Ok(()) => {
qdebug!("Keys updated");
self.needs_key_update = false;
self.download_urls(client);
}
Err(neqo_transport::Error::KeyUpdateBlocked) => (),
Err(e) => return Err(e.into()),
}
}
match event {
ConnectionEvent::AuthenticationNeeded => {
client.authenticated(AuthenticationStatus::Ok, Instant::now());
}
ConnectionEvent::RecvStreamReadable { stream_id } => {
self.read(client, stream_id)?;
}
ConnectionEvent::SendStreamWritable { stream_id } => {
qdebug!("stream {stream_id} writable");
}
ConnectionEvent::SendStreamComplete { stream_id } => {
qdebug!("stream {stream_id} complete");
}
ConnectionEvent::SendStreamCreatable { stream_type } => {
qdebug!("stream {stream_type:?} creatable");
if stream_type == StreamType::BiDi {
self.download_urls(client);
}
}
ConnectionEvent::StateChange(
State::WaitInitial | State::Handshaking | State::Connected,
) => {
qdebug!("{event:?}");
self.download_urls(client);
}
ConnectionEvent::ResumptionToken(token) => {
self.token = Some(token);
}
_ => {
qwarn!("Unhandled event {event:?}");
}
}
}
if !self.streams.is_empty() || !self.url_queue.is_empty() {
return Ok(false);
}
if self.args.resume && self.token.is_none() {
let Some(token) = client.take_resumption_token(Instant::now()) else {
return Ok(false);
};
self.token = Some(token);
}
Ok(true)
}
fn take_token(&mut self) -> Option<ResumptionToken> {
self.token.take()
}
}
pub(crate) fn create_client(
args: &Args,
local_addr: SocketAddr,
remote_addr: SocketAddr,
hostname: &str,
resumption_token: Option<ResumptionToken>,
) -> Res<Connection> {
let alpn = match args.shared.alpn.as_str() {
"hq-29" | "hq-30" | "hq-31" | "hq-32" => args.shared.alpn.as_str(),
_ => "hq-interop",
};
let mut client = Connection::new_client(
hostname,
&[alpn],
Rc::new(RefCell::new(EmptyConnectionIdGenerator::default())),
local_addr,
remote_addr,
args.shared.quic_parameters.get(alpn),
Instant::now(),
)?;
if let Some(tok) = resumption_token {
client.enable_resumption(Instant::now(), tok)?;
}
let ciphers = args.get_ciphers();
if !ciphers.is_empty() {
client.set_ciphers(&ciphers)?;
}
client.set_qlog(qlog_new(args, hostname, client.odcid().unwrap())?);
Ok(client)
}
impl TryFrom<&State> for CloseState {
type Error = CloseReason;
fn try_from(value: &State) -> Result<Self, Self::Error> {
let (state, error) = match value {
State::Closing { error, .. } | State::Draining { error, .. } => {
(CloseState::Closing, error)
}
State::Closed(error) => (CloseState::Closed, error),
_ => return Ok(CloseState::NotClosing),
};
if error.is_error() {
Err(error.clone())
} else {
Ok(state)
}
}
}
impl super::Client for Connection {
fn process_output(&mut self, now: Instant) -> Output {
self.process_output(now)
}
fn process_multiple_input<'a, I>(&mut self, dgrams: I, now: Instant)
where
I: IntoIterator<Item = &'a Datagram>,
{
self.process_multiple_input(dgrams, now);
}
fn close<S>(&mut self, now: Instant, app_error: neqo_transport::AppError, msg: S)
where
S: AsRef<str> + std::fmt::Display,
{
if !self.state().closed() {
self.close(now, app_error, msg);
}
}
fn is_closed(&self) -> Result<CloseState, CloseReason> {
self.state().try_into()
}
fn stats(&self) -> neqo_transport::Stats {
self.stats()
}
fn has_events(&self) -> bool {
neqo_common::event::Provider::has_events(self)
}
}
impl<'b> Handler<'b> {
pub fn new(url_queue: VecDeque<Url>, args: &'b Args) -> Self {
Self {
streams: HashMap::new(),
url_queue,
all_paths: Vec::new(),
args,
token: None,
needs_key_update: args.key_update,
}
}
fn download_urls(&mut self, client: &mut Connection) {
loop {
if self.url_queue.is_empty() {
break;
}
if self.streams.len() >= self.args.concurrency {
break;
}
if !self.download_next(client) {
break;
}
}
}
fn download_next(&mut self, client: &mut Connection) -> bool {
if self.needs_key_update {
qdebug!("Deferring requests until after first key update");
return false;
}
let url = self
.url_queue
.pop_front()
.expect("download_next called with empty queue");
match client.stream_create(StreamType::BiDi) {
Ok(client_stream_id) => {
qinfo!("Created stream {client_stream_id} for {url}");
let req = format!("GET {}\r\n", url.path());
_ = client
.stream_send(client_stream_id, req.as_bytes())
.unwrap();
client.stream_close_send(client_stream_id).unwrap();
let out_file = get_output_file(&url, &self.args.output_dir, &mut self.all_paths);
self.streams.insert(client_stream_id, out_file);
true
}
Err(e @ (Error::StreamLimitError | Error::ConnectionState)) => {
qwarn!("Cannot create stream {e:?}");
self.url_queue.push_front(url);
false
}
Err(e) => {
panic!("Error creating stream {e:?}");
}
}
}
/// Read and maybe print received data from a stream.
// Returns bool: was fin received?
fn read_from_stream(
client: &mut Connection,
stream_id: StreamId,
output_read_data: bool,
maybe_out_file: &mut Option<BufWriter<File>>,
) -> Res<bool> {
let mut data = vec![0; 4096];
loop {
let (sz, fin) = client.stream_recv(stream_id, &mut data)?;
if sz == 0 {
return Ok(fin);
}
if let Some(out_file) = maybe_out_file {
out_file.write_all(&data[..sz])?;
} else if !output_read_data {
qdebug!("READ[{stream_id}]: {sz} bytes");
} else {
qdebug!(
"READ[{}]: {}",
stream_id,
String::from_utf8(data.clone()).unwrap()
);
}
if fin {
return Ok(true);
}
}
}
fn read(&mut self, client: &mut Connection, stream_id: StreamId) -> Res<()> {
match self.streams.get_mut(&stream_id) {
None => {
qwarn!("Data on unexpected stream: {stream_id}");
return Ok(());
}
Some(maybe_out_file) => {
let fin_recvd = Self::read_from_stream(
client,
stream_id,
self.args.output_read_data,
maybe_out_file,
)?;
if fin_recvd {
if let Some(mut out_file) = maybe_out_file.take() {
out_file.flush()?;
} else {
qinfo!("<FIN[{stream_id}]>");
}
self.streams.remove(&stream_id);
self.download_urls(client);
}
}
}
Ok(())
}
}


@@ -0,0 +1,478 @@
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! An HTTP 3 client implementation.
use std::{
cell::RefCell,
collections::{HashMap, VecDeque},
fmt::Display,
fs::File,
io::{BufWriter, Write},
net::SocketAddr,
path::PathBuf,
rc::Rc,
time::Instant,
};
use neqo_common::{event::Provider, hex, qdebug, qinfo, qwarn, Datagram, Header};
use neqo_crypto::{AuthenticationStatus, ResumptionToken};
use neqo_http3::{Error, Http3Client, Http3ClientEvent, Http3Parameters, Http3State, Priority};
use neqo_transport::{
AppError, CloseReason, Connection, EmptyConnectionIdGenerator, Error as TransportError, Output,
StreamId,
};
use url::Url;
use super::{get_output_file, qlog_new, Args, CloseState, Res};
pub(crate) struct Handler<'a> {
#[allow(
unknown_lints,
clippy::struct_field_names,
clippy::redundant_field_names
)]
url_handler: UrlHandler<'a>,
token: Option<ResumptionToken>,
output_read_data: bool,
}
impl<'a> Handler<'a> {
pub(crate) fn new(url_queue: VecDeque<Url>, args: &'a Args) -> Self {
let url_handler = UrlHandler {
url_queue,
stream_handlers: HashMap::new(),
all_paths: Vec::new(),
handler_type: if args.test.is_some() {
StreamHandlerType::Upload
} else {
StreamHandlerType::Download
},
args,
};
Self {
url_handler,
token: None,
output_read_data: args.output_read_data,
}
}
}
pub(crate) fn create_client(
args: &Args,
local_addr: SocketAddr,
remote_addr: SocketAddr,
hostname: &str,
resumption_token: Option<ResumptionToken>,
) -> Res<Http3Client> {
let mut transport = Connection::new_client(
hostname,
&[&args.shared.alpn],
Rc::new(RefCell::new(EmptyConnectionIdGenerator::default())),
local_addr,
remote_addr,
args.shared.quic_parameters.get(args.shared.alpn.as_str()),
Instant::now(),
)?;
let ciphers = args.get_ciphers();
if !ciphers.is_empty() {
transport.set_ciphers(&ciphers)?;
}
let mut client = Http3Client::new_with_conn(
transport,
Http3Parameters::default()
.max_table_size_encoder(args.shared.max_table_size_encoder)
.max_table_size_decoder(args.shared.max_table_size_decoder)
.max_blocked_streams(args.shared.max_blocked_streams)
.max_concurrent_push_streams(args.max_concurrent_push_streams),
);
let qlog = qlog_new(args, hostname, client.connection_id())?;
client.set_qlog(qlog);
if let Some(ech) = &args.ech {
client.enable_ech(ech).expect("enable ECH");
}
if let Some(token) = resumption_token {
client
.enable_resumption(Instant::now(), token)
.expect("enable resumption");
}
Ok(client)
}
impl TryFrom<Http3State> for CloseState {
type Error = CloseReason;
fn try_from(value: Http3State) -> Result<Self, Self::Error> {
let (state, error) = match value {
Http3State::Closing(error) => (CloseState::Closing, error),
Http3State::Closed(error) => (CloseState::Closed, error),
_ => return Ok(CloseState::NotClosing),
};
if error.is_error() {
Err(error.clone())
} else {
Ok(state)
}
}
}
impl super::Client for Http3Client {
fn is_closed(&self) -> Result<CloseState, CloseReason> {
self.state().try_into()
}
fn process_output(&mut self, now: Instant) -> Output {
self.process_output(now)
}
fn process_multiple_input<'a, I>(&mut self, dgrams: I, now: Instant)
where
I: IntoIterator<Item = &'a Datagram>,
{
self.process_multiple_input(dgrams, now);
}
fn close<S>(&mut self, now: Instant, app_error: AppError, msg: S)
where
S: AsRef<str> + Display,
{
self.close(now, app_error, msg);
}
fn stats(&self) -> neqo_transport::Stats {
self.transport_stats()
}
fn has_events(&self) -> bool {
neqo_common::event::Provider::has_events(self)
}
}
impl<'a> super::Handler for Handler<'a> {
type Client = Http3Client;
fn handle(&mut self, client: &mut Http3Client) -> Res<bool> {
while let Some(event) = client.next_event() {
match event {
Http3ClientEvent::AuthenticationNeeded => {
client.authenticated(AuthenticationStatus::Ok, Instant::now());
}
Http3ClientEvent::HeaderReady {
stream_id,
headers,
fin,
..
} => {
if let Some(handler) = self.url_handler.stream_handler(stream_id) {
handler.process_header_ready(stream_id, fin, headers);
} else {
qwarn!("Data on unexpected stream: {stream_id}");
}
if fin {
self.url_handler.on_stream_fin(client, stream_id);
}
}
Http3ClientEvent::DataReadable { stream_id } => {
let mut stream_done = false;
match self.url_handler.stream_handler(stream_id) {
None => {
qwarn!("Data on unexpected stream: {stream_id}");
}
Some(handler) => loop {
let mut data = vec![0; 4096];
let (sz, fin) = client
.read_data(Instant::now(), stream_id, &mut data)
.expect("Read should succeed");
handler.process_data_readable(
stream_id,
fin,
data,
sz,
self.output_read_data,
)?;
if fin {
stream_done = true;
break;
}
if sz == 0 {
break;
}
},
}
if stream_done {
self.url_handler.on_stream_fin(client, stream_id);
}
}
Http3ClientEvent::DataWritable { stream_id } => {
match self.url_handler.stream_handler(stream_id) {
None => {
qwarn!("Data on unexpected stream: {stream_id}");
}
Some(handler) => {
handler.process_data_writable(client, stream_id);
}
}
}
Http3ClientEvent::StateChange(Http3State::Connected)
| Http3ClientEvent::RequestsCreatable => {
self.url_handler.process_urls(client);
}
Http3ClientEvent::ResumptionToken(t) => self.token = Some(t),
_ => {
qwarn!("Unhandled event {event:?}");
}
}
}
Ok(self.url_handler.done())
}
fn take_token(&mut self) -> Option<ResumptionToken> {
self.token.take()
}
}
trait StreamHandler {
fn process_header_ready(&mut self, stream_id: StreamId, fin: bool, headers: Vec<Header>);
fn process_data_readable(
&mut self,
stream_id: StreamId,
fin: bool,
data: Vec<u8>,
sz: usize,
output_read_data: bool,
) -> Res<bool>;
fn process_data_writable(&mut self, client: &mut Http3Client, stream_id: StreamId);
}
enum StreamHandlerType {
Download,
Upload,
}
impl StreamHandlerType {
fn make_handler(
handler_type: &Self,
url: &Url,
args: &Args,
all_paths: &mut Vec<PathBuf>,
client: &mut Http3Client,
client_stream_id: StreamId,
) -> Box<dyn StreamHandler> {
match handler_type {
Self::Download => {
let out_file = get_output_file(url, &args.output_dir, all_paths);
client.stream_close_send(client_stream_id).unwrap();
Box::new(DownloadStreamHandler { out_file })
}
Self::Upload => Box::new(UploadStreamHandler {
data: vec![42; args.upload_size],
offset: 0,
chunk_size: 32768,
start: Instant::now(),
}),
}
}
}
struct DownloadStreamHandler {
out_file: Option<BufWriter<File>>,
}
impl StreamHandler for DownloadStreamHandler {
fn process_header_ready(&mut self, stream_id: StreamId, fin: bool, headers: Vec<Header>) {
if self.out_file.is_none() {
qdebug!("READ HEADERS[{stream_id}]: fin={fin} {headers:?}");
}
}
fn process_data_readable(
&mut self,
stream_id: StreamId,
fin: bool,
data: Vec<u8>,
sz: usize,
output_read_data: bool,
) -> Res<bool> {
if let Some(out_file) = &mut self.out_file {
if sz > 0 {
out_file.write_all(&data[..sz])?;
}
return Ok(true);
} else if !output_read_data {
qdebug!("READ[{stream_id}]: {sz} bytes");
} else if let Ok(txt) = String::from_utf8(data.clone()) {
qdebug!("READ[{stream_id}]: {txt}");
} else {
qdebug!("READ[{}]: 0x{}", stream_id, hex(&data));
}
if fin {
if let Some(mut out_file) = self.out_file.take() {
out_file.flush()?;
} else {
qdebug!("<FIN[{stream_id}]>");
}
}
Ok(true)
}
fn process_data_writable(&mut self, _client: &mut Http3Client, _stream_id: StreamId) {}
}
struct UploadStreamHandler {
data: Vec<u8>,
offset: usize,
chunk_size: usize,
start: Instant,
}
impl StreamHandler for UploadStreamHandler {
fn process_header_ready(&mut self, stream_id: StreamId, fin: bool, headers: Vec<Header>) {
qdebug!("READ HEADERS[{stream_id}]: fin={fin} {headers:?}");
}
fn process_data_readable(
&mut self,
stream_id: StreamId,
_fin: bool,
data: Vec<u8>,
_sz: usize,
_output_read_data: bool,
) -> Res<bool> {
if let Ok(txt) = String::from_utf8(data.clone()) {
let trimmed_txt = txt.trim_end_matches(char::from(0));
let parsed: usize = trimmed_txt.parse().unwrap();
if parsed == self.data.len() {
let upload_time = Instant::now().duration_since(self.start);
qinfo!("Stream ID: {stream_id:?}, Upload time: {upload_time:?}");
}
} else {
panic!("Unexpected data [{}]: 0x{}", stream_id, hex(&data));
}
Ok(true)
}
fn process_data_writable(&mut self, client: &mut Http3Client, stream_id: StreamId) {
while self.offset < self.data.len() {
let end = self.offset + self.chunk_size.min(self.data.len() - self.offset);
let chunk = &self.data[self.offset..end];
match client.send_data(stream_id, chunk) {
Ok(amount) => {
if amount == 0 {
break;
}
self.offset += amount;
if self.offset == self.data.len() {
client.stream_close_send(stream_id).unwrap();
}
}
Err(_) => break,
};
}
}
}
struct UrlHandler<'a> {
url_queue: VecDeque<Url>,
stream_handlers: HashMap<StreamId, Box<dyn StreamHandler>>,
all_paths: Vec<PathBuf>,
handler_type: StreamHandlerType,
args: &'a Args,
}
impl<'a> UrlHandler<'a> {
fn stream_handler(&mut self, stream_id: StreamId) -> Option<&mut Box<dyn StreamHandler>> {
self.stream_handlers.get_mut(&stream_id)
}
fn process_urls(&mut self, client: &mut Http3Client) {
loop {
if self.url_queue.is_empty() {
break;
}
if self.stream_handlers.len() >= self.args.concurrency {
break;
}
if !self.next_url(client) {
break;
}
}
}
fn next_url(&mut self, client: &mut Http3Client) -> bool {
let url = self
.url_queue
.pop_front()
.expect("download_next called with empty queue");
match client.fetch(
Instant::now(),
&self.args.method,
&url,
&to_headers(&self.args.header),
Priority::default(),
) {
Ok(client_stream_id) => {
qdebug!("Successfully created stream id {client_stream_id} for {url}");
let handler: Box<dyn StreamHandler> = StreamHandlerType::make_handler(
&self.handler_type,
&url,
self.args,
&mut self.all_paths,
client,
client_stream_id,
);
self.stream_handlers.insert(client_stream_id, handler);
true
}
Err(
Error::TransportError(TransportError::StreamLimitError)
| Error::StreamLimitError
| Error::Unavailable,
) => {
self.url_queue.push_front(url);
false
}
Err(e) => {
panic!("Can't create stream {e}");
}
}
}
fn done(&mut self) -> bool {
self.stream_handlers.is_empty() && self.url_queue.is_empty()
}
fn on_stream_fin(&mut self, client: &mut Http3Client, stream_id: StreamId) {
self.stream_handlers.remove(&stream_id);
self.process_urls(client);
}
}
fn to_headers(values: &[impl AsRef<str>]) -> Vec<Header> {
    values
        .iter()
        .scan(None, |state, value| {
            if let Some(name) = state.take() {
                // Second value of a pair: emit the header.
                Some(Some(Header::new(name, value.as_ref())))
            } else {
                // First value of a pair: remember the name until its value arrives.
                *state = Some(value.as_ref().to_string());
                Some(None)
            }
        })
        .flatten()
        .collect()
}
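For reference, a minimal standalone sketch (not part of the vendored sources) of the pairing rule used by `to_headers`: each `-H` occurrence supplies a name/value pair in order, and a trailing unpaired name is dropped. Plain tuples stand in for `neqo_common::Header`.

fn pair_headers(values: &[&str]) -> Vec<(String, String)> {
    values
        .chunks_exact(2)
        .map(|pair| (pair[0].to_string(), pair[1].to_string()))
        .collect()
}

fn main() {
    let headers = pair_headers(&["accept", "text/html", "x-test", "1"]);
    assert_eq!(headers.len(), 2);
    assert_eq!(headers[0].0, "accept");
    assert_eq!(headers[1].1, "1");
}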


@ -0,0 +1,593 @@
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::{
collections::{HashMap, VecDeque},
fmt::{self, Display},
fs::{create_dir_all, File, OpenOptions},
io::{self, BufWriter},
net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, ToSocketAddrs},
path::PathBuf,
pin::Pin,
process::exit,
time::Instant,
};
use clap::Parser;
use futures::{
future::{select, Either},
FutureExt, TryFutureExt,
};
use neqo_common::{self as common, qdebug, qerror, qinfo, qlog::NeqoQlog, qwarn, Datagram, Role};
use neqo_crypto::{
constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256},
init, Cipher, ResumptionToken,
};
use neqo_http3::Output;
use neqo_transport::{AppError, CloseReason, ConnectionId, Version};
use qlog::{events::EventImportance, streamer::QlogStreamer};
use tokio::time::Sleep;
use url::{Origin, Url};
use crate::{udp, SharedArgs};
mod http09;
mod http3;
const BUFWRITER_BUFFER_SIZE: usize = 64 * 1024;
#[derive(Debug)]
pub enum Error {
ArgumentError(&'static str),
Http3Error(neqo_http3::Error),
IoError(io::Error),
QlogError,
TransportError(neqo_transport::Error),
ApplicationError(neqo_transport::AppError),
CryptoError(neqo_crypto::Error),
}
impl From<neqo_crypto::Error> for Error {
fn from(err: neqo_crypto::Error) -> Self {
Self::CryptoError(err)
}
}
impl From<io::Error> for Error {
fn from(err: io::Error) -> Self {
Self::IoError(err)
}
}
impl From<neqo_http3::Error> for Error {
fn from(err: neqo_http3::Error) -> Self {
Self::Http3Error(err)
}
}
impl From<qlog::Error> for Error {
fn from(_err: qlog::Error) -> Self {
Self::QlogError
}
}
impl From<neqo_transport::Error> for Error {
fn from(err: neqo_transport::Error) -> Self {
Self::TransportError(err)
}
}
impl From<neqo_transport::CloseReason> for Error {
fn from(err: neqo_transport::CloseReason) -> Self {
match err {
CloseReason::Transport(e) => Self::TransportError(e),
CloseReason::Application(e) => Self::ApplicationError(e),
}
}
}
impl Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Error: {self:?}")?;
Ok(())
}
}
impl std::error::Error for Error {}
type Res<T> = Result<T, Error>;
#[derive(Debug, Parser)]
#[command(author, version, about, long_about = None)]
#[allow(clippy::struct_excessive_bools)] // Not a good use of that lint.
pub struct Args {
#[command(flatten)]
shared: SharedArgs,
urls: Vec<Url>,
#[arg(short = 'm', default_value = "GET")]
method: String,
#[arg(short = 'H', long, number_of_values = 2)]
header: Vec<String>,
#[arg(name = "max-push", short = 'p', long, default_value = "10")]
max_concurrent_push_streams: u64,
#[arg(name = "download-in-series", long)]
/// Download resources in series using separate connections.
download_in_series: bool,
#[arg(name = "concurrency", long, default_value = "100")]
/// The maximum number of requests to have outstanding at one time.
concurrency: usize,
#[arg(name = "output-read-data", long)]
/// Output received data to stdout
output_read_data: bool,
#[arg(name = "output-dir", long)]
/// Save contents of fetched URLs to a directory
output_dir: Option<PathBuf>,
#[arg(short = 'r', long, hide = true)]
/// Client attempts to resume by making multiple connections to servers.
/// Requires that 2 or more URLs are listed for each server.
/// Use this for 0-RTT: the stack always attempts 0-RTT on resumption.
resume: bool,
#[arg(name = "key-update", long, hide = true)]
/// Attempt to initiate a key update immediately after confirming the connection.
key_update: bool,
#[arg(name = "ech", long, value_parser = |s: &str| hex::decode(s))]
/// Enable encrypted client hello (ECH).
/// This takes an encoded ECH configuration in hexadecimal format.
ech: Option<Vec<u8>>,
#[arg(name = "ipv4-only", short = '4', long)]
/// Connect only over IPv4
ipv4_only: bool,
#[arg(name = "ipv6-only", short = '6', long)]
/// Connect only over IPv6
ipv6_only: bool,
/// The test that this client will run. Currently, we only support "upload".
#[arg(name = "test", long)]
test: Option<String>,
/// The request size that will be used for upload test.
#[arg(name = "upload-size", long, default_value = "100")]
upload_size: usize,
/// Print connection stats after close.
#[arg(name = "stats", long)]
stats: bool,
}
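// Example invocation (illustrative only; the `neqo-client` binary name and the URL
// are assumptions, not taken from this change):
//
//     neqo-client -H accept text/html --output-dir /tmp https://example.org/10000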
impl Args {
#[must_use]
#[cfg(feature = "bench")]
#[allow(clippy::missing_panics_doc)]
pub fn new(requests: &[u64]) -> Self {
use std::str::FromStr;
Self {
shared: crate::SharedArgs::default(),
urls: requests
.iter()
.map(|r| Url::from_str(&format!("http://[::1]:12345/{r}")).unwrap())
.collect(),
method: "GET".into(),
header: vec![],
max_concurrent_push_streams: 10,
download_in_series: false,
concurrency: 100,
output_read_data: false,
output_dir: Some("/dev/null".into()),
resume: false,
key_update: false,
ech: None,
ipv4_only: false,
ipv6_only: false,
test: None,
upload_size: 100,
stats: false,
}
}
fn get_ciphers(&self) -> Vec<Cipher> {
self.shared
.ciphers
.iter()
.filter_map(|c| match c.as_str() {
"TLS_AES_128_GCM_SHA256" => Some(TLS_AES_128_GCM_SHA256),
"TLS_AES_256_GCM_SHA384" => Some(TLS_AES_256_GCM_SHA384),
"TLS_CHACHA20_POLY1305_SHA256" => Some(TLS_CHACHA20_POLY1305_SHA256),
_ => None,
})
.collect::<Vec<_>>()
}
fn update_for_tests(&mut self) {
let Some(testcase) = self.shared.qns_test.as_ref() else {
return;
};
if self.key_update {
qerror!("internal option key_update set by user");
exit(127)
}
if self.resume {
qerror!("internal option resume set by user");
exit(127)
}
// Only use v1 for most QNS tests.
self.shared.quic_parameters.quic_version = vec![Version::Version1];
match testcase.as_str() {
"http3" => {
if let Some(testcase) = &self.test {
if testcase.as_str() != "upload" {
qerror!("Unsupported test case: {testcase}");
exit(127)
}
self.method = String::from("POST");
}
}
"handshake" | "transfer" | "retry" | "ecn" => {
self.shared.use_old_http = true;
}
"zerortt" | "resumption" => {
if self.urls.len() < 2 {
qerror!("Warning: resumption tests won't work without >1 URL");
exit(127);
}
self.shared.use_old_http = true;
self.resume = true;
}
"multiconnect" => {
self.shared.use_old_http = true;
self.download_in_series = true;
}
"chacha20" => {
self.shared.use_old_http = true;
self.shared.ciphers.clear();
self.shared
.ciphers
.extend_from_slice(&[String::from("TLS_CHACHA20_POLY1305_SHA256")]);
}
"keyupdate" => {
self.shared.use_old_http = true;
self.key_update = true;
}
"v2" => {
self.shared.use_old_http = true;
// Use default version set for this test (which allows compatible vneg.)
self.shared.quic_parameters.quic_version.clear();
}
_ => exit(127),
}
}
}
fn get_output_file(
url: &Url,
output_dir: &Option<PathBuf>,
all_paths: &mut Vec<PathBuf>,
) -> Option<BufWriter<File>> {
if let Some(ref dir) = output_dir {
let mut out_path = dir.clone();
let url_path = if url.path() == "/" {
// If no path is given... call it "root"?
"root"
} else {
// Omit leading slash
&url.path()[1..]
};
out_path.push(url_path);
if all_paths.contains(&out_path) {
qerror!("duplicate path {}", out_path.display());
return None;
}
qinfo!("Saving {url} to {out_path:?}");
if let Some(parent) = out_path.parent() {
create_dir_all(parent).ok()?;
}
let f = OpenOptions::new()
.write(true)
.create(true)
.truncate(true)
.open(&out_path)
.ok()?;
all_paths.push(out_path);
Some(BufWriter::with_capacity(BUFWRITER_BUFFER_SIZE, f))
} else {
None
}
}
enum Ready {
Socket,
Timeout,
}
// Wait for the socket to be readable or the timeout to fire.
async fn ready(
socket: &udp::Socket,
mut timeout: Option<&mut Pin<Box<Sleep>>>,
) -> Result<Ready, io::Error> {
let socket_ready = Box::pin(socket.readable()).map_ok(|()| Ready::Socket);
let timeout_ready = timeout
.as_mut()
.map_or(Either::Right(futures::future::pending()), Either::Left)
.map(|()| Ok(Ready::Timeout));
select(socket_ready, timeout_ready).await.factor_first().0
}
/// Handles a given task on the provided [`Client`].
trait Handler {
type Client: Client;
fn handle(&mut self, client: &mut Self::Client) -> Res<bool>;
fn take_token(&mut self) -> Option<ResumptionToken>;
}
enum CloseState {
NotClosing,
Closing,
Closed,
}
/// Network client, e.g. [`neqo_transport::Connection`] or [`neqo_http3::Http3Client`].
trait Client {
fn process_output(&mut self, now: Instant) -> Output;
fn process_multiple_input<'a, I>(&mut self, dgrams: I, now: Instant)
where
I: IntoIterator<Item = &'a Datagram>;
fn has_events(&self) -> bool;
fn close<S>(&mut self, now: Instant, app_error: AppError, msg: S)
where
S: AsRef<str> + Display;
fn is_closed(&self) -> Result<CloseState, CloseReason>;
fn stats(&self) -> neqo_transport::Stats;
}
struct Runner<'a, H: Handler> {
local_addr: SocketAddr,
socket: &'a mut udp::Socket,
client: H::Client,
handler: H,
timeout: Option<Pin<Box<Sleep>>>,
args: &'a Args,
}
impl<'a, H: Handler> Runner<'a, H> {
async fn run(mut self) -> Res<Option<ResumptionToken>> {
loop {
let handler_done = self.handler.handle(&mut self.client)?;
self.process_output().await?;
if self.client.has_events() {
continue;
}
#[allow(clippy::match_same_arms)]
match (handler_done, self.client.is_closed()?) {
// more work
(false, _) => {}
// no more work, closing connection
(true, CloseState::NotClosing) => {
self.client.close(Instant::now(), 0, "kthxbye!");
continue;
}
// no more work, already closing connection
(true, CloseState::Closing) => {}
// no more work, connection closed, terminating
(true, CloseState::Closed) => break,
}
match ready(self.socket, self.timeout.as_mut()).await? {
Ready::Socket => self.process_multiple_input().await?,
Ready::Timeout => {
self.timeout = None;
}
}
}
if self.args.stats {
qinfo!("{:?}", self.client.stats());
}
Ok(self.handler.take_token())
}
async fn process_output(&mut self) -> Result<(), io::Error> {
loop {
match self.client.process_output(Instant::now()) {
Output::Datagram(dgram) => {
self.socket.writable().await?;
self.socket.send(&dgram)?;
}
Output::Callback(new_timeout) => {
qdebug!("Setting timeout of {:?}", new_timeout);
self.timeout = Some(Box::pin(tokio::time::sleep(new_timeout)));
break;
}
Output::None => {
qdebug!("Output::None");
break;
}
}
}
Ok(())
}
async fn process_multiple_input(&mut self) -> Res<()> {
loop {
let dgrams = self.socket.recv(&self.local_addr)?;
if dgrams.is_empty() {
break;
}
self.client
.process_multiple_input(dgrams.iter(), Instant::now());
self.process_output().await?;
}
Ok(())
}
}
fn qlog_new(args: &Args, hostname: &str, cid: &ConnectionId) -> Res<NeqoQlog> {
if let Some(qlog_dir) = &args.shared.qlog_dir {
let mut qlog_path = qlog_dir.clone();
let filename = format!("{hostname}-{cid}.sqlog");
qlog_path.push(filename);
let f = OpenOptions::new()
.write(true)
.create(true)
.truncate(true)
.open(&qlog_path)?;
let streamer = QlogStreamer::new(
qlog::QLOG_VERSION.to_string(),
Some("Example qlog".to_string()),
Some("Example qlog description".to_string()),
None,
std::time::Instant::now(),
common::qlog::new_trace(Role::Client),
EventImportance::Base,
Box::new(f),
);
Ok(NeqoQlog::enabled(streamer, qlog_path)?)
} else {
Ok(NeqoQlog::disabled())
}
}
pub async fn client(mut args: Args) -> Res<()> {
neqo_common::log::init(
args.shared
.verbose
.as_ref()
.map(clap_verbosity_flag::Verbosity::log_level_filter),
);
init()?;
args.update_for_tests();
let urls_by_origin = args
.urls
.clone()
.into_iter()
.fold(HashMap::<Origin, VecDeque<Url>>::new(), |mut urls, url| {
urls.entry(url.origin()).or_default().push_back(url);
urls
})
.into_iter()
.filter_map(|(origin, urls)| match origin {
Origin::Tuple(_scheme, h, p) => Some(((h, p), urls)),
Origin::Opaque(x) => {
qwarn!("Opaque origin {x:?}");
None
}
});
for ((host, port), mut urls) in urls_by_origin {
if args.resume && urls.len() < 2 {
qerror!("Resumption to {host} cannot work without at least 2 URLs.");
exit(127);
}
let remote_addr = format!("{host}:{port}").to_socket_addrs()?.find(|addr| {
!matches!(
(addr, args.ipv4_only, args.ipv6_only),
(SocketAddr::V4(..), false, true) | (SocketAddr::V6(..), true, false)
)
});
let Some(remote_addr) = remote_addr else {
qerror!("No compatible address found for: {host}");
exit(1);
};
let local_addr = match remote_addr {
SocketAddr::V4(..) => SocketAddr::new(IpAddr::V4(Ipv4Addr::from([0; 4])), 0),
SocketAddr::V6(..) => SocketAddr::new(IpAddr::V6(Ipv6Addr::from([0; 16])), 0),
};
let mut socket = udp::Socket::bind(local_addr)?;
let real_local = socket.local_addr().unwrap();
qinfo!(
"{} Client connecting: {:?} -> {:?}",
if args.shared.use_old_http { "H9" } else { "H3" },
real_local,
remote_addr,
);
let hostname = format!("{host}");
let mut token: Option<ResumptionToken> = None;
let mut first = true;
while !urls.is_empty() {
let to_request = if (args.resume && first) || args.download_in_series {
urls.pop_front().into_iter().collect()
} else {
std::mem::take(&mut urls)
};
first = false;
token = if args.shared.use_old_http {
let client =
http09::create_client(&args, real_local, remote_addr, &hostname, token)
.expect("failed to create client");
let handler = http09::Handler::new(to_request, &args);
Runner {
args: &args,
client,
handler,
local_addr: real_local,
socket: &mut socket,
timeout: None,
}
.run()
.await?
} else {
let client = http3::create_client(&args, real_local, remote_addr, &hostname, token)
.expect("failed to create client");
let handler = http3::Handler::new(to_request, &args);
Runner {
args: &args,
client,
handler,
local_addr: real_local,
socket: &mut socket,
timeout: None,
}
.run()
.await?
};
}
}
Ok(())
}

248
third_party/rust/neqo-bin/src/lib.rs vendored Normal file

@ -0,0 +1,248 @@
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(clippy::missing_panics_doc)]
#![allow(clippy::missing_errors_doc)]
use std::{
fmt::{self, Display},
net::{SocketAddr, ToSocketAddrs},
path::PathBuf,
time::Duration,
};
use clap::Parser;
use neqo_transport::{
tparams::PreferredAddress, CongestionControlAlgorithm, ConnectionParameters, StreamType,
Version,
};
pub mod client;
pub mod server;
pub mod udp;
#[derive(Debug, Parser)]
pub struct SharedArgs {
#[command(flatten)]
verbose: Option<clap_verbosity_flag::Verbosity>,
#[arg(short = 'a', long, default_value = "h3")]
/// ALPN labels to negotiate.
///
/// This client still only does HTTP/3 no matter what the ALPN says.
pub alpn: String,
#[arg(name = "qlog-dir", long, value_parser=clap::value_parser!(PathBuf))]
/// Enable QLOG logging and QLOG traces to this directory
pub qlog_dir: Option<PathBuf>,
#[arg(name = "encoder-table-size", long, default_value = "16384")]
pub max_table_size_encoder: u64,
#[arg(name = "decoder-table-size", long, default_value = "16384")]
pub max_table_size_decoder: u64,
#[arg(name = "max-blocked-streams", short = 'b', long, default_value = "10")]
pub max_blocked_streams: u16,
#[arg(short = 'c', long, number_of_values = 1)]
/// The set of TLS cipher suites to enable.
/// From: `TLS_AES_128_GCM_SHA256`, `TLS_AES_256_GCM_SHA384`, `TLS_CHACHA20_POLY1305_SHA256`.
pub ciphers: Vec<String>,
#[arg(name = "qns-test", long)]
/// Enable special behavior for use with QUIC Network Simulator
pub qns_test: Option<String>,
#[arg(name = "use-old-http", short = 'o', long)]
/// Use http 0.9 instead of HTTP/3
pub use_old_http: bool,
#[command(flatten)]
pub quic_parameters: QuicParameters,
}
#[cfg(feature = "bench")]
impl Default for SharedArgs {
fn default() -> Self {
Self {
verbose: None,
alpn: "h3".into(),
qlog_dir: None,
max_table_size_encoder: 16384,
max_table_size_decoder: 16384,
max_blocked_streams: 10,
ciphers: vec![],
qns_test: None,
use_old_http: false,
quic_parameters: QuicParameters::default(),
}
}
}
#[derive(Debug, Parser)]
pub struct QuicParameters {
#[arg(
short = 'Q',
long,
num_args = 1..,
value_delimiter = ' ',
number_of_values = 1,
value_parser = from_str)]
/// A list of versions to support, in hex.
/// The first is the version to attempt.
/// Adding multiple values adds versions in order of preference.
/// If the first listed version appears in the list twice, the position
/// of the second entry determines the preference order of that version.
pub quic_version: Vec<Version>,
#[arg(long, default_value = "16")]
/// Set the `MAX_STREAMS_BIDI` limit.
pub max_streams_bidi: u64,
#[arg(long, default_value = "16")]
/// Set the `MAX_STREAMS_UNI` limit.
pub max_streams_uni: u64,
#[arg(long = "idle", default_value = "30")]
/// The idle timeout for connections, in seconds.
pub idle_timeout: u64,
#[arg(long = "cc", default_value = "newreno")]
/// The congestion controller to use.
pub congestion_control: CongestionControlAlgorithm,
#[arg(long = "no-pacing")]
/// Whether to disable pacing.
pub no_pacing: bool,
#[arg(name = "preferred-address-v4", long)]
/// An IPv4 address for the server preferred address.
pub preferred_address_v4: Option<String>,
#[arg(name = "preferred-address-v6", long)]
/// An IPv6 address for the server preferred address.
pub preferred_address_v6: Option<String>,
}
#[cfg(feature = "bench")]
impl Default for QuicParameters {
fn default() -> Self {
Self {
quic_version: vec![],
max_streams_bidi: 16,
max_streams_uni: 16,
idle_timeout: 30,
congestion_control: CongestionControlAlgorithm::NewReno,
no_pacing: false,
preferred_address_v4: None,
preferred_address_v6: None,
}
}
}
impl QuicParameters {
fn get_sock_addr<F>(opt: &Option<String>, v: &str, f: F) -> Option<SocketAddr>
where
F: FnMut(&SocketAddr) -> bool,
{
let addr = opt
.iter()
.filter_map(|spa| spa.to_socket_addrs().ok())
.flatten()
.find(f);
assert_eq!(
opt.is_some(),
addr.is_some(),
"unable to resolve '{}' to an {} address",
opt.as_ref().unwrap(),
v,
);
addr
}
#[must_use]
pub fn preferred_address_v4(&self) -> Option<SocketAddr> {
Self::get_sock_addr(&self.preferred_address_v4, "IPv4", SocketAddr::is_ipv4)
}
#[must_use]
pub fn preferred_address_v6(&self) -> Option<SocketAddr> {
Self::get_sock_addr(&self.preferred_address_v6, "IPv6", SocketAddr::is_ipv6)
}
#[must_use]
pub fn preferred_address(&self) -> Option<PreferredAddress> {
let v4 = self.preferred_address_v4();
let v6 = self.preferred_address_v6();
if v4.is_none() && v6.is_none() {
None
} else {
let v4 = v4.map(|v4| {
let SocketAddr::V4(v4) = v4 else {
unreachable!();
};
v4
});
let v6 = v6.map(|v6| {
let SocketAddr::V6(v6) = v6 else {
unreachable!();
};
v6
});
Some(PreferredAddress::new(v4, v6))
}
}
#[must_use]
pub fn get(&self, alpn: &str) -> ConnectionParameters {
let params = ConnectionParameters::default()
.max_streams(StreamType::BiDi, self.max_streams_bidi)
.max_streams(StreamType::UniDi, self.max_streams_uni)
.idle_timeout(Duration::from_secs(self.idle_timeout))
.cc_algorithm(self.congestion_control)
.pacing(!self.no_pacing);
if let Some(&first) = self.quic_version.first() {
let all = if self.quic_version[1..].contains(&first) {
&self.quic_version[1..]
} else {
&self.quic_version
};
params.versions(first, all.to_vec())
} else {
let version = match alpn {
"h3" | "hq-interop" => Version::Version1,
"h3-29" | "hq-29" => Version::Draft29,
"h3-30" | "hq-30" => Version::Draft30,
"h3-31" | "hq-31" => Version::Draft31,
"h3-32" | "hq-32" => Version::Draft32,
_ => Version::default(),
};
params.versions(version, Version::all())
}
}
}
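// Illustrative sketch (not part of the vendored sources): the duplicate-entry rule
// implemented by `QuicParameters::get`, shown with the raw hex codes accepted by `-Q`
// (e.g. `-Q 1 ff00001d 1`; 0xff00001d is Draft29).
#[cfg(test)]
#[test]
fn version_preference_rule() {
    fn order(versions: &[u32]) -> (u32, Vec<u32>) {
        let first = versions[0];
        let all = if versions[1..].contains(&first) {
            versions[1..].to_vec()
        } else {
            versions.to_vec()
        };
        (first, all)
    }
    // A repeated first entry: attempt v1 first, but advertise Draft29 ahead of v1.
    assert_eq!(order(&[0x1, 0xff00_001d, 0x1]), (0x1, vec![0xff00_001d, 0x1]));
    // No repeat: the whole list keeps its given order.
    assert_eq!(order(&[0x1, 0xff00_001d]), (0x1, vec![0x1, 0xff00_001d]));
}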
fn from_str(s: &str) -> Result<Version, Error> {
let v = u32::from_str_radix(s, 16)
.map_err(|_| Error::Argument("versions need to be specified in hex"))?;
Version::try_from(v).map_err(|_| Error::Argument("unknown version"))
}
#[derive(Debug)]
pub enum Error {
Argument(&'static str),
}
impl Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Error: {self:?}")?;
Ok(())
}
}
impl std::error::Error for Error {}


@ -0,0 +1,253 @@
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::{cell::RefCell, collections::HashMap, fmt::Display, rc::Rc, time::Instant};
use neqo_common::{event::Provider, hex, qdebug, qerror, qinfo, qwarn, Datagram};
use neqo_crypto::{generate_ech_keys, random, AllowZeroRtt, AntiReplay};
use neqo_http3::Error;
use neqo_transport::{
server::{ActiveConnectionRef, Server, ValidateAddress},
ConnectionEvent, ConnectionIdGenerator, Output, State, StreamId,
};
use regex::Regex;
use super::{qns_read_response, Args};
#[derive(Default)]
struct HttpStreamState {
writable: bool,
data_to_send: Option<(Vec<u8>, usize)>,
}
pub struct HttpServer {
server: Server,
write_state: HashMap<StreamId, HttpStreamState>,
read_state: HashMap<StreamId, Vec<u8>>,
is_qns_test: bool,
}
impl HttpServer {
pub fn new(
args: &Args,
anti_replay: AntiReplay,
cid_manager: Rc<RefCell<dyn ConnectionIdGenerator>>,
) -> Result<Self, Error> {
let mut server = Server::new(
args.now(),
&[args.key.clone()],
&[args.shared.alpn.clone()],
anti_replay,
Box::new(AllowZeroRtt {}),
cid_manager,
args.shared.quic_parameters.get(&args.shared.alpn),
)?;
server.set_ciphers(&args.get_ciphers());
server.set_qlog_dir(args.shared.qlog_dir.clone());
if args.retry {
server.set_validation(ValidateAddress::Always);
}
if args.ech {
let (sk, pk) = generate_ech_keys().expect("generate ECH keys");
server
.enable_ech(random::<1>()[0], "public.example", &sk, &pk)
.expect("enable ECH");
let cfg = server.ech_config();
qinfo!("ECHConfigList: {}", hex(cfg));
}
Ok(Self {
server,
write_state: HashMap::new(),
read_state: HashMap::new(),
is_qns_test: args.shared.qns_test.is_some(),
})
}
fn save_partial(
&mut self,
stream_id: StreamId,
partial: Vec<u8>,
conn: &mut ActiveConnectionRef,
) {
let url_dbg = String::from_utf8(partial.clone())
.unwrap_or_else(|_| format!("<invalid UTF-8: {}>", hex(&partial)));
if partial.len() < 4096 {
qdebug!("Saving partial URL: {}", url_dbg);
self.read_state.insert(stream_id, partial);
} else {
qdebug!("Giving up on partial URL {}", url_dbg);
conn.borrow_mut().stream_stop_sending(stream_id, 0).unwrap();
}
}
fn write(
&mut self,
stream_id: StreamId,
data: Option<Vec<u8>>,
conn: &mut ActiveConnectionRef,
) {
let resp = data.unwrap_or_else(|| Vec::from(&b"404 That request was nonsense\r\n"[..]));
if let Some(stream_state) = self.write_state.get_mut(&stream_id) {
match stream_state.data_to_send {
None => stream_state.data_to_send = Some((resp, 0)),
Some(_) => {
qdebug!("Data already set, doing nothing");
}
}
if stream_state.writable {
self.stream_writable(stream_id, conn);
}
} else {
self.write_state.insert(
stream_id,
HttpStreamState {
writable: false,
data_to_send: Some((resp, 0)),
},
);
}
}
fn stream_readable(&mut self, stream_id: StreamId, conn: &mut ActiveConnectionRef) {
if !stream_id.is_client_initiated() || !stream_id.is_bidi() {
qdebug!("Stream {} not client-initiated bidi, ignoring", stream_id);
return;
}
let mut data = vec![0; 4000];
let (sz, fin) = conn
.borrow_mut()
.stream_recv(stream_id, &mut data)
.expect("Read should succeed");
if sz == 0 {
if !fin {
qdebug!("size 0 but !fin");
}
return;
}
data.truncate(sz);
let buf = if let Some(mut existing) = self.read_state.remove(&stream_id) {
existing.append(&mut data);
existing
} else {
data
};
let Ok(msg) = std::str::from_utf8(&buf[..]) else {
self.save_partial(stream_id, buf, conn);
return;
};
let re = if self.is_qns_test {
Regex::new(r"GET +/(\S+)(?:\r)?\n").unwrap()
} else {
Regex::new(r"GET +/(\d+)(?:\r)?\n").unwrap()
};
let m = re.captures(msg);
let Some(path) = m.and_then(|m| m.get(1)) else {
self.save_partial(stream_id, buf, conn);
return;
};
let resp = {
let path = path.as_str();
qdebug!("Path = '{path}'");
if self.is_qns_test {
match qns_read_response(path) {
Ok(data) => Some(data),
Err(e) => {
qerror!("Failed to read {path}: {e}");
Some(b"404".to_vec())
}
}
} else {
let count = path.parse().unwrap();
Some(vec![b'a'; count])
}
};
self.write(stream_id, resp, conn);
}
fn stream_writable(&mut self, stream_id: StreamId, conn: &mut ActiveConnectionRef) {
match self.write_state.get_mut(&stream_id) {
None => {
qwarn!("Unknown stream {stream_id}, ignoring event");
}
Some(stream_state) => {
stream_state.writable = true;
if let Some((data, ref mut offset)) = &mut stream_state.data_to_send {
let sent = conn
.borrow_mut()
.stream_send(stream_id, &data[*offset..])
.unwrap();
qdebug!("Wrote {}", sent);
*offset += sent;
self.server.add_to_waiting(conn);
if *offset == data.len() {
qinfo!("Sent {sent} on {stream_id}, closing");
conn.borrow_mut().stream_close_send(stream_id).unwrap();
self.write_state.remove(&stream_id);
} else {
stream_state.writable = false;
}
}
}
}
}
}
impl super::HttpServer for HttpServer {
fn process(&mut self, dgram: Option<&Datagram>, now: Instant) -> Output {
self.server.process(dgram, now)
}
fn process_events(&mut self, now: Instant) {
let active_conns = self.server.active_connections();
for mut acr in active_conns {
loop {
let event = match acr.borrow_mut().next_event() {
None => break,
Some(e) => e,
};
match event {
ConnectionEvent::NewStream { stream_id } => {
self.write_state
.insert(stream_id, HttpStreamState::default());
}
ConnectionEvent::RecvStreamReadable { stream_id } => {
self.stream_readable(stream_id, &mut acr);
}
ConnectionEvent::SendStreamWritable { stream_id } => {
self.stream_writable(stream_id, &mut acr);
}
ConnectionEvent::StateChange(State::Connected) => {
acr.connection()
.borrow_mut()
.send_ticket(now, b"hi!")
.unwrap();
}
ConnectionEvent::StateChange(_)
| ConnectionEvent::SendStreamCreatable { .. }
| ConnectionEvent::SendStreamComplete { .. } => (),
e => qwarn!("unhandled event {e:?}"),
}
}
}
}
fn has_events(&self) -> bool {
self.server.has_active_connections()
}
}
impl Display for HttpServer {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
write!(f, "Http 0.9 server ")
}
}


@ -0,0 +1,244 @@
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::{
borrow::Cow,
cell::RefCell,
cmp::min,
collections::HashMap,
fmt::{self, Display},
rc::Rc,
time::Instant,
};
use neqo_common::{hex, qdebug, qerror, qinfo, qwarn, Datagram, Header};
use neqo_crypto::{generate_ech_keys, random, AntiReplay};
use neqo_http3::{
Http3OrWebTransportStream, Http3Parameters, Http3Server, Http3ServerEvent, StreamId,
};
use neqo_transport::{server::ValidateAddress, ConnectionIdGenerator};
use super::{qns_read_response, Args};
pub struct HttpServer {
server: Http3Server,
/// Progress writing to each stream.
remaining_data: HashMap<StreamId, ResponseData>,
posts: HashMap<Http3OrWebTransportStream, usize>,
is_qns_test: bool,
}
impl HttpServer {
const MESSAGE: &'static [u8] = &[0; 4096];
pub fn new(
args: &Args,
anti_replay: AntiReplay,
cid_mgr: Rc<RefCell<dyn ConnectionIdGenerator>>,
) -> Self {
let mut server = Http3Server::new(
args.now(),
&[args.key.clone()],
&[args.shared.alpn.clone()],
anti_replay,
cid_mgr,
Http3Parameters::default()
.connection_parameters(args.shared.quic_parameters.get(&args.shared.alpn))
.max_table_size_encoder(args.shared.max_table_size_encoder)
.max_table_size_decoder(args.shared.max_table_size_decoder)
.max_blocked_streams(args.shared.max_blocked_streams),
None,
)
.expect("We cannot make a server!");
server.set_ciphers(&args.get_ciphers());
server.set_qlog_dir(args.shared.qlog_dir.clone());
if args.retry {
server.set_validation(ValidateAddress::Always);
}
if args.ech {
let (sk, pk) = generate_ech_keys().expect("should create ECH keys");
server
.enable_ech(random::<1>()[0], "public.example", &sk, &pk)
.unwrap();
let cfg = server.ech_config();
qinfo!("ECHConfigList: {}", hex(cfg));
}
Self {
server,
remaining_data: HashMap::new(),
posts: HashMap::new(),
is_qns_test: args.shared.qns_test.is_some(),
}
}
}
impl Display for HttpServer {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.server.fmt(f)
}
}
impl super::HttpServer for HttpServer {
fn process(&mut self, dgram: Option<&Datagram>, now: Instant) -> neqo_http3::Output {
self.server.process(dgram, now)
}
fn process_events(&mut self, _now: Instant) {
while let Some(event) = self.server.next_event() {
match event {
Http3ServerEvent::Headers {
mut stream,
headers,
fin,
} => {
qdebug!("Headers (request={stream} fin={fin}): {headers:?}");
if headers
.iter()
.any(|h| h.name() == ":method" && h.value() == "POST")
{
self.posts.insert(stream, 0);
continue;
}
let Some(path) = headers.iter().find(|&h| h.name() == ":path") else {
stream
.cancel_fetch(neqo_http3::Error::HttpRequestIncomplete.code())
.unwrap();
continue;
};
let mut response = if self.is_qns_test {
match qns_read_response(path.value()) {
Ok(data) => ResponseData::from(data),
Err(e) => {
qerror!("Failed to read {}: {e}", path.value());
stream
.send_headers(&[Header::new(":status", "404")])
.unwrap();
stream.stream_close_send().unwrap();
continue;
}
}
} else if let Ok(count) =
path.value().trim_matches(|p| p == '/').parse::<usize>()
{
ResponseData::repeat(Self::MESSAGE, count)
} else {
ResponseData::from(Self::MESSAGE)
};
stream
.send_headers(&[
Header::new(":status", "200"),
Header::new("content-length", response.remaining.to_string()),
])
.unwrap();
response.send(&mut stream);
if response.done() {
stream.stream_close_send().unwrap();
} else {
self.remaining_data.insert(stream.stream_id(), response);
}
}
Http3ServerEvent::DataWritable { mut stream } => {
if self.posts.get_mut(&stream).is_none() {
if let Some(remaining) = self.remaining_data.get_mut(&stream.stream_id()) {
remaining.send(&mut stream);
if remaining.done() {
self.remaining_data.remove(&stream.stream_id());
stream.stream_close_send().unwrap();
}
}
}
}
Http3ServerEvent::Data {
mut stream,
data,
fin,
} => {
if let Some(received) = self.posts.get_mut(&stream) {
*received += data.len();
}
if fin {
if let Some(received) = self.posts.remove(&stream) {
let msg = received.to_string().as_bytes().to_vec();
stream
.send_headers(&[Header::new(":status", "200")])
.unwrap();
stream.send_data(&msg).unwrap();
stream.stream_close_send().unwrap();
}
}
}
_ => {}
}
}
}
fn has_events(&self) -> bool {
self.server.has_events()
}
}
struct ResponseData {
data: Cow<'static, [u8]>,
offset: usize,
remaining: usize,
}
impl From<&[u8]> for ResponseData {
fn from(data: &[u8]) -> Self {
Self::from(data.to_vec())
}
}
impl From<Vec<u8>> for ResponseData {
fn from(data: Vec<u8>) -> Self {
let remaining = data.len();
Self {
data: Cow::Owned(data),
offset: 0,
remaining,
}
}
}
impl ResponseData {
fn repeat(buf: &'static [u8], total: usize) -> Self {
Self {
data: Cow::Borrowed(buf),
offset: 0,
remaining: total,
}
}
fn send(&mut self, stream: &mut Http3OrWebTransportStream) {
while self.remaining > 0 {
let end = min(self.data.len(), self.offset + self.remaining);
let slice = &self.data[self.offset..end];
match stream.send_data(slice) {
Ok(0) => {
return;
}
Ok(sent) => {
self.remaining -= sent;
self.offset = (self.offset + sent) % self.data.len();
}
Err(e) => {
qwarn!("Error writing to stream {}: {:?}", stream, e);
return;
}
}
}
}
fn done(&self) -> bool {
self.remaining == 0
}
}
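// Illustrative sketch (not part of the vendored sources): how `ResponseData::repeat`
// synthesizes `remaining` bytes by cycling over a fixed buffer, as in `send` above,
// assuming the stream accepts every offered slice in full.
#[cfg(test)]
#[test]
fn repeat_wraps_around_the_buffer() {
    let data = [b'x'; 4];
    let (mut offset, mut remaining, mut produced) = (0usize, 10usize, 0usize);
    while remaining > 0 {
        let end = data.len().min(offset + remaining);
        let sent = end - offset;
        produced += sent;
        remaining -= sent;
        offset = (offset + sent) % data.len();
    }
    assert_eq!(produced, 10);
}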


@ -0,0 +1,390 @@
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::{
cell::RefCell,
fmt::{self, Display},
fs, io,
net::{SocketAddr, ToSocketAddrs},
path::PathBuf,
pin::Pin,
process::exit,
rc::Rc,
time::{Duration, Instant},
};
use clap::Parser;
use futures::{
future::{select, select_all, Either},
FutureExt,
};
use neqo_common::{qdebug, qerror, qinfo, qwarn, Datagram};
use neqo_crypto::{
constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256},
init_db, AntiReplay, Cipher,
};
use neqo_transport::{Output, RandomConnectionIdGenerator, Version};
use tokio::time::Sleep;
use crate::{udp, SharedArgs};
const ANTI_REPLAY_WINDOW: Duration = Duration::from_secs(10);
mod http09;
mod http3;
#[derive(Debug)]
pub enum Error {
ArgumentError(&'static str),
Http3Error(neqo_http3::Error),
IoError(io::Error),
QlogError,
TransportError(neqo_transport::Error),
CryptoError(neqo_crypto::Error),
}
impl From<neqo_crypto::Error> for Error {
fn from(err: neqo_crypto::Error) -> Self {
Self::CryptoError(err)
}
}
impl From<io::Error> for Error {
fn from(err: io::Error) -> Self {
Self::IoError(err)
}
}
impl From<neqo_http3::Error> for Error {
fn from(err: neqo_http3::Error) -> Self {
Self::Http3Error(err)
}
}
impl From<qlog::Error> for Error {
fn from(_err: qlog::Error) -> Self {
Self::QlogError
}
}
impl From<neqo_transport::Error> for Error {
fn from(err: neqo_transport::Error) -> Self {
Self::TransportError(err)
}
}
impl Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Error: {self:?}")?;
Ok(())
}
}
impl std::error::Error for Error {}
type Res<T> = Result<T, Error>;
#[derive(Debug, Parser)]
#[command(author, version, about, long_about = None)]
pub struct Args {
#[command(flatten)]
shared: SharedArgs,
/// List of IP:port to listen on
#[arg(default_value = "[::]:4433")]
hosts: Vec<String>,
#[arg(short = 'd', long, default_value = "./test-fixture/db")]
/// NSS database directory.
db: PathBuf,
#[arg(short = 'k', long, default_value = "key")]
/// Name of key from NSS database.
key: String,
#[arg(name = "retry", long)]
/// Force a retry
retry: bool,
#[arg(name = "ech", long)]
/// Enable encrypted client hello (ECH).
/// This generates a new set of ECH keys when it is invoked.
/// The resulting configuration is printed to stdout in hexadecimal format.
ech: bool,
}
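// Example invocation (illustrative only; the `neqo-server` binary name is an
// assumption, not taken from this change):
//
//     neqo-server -d ./test-fixture/db -k key '[::]:4433'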
#[cfg(feature = "bench")]
impl Default for Args {
fn default() -> Self {
use std::str::FromStr;
Self {
shared: crate::SharedArgs::default(),
hosts: vec!["[::]:12345".to_string()],
db: PathBuf::from_str("../test-fixture/db").unwrap(),
key: "key".to_string(),
retry: false,
ech: false,
}
}
}
impl Args {
fn get_ciphers(&self) -> Vec<Cipher> {
self.shared
.ciphers
.iter()
.filter_map(|c| match c.as_str() {
"TLS_AES_128_GCM_SHA256" => Some(TLS_AES_128_GCM_SHA256),
"TLS_AES_256_GCM_SHA384" => Some(TLS_AES_256_GCM_SHA384),
"TLS_CHACHA20_POLY1305_SHA256" => Some(TLS_CHACHA20_POLY1305_SHA256),
_ => None,
})
.collect::<Vec<_>>()
}
fn listen_addresses(&self) -> Vec<SocketAddr> {
self.hosts
.iter()
.filter_map(|host| host.to_socket_addrs().ok())
.flatten()
.chain(self.shared.quic_parameters.preferred_address_v4())
.chain(self.shared.quic_parameters.preferred_address_v6())
.collect()
}
fn now(&self) -> Instant {
if self.shared.qns_test.is_some() {
// When NSS starts its anti-replay it blocks any acceptance of 0-RTT for a
// single period. This ensures that an attacker that is able to force a
// server to reboot is unable to use that to flush the anti-replay buffers
// and have something replayed.
//
// However, this is a massive inconvenience for us when we are testing.
// As we can't initialize `AntiReplay` in the past (see `neqo_common::time`
// for why), fast forward time here so that the connections get times from
// in the future.
//
// This is NOT SAFE. Don't do this.
Instant::now() + ANTI_REPLAY_WINDOW
} else {
Instant::now()
}
}
}
fn qns_read_response(filename: &str) -> Result<Vec<u8>, io::Error> {
let path: PathBuf = ["/www", filename.trim_matches(|p| p == '/')]
.iter()
.collect();
fs::read(path)
}
#[allow(clippy::module_name_repetitions)]
pub trait HttpServer: Display {
fn process(&mut self, dgram: Option<&Datagram>, now: Instant) -> Output;
fn process_events(&mut self, now: Instant);
fn has_events(&self) -> bool;
}
#[allow(clippy::module_name_repetitions)]
pub struct ServerRunner {
now: Box<dyn Fn() -> Instant>,
server: Box<dyn HttpServer>,
timeout: Option<Pin<Box<Sleep>>>,
sockets: Vec<(SocketAddr, udp::Socket)>,
}
impl ServerRunner {
#[must_use]
pub fn new(
now: Box<dyn Fn() -> Instant>,
server: Box<dyn HttpServer>,
sockets: Vec<(SocketAddr, udp::Socket)>,
) -> Self {
Self {
now,
server,
timeout: None,
sockets,
}
}
    /// Tries to find the socket bound to the given local address, falling back to the first socket.
fn find_socket(&mut self, addr: SocketAddr) -> &mut udp::Socket {
let ((_host, first_socket), rest) = self.sockets.split_first_mut().unwrap();
rest.iter_mut()
.map(|(_host, socket)| socket)
.find(|socket| {
socket
.local_addr()
.ok()
.map_or(false, |socket_addr| socket_addr == addr)
})
.unwrap_or(first_socket)
}
async fn process(&mut self, mut dgram: Option<&Datagram>) -> Result<(), io::Error> {
loop {
match self.server.process(dgram.take(), (self.now)()) {
Output::Datagram(dgram) => {
let socket = self.find_socket(dgram.source());
socket.writable().await?;
socket.send(&dgram)?;
}
Output::Callback(new_timeout) => {
qdebug!("Setting timeout of {:?}", new_timeout);
self.timeout = Some(Box::pin(tokio::time::sleep(new_timeout)));
break;
}
Output::None => {
break;
}
}
}
Ok(())
}
// Wait for any of the sockets to be readable or the timeout to fire.
async fn ready(&mut self) -> Result<Ready, io::Error> {
let sockets_ready = select_all(
self.sockets
.iter()
.map(|(_host, socket)| Box::pin(socket.readable())),
)
.map(|(res, inx, _)| match res {
Ok(()) => Ok(Ready::Socket(inx)),
Err(e) => Err(e),
});
let timeout_ready = self
.timeout
.as_mut()
.map_or(Either::Right(futures::future::pending()), Either::Left)
.map(|()| Ok(Ready::Timeout));
select(sockets_ready, timeout_ready).await.factor_first().0
}
pub async fn run(mut self) -> Res<()> {
loop {
self.server.process_events((self.now)());
self.process(None).await?;
if self.server.has_events() {
continue;
}
match self.ready().await? {
Ready::Socket(inx) => loop {
let (host, socket) = self.sockets.get_mut(inx).unwrap();
let dgrams = socket.recv(host)?;
if dgrams.is_empty() {
break;
}
for dgram in dgrams {
self.process(Some(&dgram)).await?;
}
},
Ready::Timeout => {
self.timeout = None;
self.process(None).await?;
}
}
}
}
}
enum Ready {
Socket(usize),
Timeout,
}
pub async fn server(mut args: Args) -> Res<()> {
const HQ_INTEROP: &str = "hq-interop";
neqo_common::log::init(
args.shared
.verbose
.as_ref()
.map(clap_verbosity_flag::Verbosity::log_level_filter),
);
assert!(!args.key.is_empty(), "Need at least one key");
init_db(args.db.clone())?;
if let Some(testcase) = args.shared.qns_test.as_ref() {
if args.shared.quic_parameters.quic_version.is_empty() {
// Quic Interop Runner expects the server to support `Version1`
// only. Exceptions are testcases `versionnegotiation` (not yet
// implemented) and `v2`.
if testcase != "v2" {
args.shared.quic_parameters.quic_version = vec![Version::Version1];
}
} else {
qwarn!("Both -V and --qns-test were set. Ignoring testcase specific versions.");
}
// TODO: More options to deduplicate with client?
match testcase.as_str() {
"http3" => (),
"zerortt" => {
args.shared.use_old_http = true;
args.shared.alpn = String::from(HQ_INTEROP);
args.shared.quic_parameters.max_streams_bidi = 100;
}
"handshake" | "transfer" | "resumption" | "multiconnect" | "v2" | "ecn" => {
args.shared.use_old_http = true;
args.shared.alpn = String::from(HQ_INTEROP);
}
"chacha20" => {
args.shared.use_old_http = true;
args.shared.alpn = String::from(HQ_INTEROP);
args.shared.ciphers.clear();
args.shared
.ciphers
.extend_from_slice(&[String::from("TLS_CHACHA20_POLY1305_SHA256")]);
}
"retry" => {
args.shared.use_old_http = true;
args.shared.alpn = String::from(HQ_INTEROP);
args.retry = true;
}
_ => exit(127),
}
}
let hosts = args.listen_addresses();
if hosts.is_empty() {
qerror!("No valid hosts defined");
Err(io::Error::new(io::ErrorKind::InvalidInput, "No hosts"))?;
}
let sockets = hosts
.into_iter()
.map(|host| {
let socket = udp::Socket::bind(host)?;
let local_addr = socket.local_addr()?;
qinfo!("Server waiting for connection on: {local_addr:?}");
Ok((host, socket))
})
.collect::<Result<_, io::Error>>()?;
    // Note: this is the one place where we deliberately use the real `Instant::now()` rather than `Args::now`.
let anti_replay = AntiReplay::new(Instant::now(), ANTI_REPLAY_WINDOW, 7, 14)
.expect("unable to setup anti-replay");
let cid_mgr = Rc::new(RefCell::new(RandomConnectionIdGenerator::new(10)));
let server: Box<dyn HttpServer> = if args.shared.use_old_http {
Box::new(
http09::HttpServer::new(&args, anti_replay, cid_mgr).expect("We cannot make a server!"),
)
} else {
Box::new(http3::HttpServer::new(&args, anti_replay, cid_mgr))
};
ServerRunner::new(Box::new(move || args.now()), server, sockets)
.run()
.await
}

218
third_party/rust/neqo-bin/src/udp.rs vendored Normal file

@ -0,0 +1,218 @@
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(clippy::missing_errors_doc)] // Functions simply delegate to tokio and quinn-udp.
#![allow(clippy::missing_panics_doc)] // Functions simply delegate to tokio and quinn-udp.
use std::{
io::{self, IoSliceMut},
net::{SocketAddr, ToSocketAddrs},
slice,
};
use neqo_common::{Datagram, IpTos};
use quinn_udp::{EcnCodepoint, RecvMeta, Transmit, UdpSocketState};
use tokio::io::Interest;
/// Socket receive buffer size.
///
/// Allows reading multiple datagrams in a single [`Socket::recv`] call.
const RECV_BUF_SIZE: usize = u16::MAX as usize;
pub struct Socket {
#[allow(unknown_lints)] // available with Rust v1.75
#[allow(clippy::struct_field_names)]
socket: tokio::net::UdpSocket,
state: UdpSocketState,
recv_buf: Vec<u8>,
}
impl Socket {
/// Calls [`std::net::UdpSocket::bind`] and instantiates [`quinn_udp::UdpSocketState`].
pub fn bind<A: ToSocketAddrs>(addr: A) -> Result<Self, io::Error> {
let socket = std::net::UdpSocket::bind(addr)?;
Ok(Self {
state: quinn_udp::UdpSocketState::new((&socket).into())?,
socket: tokio::net::UdpSocket::from_std(socket)?,
recv_buf: vec![0; RECV_BUF_SIZE],
})
}
/// See [`tokio::net::UdpSocket::local_addr`].
pub fn local_addr(&self) -> io::Result<SocketAddr> {
self.socket.local_addr()
}
/// See [`tokio::net::UdpSocket::writable`].
pub async fn writable(&self) -> Result<(), io::Error> {
self.socket.writable().await
}
/// See [`tokio::net::UdpSocket::readable`].
pub async fn readable(&self) -> Result<(), io::Error> {
self.socket.readable().await
}
/// Send the UDP datagram on the specified socket.
pub fn send(&self, d: &Datagram) -> io::Result<()> {
let transmit = Transmit {
destination: d.destination(),
ecn: EcnCodepoint::from_bits(Into::<u8>::into(d.tos())),
contents: d,
segment_size: None,
src_ip: None,
};
self.socket.try_io(Interest::WRITABLE, || {
self.state.send((&self.socket).into(), &transmit)
})?;
Ok(())
}
/// Receive a UDP datagram on the specified socket.
pub fn recv(&mut self, local_address: &SocketAddr) -> Result<Vec<Datagram>, io::Error> {
let mut meta = RecvMeta::default();
match self.socket.try_io(Interest::READABLE, || {
self.state.recv(
(&self.socket).into(),
&mut [IoSliceMut::new(&mut self.recv_buf)],
slice::from_mut(&mut meta),
)
}) {
Ok(n) => {
assert_eq!(n, 1, "only passed one slice");
}
Err(ref err)
if err.kind() == io::ErrorKind::WouldBlock
|| err.kind() == io::ErrorKind::Interrupted =>
{
return Ok(vec![])
}
Err(err) => {
return Err(err);
}
};
if meta.len == 0 {
eprintln!("zero length datagram received?");
return Ok(vec![]);
}
if meta.len == self.recv_buf.len() {
eprintln!(
"Might have received more than {} bytes",
self.recv_buf.len()
);
}
Ok(self.recv_buf[0..meta.len]
.chunks(meta.stride.min(self.recv_buf.len()))
.map(|d| {
Datagram::new(
meta.addr,
*local_address,
meta.ecn.map(|n| IpTos::from(n as u8)).unwrap_or_default(),
None, // TODO: get the real TTL https://github.com/quinn-rs/quinn/issues/1749
d,
)
})
.collect())
}
}
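// Illustrative sketch (not part of the vendored sources): how `recv` above splits a
// single GRO read of `meta.len` bytes into per-datagram chunks of `meta.stride` bytes.
#[cfg(test)]
#[test]
fn gro_chunking_rule() {
    let recv_buf = vec![0u8; 16];
    let (len, stride) = (12, 4);
    let datagrams: Vec<&[u8]> = recv_buf[..len].chunks(stride.min(recv_buf.len())).collect();
    assert_eq!(datagrams.len(), 3);
    assert!(datagrams.iter().all(|d| d.len() == stride));
}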
#[cfg(test)]
mod tests {
use neqo_common::{IpTosDscp, IpTosEcn};
use super::*;
#[tokio::test]
async fn datagram_tos() -> Result<(), io::Error> {
let sender = Socket::bind("127.0.0.1:0")?;
let receiver_addr: SocketAddr = "127.0.0.1:0".parse().unwrap();
let mut receiver = Socket::bind(receiver_addr)?;
let datagram = Datagram::new(
sender.local_addr()?,
receiver.local_addr()?,
IpTos::from((IpTosDscp::Le, IpTosEcn::Ect1)),
None,
"Hello, world!".as_bytes().to_vec(),
);
sender.writable().await?;
sender.send(&datagram)?;
receiver.readable().await?;
let received_datagram = receiver
.recv(&receiver_addr)
.expect("receive to succeed")
.into_iter()
.next()
.expect("receive to yield datagram");
// Assert that the ECN is correct.
assert_eq!(
IpTosEcn::from(datagram.tos()),
IpTosEcn::from(received_datagram.tos())
);
Ok(())
}
/// Expect [`Socket::recv`] to handle multiple [`Datagram`]s on GRO read.
#[tokio::test]
#[cfg_attr(not(any(target_os = "linux", target_os = "windows")), ignore)]
async fn many_datagrams_through_gro() -> Result<(), io::Error> {
const SEGMENT_SIZE: usize = 128;
let sender = Socket::bind("127.0.0.1:0")?;
let receiver_addr: SocketAddr = "127.0.0.1:0".parse().unwrap();
let mut receiver = Socket::bind(receiver_addr)?;
// `neqo_common::udp::Socket::send` does not yet
// (https://github.com/mozilla/neqo/issues/1693) support GSO. Use
// `quinn_udp` directly.
let max_gso_segments = sender.state.max_gso_segments();
let msg = vec![0xAB; SEGMENT_SIZE * max_gso_segments];
let transmit = Transmit {
destination: receiver.local_addr()?,
ecn: EcnCodepoint::from_bits(Into::<u8>::into(IpTos::from((
IpTosDscp::Le,
IpTosEcn::Ect1,
)))),
contents: &msg,
segment_size: Some(SEGMENT_SIZE),
src_ip: None,
};
sender.writable().await?;
sender.socket.try_io(Interest::WRITABLE, || {
sender.state.send((&sender.socket).into(), &transmit)
})?;
// Allow for one GSO sendmmsg to result in multiple GRO recvmmsg.
let mut num_received = 0;
while num_received < max_gso_segments {
receiver.readable().await?;
receiver
.recv(&receiver_addr)
.expect("receive to succeed")
.into_iter()
.for_each(|d| {
assert_eq!(
SEGMENT_SIZE,
d.len(),
"Expect received datagrams to have same length as sent datagrams."
);
num_received += 1;
});
}
Ok(())
}
}


@ -0,0 +1 @@
{"files":{"Cargo.toml":"d0dcdff68dab18eb8770515e182ff2497d8cfa68e70b0633ab51bf2f96ac1dba","LICENSE-APACHE":"c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4","LICENSE-MIT":"4b2d0aca6789fa39e03d6738e869ea0988cceba210ca34ebb59c15c463e93a04","src/cmsg/mod.rs":"c5b3ffc33d05383894bb1aecfd6dce9f85162104b5158a2106ae5b9a13573926","src/cmsg/unix.rs":"138cd32f0861e81555e5da6e47de852594bb02c0b1b3ab7e4759dd51fdbfa80d","src/cmsg/windows.rs":"6fb936ec4a283efc5796872e777441e3039c40589073865644a8ef7936af4f4b","src/fallback.rs":"7fe9666b0bf508d1b5ec0b3690bb7add94c8f213cb51a263c9959e22a5094ad0","src/lib.rs":"f3abbcd52754786ea3a0fb7398253cee1cde952937c318a4a0471e9cda63b753","src/unix.rs":"ebf6a21859bc185b0850ba7b4348991f301c8bf15649f4794b7e1afd1c30df75","src/windows.rs":"7e89b0c8808a422dcbe2c190f0d357e1cd717c6f1e94742d64627f3cd7b8571b","tests/tests.rs":"8a4f9d4a17e12d4f88bfe729a500e91e065dcb9347052a8db0f86735a6639138"},"package":"cb7ad7bc932e4968523fa7d9c320ee135ff779de720e9350fee8728838551764"}

51
third_party/rust/quinn-udp/Cargo.toml vendored Normal file

@ -0,0 +1,51 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2021"
rust-version = "1.66"
name = "quinn-udp"
version = "0.5.0"
description = "UDP sockets with ECN information for the QUIC transport protocol"
keywords = ["quic"]
categories = [
"network-programming",
"asynchronous",
]
license = "MIT OR Apache-2.0"
repository = "https://github.com/quinn-rs/quinn"
[package.metadata.docs.rs]
all-features = true
[dependencies.libc]
version = "0.2.113"
[dependencies.socket2]
version = "0.5"
[dependencies.tracing]
version = "0.1.10"
[features]
default = ["log"]
log = ["tracing/log"]
[target."cfg(windows)".dependencies.once_cell]
version = "1.19.0"
[target."cfg(windows)".dependencies.windows-sys]
version = "0.52.0"
features = [
"Win32_Foundation",
"Win32_System_IO",
"Win32_Networking_WinSock",
]


@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,7 @@
Copyright (c) 2018 The quinn Developers
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@ -0,0 +1,142 @@
use std::{
ffi::{c_int, c_uchar},
mem, ptr,
};
#[cfg(unix)]
#[path = "unix.rs"]
mod imp;
#[cfg(windows)]
#[path = "windows.rs"]
mod imp;
pub(crate) use imp::Aligned;
/// Helper to encode a series of control messages (native "cmsgs") to a buffer for use in `sendmsg`
/// like API.
///
/// The operation must be "finished" for the native msghdr to be usable, either by calling `finish`
/// explicitly or by dropping the `Encoder`.
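///
/// A minimal, hypothetical sketch of how this is used, mirroring the call site in
/// `prepare_msg` in `unix.rs` (`msg_hdr` and `ecn_value` are assumed to exist, and the
/// buffer behind `msg_hdr` must satisfy the safety contract of `new`):
///
/// ```ignore
/// let mut encoder = unsafe { cmsg::Encoder::new(&mut msg_hdr) };
/// encoder.push(libc::IPPROTO_IP, libc::IP_TOS, ecn_value as libc::c_int);
/// encoder.finish(); // drops the encoder, writing the accumulated control length into `msg_hdr`
/// ```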
pub(crate) struct Encoder<'a, M: MsgHdr> {
hdr: &'a mut M,
cmsg: Option<&'a mut M::ControlMessage>,
len: usize,
}
impl<'a, M: MsgHdr> Encoder<'a, M> {
/// # Safety
/// - `hdr` must contain a suitably aligned pointer to a big enough buffer to hold control message
/// bytes. All bytes of this buffer can be safely written.
/// - The `Encoder` must be dropped before `hdr` is passed to a system call, and must not be leaked.
pub(crate) unsafe fn new(hdr: &'a mut M) -> Self {
Self {
cmsg: hdr.cmsg_first_hdr().as_mut(),
hdr,
len: 0,
}
}
/// Append a control message to the buffer.
///
/// # Panics
/// - If insufficient buffer space remains.
/// - If `T` has stricter alignment requirements than `M::ControlMessage`
pub(crate) fn push<T: Copy + ?Sized>(&mut self, level: c_int, ty: c_int, value: T) {
assert!(mem::align_of::<T>() <= mem::align_of::<M::ControlMessage>());
let space = M::ControlMessage::cmsg_space(mem::size_of_val(&value));
assert!(
self.hdr.control_len() >= self.len + space,
"control message buffer too small. Required: {}, Available: {}",
self.len + space,
self.hdr.control_len()
);
let cmsg = self.cmsg.take().expect("no control buffer space remaining");
cmsg.set(
level,
ty,
M::ControlMessage::cmsg_len(mem::size_of_val(&value)),
);
unsafe {
ptr::write(cmsg.cmsg_data() as *const T as *mut T, value);
}
self.len += space;
self.cmsg = unsafe { self.hdr.cmsg_nxt_hdr(cmsg).as_mut() };
}
/// Finishes appending control messages to the buffer
pub(crate) fn finish(self) {
// Delegates to the `Drop` impl
}
}
// Statically guarantees that the encoding operation is "finished" before the control buffer is read
// by `sendmsg` like API.
impl<'a, M: MsgHdr> Drop for Encoder<'a, M> {
fn drop(&mut self) {
self.hdr.set_control_len(self.len as _);
}
}
/// # Safety
///
/// `cmsg` must refer to a native cmsg containing a payload of type `T`
pub(crate) unsafe fn decode<T: Copy, C: CMsgHdr>(cmsg: &impl CMsgHdr) -> T {
assert!(mem::align_of::<T>() <= mem::align_of::<C>());
debug_assert_eq!(cmsg.len(), C::cmsg_len(mem::size_of::<T>()));
ptr::read(cmsg.cmsg_data() as *const T)
}
pub(crate) struct Iter<'a, M: MsgHdr> {
hdr: &'a M,
cmsg: Option<&'a M::ControlMessage>,
}
impl<'a, M: MsgHdr> Iter<'a, M> {
/// # Safety
///
/// `hdr` must hold a pointer to memory outliving `'a` which can be soundly read for the
/// lifetime of the constructed `Iter` and contains a buffer of native cmsgs, i.e. is aligned
/// for native `cmsghdr`, is fully initialized, and has correct internal links.
pub(crate) unsafe fn new(hdr: &'a M) -> Self {
Self {
hdr,
cmsg: hdr.cmsg_first_hdr().as_ref(),
}
}
}
impl<'a, M: MsgHdr> Iterator for Iter<'a, M> {
type Item = &'a M::ControlMessage;
fn next(&mut self) -> Option<Self::Item> {
let current = self.cmsg.take()?;
self.cmsg = unsafe { self.hdr.cmsg_nxt_hdr(current).as_ref() };
Some(current)
}
}
// Helper traits for native types for control messages
pub(crate) trait MsgHdr {
type ControlMessage: CMsgHdr;
fn cmsg_first_hdr(&self) -> *mut Self::ControlMessage;
fn cmsg_nxt_hdr(&self, cmsg: &Self::ControlMessage) -> *mut Self::ControlMessage;
fn set_control_len(&mut self, len: usize);
fn control_len(&self) -> usize;
}
pub(crate) trait CMsgHdr {
fn cmsg_len(length: usize) -> usize;
fn cmsg_space(length: usize) -> usize;
fn cmsg_data(&self) -> *mut c_uchar;
fn set(&mut self, level: c_int, ty: c_int, len: usize);
fn len(&self) -> usize;
}

View File

@ -0,0 +1,53 @@
use std::ffi::{c_int, c_uchar};
use super::{CMsgHdr, MsgHdr};
#[derive(Copy, Clone)]
#[repr(align(8))] // Conservative bound for align_of<libc::cmsghdr>
pub(crate) struct Aligned<T>(pub(crate) T);
/// Helpers for [`libc::msghdr`]
impl MsgHdr for libc::msghdr {
type ControlMessage = libc::cmsghdr;
fn cmsg_first_hdr(&self) -> *mut Self::ControlMessage {
unsafe { libc::CMSG_FIRSTHDR(self) }
}
fn cmsg_nxt_hdr(&self, cmsg: &Self::ControlMessage) -> *mut Self::ControlMessage {
unsafe { libc::CMSG_NXTHDR(self, cmsg) }
}
fn set_control_len(&mut self, len: usize) {
self.msg_controllen = len as _;
}
fn control_len(&self) -> usize {
self.msg_controllen as _
}
}
/// Helpers for [`libc::cmsghdr`]
impl CMsgHdr for libc::cmsghdr {
fn cmsg_len(length: usize) -> usize {
unsafe { libc::CMSG_LEN(length as _) as usize }
}
fn cmsg_space(length: usize) -> usize {
unsafe { libc::CMSG_SPACE(length as _) as usize }
}
fn cmsg_data(&self) -> *mut c_uchar {
unsafe { libc::CMSG_DATA(self) }
}
fn set(&mut self, level: c_int, ty: c_int, len: usize) {
self.cmsg_level = level as _;
self.cmsg_type = ty as _;
self.cmsg_len = len as _;
}
fn len(&self) -> usize {
self.cmsg_len as _
}
}

View File

@ -0,0 +1,83 @@
use std::{
ffi::{c_int, c_uchar},
mem, ptr,
};
use windows_sys::Win32::Networking::WinSock;
use super::{CMsgHdr, MsgHdr};
#[derive(Copy, Clone)]
#[repr(align(8))] // Conservative bound for align_of<WinSock::CMSGHDR>
pub(crate) struct Aligned<T>(pub(crate) T);
/// Helpers for [`WinSock::WSAMSG`]
// https://learn.microsoft.com/en-us/windows/win32/api/ws2def/ns-ws2def-wsamsg
// https://microsoft.github.io/windows-docs-rs/doc/windows/Win32/Networking/WinSock/struct.WSAMSG.html
impl MsgHdr for WinSock::WSAMSG {
type ControlMessage = WinSock::CMSGHDR;
fn cmsg_first_hdr(&self) -> *mut Self::ControlMessage {
if self.Control.len as usize >= mem::size_of::<WinSock::CMSGHDR>() {
self.Control.buf as *mut WinSock::CMSGHDR
} else {
ptr::null_mut::<WinSock::CMSGHDR>()
}
}
fn cmsg_nxt_hdr(&self, cmsg: &Self::ControlMessage) -> *mut Self::ControlMessage {
let next =
(cmsg as *const _ as usize + cmsghdr_align(cmsg.cmsg_len)) as *mut WinSock::CMSGHDR;
let max = self.Control.buf as usize + self.Control.len as usize;
if unsafe { next.offset(1) } as usize > max {
ptr::null_mut()
} else {
next
}
}
fn set_control_len(&mut self, len: usize) {
self.Control.len = len as _;
}
fn control_len(&self) -> usize {
self.Control.len as _
}
}
/// Helpers for [`WinSock::CMSGHDR`]
// https://learn.microsoft.com/en-us/windows/win32/api/ws2def/ns-ws2def-wsacmsghdr
// https://microsoft.github.io/windows-docs-rs/doc/windows/Win32/Networking/WinSock/struct.CMSGHDR.html
impl CMsgHdr for WinSock::CMSGHDR {
fn cmsg_len(length: usize) -> usize {
cmsgdata_align(mem::size_of::<Self>()) + length
}
fn cmsg_space(length: usize) -> usize {
cmsgdata_align(mem::size_of::<Self>() + cmsghdr_align(length))
}
fn cmsg_data(&self) -> *mut c_uchar {
(self as *const _ as usize + cmsgdata_align(mem::size_of::<Self>())) as *mut c_uchar
}
fn set(&mut self, level: c_int, ty: c_int, len: usize) {
self.cmsg_level = level as _;
self.cmsg_type = ty as _;
self.cmsg_len = len as _;
}
fn len(&self) -> usize {
self.cmsg_len as _
}
}
// Helper functions for `WinSock::WSAMSG` and `WinSock::CMSGHDR` are based on C macros from
// https://github.com/microsoft/win32metadata/blob/main/generation/WinSDK/RecompiledIdlHeaders/shared/ws2def.h#L741
fn cmsghdr_align(length: usize) -> usize {
(length + mem::align_of::<WinSock::CMSGHDR>() - 1) & !(mem::align_of::<WinSock::CMSGHDR>() - 1)
}
fn cmsgdata_align(length: usize) -> usize {
(length + mem::align_of::<usize>() - 1) & !(mem::align_of::<usize>() - 1)
}

View File

@ -0,0 +1,88 @@
use std::{
io::{self, IoSliceMut},
sync::Mutex,
time::Instant,
};
use super::{log_sendmsg_error, RecvMeta, Transmit, UdpSockRef, IO_ERROR_LOG_INTERVAL};
/// Fallback UDP socket interface that stubs out all special functionality
///
/// Used when a better implementation is not available for a particular target, at the cost of
/// reduced performance compared to that enabled by some target-specific interfaces.
#[derive(Debug)]
pub struct UdpSocketState {
last_send_error: Mutex<Instant>,
}
impl UdpSocketState {
pub fn new(socket: UdpSockRef<'_>) -> io::Result<Self> {
socket.0.set_nonblocking(true)?;
let now = Instant::now();
Ok(Self {
last_send_error: Mutex::new(now.checked_sub(2 * IO_ERROR_LOG_INTERVAL).unwrap_or(now)),
})
}
pub fn send(&self, socket: UdpSockRef<'_>, transmit: &Transmit<'_>) -> io::Result<()> {
let Err(e) = socket.0.send_to(
transmit.contents,
&socket2::SockAddr::from(transmit.destination),
) else {
return Ok(());
};
if e.kind() == io::ErrorKind::WouldBlock {
return Err(e);
}
// Other errors are ignored, since they will usually be handled
// by higher level retransmits and timeouts.
// - PermissionDenied errors have been observed due to iptable rules.
// Those are not fatal errors, since the
// configuration can be dynamically changed.
// - Destination unreachable errors have been observed for other reasons.
log_sendmsg_error(&self.last_send_error, e, transmit);
Ok(())
}
pub fn recv(
&self,
socket: UdpSockRef<'_>,
bufs: &mut [IoSliceMut<'_>],
meta: &mut [RecvMeta],
) -> io::Result<usize> {
// Safety: both `IoSliceMut` and `MaybeUninitSlice` promise to have the
// same layout, that of `iovec`/`WSABUF`. Furthermore `recv_vectored`
// promises not to write uninitialised bytes to the `bufs` and to pass them
// directly to the `recvmsg` system call, so this is safe.
let bufs = unsafe {
&mut *(bufs as *mut [IoSliceMut<'_>] as *mut [socket2::MaybeUninitSlice<'_>])
};
let (len, _flags, addr) = socket.0.recv_from_vectored(bufs)?;
meta[0] = RecvMeta {
len,
stride: len,
addr: addr.as_socket().unwrap(),
ecn: None,
dst_ip: None,
};
Ok(1)
}
#[inline]
pub fn max_gso_segments(&self) -> usize {
1
}
#[inline]
pub fn gro_segments(&self) -> usize {
1
}
#[inline]
pub fn may_fragment(&self) -> bool {
true
}
}
pub(crate) const BATCH_SIZE: usize = 1;

193
third_party/rust/quinn-udp/src/lib.rs vendored Normal file
View File

@ -0,0 +1,193 @@
//! Uniform interface to send and receive UDP packets with advanced features useful for QUIC
//!
//! This crate exposes kernel UDP stack features available on most modern systems which are required
//! for an efficient and conformant QUIC implementation. As of this writing, these are not available
//! in std or major async runtimes, and their niche character and complexity are a barrier to adding
//! them. Hence, a dedicated crate.
//!
//! Exposed features include:
//!
//! - Segmentation offload for bulk send and receive operations, reducing CPU load.
//! - Reporting the exact destination address of received packets and specifying explicit source
//! addresses for sent packets, allowing responses to be sent from the address that the peer
//! expects when there are multiple possibilities. This is common when bound to a wildcard address
//! in IPv6 due to [RFC 8981] temporary addresses.
//! - [Explicit Congestion Notification], which is required by QUIC to prevent packet loss and reduce
//! latency on congested links when supported by the network path.
//! - Disabled IP-layer fragmentation, which allows the true physical MTU to be detected and reduces
//! risk of QUIC packet loss.
//!
//! Some features are unavailable in some environments. This can be due to an outdated operating
//! system or drivers. Some operating systems may not implement desired features at all, or may not
//! yet be supported by the crate. When support is unavailable, functionality will gracefully
//! degrade.
//!
//! [RFC 8981]: https://www.rfc-editor.org/rfc/rfc8981.html
//! [Explicit Congestion Notification]: https://www.rfc-editor.org/rfc/rfc3168.html
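//!
//! A minimal, hypothetical usage sketch (blocking flow, single datagram; error handling and
//! readiness polling elided; not part of the upstream documentation):
//!
//! ```ignore
//! use std::{io::IoSliceMut, net::UdpSocket};
//! use quinn_udp::{RecvMeta, Transmit, UdpSocketState};
//!
//! let send = UdpSocket::bind("127.0.0.1:0")?;
//! let recv = UdpSocket::bind("127.0.0.1:0")?;
//! let send_state = UdpSocketState::new((&send).into())?;
//! let recv_state = UdpSocketState::new((&recv).into())?;
//!
//! // Send one datagram without ECN, GSO, or an explicit source address.
//! send_state.send((&send).into(), &Transmit {
//!     destination: recv.local_addr()?,
//!     ecn: None,
//!     contents: b"hello",
//!     segment_size: None,
//!     src_ip: None,
//! })?;
//!
//! // Receive into a single buffer; `meta[0]` describes what was written into `buf`.
//! let mut buf = [0u8; 1500];
//! let mut bufs = [IoSliceMut::new(&mut buf)];
//! let mut meta = [RecvMeta::default()];
//! let n = recv_state.recv((&recv).into(), &mut bufs, &mut meta)?;
//! ```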
#![warn(unreachable_pub)]
#![warn(clippy::use_self)]
#[cfg(unix)]
use std::os::unix::io::AsFd;
#[cfg(windows)]
use std::os::windows::io::AsSocket;
use std::{
net::{IpAddr, Ipv6Addr, SocketAddr},
sync::Mutex,
time::{Duration, Instant},
};
use tracing::warn;
#[cfg(any(unix, windows))]
mod cmsg;
#[cfg(unix)]
#[path = "unix.rs"]
mod imp;
#[cfg(windows)]
#[path = "windows.rs"]
mod imp;
// No ECN support
#[cfg(not(any(unix, windows)))]
#[path = "fallback.rs"]
mod imp;
pub use imp::UdpSocketState;
/// Number of UDP packets to send/receive at a time
pub const BATCH_SIZE: usize = imp::BATCH_SIZE;
/// Metadata for a single buffer filled with bytes received from the network
///
/// This associated buffer can contain one or more datagrams, see [`stride`].
///
/// [`stride`]: RecvMeta::stride
#[derive(Debug, Copy, Clone)]
pub struct RecvMeta {
/// The source address of the datagram(s) contained in the buffer
pub addr: SocketAddr,
/// The number of bytes received into the associated buffer
pub len: usize,
/// The size of a single datagram in the associated buffer
///
/// When GRO (Generic Receive Offload) is used this indicates the size of a single
/// datagram inside the buffer. If the buffer is larger, that is if [`len`] is greater
/// then this value, then the individual datagrams contained have their boundaries at
/// `stride` increments from the start. The last datagram could be smaller than
/// `stride`.
///
/// [`len`]: RecvMeta::len
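///
/// A hypothetical sketch of walking the datagrams in a filled buffer (`buf`, `meta`, and
/// `handle` are assumed, not part of this crate):
///
/// ```ignore
/// for datagram in buf[..meta.len].chunks(meta.stride) {
///     handle(datagram); // the final chunk may be shorter than `stride`
/// }
/// ```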
pub stride: usize,
/// The Explicit Congestion Notification bits for the datagram(s) in the buffer
pub ecn: Option<EcnCodepoint>,
/// The destination IP address which was encoded in this datagram
pub dst_ip: Option<IpAddr>,
}
impl Default for RecvMeta {
/// Constructs a value with arbitrary fields, intended to be overwritten
fn default() -> Self {
Self {
addr: SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 0),
len: 0,
stride: 0,
ecn: None,
dst_ip: None,
}
}
}
/// An outgoing packet
#[derive(Debug, Clone)]
pub struct Transmit<'a> {
/// The socket this datagram should be sent to
pub destination: SocketAddr,
/// Explicit congestion notification bits to set on the packet
pub ecn: Option<EcnCodepoint>,
/// Contents of the datagram
pub contents: &'a [u8],
/// The segment size if this transmission contains multiple datagrams.
/// This is `None` if the transmit only contains a single datagram
pub segment_size: Option<usize>,
/// Optional source IP address for the datagram
pub src_ip: Option<IpAddr>,
}
/// Log at most 1 IO error per minute
const IO_ERROR_LOG_INTERVAL: Duration = std::time::Duration::from_secs(60);
/// Logs a warning message when sendmsg fails
///
/// Logging will only be performed if at least [`IO_ERROR_LOG_INTERVAL`]
/// has elapsed since the last error was logged.
fn log_sendmsg_error(
last_send_error: &Mutex<Instant>,
err: impl core::fmt::Debug,
transmit: &Transmit,
) {
let now = Instant::now();
let last_send_error = &mut *last_send_error.lock().expect("poisoned lock");
if now.saturating_duration_since(*last_send_error) > IO_ERROR_LOG_INTERVAL {
*last_send_error = now;
warn!(
"sendmsg error: {:?}, Transmit: {{ destination: {:?}, src_ip: {:?}, enc: {:?}, len: {:?}, segment_size: {:?} }}",
err, transmit.destination, transmit.src_ip, transmit.ecn, transmit.contents.len(), transmit.segment_size);
}
}
/// A borrowed UDP socket
///
/// On Unix, constructible via `From<T: AsRawFd>`. On Windows, constructible via `From<T:
/// AsRawSocket>`.
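///
/// A hypothetical construction sketch (assuming a `std::net::UdpSocket`):
///
/// ```ignore
/// let socket = std::net::UdpSocket::bind("127.0.0.1:0")?;
/// let sock_ref = UdpSockRef::from(&socket);
/// ```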
// Wrapper around socket2 to avoid making it a public dependency and incurring stability risk
pub struct UdpSockRef<'a>(socket2::SockRef<'a>);
#[cfg(unix)]
impl<'s, S> From<&'s S> for UdpSockRef<'s>
where
S: AsFd,
{
fn from(socket: &'s S) -> Self {
Self(socket.into())
}
}
#[cfg(windows)]
impl<'s, S> From<&'s S> for UdpSockRef<'s>
where
S: AsSocket,
{
fn from(socket: &'s S) -> Self {
Self(socket.into())
}
}
/// Explicit congestion notification codepoint
#[repr(u8)]
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum EcnCodepoint {
#[doc(hidden)]
Ect0 = 0b10,
#[doc(hidden)]
Ect1 = 0b01,
#[doc(hidden)]
Ce = 0b11,
}
impl EcnCodepoint {
/// Create new object from the given bits
pub fn from_bits(x: u8) -> Option<Self> {
use self::EcnCodepoint::*;
Some(match x & 0b11 {
0b10 => Ect0,
0b01 => Ect1,
0b11 => Ce,
_ => {
return None;
}
})
}
}

775
third_party/rust/quinn-udp/src/unix.rs vendored Normal file
View File

@ -0,0 +1,775 @@
#[cfg(not(any(target_os = "macos", target_os = "ios")))]
use std::ptr;
use std::{
io::{self, IoSliceMut},
mem::{self, MaybeUninit},
net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6},
os::unix::io::AsRawFd,
sync::{
atomic::{AtomicBool, AtomicUsize, Ordering},
Mutex,
},
time::Instant,
};
use socket2::SockRef;
use super::{
cmsg, log_sendmsg_error, EcnCodepoint, RecvMeta, Transmit, UdpSockRef, IO_ERROR_LOG_INTERVAL,
};
#[cfg(target_os = "freebsd")]
type IpTosTy = libc::c_uchar;
#[cfg(not(target_os = "freebsd"))]
type IpTosTy = libc::c_int;
/// Tokio-compatible UDP socket with some useful specializations.
///
/// Unlike a standard tokio UDP socket, this allows ECN bits to be read and written on some
/// platforms.
#[derive(Debug)]
pub struct UdpSocketState {
last_send_error: Mutex<Instant>,
max_gso_segments: AtomicUsize,
gro_segments: usize,
may_fragment: bool,
/// True if we have received an EINVAL error from the `sendmsg` or `sendmmsg` system call at least once.
///
/// If set, we assume an old kernel is in use and switch to fallback mode.
/// In particular, we do not use IP_TOS cmsg_type in this case,
/// which is not supported on Linux <3.13 and results in not sending the UDP packet at all.
sendmsg_einval: AtomicBool,
}
impl UdpSocketState {
pub fn new(sock: UdpSockRef<'_>) -> io::Result<Self> {
let io = sock.0;
let mut cmsg_platform_space = 0;
if cfg!(target_os = "linux")
|| cfg!(target_os = "freebsd")
|| cfg!(target_os = "macos")
|| cfg!(target_os = "ios")
|| cfg!(target_os = "android")
{
cmsg_platform_space +=
unsafe { libc::CMSG_SPACE(mem::size_of::<libc::in6_pktinfo>() as _) as usize };
}
assert!(
CMSG_LEN
>= unsafe { libc::CMSG_SPACE(mem::size_of::<libc::c_int>() as _) as usize }
+ cmsg_platform_space
);
assert!(
mem::align_of::<libc::cmsghdr>() <= mem::align_of::<cmsg::Aligned<[u8; 0]>>(),
"control message buffers will be misaligned"
);
io.set_nonblocking(true)?;
let addr = io.local_addr()?;
let is_ipv4 = addr.family() == libc::AF_INET as libc::sa_family_t;
// mac and ios do not support IP_RECVTOS on dual-stack sockets :(
// older macos versions also don't have the flag and will error out if we don't ignore it
if is_ipv4 || !io.only_v6()? {
if let Err(err) = set_socket_option(&*io, libc::IPPROTO_IP, libc::IP_RECVTOS, OPTION_ON)
{
tracing::debug!("Ignoring error setting IP_RECVTOS on socket: {err:?}",);
}
}
let mut may_fragment = false;
#[cfg(any(target_os = "linux", target_os = "android"))]
{
// opportunistically try to enable GRO. See gro::gro_segments().
#[cfg(target_os = "linux")]
let _ = set_socket_option(&*io, libc::SOL_UDP, libc::UDP_GRO, OPTION_ON);
// Forbid IPv4 fragmentation. Set even for IPv6 to account for IPv6 mapped IPv4 addresses.
// Set `may_fragment` to `true` if this option is not supported on the platform.
may_fragment |= !set_socket_option_supported(
&*io,
libc::IPPROTO_IP,
libc::IP_MTU_DISCOVER,
libc::IP_PMTUDISC_PROBE,
)?;
if is_ipv4 {
set_socket_option(&*io, libc::IPPROTO_IP, libc::IP_PKTINFO, OPTION_ON)?;
} else {
// Set `may_fragment` to `true` if this option is not supported on the platform.
may_fragment |= !set_socket_option_supported(
&*io,
libc::IPPROTO_IPV6,
libc::IPV6_MTU_DISCOVER,
libc::IP_PMTUDISC_PROBE,
)?;
}
}
#[cfg(any(target_os = "freebsd", target_os = "macos", target_os = "ios"))]
{
if is_ipv4 {
// Set `may_fragment` to `true` if this option is not supported on the platform.
may_fragment |= !set_socket_option_supported(
&*io,
libc::IPPROTO_IP,
libc::IP_DONTFRAG,
OPTION_ON,
)?;
}
}
#[cfg(any(target_os = "freebsd", target_os = "macos", target_os = "ios"))]
// IP_RECVDSTADDR == IP_SENDSRCADDR on FreeBSD
// macOS uses only IP_RECVDSTADDR, no IP_SENDSRCADDR on macOS
// macOS also supports IP_PKTINFO
{
if is_ipv4 {
set_socket_option(&*io, libc::IPPROTO_IP, libc::IP_RECVDSTADDR, OPTION_ON)?;
}
}
// Options standardized in RFC 3542
if !is_ipv4 {
set_socket_option(&*io, libc::IPPROTO_IPV6, libc::IPV6_RECVPKTINFO, OPTION_ON)?;
set_socket_option(&*io, libc::IPPROTO_IPV6, libc::IPV6_RECVTCLASS, OPTION_ON)?;
// Linux's IP_PMTUDISC_PROBE allows us to operate under interface MTU rather than the
// kernel's path MTU guess, but actually disabling fragmentation requires this too. See
// __ip6_append_data in ip6_output.c.
// Set `may_fragment` to `true` if this option is not supported on the platform.
may_fragment |= !set_socket_option_supported(
&*io,
libc::IPPROTO_IPV6,
libc::IPV6_DONTFRAG,
OPTION_ON,
)?;
}
let now = Instant::now();
Ok(Self {
last_send_error: Mutex::new(now.checked_sub(2 * IO_ERROR_LOG_INTERVAL).unwrap_or(now)),
max_gso_segments: AtomicUsize::new(gso::max_gso_segments()),
gro_segments: gro::gro_segments(),
may_fragment,
sendmsg_einval: AtomicBool::new(false),
})
}
pub fn send(&self, socket: UdpSockRef<'_>, transmit: &Transmit<'_>) -> io::Result<()> {
send(self, socket.0, transmit)
}
pub fn recv(
&self,
socket: UdpSockRef<'_>,
bufs: &mut [IoSliceMut<'_>],
meta: &mut [RecvMeta],
) -> io::Result<usize> {
recv(socket.0, bufs, meta)
}
/// The maximum amount of segments which can be transmitted if a platform
/// supports Generic Send Offload (GSO).
///
/// This is 1 if the platform doesn't support GSO. Subject to change if errors are detected
/// while using GSO.
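///
/// A hypothetical GSO sketch (`datagrams` is an assumed buffer holding several equally
/// sized datagrams back to back, `destination` an assumed address):
///
/// ```ignore
/// let transmit = Transmit {
///     destination,
///     ecn: None,
///     contents: &datagrams,
///     segment_size: Some(1200), // each datagram in `contents` is 1200 bytes
///     src_ip: None,
/// };
/// ```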
#[inline]
pub fn max_gso_segments(&self) -> usize {
self.max_gso_segments.load(Ordering::Relaxed)
}
/// The number of segments to read when GRO is enabled. Used as a factor to
/// compute the receive buffer size.
///
/// Returns 1 if the platform doesn't support GRO.
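///
/// A hypothetical sizing sketch (`MAX_UDP_PAYLOAD` is an assumed application constant and
/// `state` an assumed `UdpSocketState`, not part of this crate):
///
/// ```ignore
/// let recv_buf = vec![0u8; MAX_UDP_PAYLOAD * state.gro_segments()];
/// ```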
#[inline]
pub fn gro_segments(&self) -> usize {
self.gro_segments
}
/// Whether transmitted datagrams might get fragmented by the IP layer
///
/// Returns `false` on targets which employ e.g. the `IPV6_DONTFRAG` socket option.
#[inline]
pub fn may_fragment(&self) -> bool {
self.may_fragment
}
/// Returns true if we previously got an EINVAL error from `sendmsg` or `sendmmsg` syscall.
fn sendmsg_einval(&self) -> bool {
self.sendmsg_einval.load(Ordering::Relaxed)
}
/// Sets the flag indicating we got EINVAL error from `sendmsg` or `sendmmsg` syscall.
#[cfg(not(any(target_os = "macos", target_os = "ios")))]
fn set_sendmsg_einval(&self) {
self.sendmsg_einval.store(true, Ordering::Relaxed)
}
}
#[cfg(not(any(target_os = "macos", target_os = "ios")))]
fn send(
#[allow(unused_variables)] // only used on Linux
state: &UdpSocketState,
io: SockRef<'_>,
transmit: &Transmit<'_>,
) -> io::Result<()> {
#[allow(unused_mut)] // only mutable on FreeBSD
let mut encode_src_ip = true;
#[cfg(target_os = "freebsd")]
{
let addr = io.local_addr()?;
let is_ipv4 = addr.family() == libc::AF_INET as libc::sa_family_t;
if is_ipv4 {
if let Some(socket) = addr.as_socket_ipv4() {
encode_src_ip = socket.ip() == &Ipv4Addr::UNSPECIFIED;
}
}
}
let mut msg_hdr: libc::msghdr = unsafe { mem::zeroed() };
let mut iovec: libc::iovec = unsafe { mem::zeroed() };
let mut cmsgs = cmsg::Aligned([0u8; CMSG_LEN]);
let dst_addr = socket2::SockAddr::from(transmit.destination);
prepare_msg(
transmit,
&dst_addr,
&mut msg_hdr,
&mut iovec,
&mut cmsgs,
encode_src_ip,
state.sendmsg_einval(),
);
loop {
let n = unsafe { libc::sendmsg(io.as_raw_fd(), &msg_hdr, 0) };
if n == -1 {
let e = io::Error::last_os_error();
match e.kind() {
io::ErrorKind::Interrupted => {
// Retry the transmission
continue;
}
io::ErrorKind::WouldBlock => return Err(e),
_ => {
// Some network adapters and drivers do not support GSO. Unfortunately, Linux
// offers no easy way for us to detect this short of an EIO or sometimes EINVAL
// when we try to actually send datagrams using it.
#[cfg(target_os = "linux")]
if let Some(libc::EIO) | Some(libc::EINVAL) = e.raw_os_error() {
// Prevent new transmits from being scheduled using GSO. Existing GSO transmits
// may already be in the pipeline, so we need to tolerate additional failures.
if state.max_gso_segments() > 1 {
tracing::error!("got transmit error, halting segmentation offload");
state
.max_gso_segments
.store(1, std::sync::atomic::Ordering::Relaxed);
}
}
if e.raw_os_error() == Some(libc::EINVAL) {
// Some arguments to `sendmsg` are not supported.
// Switch to fallback mode.
state.set_sendmsg_einval();
}
// Other errors are ignored, since they will usually be handled
// by higher level retransmits and timeouts.
// - PermissionDenied errors have been observed due to iptable rules.
// Those are not fatal errors, since the
// configuration can be dynamically changed.
// - Destination unreachable errors have been observed for other reasons.
// - EMSGSIZE is expected for MTU probes. Future work might be able to avoid
// these by automatically clamping the MTUD upper bound to the interface MTU.
if e.raw_os_error() != Some(libc::EMSGSIZE) {
log_sendmsg_error(&state.last_send_error, e, transmit);
}
return Ok(());
}
}
}
return Ok(());
}
}
#[cfg(any(target_os = "macos", target_os = "ios"))]
fn send(state: &UdpSocketState, io: SockRef<'_>, transmit: &Transmit<'_>) -> io::Result<()> {
let mut hdr: libc::msghdr = unsafe { mem::zeroed() };
let mut iov: libc::iovec = unsafe { mem::zeroed() };
let mut ctrl = cmsg::Aligned([0u8; CMSG_LEN]);
let addr = socket2::SockAddr::from(transmit.destination);
prepare_msg(
transmit,
&addr,
&mut hdr,
&mut iov,
&mut ctrl,
// Only tested on macOS and iOS
cfg!(target_os = "macos") || cfg!(target_os = "ios"),
state.sendmsg_einval(),
);
let n = unsafe { libc::sendmsg(io.as_raw_fd(), &hdr, 0) };
if n == -1 {
let e = io::Error::last_os_error();
match e.kind() {
io::ErrorKind::Interrupted => {
// Retry the transmission
}
io::ErrorKind::WouldBlock => return Err(e),
_ => {
// Other errors are ignored, since they will usually be handled
// by higher level retransmits and timeouts.
// - PermissionDenied errors have been observed due to iptable rules.
// Those are not fatal errors, since the
// configuration can be dynamically changed.
// - Destination unreachable errors have been observed for other reasons.
// - EMSGSIZE is expected for MTU probes. Future work might be able to avoid
// these by automatically clamping the MTUD upper bound to the interface MTU.
if e.raw_os_error() != Some(libc::EMSGSIZE) {
log_sendmsg_error(&state.last_send_error, e, transmit);
}
}
}
}
Ok(())
}
#[cfg(not(any(target_os = "macos", target_os = "ios")))]
fn recv(io: SockRef<'_>, bufs: &mut [IoSliceMut<'_>], meta: &mut [RecvMeta]) -> io::Result<usize> {
let mut names = [MaybeUninit::<libc::sockaddr_storage>::uninit(); BATCH_SIZE];
let mut ctrls = [cmsg::Aligned(MaybeUninit::<[u8; CMSG_LEN]>::uninit()); BATCH_SIZE];
let mut hdrs = unsafe { mem::zeroed::<[libc::mmsghdr; BATCH_SIZE]>() };
let max_msg_count = bufs.len().min(BATCH_SIZE);
for i in 0..max_msg_count {
prepare_recv(
&mut bufs[i],
&mut names[i],
&mut ctrls[i],
&mut hdrs[i].msg_hdr,
);
}
let msg_count = loop {
let n = unsafe {
recvmmsg_with_fallback(
io.as_raw_fd(),
hdrs.as_mut_ptr(),
bufs.len().min(BATCH_SIZE) as _,
)
};
if n == -1 {
let e = io::Error::last_os_error();
if e.kind() == io::ErrorKind::Interrupted {
continue;
}
return Err(e);
}
break n;
};
for i in 0..(msg_count as usize) {
meta[i] = decode_recv(&names[i], &hdrs[i].msg_hdr, hdrs[i].msg_len as usize);
}
Ok(msg_count as usize)
}
#[cfg(any(target_os = "macos", target_os = "ios"))]
fn recv(io: SockRef<'_>, bufs: &mut [IoSliceMut<'_>], meta: &mut [RecvMeta]) -> io::Result<usize> {
let mut name = MaybeUninit::<libc::sockaddr_storage>::uninit();
let mut ctrl = cmsg::Aligned(MaybeUninit::<[u8; CMSG_LEN]>::uninit());
let mut hdr = unsafe { mem::zeroed::<libc::msghdr>() };
prepare_recv(&mut bufs[0], &mut name, &mut ctrl, &mut hdr);
let n = loop {
let n = unsafe { libc::recvmsg(io.as_raw_fd(), &mut hdr, 0) };
if n == -1 {
let e = io::Error::last_os_error();
if e.kind() == io::ErrorKind::Interrupted {
continue;
}
return Err(e);
}
if hdr.msg_flags & libc::MSG_TRUNC != 0 {
continue;
}
break n;
};
meta[0] = decode_recv(&name, &hdr, n as usize);
Ok(1)
}
/// Implementation of `recvmmsg` with a fallback
/// to `recvmsg` if the syscall is not available.
///
/// It uses [`libc::syscall`] instead of [`libc::recvmmsg`]
/// to avoid linking error on systems where libc does not contain `recvmmsg`.
#[cfg(not(any(target_os = "macos", target_os = "ios")))]
unsafe fn recvmmsg_with_fallback(
sockfd: libc::c_int,
msgvec: *mut libc::mmsghdr,
vlen: libc::c_uint,
) -> libc::c_int {
let flags = 0;
let timeout = ptr::null_mut::<libc::timespec>();
#[cfg(not(target_os = "freebsd"))]
{
let ret =
libc::syscall(libc::SYS_recvmmsg, sockfd, msgvec, vlen, flags, timeout) as libc::c_int;
if ret != -1 {
return ret;
}
}
// libc on FreeBSD implements `recvmmsg` as a high-level abstraction over `recvmsg`,
// thus `SYS_recvmmsg` constant and direct system call do not exist
#[cfg(target_os = "freebsd")]
{
let ret = libc::recvmmsg(sockfd, msgvec, vlen as usize, flags, timeout) as libc::c_int;
if ret != -1 {
return ret;
}
}
let e = io::Error::last_os_error();
match e.raw_os_error() {
Some(libc::ENOSYS) => {
// Fallback to `recvmsg`.
recvmmsg_fallback(sockfd, msgvec, vlen)
}
_ => -1,
}
}
/// Fallback implementation of `recvmmsg` using `recvmsg`
/// for systems which do not support `recvmmsg`
/// such as Linux <2.6.33.
#[cfg(not(any(target_os = "macos", target_os = "ios")))]
unsafe fn recvmmsg_fallback(
sockfd: libc::c_int,
msgvec: *mut libc::mmsghdr,
vlen: libc::c_uint,
) -> libc::c_int {
let flags = 0;
if vlen == 0 {
return 0;
}
let n = libc::recvmsg(sockfd, &mut (*msgvec).msg_hdr, flags);
if n == -1 {
-1
} else {
// type of `msg_len` field differs on Linux and FreeBSD,
// it is up to the compiler to infer and cast `n` to correct type
(*msgvec).msg_len = n as _;
1
}
}
const CMSG_LEN: usize = 88;
fn prepare_msg(
transmit: &Transmit<'_>,
dst_addr: &socket2::SockAddr,
hdr: &mut libc::msghdr,
iov: &mut libc::iovec,
ctrl: &mut cmsg::Aligned<[u8; CMSG_LEN]>,
#[allow(unused_variables)] // only used on FreeBSD & macOS
encode_src_ip: bool,
sendmsg_einval: bool,
) {
iov.iov_base = transmit.contents.as_ptr() as *const _ as *mut _;
iov.iov_len = transmit.contents.len();
// SAFETY: Casting the pointer to a mutable one is legal,
// as sendmsg is guaranteed to not alter the mutable pointer
// as per the POSIX spec. See the section on the sys/socket.h
// header for details. The type is only mutable in the first
// place because it is reused by recvmsg as well.
let name = dst_addr.as_ptr() as *mut libc::c_void;
let namelen = dst_addr.len();
hdr.msg_name = name as *mut _;
hdr.msg_namelen = namelen;
hdr.msg_iov = iov;
hdr.msg_iovlen = 1;
hdr.msg_control = ctrl.0.as_mut_ptr() as _;
hdr.msg_controllen = CMSG_LEN as _;
let mut encoder = unsafe { cmsg::Encoder::new(hdr) };
let ecn = transmit.ecn.map_or(0, |x| x as libc::c_int);
// True for IPv4 or IPv4-Mapped IPv6
let is_ipv4 = transmit.destination.is_ipv4()
|| matches!(transmit.destination.ip(), IpAddr::V6(addr) if addr.to_ipv4_mapped().is_some());
if is_ipv4 {
if !sendmsg_einval {
encoder.push(libc::IPPROTO_IP, libc::IP_TOS, ecn as IpTosTy);
}
} else {
encoder.push(libc::IPPROTO_IPV6, libc::IPV6_TCLASS, ecn);
}
if let Some(segment_size) = transmit.segment_size {
gso::set_segment_size(&mut encoder, segment_size as u16);
}
if let Some(ip) = &transmit.src_ip {
match ip {
IpAddr::V4(v4) => {
#[cfg(any(target_os = "linux", target_os = "android"))]
{
let pktinfo = libc::in_pktinfo {
ipi_ifindex: 0,
ipi_spec_dst: libc::in_addr {
s_addr: u32::from_ne_bytes(v4.octets()),
},
ipi_addr: libc::in_addr { s_addr: 0 },
};
encoder.push(libc::IPPROTO_IP, libc::IP_PKTINFO, pktinfo);
}
#[cfg(any(target_os = "freebsd", target_os = "macos", target_os = "ios"))]
{
if encode_src_ip {
let addr = libc::in_addr {
s_addr: u32::from_ne_bytes(v4.octets()),
};
encoder.push(libc::IPPROTO_IP, libc::IP_RECVDSTADDR, addr);
}
}
}
IpAddr::V6(v6) => {
let pktinfo = libc::in6_pktinfo {
ipi6_ifindex: 0,
ipi6_addr: libc::in6_addr {
s6_addr: v6.octets(),
},
};
encoder.push(libc::IPPROTO_IPV6, libc::IPV6_PKTINFO, pktinfo);
}
}
}
encoder.finish();
}
fn prepare_recv(
buf: &mut IoSliceMut,
name: &mut MaybeUninit<libc::sockaddr_storage>,
ctrl: &mut cmsg::Aligned<MaybeUninit<[u8; CMSG_LEN]>>,
hdr: &mut libc::msghdr,
) {
hdr.msg_name = name.as_mut_ptr() as _;
hdr.msg_namelen = mem::size_of::<libc::sockaddr_storage>() as _;
hdr.msg_iov = buf as *mut IoSliceMut as *mut libc::iovec;
hdr.msg_iovlen = 1;
hdr.msg_control = ctrl.0.as_mut_ptr() as _;
hdr.msg_controllen = CMSG_LEN as _;
hdr.msg_flags = 0;
}
fn decode_recv(
name: &MaybeUninit<libc::sockaddr_storage>,
hdr: &libc::msghdr,
len: usize,
) -> RecvMeta {
let name = unsafe { name.assume_init() };
let mut ecn_bits = 0;
let mut dst_ip = None;
#[allow(unused_mut)] // only mutable on Linux
let mut stride = len;
let cmsg_iter = unsafe { cmsg::Iter::new(hdr) };
for cmsg in cmsg_iter {
match (cmsg.cmsg_level, cmsg.cmsg_type) {
// FreeBSD uses IP_RECVTOS here, and we can be liberal because cmsgs are opt-in.
(libc::IPPROTO_IP, libc::IP_TOS) | (libc::IPPROTO_IP, libc::IP_RECVTOS) => unsafe {
ecn_bits = cmsg::decode::<u8, libc::cmsghdr>(cmsg);
},
(libc::IPPROTO_IPV6, libc::IPV6_TCLASS) => unsafe {
// Temporary hack around broken macos ABI. Remove once upstream fixes it.
// https://bugreport.apple.com/web/?problemID=48761855
#[allow(clippy::unnecessary_cast)] // cmsg.cmsg_len defined as size_t
if (cfg!(target_os = "macos") || cfg!(target_os = "ios"))
&& cmsg.cmsg_len as usize == libc::CMSG_LEN(mem::size_of::<u8>() as _) as usize
{
ecn_bits = cmsg::decode::<u8, libc::cmsghdr>(cmsg);
} else {
ecn_bits = cmsg::decode::<libc::c_int, libc::cmsghdr>(cmsg) as u8;
}
},
#[cfg(any(target_os = "linux", target_os = "android"))]
(libc::IPPROTO_IP, libc::IP_PKTINFO) => {
let pktinfo = unsafe { cmsg::decode::<libc::in_pktinfo, libc::cmsghdr>(cmsg) };
dst_ip = Some(IpAddr::V4(Ipv4Addr::from(
pktinfo.ipi_addr.s_addr.to_ne_bytes(),
)));
}
#[cfg(any(target_os = "freebsd", target_os = "macos", target_os = "ios"))]
(libc::IPPROTO_IP, libc::IP_RECVDSTADDR) => {
let in_addr = unsafe { cmsg::decode::<libc::in_addr, libc::cmsghdr>(cmsg) };
dst_ip = Some(IpAddr::V4(Ipv4Addr::from(in_addr.s_addr.to_ne_bytes())));
}
(libc::IPPROTO_IPV6, libc::IPV6_PKTINFO) => {
let pktinfo = unsafe { cmsg::decode::<libc::in6_pktinfo, libc::cmsghdr>(cmsg) };
dst_ip = Some(IpAddr::V6(Ipv6Addr::from(pktinfo.ipi6_addr.s6_addr)));
}
#[cfg(target_os = "linux")]
(libc::SOL_UDP, libc::UDP_GRO) => unsafe {
stride = cmsg::decode::<libc::c_int, libc::cmsghdr>(cmsg) as usize;
},
_ => {}
}
}
let addr = match libc::c_int::from(name.ss_family) {
libc::AF_INET => {
// Safety: if the ss_family field is AF_INET then storage must be a sockaddr_in.
let addr: &libc::sockaddr_in =
unsafe { &*(&name as *const _ as *const libc::sockaddr_in) };
SocketAddr::V4(SocketAddrV4::new(
Ipv4Addr::from(addr.sin_addr.s_addr.to_ne_bytes()),
u16::from_be(addr.sin_port),
))
}
libc::AF_INET6 => {
// Safety: if the ss_family field is AF_INET6 then storage must be a sockaddr_in6.
let addr: &libc::sockaddr_in6 =
unsafe { &*(&name as *const _ as *const libc::sockaddr_in6) };
SocketAddr::V6(SocketAddrV6::new(
Ipv6Addr::from(addr.sin6_addr.s6_addr),
u16::from_be(addr.sin6_port),
addr.sin6_flowinfo,
addr.sin6_scope_id,
))
}
_ => unreachable!(),
};
RecvMeta {
len,
stride,
addr,
ecn: EcnCodepoint::from_bits(ecn_bits),
dst_ip,
}
}
#[cfg(not(any(target_os = "macos", target_os = "ios")))]
// Chosen somewhat arbitrarily; might benefit from additional tuning.
pub(crate) const BATCH_SIZE: usize = 32;
#[cfg(any(target_os = "macos", target_os = "ios"))]
pub(crate) const BATCH_SIZE: usize = 1;
#[cfg(target_os = "linux")]
mod gso {
use super::*;
/// Checks whether GSO support is available by setting the UDP_SEGMENT
/// option on a socket
pub(crate) fn max_gso_segments() -> usize {
const GSO_SIZE: libc::c_int = 1500;
let socket = match std::net::UdpSocket::bind("[::]:0")
.or_else(|_| std::net::UdpSocket::bind("127.0.0.1:0"))
{
Ok(socket) => socket,
Err(_) => return 1,
};
// As defined in linux/udp.h
// #define UDP_MAX_SEGMENTS (1 << 6UL)
match set_socket_option(&socket, libc::SOL_UDP, libc::UDP_SEGMENT, GSO_SIZE) {
Ok(()) => 64,
Err(_) => 1,
}
}
pub(crate) fn set_segment_size(encoder: &mut cmsg::Encoder<libc::msghdr>, segment_size: u16) {
encoder.push(libc::SOL_UDP, libc::UDP_SEGMENT, segment_size);
}
}
#[cfg(not(target_os = "linux"))]
mod gso {
use super::*;
pub(super) fn max_gso_segments() -> usize {
1
}
pub(super) fn set_segment_size(_encoder: &mut cmsg::Encoder<libc::msghdr>, _segment_size: u16) {
panic!("Setting a segment size is not supported on current platform");
}
}
#[cfg(target_os = "linux")]
mod gro {
use super::*;
pub(crate) fn gro_segments() -> usize {
let socket = match std::net::UdpSocket::bind("[::]:0")
.or_else(|_| std::net::UdpSocket::bind("127.0.0.1:0"))
{
Ok(socket) => socket,
Err(_) => return 1,
};
// As defined in net/ipv4/udp_offload.c
// #define UDP_GRO_CNT_MAX 64
//
// NOTE: this MUST be set to UDP_GRO_CNT_MAX to ensure that the receive buffer size
// (get_max_udp_payload_size() * gro_segments()) is large enough to hold the largest GRO
// list the kernel might potentially produce. See
// https://github.com/quinn-rs/quinn/pull/1354.
match set_socket_option(&socket, libc::SOL_UDP, libc::UDP_GRO, OPTION_ON) {
Ok(()) => 64,
Err(_) => 1,
}
}
}
/// Returns whether the given socket option is supported on the current platform
///
/// Yields `Ok(true)` if the option was set successfully, `Ok(false)` if setting
/// the option raised an `ENOPROTOOPT` error, and `Err` for any other error.
fn set_socket_option_supported(
socket: &impl AsRawFd,
level: libc::c_int,
name: libc::c_int,
value: libc::c_int,
) -> io::Result<bool> {
match set_socket_option(socket, level, name, value) {
Ok(()) => Ok(true),
Err(err) if err.raw_os_error() == Some(libc::ENOPROTOOPT) => Ok(false),
Err(err) => Err(err),
}
}
fn set_socket_option(
socket: &impl AsRawFd,
level: libc::c_int,
name: libc::c_int,
value: libc::c_int,
) -> io::Result<()> {
let rc = unsafe {
libc::setsockopt(
socket.as_raw_fd(),
level,
name,
&value as *const _ as _,
mem::size_of_val(&value) as _,
)
};
match rc == 0 {
true => Ok(()),
false => Err(io::Error::last_os_error()),
}
}
const OPTION_ON: libc::c_int = 1;
#[cfg(not(target_os = "linux"))]
mod gro {
pub(super) fn gro_segments() -> usize {
1
}
}

View File

@ -0,0 +1,453 @@
use std::{
io::{self, IoSliceMut},
mem,
net::{IpAddr, Ipv4Addr},
os::windows::io::AsRawSocket,
ptr,
sync::Mutex,
time::Instant,
};
use libc::{c_int, c_uint};
use once_cell::sync::Lazy;
use windows_sys::Win32::Networking::WinSock;
use crate::{
cmsg::{self, CMsgHdr},
log_sendmsg_error, EcnCodepoint, RecvMeta, Transmit, UdpSockRef, IO_ERROR_LOG_INTERVAL,
};
/// QUIC-friendly UDP socket for Windows
///
/// Unlike a standard Windows UDP socket, this allows ECN bits to be read and written.
#[derive(Debug)]
pub struct UdpSocketState {
last_send_error: Mutex<Instant>,
}
impl UdpSocketState {
pub fn new(socket: UdpSockRef<'_>) -> io::Result<Self> {
assert!(
CMSG_LEN
>= WinSock::CMSGHDR::cmsg_space(mem::size_of::<WinSock::IN6_PKTINFO>())
+ WinSock::CMSGHDR::cmsg_space(mem::size_of::<c_int>())
+ WinSock::CMSGHDR::cmsg_space(mem::size_of::<u32>())
);
assert!(
mem::align_of::<WinSock::CMSGHDR>() <= mem::align_of::<cmsg::Aligned<[u8; 0]>>(),
"control message buffers will be misaligned"
);
socket.0.set_nonblocking(true)?;
let addr = socket.0.local_addr()?;
let is_ipv6 = addr.as_socket_ipv6().is_some();
let v6only = unsafe {
let mut result: u32 = 0;
let mut len = mem::size_of_val(&result) as i32;
let rc = WinSock::getsockopt(
socket.0.as_raw_socket() as _,
WinSock::IPPROTO_IPV6,
WinSock::IPV6_V6ONLY as _,
&mut result as *mut _ as _,
&mut len,
);
if rc == -1 {
return Err(io::Error::last_os_error());
}
result != 0
};
let is_ipv4 = addr.as_socket_ipv4().is_some() || !v6only;
// We don't support old versions of Windows that do not enable access to `WSARecvMsg()`
if WSARECVMSG_PTR.is_none() {
tracing::error!("network stack does not support WSARecvMsg function");
return Err(io::Error::from(io::ErrorKind::Unsupported));
}
if is_ipv4 {
set_socket_option(
&*socket.0,
WinSock::IPPROTO_IP,
WinSock::IP_DONTFRAGMENT,
OPTION_ON,
)?;
set_socket_option(
&*socket.0,
WinSock::IPPROTO_IP,
WinSock::IP_PKTINFO,
OPTION_ON,
)?;
set_socket_option(&*socket.0, WinSock::IPPROTO_IP, WinSock::IP_ECN, OPTION_ON)?;
}
if is_ipv6 {
set_socket_option(
&*socket.0,
WinSock::IPPROTO_IPV6,
WinSock::IPV6_DONTFRAG,
OPTION_ON,
)?;
set_socket_option(
&*socket.0,
WinSock::IPPROTO_IPV6,
WinSock::IPV6_PKTINFO,
OPTION_ON,
)?;
set_socket_option(
&*socket.0,
WinSock::IPPROTO_IPV6,
WinSock::IPV6_ECN,
OPTION_ON,
)?;
}
// Opportunistically try to enable GRO
_ = set_socket_option(
&*socket.0,
WinSock::IPPROTO_UDP,
WinSock::UDP_RECV_MAX_COALESCED_SIZE,
// u32 per
// https://learn.microsoft.com/en-us/windows/win32/winsock/ipproto-udp-socket-options.
// Choice of 2^16 - 1 inspired by msquic.
u16::MAX as u32,
);
let now = Instant::now();
Ok(Self {
last_send_error: Mutex::new(now.checked_sub(2 * IO_ERROR_LOG_INTERVAL).unwrap_or(now)),
})
}
pub fn send(&self, socket: UdpSockRef<'_>, transmit: &Transmit<'_>) -> io::Result<()> {
// we cannot use [`socket2::sendmsg()`] and [`socket2::MsgHdr`] as we do not have access
// to the inner field which holds the WSAMSG
let mut ctrl_buf = cmsg::Aligned([0; CMSG_LEN]);
let daddr = socket2::SockAddr::from(transmit.destination);
let mut data = WinSock::WSABUF {
buf: transmit.contents.as_ptr() as *mut _,
len: transmit.contents.len() as _,
};
let ctrl = WinSock::WSABUF {
buf: ctrl_buf.0.as_mut_ptr(),
len: ctrl_buf.0.len() as _,
};
let mut wsa_msg = WinSock::WSAMSG {
name: daddr.as_ptr() as *mut _,
namelen: daddr.len(),
lpBuffers: &mut data,
Control: ctrl,
dwBufferCount: 1,
dwFlags: 0,
};
// Add control messages (ECN and PKTINFO)
let mut encoder = unsafe { cmsg::Encoder::new(&mut wsa_msg) };
if let Some(ip) = transmit.src_ip {
let ip = std::net::SocketAddr::new(ip, 0);
let ip = socket2::SockAddr::from(ip);
match ip.family() {
WinSock::AF_INET => {
let src_ip = unsafe { ptr::read(ip.as_ptr() as *const WinSock::SOCKADDR_IN) };
let pktinfo = WinSock::IN_PKTINFO {
ipi_addr: src_ip.sin_addr,
ipi_ifindex: 0,
};
encoder.push(WinSock::IPPROTO_IP, WinSock::IP_PKTINFO, pktinfo);
}
WinSock::AF_INET6 => {
let src_ip = unsafe { ptr::read(ip.as_ptr() as *const WinSock::SOCKADDR_IN6) };
let pktinfo = WinSock::IN6_PKTINFO {
ipi6_addr: src_ip.sin6_addr,
ipi6_ifindex: unsafe { src_ip.Anonymous.sin6_scope_id },
};
encoder.push(WinSock::IPPROTO_IPV6, WinSock::IPV6_PKTINFO, pktinfo);
}
_ => {
return Err(io::Error::from(io::ErrorKind::InvalidInput));
}
}
}
// ECN is a C integer https://learn.microsoft.com/en-us/windows/win32/winsock/winsock-ecn
let ecn = transmit.ecn.map_or(0, |x| x as c_int);
// True for IPv4 or IPv4-Mapped IPv6
let is_ipv4 = transmit.destination.is_ipv4()
|| matches!(transmit.destination.ip(), IpAddr::V6(addr) if addr.to_ipv4_mapped().is_some());
if is_ipv4 {
encoder.push(WinSock::IPPROTO_IP, WinSock::IP_ECN, ecn);
} else {
encoder.push(WinSock::IPPROTO_IPV6, WinSock::IPV6_ECN, ecn);
}
// Segment size is a u32 https://learn.microsoft.com/en-us/windows/win32/api/ws2tcpip/nf-ws2tcpip-wsasetudpsendmessagesize
if let Some(segment_size) = transmit.segment_size {
encoder.push(
WinSock::IPPROTO_UDP,
WinSock::UDP_SEND_MSG_SIZE,
segment_size as u32,
);
}
encoder.finish();
let mut len = 0;
let rc = unsafe {
WinSock::WSASendMsg(
socket.0.as_raw_socket() as usize,
&wsa_msg,
0,
&mut len,
ptr::null_mut(),
None,
)
};
if rc != 0 {
let e = io::Error::last_os_error();
if e.kind() == io::ErrorKind::WouldBlock {
return Err(e);
}
// Other errors are ignored, since they will usually be handled
// by higher level retransmits and timeouts.
// - PermissionDenied errors have been observed due to iptable rules.
// Those are not fatal errors, since the
// configuration can be dynamically changed.
// - Destination unreachable errors have been observed for other reasons.
log_sendmsg_error(&self.last_send_error, e, transmit);
}
Ok(())
}
pub fn recv(
&self,
socket: UdpSockRef<'_>,
bufs: &mut [IoSliceMut<'_>],
meta: &mut [RecvMeta],
) -> io::Result<usize> {
let wsa_recvmsg_ptr = WSARECVMSG_PTR.expect("valid function pointer for WSARecvMsg");
// we cannot use [`socket2::MsgHdrMut`] as we do not have access to inner field which holds the WSAMSG
let mut ctrl_buf = cmsg::Aligned([0; CMSG_LEN]);
let mut source: WinSock::SOCKADDR_INET = unsafe { mem::zeroed() };
let mut data = WinSock::WSABUF {
buf: bufs[0].as_mut_ptr(),
len: bufs[0].len() as _,
};
let ctrl = WinSock::WSABUF {
buf: ctrl_buf.0.as_mut_ptr(),
len: ctrl_buf.0.len() as _,
};
let mut wsa_msg = WinSock::WSAMSG {
name: &mut source as *mut _ as *mut _,
namelen: mem::size_of_val(&source) as _,
lpBuffers: &mut data,
Control: ctrl,
dwBufferCount: 1,
dwFlags: 0,
};
let mut len = 0;
unsafe {
let rc = (wsa_recvmsg_ptr)(
socket.0.as_raw_socket() as usize,
&mut wsa_msg,
&mut len,
ptr::null_mut(),
None,
);
if rc == -1 {
return Err(io::Error::last_os_error());
}
}
let addr = unsafe {
let (_, addr) = socket2::SockAddr::try_init(|addr_storage, len| {
*len = mem::size_of_val(&source) as _;
ptr::copy_nonoverlapping(&source, addr_storage as _, 1);
Ok(())
})?;
addr.as_socket()
};
// Decode control messages (PKTINFO and ECN)
let mut ecn_bits = 0;
let mut dst_ip = None;
let mut stride = len;
let cmsg_iter = unsafe { cmsg::Iter::new(&wsa_msg) };
for cmsg in cmsg_iter {
const UDP_COALESCED_INFO: i32 = WinSock::UDP_COALESCED_INFO as i32;
// [header (len)][data][padding(len + sizeof(data))] -> [header][data][padding]
match (cmsg.cmsg_level, cmsg.cmsg_type) {
(WinSock::IPPROTO_IP, WinSock::IP_PKTINFO) => {
let pktinfo =
unsafe { cmsg::decode::<WinSock::IN_PKTINFO, WinSock::CMSGHDR>(cmsg) };
// Addr is stored in big endian format
let ip4 = Ipv4Addr::from(u32::from_be(unsafe { pktinfo.ipi_addr.S_un.S_addr }));
dst_ip = Some(ip4.into());
}
(WinSock::IPPROTO_IPV6, WinSock::IPV6_PKTINFO) => {
let pktinfo =
unsafe { cmsg::decode::<WinSock::IN6_PKTINFO, WinSock::CMSGHDR>(cmsg) };
// Addr is stored in big endian format
dst_ip = Some(IpAddr::from(unsafe { pktinfo.ipi6_addr.u.Byte }));
}
(WinSock::IPPROTO_IP, WinSock::IP_ECN) => {
// ECN is a C integer https://learn.microsoft.com/en-us/windows/win32/winsock/winsock-ecn
ecn_bits = unsafe { cmsg::decode::<c_int, WinSock::CMSGHDR>(cmsg) };
}
(WinSock::IPPROTO_IPV6, WinSock::IPV6_ECN) => {
// ECN is a C integer https://learn.microsoft.com/en-us/windows/win32/winsock/winsock-ecn
ecn_bits = unsafe { cmsg::decode::<c_int, WinSock::CMSGHDR>(cmsg) };
}
(WinSock::IPPROTO_UDP, UDP_COALESCED_INFO) => {
// Has type u32 (aka DWORD) per
// https://learn.microsoft.com/en-us/windows/win32/winsock/ipproto-udp-socket-options
stride = unsafe { cmsg::decode::<u32, WinSock::CMSGHDR>(cmsg) };
}
_ => {}
}
}
meta[0] = RecvMeta {
len: len as usize,
stride: stride as usize,
addr: addr.unwrap(),
ecn: EcnCodepoint::from_bits(ecn_bits as u8),
dst_ip,
};
Ok(1)
}
/// The maximum amount of segments which can be transmitted if a platform
/// supports Generic Send Offload (GSO).
///
/// This is 1 if the platform doesn't support GSO. Subject to change if errors are detected
/// while using GSO.
#[inline]
pub fn max_gso_segments(&self) -> usize {
*MAX_GSO_SEGMENTS
}
/// The number of segments to read when GRO is enabled. Used as a factor to
/// compute the receive buffer size.
///
/// Returns 1 if the platform doesn't support GRO.
#[inline]
pub fn gro_segments(&self) -> usize {
// Arbitrary reasonable value inspired by Linux and msquic
64
}
#[inline]
pub fn may_fragment(&self) -> bool {
false
}
}
fn set_socket_option(
socket: &impl AsRawSocket,
level: i32,
name: i32,
value: u32,
) -> io::Result<()> {
let rc = unsafe {
WinSock::setsockopt(
socket.as_raw_socket() as usize,
level,
name,
&value as *const _ as _,
mem::size_of_val(&value) as _,
)
};
match rc == 0 {
true => Ok(()),
false => Err(io::Error::last_os_error()),
}
}
pub(crate) const BATCH_SIZE: usize = 1;
// Enough to store max(IP_PKTINFO + IP_ECN, IPV6_PKTINFO + IPV6_ECN) + max(UDP_SEND_MSG_SIZE, UDP_COALESCED_INFO) bytes (header + data) and some extra margin
const CMSG_LEN: usize = 128;
const OPTION_ON: u32 = 1;
// FIXME this could use [`std::sync::OnceLock`] once the MSRV is bumped to 1.70 or higher
static WSARECVMSG_PTR: Lazy<WinSock::LPFN_WSARECVMSG> = Lazy::new(|| {
let s = unsafe { WinSock::socket(WinSock::AF_INET as _, WinSock::SOCK_DGRAM as _, 0) };
if s == WinSock::INVALID_SOCKET {
tracing::debug!(
"ignoring WSARecvMsg function pointer due to socket creation error: {}",
io::Error::last_os_error()
);
return None;
}
// Detect if the OS exposes the WSARecvMsg API based on
// https://github.com/Azure/mio-uds-windows/blob/a3c97df82018086add96d8821edb4aa85ec1b42b/src/stdnet/ext.rs#L601
let guid = WinSock::WSAID_WSARECVMSG;
let mut wsa_recvmsg_ptr = None;
let mut len = 0;
// Safety: Option handles the NULL pointer with a None value
let rc = unsafe {
WinSock::WSAIoctl(
s as _,
WinSock::SIO_GET_EXTENSION_FUNCTION_POINTER,
&guid as *const _ as *const _,
mem::size_of_val(&guid) as u32,
&mut wsa_recvmsg_ptr as *mut _ as *mut _,
mem::size_of_val(&wsa_recvmsg_ptr) as u32,
&mut len,
ptr::null_mut(),
None,
)
};
if rc == -1 {
tracing::debug!(
"ignoring WSARecvMsg function pointer due to ioctl error: {}",
io::Error::last_os_error()
);
} else if len as usize != mem::size_of::<WinSock::LPFN_WSARECVMSG>() {
tracing::debug!("ignoring WSARecvMsg function pointer due to pointer size mismatch");
wsa_recvmsg_ptr = None;
}
unsafe {
WinSock::closesocket(s);
}
wsa_recvmsg_ptr
});
static MAX_GSO_SEGMENTS: Lazy<usize> = Lazy::new(|| {
let socket = match std::net::UdpSocket::bind("[::]:0")
.or_else(|_| std::net::UdpSocket::bind("127.0.0.1:0"))
{
Ok(socket) => socket,
Err(_) => return 1,
};
const GSO_SIZE: c_uint = 1500;
match set_socket_option(
&socket,
WinSock::IPPROTO_UDP,
WinSock::UDP_SEND_MSG_SIZE,
GSO_SIZE,
) {
// Empirically found on Windows 11 x64
Ok(()) => 512,
Err(_) => 1,
}
});


@ -0,0 +1,223 @@
use std::{
io::IoSliceMut,
net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6, UdpSocket},
slice,
};
use quinn_udp::{EcnCodepoint, RecvMeta, Transmit, UdpSocketState};
use socket2::Socket;
#[test]
fn basic() {
let send = UdpSocket::bind("[::1]:0")
.or_else(|_| UdpSocket::bind("127.0.0.1:0"))
.unwrap();
let recv = UdpSocket::bind("[::1]:0")
.or_else(|_| UdpSocket::bind("127.0.0.1:0"))
.unwrap();
let dst_addr = recv.local_addr().unwrap();
test_send_recv(
&send.into(),
&recv.into(),
Transmit {
destination: dst_addr,
ecn: None,
contents: b"hello",
segment_size: None,
src_ip: None,
},
);
}
#[test]
fn ecn_v6() {
let recv = socket2::Socket::new(
socket2::Domain::IPV6,
socket2::Type::DGRAM,
Some(socket2::Protocol::UDP),
)
.unwrap();
recv.set_only_v6(false).unwrap();
// We must use the unspecified address here, rather than a local address, to support dual-stack
// mode
recv.bind(&socket2::SockAddr::from(
"[::]:0".parse::<SocketAddr>().unwrap(),
))
.unwrap();
let recv_v6 = SocketAddr::V6(SocketAddrV6::new(
Ipv6Addr::LOCALHOST,
recv.local_addr().unwrap().as_socket().unwrap().port(),
0,
0,
));
let recv_v4 = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::LOCALHOST, recv_v6.port()));
for (src, dst) in [("[::1]:0", recv_v6), ("127.0.0.1:0", recv_v4)] {
dbg!(src, dst);
let send = UdpSocket::bind(src).unwrap();
let send = Socket::from(send);
for codepoint in [EcnCodepoint::Ect0, EcnCodepoint::Ect1] {
test_send_recv(
&send,
&recv,
Transmit {
destination: dst,
ecn: Some(codepoint),
contents: b"hello",
segment_size: None,
src_ip: None,
},
);
}
}
}
#[test]
fn ecn_v4() {
let send = Socket::from(UdpSocket::bind("127.0.0.1:0").unwrap());
let recv = Socket::from(UdpSocket::bind("127.0.0.1:0").unwrap());
for codepoint in [EcnCodepoint::Ect0, EcnCodepoint::Ect1] {
test_send_recv(
&send,
&recv,
Transmit {
destination: recv.local_addr().unwrap().as_socket().unwrap(),
ecn: Some(codepoint),
contents: b"hello",
segment_size: None,
src_ip: None,
},
);
}
}
#[test]
fn ecn_v4_mapped_v6() {
let send = socket2::Socket::new(
socket2::Domain::IPV6,
socket2::Type::DGRAM,
Some(socket2::Protocol::UDP),
)
.unwrap();
send.set_only_v6(false).unwrap();
send.bind(&socket2::SockAddr::from(
"[::]:0".parse::<SocketAddr>().unwrap(),
))
.unwrap();
let recv = UdpSocket::bind("127.0.0.1:0").unwrap();
let recv = Socket::from(recv);
let recv_v4_mapped_v6 = SocketAddr::V6(SocketAddrV6::new(
Ipv4Addr::LOCALHOST.to_ipv6_mapped(),
recv.local_addr().unwrap().as_socket().unwrap().port(),
0,
0,
));
for codepoint in [EcnCodepoint::Ect0, EcnCodepoint::Ect1] {
test_send_recv(
&send,
&recv,
Transmit {
destination: recv_v4_mapped_v6,
ecn: Some(codepoint),
contents: b"hello",
segment_size: None,
src_ip: None,
},
);
}
}
#[test]
#[cfg_attr(not(any(target_os = "linux", target_os = "windows")), ignore)]
fn gso() {
let send = UdpSocket::bind("[::1]:0")
.or_else(|_| UdpSocket::bind("127.0.0.1:0"))
.unwrap();
let recv = UdpSocket::bind("[::1]:0")
.or_else(|_| UdpSocket::bind("127.0.0.1:0"))
.unwrap();
let max_segments = UdpSocketState::new((&send).into())
.unwrap()
.max_gso_segments();
let dst_addr = recv.local_addr().unwrap();
const SEGMENT_SIZE: usize = 128;
let msg = vec![0xAB; SEGMENT_SIZE * max_segments];
test_send_recv(
&send.into(),
&recv.into(),
Transmit {
destination: dst_addr,
ecn: None,
contents: &msg,
segment_size: Some(SEGMENT_SIZE),
src_ip: None,
},
);
}
fn test_send_recv(send: &Socket, recv: &Socket, transmit: Transmit) {
let send_state = UdpSocketState::new(send.into()).unwrap();
let recv_state = UdpSocketState::new(recv.into()).unwrap();
// Reverse the non-blocking flag set by `UdpSocketState` to make the test non-racy
recv.set_nonblocking(false).unwrap();
send_state.send(send.into(), &transmit).unwrap();
let mut buf = [0; u16::MAX as usize];
let mut meta = RecvMeta::default();
let segment_size = transmit.segment_size.unwrap_or(transmit.contents.len());
let expected_datagrams = transmit.contents.len() / segment_size;
let mut datagrams = 0;
while datagrams < expected_datagrams {
let n = recv_state
.recv(
recv.into(),
&mut [IoSliceMut::new(&mut buf)],
slice::from_mut(&mut meta),
)
.unwrap();
assert_eq!(n, 1);
let segments = meta.len / meta.stride;
for i in 0..segments {
assert_eq!(
&buf[(i * meta.stride)..((i + 1) * meta.stride)],
&transmit.contents
[(datagrams + i) * segment_size..(datagrams + i + 1) * segment_size]
);
}
datagrams += segments;
assert_eq!(
meta.addr.port(),
send.local_addr().unwrap().as_socket().unwrap().port()
);
let send_v6 = send.local_addr().unwrap().as_socket().unwrap().is_ipv6();
let recv_v6 = recv.local_addr().unwrap().as_socket().unwrap().is_ipv6();
let src = meta.addr.ip();
let dst = meta.dst_ip.unwrap();
for addr in [src, dst] {
match (send_v6, recv_v6) {
(_, false) => assert_eq!(addr, Ipv4Addr::LOCALHOST),
// Windows gives us real IPv4 addrs, whereas *nix use IPv6-mapped IPv4
// addrs. Canonicalize to IPv6-mapped for robustness.
(false, true) => {
assert_eq!(ip_to_v6_mapped(addr), Ipv4Addr::LOCALHOST.to_ipv6_mapped())
}
(true, true) => assert!(
addr == Ipv6Addr::LOCALHOST || addr == Ipv4Addr::LOCALHOST.to_ipv6_mapped()
),
}
}
assert_eq!(meta.ecn, transmit.ecn);
}
assert_eq!(datagrams, expected_datagrams);
}
fn ip_to_v6_mapped(x: IpAddr) -> IpAddr {
match x {
IpAddr::V4(x) => IpAddr::V6(x.to_ipv6_mapped()),
IpAddr::V6(_) => x,
}
}


@ -0,0 +1 @@
{"files":{"CHANGELOG.md":"0fa977ef55cfe5c26907f5f3c8700a777d205608ab9edf7fd98762b60ba95790","Cargo.toml":"8133ec808652d1d7aa1eb39b135c2afa559265bb46ebbaf2a3cfe8b552fa9202","LICENSE":"9797ea525350e8779aab3771e0276bbeea8b824893882172acfc94743b8d953d","README.md":"6094ea500349ce239a12b07d7dfd4ea965a7f14c993da2abc4b3c39a0479683a","src/entry.rs":"237a4a8e159e841aaa2b49ec1697a68fef6165542ac0197dba1ee963f688dc1d","src/lib.rs":"8ada51fda322e36e6b52a3e1fcbeab7c72332f79e54af111584f5a17b961afb5","src/select.rs":"e01c34fe0fdbc49a40b15b5b42816eea7d7b13db6e3a2774de92eb87f6e48231"},"package":"630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e"}


@ -0,0 +1,172 @@
# 2.1.0 (April 25th, 2023)
- macros: fix typo in `#[tokio::test]` docs ([#5636])
- macros: make entrypoints more efficient ([#5621])
[#5621]: https://github.com/tokio-rs/tokio/pull/5621
[#5636]: https://github.com/tokio-rs/tokio/pull/5636
# 2.0.0 (March 24th, 2023)
This major release updates the dependency on the syn crate to 2.0.0, and
increases the MSRV to 1.56.
As part of this release, we are adopting a policy of depending on a specific minor
release of tokio-macros. This prevents Tokio from being able to pull in many different
versions of tokio-macros.
- macros: update `syn` ([#5572])
- macros: accept path as crate rename ([#5557])
[#5572]: https://github.com/tokio-rs/tokio/pull/5572
[#5557]: https://github.com/tokio-rs/tokio/pull/5557
# 1.8.2 (November 30th, 2022)
- fix a regression introduced in 1.8.1 ([#5244])
[#5244]: https://github.com/tokio-rs/tokio/pull/5244
# 1.8.1 (November 29th, 2022)
(yanked)
- macros: Pin Futures in `#[tokio::test]` to stack ([#5205])
- macros: Reduce usage of last statement spans in proc-macros ([#5092])
- macros: Improve the documentation for `#[tokio::test]` ([#4761])
[#5205]: https://github.com/tokio-rs/tokio/pull/5205
[#5092]: https://github.com/tokio-rs/tokio/pull/5092
[#4761]: https://github.com/tokio-rs/tokio/pull/4761
# 1.8.0 (June 4th, 2022)
- macros: always emit return statement ([#4636])
- macros: support setting a custom crate name for `#[tokio::main]` and `#[tokio::test]` ([#4613])
[#4613]: https://github.com/tokio-rs/tokio/pull/4613
[#4636]: https://github.com/tokio-rs/tokio/pull/4636
# 1.7.0 (December 15th, 2021)
- macros: address remaining `clippy::semicolon_if_nothing_returned` warning ([#4252])
[#4252]: https://github.com/tokio-rs/tokio/pull/4252
# 1.6.0 (November 16th, 2021)
- macros: fix mut patterns in `select!` macro ([#4211])
[#4211]: https://github.com/tokio-rs/tokio/pull/4211
# 1.5.1 (October 29th, 2021)
- macros: fix type resolution error in `#[tokio::main]` ([#4176])
[#4176]: https://github.com/tokio-rs/tokio/pull/4176
# 1.5.0 (October 13th, 2021)
- macros: make tokio-macros attributes more IDE friendly ([#4162])
[#4162]: https://github.com/tokio-rs/tokio/pull/4162
# 1.4.1 (September 30th, 2021)
Reverted: run `current_thread` inside `LocalSet` ([#4027])
# 1.4.0 (September 29th, 2021)
(yanked)
### Changed
- macros: run `current_thread` inside `LocalSet` ([#4027])
- macros: explicitly relaxed clippy lint for `.expect()` in runtime entry macro ([#4030])
### Fixed
- macros: fix invalid error messages in functions wrapped with `#[main]` or `#[test]` ([#4067])
[#4027]: https://github.com/tokio-rs/tokio/pull/4027
[#4030]: https://github.com/tokio-rs/tokio/pull/4030
[#4067]: https://github.com/tokio-rs/tokio/pull/4067
# 1.3.0 (July 7, 2021)
- macros: don't trigger `clippy::unwrap_used` ([#3926])
[#3926]: https://github.com/tokio-rs/tokio/pull/3926
# 1.2.0 (May 14, 2021)
- macros: forward input arguments in `#[tokio::test]` ([#3691])
- macros: improve diagnostics on type mismatch ([#3766])
- macros: various error message improvements ([#3677])
[#3677]: https://github.com/tokio-rs/tokio/pull/3677
[#3691]: https://github.com/tokio-rs/tokio/pull/3691
[#3766]: https://github.com/tokio-rs/tokio/pull/3766
# 1.1.0 (February 5, 2021)
- add `start_paused` option to macros ([#3492])
# 1.0.0 (December 23, 2020)
- track `tokio` 1.0 release.
# 0.3.1 (October 25, 2020)
### Fixed
- fix incorrect docs regarding `max_threads` option ([#3038])
# 0.3.0 (October 15, 2020)
- Track `tokio` 0.3 release.
### Changed
- options are renamed to track `tokio` runtime builder fn names.
- `#[tokio::main]` macro requires `rt-multi-thread` when no `flavor` is specified.
# 0.2.5 (February 27, 2020)
### Fixed
- doc improvements ([#2225]).
# 0.2.4 (January 27, 2020)
### Fixed
- generics on `#[tokio::main]` function ([#2177]).
### Added
- support for `tokio::select!` ([#2152]).
# 0.2.3 (January 7, 2020)
### Fixed
- Revert breaking change.
# 0.2.2 (January 7, 2020)
### Added
- General refactoring and inclusion of additional runtime options ([#2022] and [#2038])
# 0.2.1 (December 18, 2019)
### Fixes
- inherit visibility when wrapping async fn ([#1954]).
# 0.2.0 (November 26, 2019)
- Initial release
[#1954]: https://github.com/tokio-rs/tokio/pull/1954
[#2022]: https://github.com/tokio-rs/tokio/pull/2022
[#2038]: https://github.com/tokio-rs/tokio/pull/2038
[#2152]: https://github.com/tokio-rs/tokio/pull/2152
[#2177]: https://github.com/tokio-rs/tokio/pull/2177
[#2225]: https://github.com/tokio-rs/tokio/pull/2225
[#3038]: https://github.com/tokio-rs/tokio/pull/3038
[#3492]: https://github.com/tokio-rs/tokio/pull/3492


@ -0,0 +1,47 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2018"
rust-version = "1.56"
name = "tokio-macros"
version = "2.1.0"
authors = ["Tokio Contributors <team@tokio.rs>"]
description = """
Tokio's proc macros.
"""
homepage = "https://tokio.rs"
readme = "README.md"
categories = ["asynchronous"]
license = "MIT"
repository = "https://github.com/tokio-rs/tokio"
[package.metadata.docs.rs]
all-features = true
[lib]
proc-macro = true
[dependencies.proc-macro2]
version = "1.0.7"
[dependencies.quote]
version = "1"
[dependencies.syn]
version = "2.0"
features = ["full"]
[dev-dependencies.tokio]
version = "1.0.0"
features = ["full"]
[features]

third_party/rust/tokio-macros/LICENSE vendored Normal file

@ -0,0 +1,47 @@
Copyright (c) 2023 Tokio Contributors
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
The MIT License (MIT)
Copyright (c) 2019 Yoshua Wuyts
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

third_party/rust/tokio-macros/README.md vendored Normal file

@ -0,0 +1,13 @@
# Tokio Macros
Procedural macros for use with Tokio
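## Usage

A minimal sketch: these macros are normally used through the `tokio` crate's
re-exports (e.g. `#[tokio::main]`) rather than by depending on this crate
directly.

```rust
#[tokio::main]
async fn main() {
    println!("Hello world");
}
```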
## License
This project is licensed under the [MIT license](LICENSE).
### Contribution
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in Tokio by you, shall be licensed as MIT, without any additional
terms or conditions.


@ -0,0 +1,591 @@
use proc_macro2::{Span, TokenStream, TokenTree};
use quote::{quote, quote_spanned, ToTokens};
use syn::parse::{Parse, ParseStream, Parser};
use syn::{braced, Attribute, Ident, Path, Signature, Visibility};
// syn::AttributeArgs does not implement syn::Parse
type AttributeArgs = syn::punctuated::Punctuated<syn::Meta, syn::Token![,]>;
#[derive(Clone, Copy, PartialEq)]
enum RuntimeFlavor {
CurrentThread,
Threaded,
}
impl RuntimeFlavor {
fn from_str(s: &str) -> Result<RuntimeFlavor, String> {
match s {
"current_thread" => Ok(RuntimeFlavor::CurrentThread),
"multi_thread" => Ok(RuntimeFlavor::Threaded),
"single_thread" => Err("The single threaded runtime flavor is called `current_thread`.".to_string()),
"basic_scheduler" => Err("The `basic_scheduler` runtime flavor has been renamed to `current_thread`.".to_string()),
"threaded_scheduler" => Err("The `threaded_scheduler` runtime flavor has been renamed to `multi_thread`.".to_string()),
_ => Err(format!("No such runtime flavor `{}`. The runtime flavors are `current_thread` and `multi_thread`.", s)),
}
}
}
struct FinalConfig {
flavor: RuntimeFlavor,
worker_threads: Option<usize>,
start_paused: Option<bool>,
crate_name: Option<Path>,
}
/// Config used in case of the attribute not being able to build a valid config
const DEFAULT_ERROR_CONFIG: FinalConfig = FinalConfig {
flavor: RuntimeFlavor::CurrentThread,
worker_threads: None,
start_paused: None,
crate_name: None,
};
struct Configuration {
rt_multi_thread_available: bool,
default_flavor: RuntimeFlavor,
flavor: Option<RuntimeFlavor>,
worker_threads: Option<(usize, Span)>,
start_paused: Option<(bool, Span)>,
is_test: bool,
crate_name: Option<Path>,
}
impl Configuration {
fn new(is_test: bool, rt_multi_thread: bool) -> Self {
Configuration {
rt_multi_thread_available: rt_multi_thread,
default_flavor: match is_test {
true => RuntimeFlavor::CurrentThread,
false => RuntimeFlavor::Threaded,
},
flavor: None,
worker_threads: None,
start_paused: None,
is_test,
crate_name: None,
}
}
fn set_flavor(&mut self, runtime: syn::Lit, span: Span) -> Result<(), syn::Error> {
if self.flavor.is_some() {
return Err(syn::Error::new(span, "`flavor` set multiple times."));
}
let runtime_str = parse_string(runtime, span, "flavor")?;
let runtime =
RuntimeFlavor::from_str(&runtime_str).map_err(|err| syn::Error::new(span, err))?;
self.flavor = Some(runtime);
Ok(())
}
fn set_worker_threads(
&mut self,
worker_threads: syn::Lit,
span: Span,
) -> Result<(), syn::Error> {
if self.worker_threads.is_some() {
return Err(syn::Error::new(
span,
"`worker_threads` set multiple times.",
));
}
let worker_threads = parse_int(worker_threads, span, "worker_threads")?;
if worker_threads == 0 {
return Err(syn::Error::new(span, "`worker_threads` may not be 0."));
}
self.worker_threads = Some((worker_threads, span));
Ok(())
}
fn set_start_paused(&mut self, start_paused: syn::Lit, span: Span) -> Result<(), syn::Error> {
if self.start_paused.is_some() {
return Err(syn::Error::new(span, "`start_paused` set multiple times."));
}
let start_paused = parse_bool(start_paused, span, "start_paused")?;
self.start_paused = Some((start_paused, span));
Ok(())
}
fn set_crate_name(&mut self, name: syn::Lit, span: Span) -> Result<(), syn::Error> {
if self.crate_name.is_some() {
return Err(syn::Error::new(span, "`crate` set multiple times."));
}
let name_path = parse_path(name, span, "crate")?;
self.crate_name = Some(name_path);
Ok(())
}
fn macro_name(&self) -> &'static str {
if self.is_test {
"tokio::test"
} else {
"tokio::main"
}
}
fn build(&self) -> Result<FinalConfig, syn::Error> {
let flavor = self.flavor.unwrap_or(self.default_flavor);
use RuntimeFlavor::*;
let worker_threads = match (flavor, self.worker_threads) {
(CurrentThread, Some((_, worker_threads_span))) => {
let msg = format!(
"The `worker_threads` option requires the `multi_thread` runtime flavor. Use `#[{}(flavor = \"multi_thread\")]`",
self.macro_name(),
);
return Err(syn::Error::new(worker_threads_span, msg));
}
(CurrentThread, None) => None,
(Threaded, worker_threads) if self.rt_multi_thread_available => {
worker_threads.map(|(val, _span)| val)
}
(Threaded, _) => {
let msg = if self.flavor.is_none() {
"The default runtime flavor is `multi_thread`, but the `rt-multi-thread` feature is disabled."
} else {
"The runtime flavor `multi_thread` requires the `rt-multi-thread` feature."
};
return Err(syn::Error::new(Span::call_site(), msg));
}
};
let start_paused = match (flavor, self.start_paused) {
(Threaded, Some((_, start_paused_span))) => {
let msg = format!(
"The `start_paused` option requires the `current_thread` runtime flavor. Use `#[{}(flavor = \"current_thread\")]`",
self.macro_name(),
);
return Err(syn::Error::new(start_paused_span, msg));
}
(CurrentThread, Some((start_paused, _))) => Some(start_paused),
(_, None) => None,
};
Ok(FinalConfig {
crate_name: self.crate_name.clone(),
flavor,
worker_threads,
start_paused,
})
}
}
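// For illustration, a few attribute combinations and how `build` treats them
// (derived from the checks above; default flavor is `multi_thread` for
// `#[tokio::main]` and `current_thread` for `#[tokio::test]`):
//
//     #[tokio::main(flavor = "multi_thread", worker_threads = 4)] // ok (with `rt-multi-thread`)
//     #[tokio::test(worker_threads = 2)]  // error: `worker_threads` requires flavor = "multi_thread"
//     #[tokio::main(start_paused = true)] // error: `start_paused` requires flavor = "current_thread"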
fn parse_int(int: syn::Lit, span: Span, field: &str) -> Result<usize, syn::Error> {
match int {
syn::Lit::Int(lit) => match lit.base10_parse::<usize>() {
Ok(value) => Ok(value),
Err(e) => Err(syn::Error::new(
span,
format!("Failed to parse value of `{}` as integer: {}", field, e),
)),
},
_ => Err(syn::Error::new(
span,
format!("Failed to parse value of `{}` as integer.", field),
)),
}
}
fn parse_string(int: syn::Lit, span: Span, field: &str) -> Result<String, syn::Error> {
match int {
syn::Lit::Str(s) => Ok(s.value()),
syn::Lit::Verbatim(s) => Ok(s.to_string()),
_ => Err(syn::Error::new(
span,
format!("Failed to parse value of `{}` as string.", field),
)),
}
}
fn parse_path(lit: syn::Lit, span: Span, field: &str) -> Result<Path, syn::Error> {
match lit {
syn::Lit::Str(s) => {
let err = syn::Error::new(
span,
format!(
"Failed to parse value of `{}` as path: \"{}\"",
field,
s.value()
),
);
s.parse::<syn::Path>().map_err(|_| err.clone())
}
_ => Err(syn::Error::new(
span,
format!("Failed to parse value of `{}` as path.", field),
)),
}
}
fn parse_bool(bool: syn::Lit, span: Span, field: &str) -> Result<bool, syn::Error> {
match bool {
syn::Lit::Bool(b) => Ok(b.value),
_ => Err(syn::Error::new(
span,
format!("Failed to parse value of `{}` as bool.", field),
)),
}
}
fn build_config(
input: &ItemFn,
args: AttributeArgs,
is_test: bool,
rt_multi_thread: bool,
) -> Result<FinalConfig, syn::Error> {
if input.sig.asyncness.is_none() {
let msg = "the `async` keyword is missing from the function declaration";
return Err(syn::Error::new_spanned(input.sig.fn_token, msg));
}
let mut config = Configuration::new(is_test, rt_multi_thread);
let macro_name = config.macro_name();
for arg in args {
match arg {
syn::Meta::NameValue(namevalue) => {
let ident = namevalue
.path
.get_ident()
.ok_or_else(|| {
syn::Error::new_spanned(&namevalue, "Must have specified ident")
})?
.to_string()
.to_lowercase();
let lit = match &namevalue.value {
syn::Expr::Lit(syn::ExprLit { lit, .. }) => lit,
expr => return Err(syn::Error::new_spanned(expr, "Must be a literal")),
};
match ident.as_str() {
"worker_threads" => {
config.set_worker_threads(lit.clone(), syn::spanned::Spanned::span(lit))?;
}
"flavor" => {
config.set_flavor(lit.clone(), syn::spanned::Spanned::span(lit))?;
}
"start_paused" => {
config.set_start_paused(lit.clone(), syn::spanned::Spanned::span(lit))?;
}
"core_threads" => {
let msg = "Attribute `core_threads` is renamed to `worker_threads`";
return Err(syn::Error::new_spanned(namevalue, msg));
}
"crate" => {
config.set_crate_name(lit.clone(), syn::spanned::Spanned::span(lit))?;
}
name => {
let msg = format!(
"Unknown attribute {} is specified; expected one of: `flavor`, `worker_threads`, `start_paused`, `crate`",
name,
);
return Err(syn::Error::new_spanned(namevalue, msg));
}
}
}
syn::Meta::Path(path) => {
let name = path
.get_ident()
.ok_or_else(|| syn::Error::new_spanned(&path, "Must have specified ident"))?
.to_string()
.to_lowercase();
let msg = match name.as_str() {
"threaded_scheduler" | "multi_thread" => {
format!(
"Set the runtime flavor with #[{}(flavor = \"multi_thread\")].",
macro_name
)
}
"basic_scheduler" | "current_thread" | "single_threaded" => {
format!(
"Set the runtime flavor with #[{}(flavor = \"current_thread\")].",
macro_name
)
}
"flavor" | "worker_threads" | "start_paused" => {
format!("The `{}` attribute requires an argument.", name)
}
name => {
format!("Unknown attribute {} is specified; expected one of: `flavor`, `worker_threads`, `start_paused`, `crate`", name)
}
};
return Err(syn::Error::new_spanned(path, msg));
}
other => {
return Err(syn::Error::new_spanned(
other,
"Unknown attribute inside the macro",
));
}
}
}
config.build()
}
fn parse_knobs(mut input: ItemFn, is_test: bool, config: FinalConfig) -> TokenStream {
input.sig.asyncness = None;
// If type mismatch occurs, the current rustc points to the last statement.
let (last_stmt_start_span, last_stmt_end_span) = {
let mut last_stmt = input.stmts.last().cloned().unwrap_or_default().into_iter();
// `Span` on stable Rust has a limitation that only points to the first
// token, not the whole tokens. We can work around this limitation by
// using the first/last span of the tokens like
// `syn::Error::new_spanned` does.
let start = last_stmt.next().map_or_else(Span::call_site, |t| t.span());
let end = last_stmt.last().map_or(start, |t| t.span());
(start, end)
};
let crate_path = config
.crate_name
.map(ToTokens::into_token_stream)
.unwrap_or_else(|| Ident::new("tokio", last_stmt_start_span).into_token_stream());
let mut rt = match config.flavor {
RuntimeFlavor::CurrentThread => quote_spanned! {last_stmt_start_span=>
#crate_path::runtime::Builder::new_current_thread()
},
RuntimeFlavor::Threaded => quote_spanned! {last_stmt_start_span=>
#crate_path::runtime::Builder::new_multi_thread()
},
};
if let Some(v) = config.worker_threads {
rt = quote! { #rt.worker_threads(#v) };
}
if let Some(v) = config.start_paused {
rt = quote! { #rt.start_paused(#v) };
}
let header = if is_test {
quote! {
#[::core::prelude::v1::test]
}
} else {
quote! {}
};
let body_ident = quote! { body };
let last_block = quote_spanned! {last_stmt_end_span=>
#[allow(clippy::expect_used, clippy::diverging_sub_expression)]
{
return #rt
.enable_all()
.build()
.expect("Failed building the Runtime")
.block_on(#body_ident);
}
};
let body = input.body();
// For test functions pin the body to the stack and use `Pin<&mut dyn
// Future>` to reduce the amount of `Runtime::block_on` (and related
// functions) copies we generate during compilation due to the generic
// parameter `F` (the future to block on). This could have an impact on
// performance, but because it's only for testing it's unlikely to be very
// large.
//
// We don't do this for the main function as it should only be used once so
// there will be no benefit.
let body = if is_test {
let output_type = match &input.sig.output {
// For functions with no return value syn doesn't print anything,
// but that doesn't work as `Output` for our boxed `Future`, so
// default to `()` (the same type as the function output).
syn::ReturnType::Default => quote! { () },
syn::ReturnType::Type(_, ret_type) => quote! { #ret_type },
};
quote! {
let body = async #body;
#crate_path::pin!(body);
let body: ::std::pin::Pin<&mut dyn ::std::future::Future<Output = #output_type>> = body;
}
} else {
quote! {
let body = async #body;
}
};
input.into_tokens(header, body, last_block)
}
fn token_stream_with_error(mut tokens: TokenStream, error: syn::Error) -> TokenStream {
tokens.extend(error.into_compile_error());
tokens
}
#[cfg(not(test))] // Work around for rust-lang/rust#62127
pub(crate) fn main(args: TokenStream, item: TokenStream, rt_multi_thread: bool) -> TokenStream {
// If any of the steps for this macro fail, we still want to expand to an item that is as close
// to the expected output as possible. This helps out IDEs such that completions and other
// related features keep working.
let input: ItemFn = match syn::parse2(item.clone()) {
Ok(it) => it,
Err(e) => return token_stream_with_error(item, e),
};
let config = if input.sig.ident == "main" && !input.sig.inputs.is_empty() {
let msg = "the main function cannot accept arguments";
Err(syn::Error::new_spanned(&input.sig.ident, msg))
} else {
AttributeArgs::parse_terminated
.parse2(args)
.and_then(|args| build_config(&input, args, false, rt_multi_thread))
};
match config {
Ok(config) => parse_knobs(input, false, config),
Err(e) => token_stream_with_error(parse_knobs(input, false, DEFAULT_ERROR_CONFIG), e),
}
}
pub(crate) fn test(args: TokenStream, item: TokenStream, rt_multi_thread: bool) -> TokenStream {
// If any of the steps for this macro fail, we still want to expand to an item that is as close
// to the expected output as possible. This helps out IDEs such that completions and other
// related features keep working.
let input: ItemFn = match syn::parse2(item.clone()) {
Ok(it) => it,
Err(e) => return token_stream_with_error(item, e),
};
let config = if let Some(attr) = input.attrs().find(|attr| attr.meta.path().is_ident("test")) {
let msg = "second test attribute is supplied";
Err(syn::Error::new_spanned(attr, msg))
} else {
AttributeArgs::parse_terminated
.parse2(args)
.and_then(|args| build_config(&input, args, true, rt_multi_thread))
};
match config {
Ok(config) => parse_knobs(input, true, config),
Err(e) => token_stream_with_error(parse_knobs(input, true, DEFAULT_ERROR_CONFIG), e),
}
}
struct ItemFn {
outer_attrs: Vec<Attribute>,
vis: Visibility,
sig: Signature,
brace_token: syn::token::Brace,
inner_attrs: Vec<Attribute>,
stmts: Vec<proc_macro2::TokenStream>,
}
impl ItemFn {
/// Access all attributes of the function item.
fn attrs(&self) -> impl Iterator<Item = &Attribute> {
self.outer_attrs.iter().chain(self.inner_attrs.iter())
}
/// Get the body of the function item in a manner so that it can be
/// conveniently used with the `quote!` macro.
fn body(&self) -> Body<'_> {
Body {
brace_token: self.brace_token,
stmts: &self.stmts,
}
}
/// Convert our local function item into a token stream.
fn into_tokens(
self,
header: proc_macro2::TokenStream,
body: proc_macro2::TokenStream,
last_block: proc_macro2::TokenStream,
) -> TokenStream {
let mut tokens = proc_macro2::TokenStream::new();
header.to_tokens(&mut tokens);
// Outer attributes are simply streamed as-is.
for attr in self.outer_attrs {
attr.to_tokens(&mut tokens);
}
// Inner attributes require extra care, since they're not supported on
// blocks (which is what we're expanded into) we instead lift them
// outside of the function. This matches the behaviour of `syn`.
for mut attr in self.inner_attrs {
attr.style = syn::AttrStyle::Outer;
attr.to_tokens(&mut tokens);
}
self.vis.to_tokens(&mut tokens);
self.sig.to_tokens(&mut tokens);
self.brace_token.surround(&mut tokens, |tokens| {
body.to_tokens(tokens);
last_block.to_tokens(tokens);
});
tokens
}
}
impl Parse for ItemFn {
#[inline]
fn parse(input: ParseStream<'_>) -> syn::Result<Self> {
// This parse implementation has been largely lifted from `syn`, with
// the exception of:
// * We don't have access to the plumbing necessary to parse inner
// attributes in-place.
// * We do our own statements parsing to avoid recursively parsing
// entire statements and only look for the parts we're interested in.
let outer_attrs = input.call(Attribute::parse_outer)?;
let vis: Visibility = input.parse()?;
let sig: Signature = input.parse()?;
let content;
let brace_token = braced!(content in input);
let inner_attrs = Attribute::parse_inner(&content)?;
let mut buf = proc_macro2::TokenStream::new();
let mut stmts = Vec::new();
while !content.is_empty() {
if let Some(semi) = content.parse::<Option<syn::Token![;]>>()? {
semi.to_tokens(&mut buf);
stmts.push(buf);
buf = proc_macro2::TokenStream::new();
continue;
}
// Parse a single token tree and extend our current buffer with it.
// This avoids parsing the entire content of the sub-tree.
buf.extend([content.parse::<TokenTree>()?]);
}
if !buf.is_empty() {
stmts.push(buf);
}
Ok(Self {
outer_attrs,
vis,
sig,
brace_token,
inner_attrs,
stmts,
})
}
}
struct Body<'a> {
brace_token: syn::token::Brace,
// Statements, with terminating `;`.
stmts: &'a [TokenStream],
}
impl ToTokens for Body<'_> {
fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) {
self.brace_token.surround(tokens, |tokens| {
for stmt in self.stmts {
stmt.to_tokens(tokens);
}
})
}
}

third_party/rust/tokio-macros/src/lib.rs vendored Normal file

@ -0,0 +1,489 @@
#![allow(clippy::needless_doctest_main)]
#![warn(
missing_debug_implementations,
missing_docs,
rust_2018_idioms,
unreachable_pub
)]
#![doc(test(
no_crate_inject,
attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables))
))]
//! Macros for use with Tokio
// This `extern` is required for older `rustc` versions but newer `rustc`
// versions warn about the unused `extern crate`.
#[allow(unused_extern_crates)]
extern crate proc_macro;
mod entry;
mod select;
use proc_macro::TokenStream;
/// Marks async function to be executed by the selected runtime. This macro
/// helps set up a `Runtime` without requiring the user to use
/// [Runtime](../tokio/runtime/struct.Runtime.html) or
/// [Builder](../tokio/runtime/struct.Builder.html) directly.
///
/// Note: This macro is designed to be simplistic and targets applications that
/// do not require a complex setup. If the provided functionality is not
/// sufficient, you may be interested in using
/// [Builder](../tokio/runtime/struct.Builder.html), which provides a more
/// powerful interface.
///
/// Note: This macro can be used on any function and not just the `main`
/// function. Using it on a non-main function makes the function behave as if it
/// was synchronous by starting a new runtime each time it is called. If the
/// function is called often, it is preferable to create the runtime using the
/// runtime builder so the runtime can be reused across calls.
///
/// # Non-worker async function
///
/// Note that the async function marked with this macro does not run as a
/// worker. The expectation is that other tasks are spawned by the function here.
/// Awaiting on other futures from the function provided here will not
/// perform as fast as those spawned as workers.
///
/// # Multi-threaded runtime
///
/// To use the multi-threaded runtime, the macro can be configured using
///
/// ```
/// #[tokio::main(flavor = "multi_thread", worker_threads = 10)]
/// # async fn main() {}
/// ```
///
/// The `worker_threads` option configures the number of worker threads, and
/// defaults to the number of cpus on the system. This is the default flavor.
///
/// Note: The multi-threaded runtime requires the `rt-multi-thread` feature
/// flag.
///
/// # Current thread runtime
///
/// To use the single-threaded runtime known as the `current_thread` runtime,
/// the macro can be configured using
///
/// ```
/// #[tokio::main(flavor = "current_thread")]
/// # async fn main() {}
/// ```
///
/// ## Function arguments:
///
/// Arguments are allowed for any functions aside from `main` which is special
///
/// ## Usage
///
/// ### Using the multi-thread runtime
///
/// ```rust
/// #[tokio::main]
/// async fn main() {
/// println!("Hello world");
/// }
/// ```
///
/// Equivalent code not using `#[tokio::main]`
///
/// ```rust
/// fn main() {
/// tokio::runtime::Builder::new_multi_thread()
/// .enable_all()
/// .build()
/// .unwrap()
/// .block_on(async {
/// println!("Hello world");
/// })
/// }
/// ```
///
/// ### Using current thread runtime
///
/// The basic scheduler is single-threaded.
///
/// ```rust
/// #[tokio::main(flavor = "current_thread")]
/// async fn main() {
/// println!("Hello world");
/// }
/// ```
///
/// Equivalent code not using `#[tokio::main]`
///
/// ```rust
/// fn main() {
/// tokio::runtime::Builder::new_current_thread()
/// .enable_all()
/// .build()
/// .unwrap()
/// .block_on(async {
/// println!("Hello world");
/// })
/// }
/// ```
///
/// ### Set number of worker threads
///
/// ```rust
/// #[tokio::main(worker_threads = 2)]
/// async fn main() {
/// println!("Hello world");
/// }
/// ```
///
/// Equivalent code not using `#[tokio::main]`
///
/// ```rust
/// fn main() {
/// tokio::runtime::Builder::new_multi_thread()
/// .worker_threads(2)
/// .enable_all()
/// .build()
/// .unwrap()
/// .block_on(async {
/// println!("Hello world");
/// })
/// }
/// ```
///
/// ### Configure the runtime to start with time paused
///
/// ```rust
/// #[tokio::main(flavor = "current_thread", start_paused = true)]
/// async fn main() {
/// println!("Hello world");
/// }
/// ```
///
/// Equivalent code not using `#[tokio::main]`
///
/// ```rust
/// fn main() {
/// tokio::runtime::Builder::new_current_thread()
/// .enable_all()
/// .start_paused(true)
/// .build()
/// .unwrap()
/// .block_on(async {
/// println!("Hello world");
/// })
/// }
/// ```
///
/// Note that `start_paused` requires the `test-util` feature to be enabled.
///
/// ### Rename package
///
/// ```rust
/// use tokio as tokio1;
///
/// #[tokio1::main(crate = "tokio1")]
/// async fn main() {
/// println!("Hello world");
/// }
/// ```
///
/// Equivalent code not using `#[tokio::main]`
///
/// ```rust
/// use tokio as tokio1;
///
/// fn main() {
/// tokio1::runtime::Builder::new_multi_thread()
/// .enable_all()
/// .build()
/// .unwrap()
/// .block_on(async {
/// println!("Hello world");
/// })
/// }
/// ```
#[proc_macro_attribute]
#[cfg(not(test))] // Work around for rust-lang/rust#62127
pub fn main(args: TokenStream, item: TokenStream) -> TokenStream {
entry::main(args.into(), item.into(), true).into()
}
/// Marks async function to be executed by selected runtime. This macro helps set up a `Runtime`
/// without requiring the user to use [Runtime](../tokio/runtime/struct.Runtime.html) or
/// [Builder](../tokio/runtime/struct.Builder.html) directly.
///
/// ## Function arguments:
///
/// Arguments are allowed for any functions aside from `main` which is special
///
/// ## Usage
///
/// ### Using default
///
/// ```rust
/// #[tokio::main(flavor = "current_thread")]
/// async fn main() {
/// println!("Hello world");
/// }
/// ```
///
/// Equivalent code not using `#[tokio::main]`
///
/// ```rust
/// fn main() {
/// tokio::runtime::Builder::new_current_thread()
/// .enable_all()
/// .build()
/// .unwrap()
/// .block_on(async {
/// println!("Hello world");
/// })
/// }
/// ```
///
/// ### Rename package
///
/// ```rust
/// use tokio as tokio1;
///
/// #[tokio1::main(crate = "tokio1")]
/// async fn main() {
/// println!("Hello world");
/// }
/// ```
///
/// Equivalent code not using `#[tokio::main]`
///
/// ```rust
/// use tokio as tokio1;
///
/// fn main() {
/// tokio1::runtime::Builder::new_multi_thread()
/// .enable_all()
/// .build()
/// .unwrap()
/// .block_on(async {
/// println!("Hello world");
/// })
/// }
/// ```
#[proc_macro_attribute]
#[cfg(not(test))] // Work around for rust-lang/rust#62127
pub fn main_rt(args: TokenStream, item: TokenStream) -> TokenStream {
entry::main(args.into(), item.into(), false).into()
}
/// Marks async function to be executed by runtime, suitable to test environment.
/// This macro helps set up a `Runtime` without requiring the user to use
/// [Runtime](../tokio/runtime/struct.Runtime.html) or
/// [Builder](../tokio/runtime/struct.Builder.html) directly.
///
/// Note: This macro is designed to be simplistic and targets applications that
/// do not require a complex setup. If the provided functionality is not
/// sufficient, you may be interested in using
/// [Builder](../tokio/runtime/struct.Builder.html), which provides a more
/// powerful interface.
///
/// # Multi-threaded runtime
///
/// To use the multi-threaded runtime, the macro can be configured using
///
/// ```no_run
/// #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
/// async fn my_test() {
/// assert!(true);
/// }
/// ```
///
/// The `worker_threads` option configures the number of worker threads, and
/// defaults to the number of cpus on the system.
///
/// Note: The multi-threaded runtime requires the `rt-multi-thread` feature
/// flag.
///
/// # Current thread runtime
///
/// The default test runtime is single-threaded. Each test gets a
/// separate current-thread runtime.
///
/// ```no_run
/// #[tokio::test]
/// async fn my_test() {
/// assert!(true);
/// }
/// ```
///
/// ## Usage
///
/// ### Using the multi-thread runtime
///
/// ```no_run
/// #[tokio::test(flavor = "multi_thread")]
/// async fn my_test() {
/// assert!(true);
/// }
/// ```
///
/// Equivalent code not using `#[tokio::test]`
///
/// ```no_run
/// #[test]
/// fn my_test() {
/// tokio::runtime::Builder::new_multi_thread()
/// .enable_all()
/// .build()
/// .unwrap()
/// .block_on(async {
/// assert!(true);
/// })
/// }
/// ```
///
/// ### Using current thread runtime
///
/// ```no_run
/// #[tokio::test]
/// async fn my_test() {
/// assert!(true);
/// }
/// ```
///
/// Equivalent code not using `#[tokio::test]`
///
/// ```no_run
/// #[test]
/// fn my_test() {
/// tokio::runtime::Builder::new_current_thread()
/// .enable_all()
/// .build()
/// .unwrap()
/// .block_on(async {
/// assert!(true);
/// })
/// }
/// ```
///
/// ### Set number of worker threads
///
/// ```no_run
/// #[tokio::test(flavor ="multi_thread", worker_threads = 2)]
/// async fn my_test() {
/// assert!(true);
/// }
/// ```
///
/// Equivalent code not using `#[tokio::test]`
///
/// ```no_run
/// #[test]
/// fn my_test() {
/// tokio::runtime::Builder::new_multi_thread()
/// .worker_threads(2)
/// .enable_all()
/// .build()
/// .unwrap()
/// .block_on(async {
/// assert!(true);
/// })
/// }
/// ```
///
/// ### Configure the runtime to start with time paused
///
/// ```no_run
/// #[tokio::test(start_paused = true)]
/// async fn my_test() {
/// assert!(true);
/// }
/// ```
///
/// Equivalent code not using `#[tokio::test]`
///
/// ```no_run
/// #[test]
/// fn my_test() {
/// tokio::runtime::Builder::new_current_thread()
/// .enable_all()
/// .start_paused(true)
/// .build()
/// .unwrap()
/// .block_on(async {
/// assert!(true);
/// })
/// }
/// ```
///
/// Note that `start_paused` requires the `test-util` feature to be enabled.
///
/// ### Rename package
///
/// ```rust
/// use tokio as tokio1;
///
/// #[tokio1::test(crate = "tokio1")]
/// async fn my_test() {
/// println!("Hello world");
/// }
/// ```
#[proc_macro_attribute]
pub fn test(args: TokenStream, item: TokenStream) -> TokenStream {
entry::test(args.into(), item.into(), true).into()
}
/// Marks async function to be executed by runtime, suitable to test environment
///
/// ## Usage
///
/// ```no_run
/// #[tokio::test]
/// async fn my_test() {
/// assert!(true);
/// }
/// ```
#[proc_macro_attribute]
pub fn test_rt(args: TokenStream, item: TokenStream) -> TokenStream {
entry::test(args.into(), item.into(), false).into()
}
/// Always fails with the error message below.
/// ```text
/// The #[tokio::main] macro requires rt or rt-multi-thread.
/// ```
#[proc_macro_attribute]
pub fn main_fail(_args: TokenStream, _item: TokenStream) -> TokenStream {
syn::Error::new(
proc_macro2::Span::call_site(),
"The #[tokio::main] macro requires rt or rt-multi-thread.",
)
.to_compile_error()
.into()
}
/// Always fails with the error message below.
/// ```text
/// The #[tokio::test] macro requires rt or rt-multi-thread.
/// ```
#[proc_macro_attribute]
pub fn test_fail(_args: TokenStream, _item: TokenStream) -> TokenStream {
syn::Error::new(
proc_macro2::Span::call_site(),
"The #[tokio::test] macro requires rt or rt-multi-thread.",
)
.to_compile_error()
.into()
}
/// Implementation detail of the `select!` macro. This macro is **not** intended
/// to be used as part of the public API and is permitted to change.
#[proc_macro]
#[doc(hidden)]
pub fn select_priv_declare_output_enum(input: TokenStream) -> TokenStream {
select::declare_output_enum(input)
}
/// Implementation detail of the `select!` macro. This macro is **not** intended
/// to be used as part of the public API and is permitted to change.
#[proc_macro]
#[doc(hidden)]
pub fn select_priv_clean_pattern(input: TokenStream) -> TokenStream {
select::clean_pattern_macro(input)
}


@ -0,0 +1,109 @@
use proc_macro::{TokenStream, TokenTree};
use proc_macro2::Span;
use quote::quote;
use syn::{parse::Parser, Ident};
pub(crate) fn declare_output_enum(input: TokenStream) -> TokenStream {
// passed in is: `(_ _ _)` with one `_` per branch
let branches = match input.into_iter().next() {
Some(TokenTree::Group(group)) => group.stream().into_iter().count(),
_ => panic!("unexpected macro input"),
};
let variants = (0..branches)
.map(|num| Ident::new(&format!("_{}", num), Span::call_site()))
.collect::<Vec<_>>();
// Use a bitfield to track which futures completed
let mask = Ident::new(
if branches <= 8 {
"u8"
} else if branches <= 16 {
"u16"
} else if branches <= 32 {
"u32"
} else if branches <= 64 {
"u64"
} else {
panic!("up to 64 branches supported");
},
Span::call_site(),
);
TokenStream::from(quote! {
pub(super) enum Out<#( #variants ),*> {
#( #variants(#variants), )*
// Include a `Disabled` variant signifying that all select branches
// failed to resolve.
Disabled,
}
pub(super) type Mask = #mask;
})
}
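// For illustration, an input of three branches, `(_ _ _)`, expands to roughly:
//
//     pub(super) enum Out<_0, _1, _2> {
//         _0(_0),
//         _1(_1),
//         _2(_2),
//         Disabled,
//     }
//     pub(super) type Mask = u8;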
pub(crate) fn clean_pattern_macro(input: TokenStream) -> TokenStream {
// If this isn't a pattern, we return the token stream as-is. The select!
// macro is using it in a location requiring a pattern, so an error will be
// emitted there.
let mut input: syn::Pat = match syn::Pat::parse_single.parse(input.clone()) {
Ok(it) => it,
Err(_) => return input,
};
clean_pattern(&mut input);
quote::ToTokens::into_token_stream(input).into()
}
// Removes any occurrences of ref or mut in the provided pattern.
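// For example, `Some(ref mut value)` becomes `Some(value)` and `&mut inner`
// becomes `&inner`; all other pattern kinds are left untouched apart from
// recursing into their sub-patterns.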
fn clean_pattern(pat: &mut syn::Pat) {
match pat {
syn::Pat::Lit(_literal) => {}
syn::Pat::Macro(_macro) => {}
syn::Pat::Path(_path) => {}
syn::Pat::Range(_range) => {}
syn::Pat::Rest(_rest) => {}
syn::Pat::Verbatim(_tokens) => {}
syn::Pat::Wild(_underscore) => {}
syn::Pat::Ident(ident) => {
ident.by_ref = None;
ident.mutability = None;
if let Some((_at, pat)) = &mut ident.subpat {
clean_pattern(&mut *pat);
}
}
syn::Pat::Or(or) => {
for case in or.cases.iter_mut() {
clean_pattern(case);
}
}
syn::Pat::Slice(slice) => {
for elem in slice.elems.iter_mut() {
clean_pattern(elem);
}
}
syn::Pat::Struct(struct_pat) => {
for field in struct_pat.fields.iter_mut() {
clean_pattern(&mut field.pat);
}
}
syn::Pat::Tuple(tuple) => {
for elem in tuple.elems.iter_mut() {
clean_pattern(elem);
}
}
syn::Pat::TupleStruct(tuple) => {
for elem in tuple.elems.iter_mut() {
clean_pattern(elem);
}
}
syn::Pat::Reference(reference) => {
reference.mutability = None;
clean_pattern(&mut reference.pat);
}
syn::Pat::Type(type_pat) => {
clean_pattern(&mut type_pat.pat);
}
_ => {}
}
}