Bug 1689358 - Generate minidumps for child process crashes using the minidump-writer crate on Android r=glandium,supply-chain-reviewers

Differential Revision: https://phabricator.services.mozilla.com/D178783
Gabriele Svelto 2023-07-21 12:05:38 +00:00
parent 11f3384263
commit 9e6f16b062
104 changed files with 9486 additions and 3058 deletions
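The commit message above is the whole story at a high level: child-process crashes on Android are now dumped with the minidump-writer crate. The sketch below is not code from this commit; it only illustrates roughly how a parent process writes a minidump for a child, under the assumption that the crate's Linux/Android entry point is minidump_writer::minidump_writer::MinidumpWriter with a new(pid, blamed_tid) constructor and a dump(&mut destination) method (module path and signatures are assumptions about the 0.8.x API, not verified against this tree).

// Hypothetical sketch, not code from this commit: the module path, constructor
// and method names are assumptions about the minidump-writer 0.8.x Linux/Android API.
use std::fs::File;

fn write_child_minidump(child_pid: i32, crashed_tid: i32) -> Result<(), Box<dyn std::error::Error>> {
    // Arbitrary example path; Gecko chooses its own dump location.
    let mut destination = File::create("/data/local/tmp/child.dmp")?;
    // Assumed API: build a writer for the (ptrace-attachable) child process,
    // blame the thread that crashed, and stream the minidump into the file.
    minidump_writer::minidump_writer::MinidumpWriter::new(child_pid, crashed_tid)
        .dump(&mut destination)?;
    Ok(())
}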


@ -110,6 +110,11 @@ git = "https://github.com/mozilla/uniffi-rs.git"
rev = "bc7ff8977bf38d0fdd1a458810b14f434d4dc4de"
replace-with = "vendored-sources"
[source."git+https://github.com/rust-minidump/minidump-writer.git?rev=a15bd5cab6a3de251c0c23264be14b977c0af09c"]
git = "https://github.com/rust-minidump/minidump-writer.git"
rev = "a15bd5cab6a3de251c0c23264be14b977c0af09c"
replace-with = "vendored-sources"
# Take advantage of the fact that cargo will treat lines starting with #
# as comments to add preprocessing directives. This file can thus by copied

Cargo.lock (generated)

@ -321,7 +321,7 @@ dependencies = [
"libc",
"libudev",
"log",
"memoffset",
"memoffset 0.8.999",
"nom",
"nss-gk-api",
"pkcs11-bindings",
@ -930,9 +930,9 @@ dependencies = [
[[package]]
name = "crash-context"
version = "0.6.0"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d433b84b88830c0c253292a52fe43bd3f385668b6a39a84ce291e6e7db52724"
checksum = "b85cef661eeca0c6675116310936972c520ebb0a33ddef16fd7efc957f4c1288"
dependencies = [
"cfg-if 1.0.0",
"libc",
@ -978,7 +978,7 @@ dependencies = [
"autocfg",
"cfg-if 1.0.0",
"crossbeam-utils",
"memoffset",
"memoffset 0.8.999",
"scopeguard",
]
@ -2294,9 +2294,16 @@ dependencies = [
[[package]]
name = "goblin"
version = "0.6.0"
version = "0.6.999"
dependencies = [
"goblin 0.7.1",
]
[[package]]
name = "goblin"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "572564d6cba7d09775202c8e7eebc4d534d5ae36578ab402fb21e182a0ac9505"
checksum = "f27c1b4369c2cd341b5de549380158b105a04c331be5db9110eef7b6d2742134"
dependencies = [
"log",
"plain",
@ -3173,9 +3180,16 @@ dependencies = [
[[package]]
name = "memoffset"
version = "0.8.0"
version = "0.8.999"
dependencies = [
"memoffset 0.9.0",
]
[[package]]
name = "memoffset"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1"
checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c"
dependencies = [
"autocfg",
]
@ -3246,9 +3260,9 @@ dependencies = [
[[package]]
name = "minidump-common"
version = "0.15.2"
version = "0.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "694717103b2c15f8c16ddfaec1333fe15673bc22b10ffa6164427415701974ba"
checksum = "9114b15d86ee5e5c3e3b4d05821d17237adbf98c11dd07fc8f5a9b037a010ee5"
dependencies = [
"bitflags 1.3.2",
"debugid",
@ -3261,21 +3275,21 @@ dependencies = [
[[package]]
name = "minidump-writer"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8109e02c3cf4a587eea0bec18ccdfecc9041f91b5ebffa223b1e692c9a223c26"
version = "0.8.1"
source = "git+https://github.com/rust-minidump/minidump-writer.git?rev=a15bd5cab6a3de251c0c23264be14b977c0af09c#a15bd5cab6a3de251c0c23264be14b977c0af09c"
dependencies = [
"bitflags 2.999.999",
"byteorder",
"cfg-if 1.0.0",
"crash-context",
"goblin",
"goblin 0.7.1",
"libc",
"mach2",
"memmap2",
"memoffset",
"memoffset 0.9.0",
"minidump-common",
"nix 0.26.2",
"procfs-core",
"scroll",
"tempfile",
"thiserror",
@ -3390,10 +3404,10 @@ dependencies = [
name = "mozannotation_server"
version = "0.1.0"
dependencies = [
"goblin",
"goblin 0.7.1",
"libc",
"mach2",
"memoffset",
"memoffset 0.8.999",
"mozannotation_client",
"nsstring",
"thin-vec",
@ -4196,6 +4210,16 @@ dependencies = [
"xpcom",
]
[[package]]
name = "procfs-core"
version = "0.16.0-RC1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ee00a90a41543fce203e6a8771bad043bfd6d88de8fd4e3118435a233d0c3c4"
dependencies = [
"bitflags 2.999.999",
"hex",
]
[[package]]
name = "profiler-macros"
version = "0.1.0"
@ -5629,7 +5653,7 @@ dependencies = [
"camino",
"fs-err",
"glob",
"goblin",
"goblin 0.6.999",
"heck",
"once_cell",
"paste",


@ -43,7 +43,7 @@ exclude = [
"dom/origin-trials/ffi",
# Excluded because we don't want to vendor their dependencies.
"intl/l10n/rust/l10nregistry-tests"
"intl/l10n/rust/l10nregistry-tests",
]
# Use the new dependency resolver to reduce some of the platform-specific dependencies.
@ -150,6 +150,13 @@ nix = { path = "build/rust/nix" }
# Patch autocfg to hide rustc output. Workaround for https://github.com/cuviper/autocfg/issues/30
autocfg = { path = "third_party/rust/autocfg" }
# Patch goblin 0.6.0 to 0.7.0 because uniffi crates still use the older version
# and we want to avoid duplications
goblin = { path = "build/rust/goblin" }
# Patch memoffset from 0.8.0 to 0.9.0 since it's compatible and it avoids duplication
memoffset = { path = "build/rust/memoffset" }
# The following overrides point to dummy projects, as a temporary measure until this is resolved:
# https://github.com/rust-lang/cargo/issues/6179
js-sys = { path = "build/rust/dummy-web/js-sys" }
@ -167,14 +174,16 @@ moz_asserts = { path = "mozglue/static/rust/moz_asserts" }
rure = { path = "third_party/rust/rure" }
# Other overrides
chardetng = { git = "https://github.com/hsivonen/chardetng", rev="3484d3e3ebdc8931493aa5df4d7ee9360a90e76b" }
chardetng_c = { git = "https://github.com/hsivonen/chardetng_c", rev="ed8a4c6f900a90d4dbc1d64b856e61490a1c3570" }
coremidi = { git = "https://github.com/chris-zen/coremidi.git", rev="fc68464b5445caf111e41f643a2e69ccce0b4f83" }
chardetng = { git = "https://github.com/hsivonen/chardetng", rev = "3484d3e3ebdc8931493aa5df4d7ee9360a90e76b" }
chardetng_c = { git = "https://github.com/hsivonen/chardetng_c", rev = "ed8a4c6f900a90d4dbc1d64b856e61490a1c3570" }
coremidi = { git = "https://github.com/chris-zen/coremidi.git", rev = "fc68464b5445caf111e41f643a2e69ccce0b4f83" }
cose = { git = "https://github.com/franziskuskiefer/cose-rust", rev = "43c22248d136c8b38fe42ea709d08da6355cf04b" }
firefox-on-glean = { path = "toolkit/components/glean/api" }
libudev-sys = { path = "dom/webauthn/libudev-sys" }
packed_simd = { package = "packed_simd_2", git = "https://github.com/hsivonen/packed_simd", rev="412f9a0aa556611de021bde89dee8fefe6e0fbbd" }
packed_simd = { package = "packed_simd_2", git = "https://github.com/hsivonen/packed_simd", rev = "412f9a0aa556611de021bde89dee8fefe6e0fbbd" }
midir = { git = "https://github.com/mozilla/midir.git", rev = "519e651241e867af3391db08f9ae6400bc023e18" }
# Avoid the dependency on chrono
minidump-writer = { git = "https://github.com/rust-minidump/minidump-writer.git", rev = "a15bd5cab6a3de251c0c23264be14b977c0af09c" }
# warp 0.3.3 + https://github.com/seanmonstar/warp/pull/1007
warp = { git = "https://github.com/glandium/warp", rev = "4af45fae95bc98b0eba1ef0db17e1dac471bb23d" }


@ -0,0 +1,11 @@
[package]
name = "goblin"
version = "0.6.999"
edition = "2018"
license = "MIT/Apache-2.0"
[lib]
path = "lib.rs"
[dependencies.goblin]
version = "0.7.0"

build/rust/goblin/lib.rs

@ -0,0 +1,11 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use goblin::*;
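The build/rust/goblin crate above is the usual in-tree semver-trick shim: it advertises itself as goblin 0.6.999 so crates that still ask for goblin 0.6 (per the Cargo.toml comment, the uniffi tooling) are satisfied through the [patch] entry, while its only content is a wholesale re-export of the real goblin 0.7. A minimal sketch, not from this patch, of a 0.6-era consumer that keeps compiling unchanged against the re-export (function name is illustrative):

use goblin::Object;

// Resolves the same way whether `goblin` is the real 0.6 crate or the 0.6.999
// shim, because the shim re-exports everything from goblin 0.7.
fn describe(bytes: &[u8]) -> goblin::error::Result<&'static str> {
    Ok(match Object::parse(bytes)? {
        Object::Elf(_) => "ELF",
        Object::PE(_) => "PE",
        Object::Mach(_) => "Mach-O",
        Object::Archive(_) => "archive",
        _ => "unknown",
    })
}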


@ -0,0 +1,11 @@
[package]
name = "memoffset"
version = "0.8.999"
edition = "2018"
license = "MIT/Apache-2.0"
[lib]
path = "lib.rs"
[dependencies.memoffset]
version = "0.9"


@ -0,0 +1,11 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use memoffset::*;
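build/rust/memoffset plays the same semver trick for memoffset: in-tree users that ask for 0.8 are transparently served 0.9 through this re-export. The macro they rely on, offset_of!, is unchanged between the two releases; a minimal sketch, not from this patch (struct and field names are illustrative):

use memoffset::offset_of;

#[repr(C)]
struct ThreadInfo {
    tid: u32,
    stack_pointer: u64,
}

fn main() {
    // Byte offset of `stack_pointer` within ThreadInfo; identical with
    // memoffset 0.8 and 0.9.
    println!("{}", offset_of!(ThreadInfo, stack_pointer));
}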


@ -797,6 +797,11 @@ manually-generated bindings (which are minimal). The few small bugfixes are
sound.
"""
[[audits.crash-context]]
who = "Gabriele Svelto <gsvelto@mozilla.com>"
criteria = "safe-to-deploy"
delta = "0.6.0 -> 0.6.1"
[[audits.crossbeam-channel]]
who = "Mike Hommey <mh+mozilla@glandium.org>"
criteria = "safe-to-deploy"
@ -1602,6 +1607,11 @@ criteria = "safe-to-deploy"
delta = "0.5.4 -> 0.6.0"
notes = "Mostly bug fixes and some added functionality"
[[audits.goblin]]
who = "Gabriele Svelto <gsvelto@mozilla.com>"
criteria = "safe-to-deploy"
delta = "0.6.0 -> 0.7.1"
[[audits.gpu-alloc]]
who = "Teodor Tanasoaia <ttanasoaia@mozilla.com>"
criteria = "safe-to-deploy"
@ -1900,6 +1910,11 @@ who = "Gabriele Svelto <gsvelto@mozilla.com>"
criteria = "safe-to-deploy"
delta = "0.6.5 -> 0.7.1"
[[audits.memoffset]]
who = "Gabriele Svelto <gsvelto@mozilla.com>"
criteria = "safe-to-deploy"
delta = "0.8.0 -> 0.9.0"
[[audits.metal]]
who = "Jim Blandy <jimb@red-bean.com>"
criteria = "safe-to-deploy"
@ -1922,28 +1937,33 @@ criteria = "safe-to-deploy"
version = "0.15.2"
notes = "The code in this crate was written or reviewed by Mozilla employees."
[[audits.minidump-common]]
who = "Gabriele Svelto <gsvelto@mozilla.com>"
criteria = "safe-to-deploy"
delta = "0.15.2 -> 0.17.0"
[[audits.minidump-writer]]
who = "Gabriele Svelto <gsvelto@mozilla.com>"
criteria = "safe-to-deploy"
version = "0.7.0"
notes = "The code in this crate was written or reviewed by Mozilla employees, the crate it evolved from was written specifically for gecko."
[[audits.minidump-writer]]
who = "Mike Hommey <mh+mozilla@glandium.org>"
criteria = "safe-to-deploy"
delta = "0.7.0 -> 0.7.0@git:59179c83ba62e4378619c6967c0b8c0c077cac2d"
[[audits.minidump-writer]]
who = "Bobby Holley <bobbyholley@gmail.com>"
criteria = "safe-to-deploy"
delta = "0.7.0 -> 0.7.0@git:7d76616d27b9dc87fe3a94639b8b4f947d52a6aa"
[[audits.minidump-writer]]
who = "Alex Franchuk <afranchuk@mozilla.com>"
criteria = "safe-to-deploy"
delta = "0.7.0 -> 0.8.0"
notes = "The code in this crate was written or reviewed by Mozilla employees, the crate it evolved from was written specifically for gecko."
[[audits.minidump-writer]]
who = "Gabriele Svelto <gsvelto@mozilla.com>"
criteria = "safe-to-deploy"
delta = "0.8.0 -> 0.8.1"
[[audits.minidump-writer]]
who = "Gabriele Svelto <gsvelto@mozilla.com>"
criteria = "safe-to-deploy"
delta = "0.8.1 -> 0.8.1@git:a15bd5cab6a3de251c0c23264be14b977c0af09c"
[[audits.miniz_oxide]]
who = "Mike Hommey <mh+mozilla@glandium.org>"
criteria = "safe-to-deploy"
@ -2380,6 +2400,11 @@ who = "Mike Hommey <mh+mozilla@glandium.org>"
criteria = "safe-to-deploy"
delta = "1.0.49 -> 1.0.51"
[[audits.procfs-core]]
who = "Gabriele Svelto <gsvelto@mozilla.com>"
criteria = "safe-to-deploy"
version = "0.16.0-RC1"
[[audits.profiling]]
who = "Mike Hommey <mh+mozilla@glandium.org>"
criteria = "safe-to-deploy"


@ -87,6 +87,10 @@ audit-as-crates-io = false
audit-as-crates-io = true
notes = "This is a pinned version of the upstream code, presumably to get a fix that hadn't been released yet. We should consider switching to the latest official release."
[policy.minidump-writer]
audit-as-crates-io = true
notes = "Unreleased upstream."
[policy."mio:0.6.23"]
audit-as-crates-io = true
notes = "Version 0.6.23 is a local fork of upstream which just twiddles some dependencies."


@ -1 +1 @@
{"files":{"CHANGELOG.md":"5108348ce76a50393b51b3711f77910f419fd782eaf5b5aff0c6650c50ad65c4","Cargo.toml":"d116b62ca2aec61c49001c19a37d5768407ee3d31fd8b8835ab8e9565e25b884","LICENSE-APACHE":"8173d5c29b4f956d532781d2b86e4e30f83e6b7878dce18c919451d6ba707c90","LICENSE-MIT":"090a294a492ab2f41388252312a65cf2f0e423330b721a68c6665ac64766753b","README.md":"5df5b51de9b86b2e724954224209463a48f8549fd023bcb10c1d9cecc754fff2","release.toml":"287ba3b6c89e3b289eae59827d36d6eb6e27b88cc2ada2c0d9a663c8b487117e","src/lib.rs":"26957a6a2555ab82aa9b6d3d1f24efaf20753d6c5eb1510395789283890ac1d1","src/linux.rs":"cf05c1217709a60adeea08e8623438f68a18dea66758b194de0e07ff398b090d","src/linux/getcontext.rs":"4164236732556d71cbb9e04bf4f2b41fd6f51f9bb94dfb974158cc5f49c3c789","src/linux/getcontext/aarch64.rs":"1193e68f06f7f2f4d3e64d80a196804e6cdfd03643ac50332c7af10928a5eccb","src/linux/getcontext/arm.rs":"682f163f4a96c21930e37427a6d687efc68199cbd8a9125b34d99a81280dd31b","src/linux/getcontext/x86.rs":"9f83062e99204d6ed24001be4d3b0d39974ab7a644003fb503795fcdf9316e87","src/linux/getcontext/x86_64.rs":"278ee4e5c64230da96c018ae2c539d1e3203f0ad4c9a9750044c2f88708f091e","src/mac.rs":"13d25443466d387eabf28adae361708f4b6297949c7eeb5bf1b38cb0ca13a418","src/mac/guard.rs":"115d1e8d5ac7bd9ecc666b11a0c584ed1e997160aacb0a1cc0f215ff5a1e9803","src/mac/ipc.rs":"2fc139ee5b70964bd726a30853d7fe9f74f7a6e0f8cf3d150e72a2ac802c1fba","src/mac/resource.rs":"8289db9294a45d6148329d537530512913c456a182783059a832767e39c67295","src/windows.rs":"c6c043cf56cf0840cc1373edc4bd39cf829566d181e50589174745629ab2ad37","tests/capture_context.rs":"899e94c522cd015fd1f45230aff5c8970346ba20623da46cd34da892bbd07f7e"},"package":"6d433b84b88830c0c253292a52fe43bd3f385668b6a39a84ce291e6e7db52724"}
{"files":{"CHANGELOG.md":"843168dabb93f6bc42076d4924040cd8c8a268f03cb02856b55691b5e9a873e1","Cargo.toml":"94c36b790e888c511e093c2b20ade75157daa86fad994d508d071bd4c049223a","LICENSE-APACHE":"8173d5c29b4f956d532781d2b86e4e30f83e6b7878dce18c919451d6ba707c90","LICENSE-MIT":"090a294a492ab2f41388252312a65cf2f0e423330b721a68c6665ac64766753b","README.md":"f91ee04dfaa356214af7ac68e4cfa1d6c8674f7fea08d65275f3fafde28301df","release.toml":"287ba3b6c89e3b289eae59827d36d6eb6e27b88cc2ada2c0d9a663c8b487117e","src/lib.rs":"26957a6a2555ab82aa9b6d3d1f24efaf20753d6c5eb1510395789283890ac1d1","src/linux.rs":"cf05c1217709a60adeea08e8623438f68a18dea66758b194de0e07ff398b090d","src/linux/getcontext.rs":"4164236732556d71cbb9e04bf4f2b41fd6f51f9bb94dfb974158cc5f49c3c789","src/linux/getcontext/aarch64.rs":"1193e68f06f7f2f4d3e64d80a196804e6cdfd03643ac50332c7af10928a5eccb","src/linux/getcontext/arm.rs":"682f163f4a96c21930e37427a6d687efc68199cbd8a9125b34d99a81280dd31b","src/linux/getcontext/x86.rs":"7c585ec44835910f99801cbb3ac34153e8d687b5dcbc682f9b7768873655c4a0","src/linux/getcontext/x86_64.rs":"db63a1c05e2c7c5b998f3c57b399972fc4e756eb36bcd119a99419c94470444a","src/mac.rs":"13d25443466d387eabf28adae361708f4b6297949c7eeb5bf1b38cb0ca13a418","src/mac/guard.rs":"115d1e8d5ac7bd9ecc666b11a0c584ed1e997160aacb0a1cc0f215ff5a1e9803","src/mac/ipc.rs":"2fc139ee5b70964bd726a30853d7fe9f74f7a6e0f8cf3d150e72a2ac802c1fba","src/mac/resource.rs":"8289db9294a45d6148329d537530512913c456a182783059a832767e39c67295","src/windows.rs":"c6c043cf56cf0840cc1373edc4bd39cf829566d181e50589174745629ab2ad37","tests/capture_context.rs":"899e94c522cd015fd1f45230aff5c8970346ba20623da46cd34da892bbd07f7e"},"package":"b85cef661eeca0c6675116310936972c520ebb0a33ddef16fd7efc957f4c1288"}


@ -9,11 +9,21 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
<!-- next-header -->
## [Unreleased] - ReleaseDate
## [0.6.1] - 2023-06-19
### Added
- [PR#76](https://github.com/EmbarkStudios/crash-handling/pull/76) added support for `i686-linux-android` and `x86_64-linux-android`. Thanks [@gabrielesvelto](https://github.com/gabrielesvelto)!
## [0.6.0] - 2023-04-03
### Changed
- [PR#70](https://github.com/EmbarkStudios/crash-handling/pull/70) removed the `winapi` dependency in favor of embedded bindings to avoid dependencies.
- [PR#70](https://github.com/EmbarkStudios/crash-handling/pull/70) removed the asm implementations for Windows CPU context retrieval in favor of using `RtlCaptureContext`. This means that floating state is not captured, but is otherwise an improvement.
### Added
- [PR#68](https://github.com/EmbarkStudios/crash-handling/pull/68) added capture context support for x86 Windows, but this change was supplanted in [PR#70](https://github.com/EmbarkStudios/crash-handling/pull/70) to use `RtlCaptureContext` instead.
### Fixed
- [PR#71](https://github.com/EmbarkStudios/crash-handling/pull/71) fixed the definition of `mcontext_t` for `i686-unknow-linux`. Thanks [@afranchuk](https://github.com/afranchuk)!
## [0.5.1] - 2022-11-17
### Fixed
- [PR#66](https://github.com/EmbarkStudios/crash-handling/pull/66) (apparently) resolved [#65](https://github.com/EmbarkStudios/crash-handling/issues/65) by...changing from AT&T to Intel syntax. This shouldn't have changed anything, but it did, and I'm too tired and have other things to work on, so here we are.
@ -58,7 +68,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Initial pass of crash-context, Linux only
<!-- next-url -->
[Unreleased]: https://github.com/EmbarkStudios/crash-handling/compare/crash-context-0.6.0...HEAD
[Unreleased]: https://github.com/EmbarkStudios/crash-handling/compare/crash-context-0.6.1...HEAD
[0.6.1]: https://github.com/EmbarkStudios/crash-handling/compare/crash-context-0.6.0...crash-context-0.6.1
[0.6.0]: https://github.com/EmbarkStudios/crash-handling/compare/crash-context-0.5.1...crash-context-0.6.0
[0.5.1]: https://github.com/EmbarkStudios/crash-handling/compare/crash-context-0.5.0...crash-context-0.5.1
[0.5.0]: https://github.com/EmbarkStudios/crash-handling/compare/crash-context-0.4.0...crash-context-0.5.0


@ -13,7 +13,7 @@
edition = "2021"
rust-version = "1.59.0"
name = "crash-context"
version = "0.6.0"
version = "0.6.1"
authors = [
"Embark <opensource@embark-studios.com>",
"Jake Shadle <jake.shadle@embark-studios.com>",


@ -21,19 +21,21 @@
## Supported targets
- `x86_64-unknown-linux-gnu`
- `x86_64-unknown-linux-musl`
- `i686-unknown-linux-gnu`
- `i686-unknown-linux-musl`
- `aarch64-android-linux`
- `aarch64-apple-darwin`
- `aarch64-unknown-linux-gnu`
- `aarch64-unknown-linux-musl`
- `aarch64-android-linux`
- `arm-linux-androideabi`
- `arm-unknown-linux-gnueabi`
- `arm-unknown-linux-musleabi`
- `x86_64-pc-windows-msvc`
- `i686-linux-android`
- `i686-unknown-linux-gnu`
- `i686-unknown-linux-musl`
- `x86_64-apple-darwin`
- `aarch64-apple-darwin`
- `x86_64-linux-android`
- `x86_64-pc-windows-msvc`
- `x86_64-unknown-linux-gnu`
- `x86_64-unknown-linux-musl`
## Contribution


@ -1,53 +1,68 @@
#[cfg(target_os = "android")]
compile_error!("please file an issue if you care about this target");
// Unfortunately, the asm! macro has a few really annoying limitations at the
// moment
//
// 1. const operands are unstable
// 2. cfg attributes can't be used inside the asm macro at all
//
// and the worst part is we need it for literally only 1 thing, using a different
// offset to the fpstate in ucontext depending on whether we are targeting android
// or not :(
macro_rules! asm_func {
($offset:expr) => {
std::arch::global_asm! {
".text",
".global crash_context_getcontext",
".hidden crash_context_getcontext",
".align 4",
".type crash_context_getcontext, @function",
"crash_context_getcontext:",
"movl 4(%esp), %eax", // eax = uc
std::arch::global_asm! {
".text",
".global crash_context_getcontext",
".hidden crash_context_getcontext",
".align 4",
".type crash_context_getcontext, @function",
"crash_context_getcontext:",
"movl 4(%esp), %eax", // eax = uc
// Save register values
"movl %ecx, 0x3c(%eax)",
"movl %edx, 0x38(%eax)",
"movl %ebx, 0x34(%eax)",
"movl %edi, 0x24(%eax)",
"movl %esi, 0x28(%eax)",
"movl %ebp, 0x2c(%eax)",
// Save register values
"movl %ecx, 0x3c(%eax)",
"movl %edx, 0x38(%eax)",
"movl %ebx, 0x34(%eax)",
"movl %edi, 0x24(%eax)",
"movl %esi, 0x28(%eax)",
"movl %ebp, 0x2c(%eax)",
"movl (%esp), %edx", /* return address */
"lea 4(%esp), %ecx", /* exclude return address from stack */
"mov %edx, 0x4c(%eax)",
"mov %ecx, 0x30(%eax)",
"movl (%esp), %edx", /* return address */
"lea 4(%esp), %ecx", /* exclude return address from stack */
"mov %edx, 0x4c(%eax)",
"mov %ecx, 0x30(%eax)",
"xorl %ecx, %ecx",
"movw %fs, %cx",
"mov %ecx, 0x18(%eax)",
"xorl %ecx, %ecx",
"movw %fs, %cx",
"mov %ecx, 0x18(%eax)",
"movl $0, 0x40(%eax)",
"movl $0, 0x40(%eax)",
// Save floating point state to fpregstate, then update
// the fpregs pointer to point to it
stringify!(leal $offset(%eax),%ecx),
"fnstenv (%ecx)",
"fldenv (%ecx)",
"mov %ecx, 0x60(%eax)",
// Save floating point state to fpregstate, then update
// the fpregs pointer to point to it
"leal 0xec(%eax), %ecx",
"fnstenv (%ecx)",
"fldenv (%ecx)",
"mov %ecx, 0x60(%eax)",
// Save signal mask: sigprocmask(SIGBLOCK, NULL, &uc->uc_sigmask)
"leal 0x6c(%eax), %edx",
"xorl %ecx, %ecx",
"push %edx", /* &uc->uc_sigmask */
"push %ecx", /* NULL */
"push %ecx", /* SIGBLOCK == 0 on i386 */
"call sigprocmask@PLT",
"addl $12, %esp",
// Save signal mask: sigprocmask(SIGBLOCK, NULL, &uc->uc_sigmask)
"leal 0x6c(%eax), %edx",
"xorl %ecx, %ecx",
"push %edx", /* &uc->uc_sigmask */
"push %ecx", /* NULL */
"push %ecx", /* SIGBLOCK == 0 on i386 */
"call sigprocmask@PLT",
"addl $12, %esp",
"movl $0, %eax",
"ret",
"movl $0, %eax",
"ret",
".size crash_context_getcontext, . - crash_context_getcontext",
options(att_syntax)
".size crash_context_getcontext, . - crash_context_getcontext",
options(att_syntax)
}
};
}
#[cfg(target_os = "linux")]
asm_func!(0xec);
#[cfg(target_os = "android")]
asm_func!(0x74);
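The rewrite above wraps the whole global_asm! block in a macro_rules! so that the single target-dependent value, the offset of the FP state inside the ucontext, can be substituted per target: const operands in asm! are unstable and cfg attributes cannot be used inside the asm body, so the cfg is applied at the macro instantiation instead. The same pattern in miniature, without the assembly (the constant name is illustrative, not from the crate):

// Pick a per-target constant at macro instantiation time, mirroring how
// asm_func! above is instantiated with 0xec (linux) or 0x74 (android).
macro_rules! define_fpstate_offset {
    ($offset:expr) => {
        pub const FPSTATE_OFFSET: usize = $offset;
    };
}

#[cfg(not(target_os = "android"))]
define_fpstate_offset!(0xec);
#[cfg(target_os = "android")]
define_fpstate_offset!(0x74);

fn main() {
    println!("ucontext fpstate offset: {:#x}", FPSTATE_OFFSET);
}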


@ -32,7 +32,7 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
// adding certain lines/blocks of asm based using cfg https://github.com/rust-lang/rust/issues/15701
// and they're not really inputs, just literals, so...yah
#[cfg(target_os = "linux")]
#[cfg(any(target_os = "linux", target_os = "android"))]
// Unfortunately, the asm! macro has a few really annoying limitations at the
// moment


@ -1 +1 @@
{"files":{"CHANGELOG.md":"45e187ea086d30738d40ecd0a893326d0acf416d754284786190b9b9eb60e2f6","Cargo.toml":"ba0aff6d6cbeddd63473b7ffe5e979b4a0d956d1c2271422a67f451555ea5f4e","LICENSE":"036bf6b6d6fd6dd1abda2ff6cdb672a63bdf32c468048720072910f2268a965f","README.md":"c5854ea5d60a3e3a0e89e9ed04b6b3653efe6d7e941894a734888abd52bf9212","src/archive/mod.rs":"ae739638d7267011bedf51712516d3485171d8f2df2ab6746a0d942d86efd6a6","src/elf/compression_header.rs":"2eb5fdda9177c1c897310d86714967de019b39c6e23b1f3a890dd3a659be0acc","src/elf/constants_header.rs":"f2ede290ecacf60b1719e9994aa45612bf0f7baf63806a293d4530a674e5861a","src/elf/constants_relocation.rs":"a010071cd2a25ab71e0c7181eb1d9f417daa2d1ec25a09c74bd12ad944892225","src/elf/dynamic.rs":"c26e75311f2da9e34dc4c0a2120dfcc20df88a41d67c52b9bf703258de018fd8","src/elf/gnu_hash.rs":"7a9fcaf6cb38167d20527364bdf9bc2379c44dede5d7666275a1eb20dc665179","src/elf/header.rs":"3391a1fa9b8e3923f7ce74caff0668d8ddb5b34767bf3da309ff497fd81c34c7","src/elf/mod.rs":"d2ecb62524d7da16a0f46b4b8fa43929839d6897ca93c9bb8f5299d76d66a977","src/elf/note.rs":"3c354f1108fa8e5a69f6cf629a36b61241a321f235a429d9e9900aa7a4c02f46","src/elf/program_header.rs":"7d934c7c761fc73b1a30a8fc2b048d98511b529dd5429cb0848b4bdd5dcdade7","src/elf/reloc.rs":"a4b7843c0e201f83c344c0681dfd285754a7211e3472ddd53024a520e17c847f","src/elf/section_header.rs":"ca52e85f6a0e10177368f2790d41bc9ae3348216bbd4393d483f0646030e1cc7","src/elf/sym.rs":"045c01107f4e100d6827cb819b82a28ea10c0d9bc00a1cdddb04a0865f1162ec","src/elf/symver.rs":"3f899201f64a702653d44288f860003e7acd75e38111d36479af823ed92b1341","src/error.rs":"af620a5692bca070dc727d49cdbb566a533bfb97724ca68932ae7fec7dc05cf6","src/lib.rs":"465eb53b540dfd142d204984ee7280130542d7f83d6c53691299d773f7394faf","src/mach/bind_opcodes.rs":"1dcacfb853d05c2c7e6dbb4509ee705a8ea645db0d334991a2293fef92eee851","src/mach/constants.rs":"c2a2381a0b9c3047d37582465e8965d995dca414d0da21fb7bcc6b8334e49eb6","src/mach/exports.rs":"d22122744673a3ce5f54b2b4b20bfa47d17378e64d3dda2858dd13add74ed3dc","src/mach/fat.rs":"45a3228aaa1ab8b77f322dd4924b7383f1357e226ffc079846d67c0268389ea7","src/mach/header.rs":"ae15265e9f1a92eb9ba04333e5bb309f276e1300b87f43386f7829e820318938","src/mach/imports.rs":"2153269dfff32e23d72f76a82d658be06bd79b7e35d79b7e17115e4eb24b13d5","src/mach/load_command.rs":"6435666c46e875610375b92b1ab1e3fdc9f6048d51728d996dd4531329eb3d39","src/mach/mod.rs":"53ad219fd2265a5689ab38d5031722268eab6bbb649c75756e74295df4b611b7","src/mach/relocation.rs":"11b0b76ed7d997c87e396100515f931fe84473c228bed0e980fbab311530070a","src/mach/segment.rs":"0dc29bf42b25f60c7258bc8b757f6a862e846582dd6d2e70737933ad6334a0e4","src/mach/symbols.rs":"d2505fa8d65ea267abfcb6a9fc4d1acd47d5605aa6775935757e2fa8e92af507","src/pe/characteristic.rs":"6f810a6e5646b922cf7e3ca6d314677a4e1e1ad5695278c2b1b527a05f4299f3","src/pe/data_directories.rs":"d4e156f0c5b509860ceb3c7d42e1621e6c2143b90fc412806b3cefab1acc577a","src/pe/debug.rs":"3811c616a9b6d6b54e15348bb369b794bb89532e04fe19eca91b745d7c51a553","src/pe/exception.rs":"de2c9c07812ecd315c8400fc8fdcadc6a44d7a8be96e69a3f4ccf14ef8cf8426","src/pe/export.rs":"c98f5ce0b1b18bb87f06d1d41dbf70f443d65ecb1624cb23a1ef6c5f93a892e1","src/pe/header.rs":"f02a4beddc00ddd6624df7defc42991ceb507360b5aa1003cf33332c1c89a743","src/pe/import.rs":"855276e46c01ccd7631104e4d1265592e36c9468aadcacc937a40c29d94aabe3","src/pe/mod.rs":"f43524ceb77ad263a094eea038f81f010fc6f1de1d144f24218ee3224abab0fd","src/pe/optional_header.rs":"f3fb9aec04fccde3b765ec3d54cb27bfe636efceb94ddbe34e88098e28f56b55","src/pe/options
.rs":"b38f4e87f13ae381712621786f89e931452b2b4857a7bb6f140c4c21a63aa652","src/pe/relocation.rs":"c479b80bb1d6910f2168505dda4f2d8925b7edc34bed4e25d069546f88f52bb3","src/pe/section_table.rs":"d7144c7be3242d7aa653d22dca1cf15f7110f79a946a15cbe6ecf531e0cacb19","src/pe/symbol.rs":"9a65226c93c4499e21d094ceb838d58db706951580a1c43dfb36b95dbaff70f0","src/pe/utils.rs":"907cc565030db20f694c72a2a9b89ec8038e4f2593e6233b65d2a6854738e6d1","src/strtab.rs":"6d122084cf5d5244b2bd734b1d6d2c018116cc537ffc0c81d042d5b8815d7782","tests/bins/elf/gnu_hash/README.md":"52581e2ea7067a55bd8aedf4079200fb76448573ae9ffef7d886b9556e980db9"},"package":"572564d6cba7d09775202c8e7eebc4d534d5ae36578ab402fb21e182a0ac9505"}
{"files":{"CHANGELOG.md":"ade9f25d4bd1545f2ff2661d6a1301fe228cf2551a9cb27fcaa17c8119b73c8b","Cargo.toml":"09b271ef4ee3491cb1f6309cef8b60471b960c057c6e57fc90ed579adcc57453","LICENSE":"036bf6b6d6fd6dd1abda2ff6cdb672a63bdf32c468048720072910f2268a965f","README.md":"c09b08f3d5e7e33c4a8fd647708d313ee2ba98b165a1d077fb90f280dcb4da31","src/archive/mod.rs":"ae739638d7267011bedf51712516d3485171d8f2df2ab6746a0d942d86efd6a6","src/elf/compression_header.rs":"2eb5fdda9177c1c897310d86714967de019b39c6e23b1f3a890dd3a659be0acc","src/elf/constants_header.rs":"f2ede290ecacf60b1719e9994aa45612bf0f7baf63806a293d4530a674e5861a","src/elf/constants_relocation.rs":"a010071cd2a25ab71e0c7181eb1d9f417daa2d1ec25a09c74bd12ad944892225","src/elf/dynamic.rs":"c26e75311f2da9e34dc4c0a2120dfcc20df88a41d67c52b9bf703258de018fd8","src/elf/gnu_hash.rs":"7a9fcaf6cb38167d20527364bdf9bc2379c44dede5d7666275a1eb20dc665179","src/elf/header.rs":"3391a1fa9b8e3923f7ce74caff0668d8ddb5b34767bf3da309ff497fd81c34c7","src/elf/mod.rs":"2ee0faa0917deb5e90ca60e9c852434745a4c7f553e609e9603a57b7d55b739f","src/elf/note.rs":"bf5e45e2697f7700d5adbb52f890ea4c63b70b7077ca0e7c751420bb92923529","src/elf/program_header.rs":"4c322eb124c4e2bdeec4915067d2bb11fe9e7fba1811dc351a3f7581df121da0","src/elf/reloc.rs":"8b29162055b2846342b49e5e9e0a1482786fb92b4787bb9eb1c6d04f38b94e87","src/elf/section_header.rs":"f55f4d263f618bd1dec76ff0483f3b2dc3791c8e5c5c2b6ff296a5bc26001666","src/elf/sym.rs":"045c01107f4e100d6827cb819b82a28ea10c0d9bc00a1cdddb04a0865f1162ec","src/elf/symver.rs":"3f899201f64a702653d44288f860003e7acd75e38111d36479af823ed92b1341","src/error.rs":"af620a5692bca070dc727d49cdbb566a533bfb97724ca68932ae7fec7dc05cf6","src/lib.rs":"465eb53b540dfd142d204984ee7280130542d7f83d6c53691299d773f7394faf","src/mach/bind_opcodes.rs":"1dcacfb853d05c2c7e6dbb4509ee705a8ea645db0d334991a2293fef92eee851","src/mach/constants.rs":"c2a2381a0b9c3047d37582465e8965d995dca414d0da21fb7bcc6b8334e49eb6","src/mach/exports.rs":"d22122744673a3ce5f54b2b4b20bfa47d17378e64d3dda2858dd13add74ed3dc","src/mach/fat.rs":"45a3228aaa1ab8b77f322dd4924b7383f1357e226ffc079846d67c0268389ea7","src/mach/header.rs":"006619188f51fa43051dc04aa4b2ecd5f89136cf05cb6a7b23a228228008e6ae","src/mach/imports.rs":"2153269dfff32e23d72f76a82d658be06bd79b7e35d79b7e17115e4eb24b13d5","src/mach/load_command.rs":"0a689e774ae96212666165909c026037f22a3c4e3645250b9bae60c957d50ca4","src/mach/mod.rs":"53ad219fd2265a5689ab38d5031722268eab6bbb649c75756e74295df4b611b7","src/mach/relocation.rs":"11b0b76ed7d997c87e396100515f931fe84473c228bed0e980fbab311530070a","src/mach/segment.rs":"0dc29bf42b25f60c7258bc8b757f6a862e846582dd6d2e70737933ad6334a0e4","src/mach/symbols.rs":"d2505fa8d65ea267abfcb6a9fc4d1acd47d5605aa6775935757e2fa8e92af507","src/pe/authenticode.rs":"c3df9266c4f0a865e0da4b10fa1494eca083953fc4ded0b707b547a7d4ef296a","src/pe/certificate_table.rs":"75ab5dce6bc0c28d3687a5c119c0fa0d00e4796c8959a32d9d208f2369273c50","src/pe/characteristic.rs":"6f810a6e5646b922cf7e3ca6d314677a4e1e1ad5695278c2b1b527a05f4299f3","src/pe/data_directories.rs":"d4e156f0c5b509860ceb3c7d42e1621e6c2143b90fc412806b3cefab1acc577a","src/pe/debug.rs":"3811c616a9b6d6b54e15348bb369b794bb89532e04fe19eca91b745d7c51a553","src/pe/exception.rs":"de2c9c07812ecd315c8400fc8fdcadc6a44d7a8be96e69a3f4ccf14ef8cf8426","src/pe/export.rs":"c98f5ce0b1b18bb87f06d1d41dbf70f443d65ecb1624cb23a1ef6c5f93a892e1","src/pe/header.rs":"f02a4beddc00ddd6624df7defc42991ceb507360b5aa1003cf33332c1c89a743","src/pe/import.rs":"855276e46c01ccd7631104e4d1265592e36c9468aadcacc937a40c29d94aabe3","src
/pe/mod.rs":"ec958ee9a717672dec7b56d9d7d33e444c37eb781f299a920a60eb7fa39ef7a1","src/pe/optional_header.rs":"4fd94187fb343756817f23ccc58ec035a1b462b69457c706d9e2f11225d0cb1c","src/pe/options.rs":"b38f4e87f13ae381712621786f89e931452b2b4857a7bb6f140c4c21a63aa652","src/pe/relocation.rs":"c479b80bb1d6910f2168505dda4f2d8925b7edc34bed4e25d069546f88f52bb3","src/pe/section_table.rs":"d7144c7be3242d7aa653d22dca1cf15f7110f79a946a15cbe6ecf531e0cacb19","src/pe/symbol.rs":"9a65226c93c4499e21d094ceb838d58db706951580a1c43dfb36b95dbaff70f0","src/pe/utils.rs":"88e1cd9114c5d4ad58a09c39b312689de20ddd7382654ec660b00424f5c3129c","src/strtab.rs":"6d122084cf5d5244b2bd734b1d6d2c018116cc537ffc0c81d042d5b8815d7782","tests/bins/elf/gnu_hash/README.md":"52581e2ea7067a55bd8aedf4079200fb76448573ae9ffef7d886b9556e980db9"},"package":"f27c1b4369c2cd341b5de549380158b105a04c331be5db9110eef7b6d2742134"}


@ -3,7 +3,28 @@ All notable changes to this project will be documented in this file.
Before 1.0, this project does not adhere to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
Goblin is now 0.5, which means we will try our best to ease breaking changes. Tracking issue is here: https://github.com/m4b/goblin/issues/97
Goblin is now 0.7, which means we will try our best to ease breaking changes. Tracking issue is here: https://github.com/m4b/goblin/issues/97
## [0.7.0] - unreleased
### Breaking
mach: Implement `LC_NOTE`, (breakage=load commands are marked non-exhaustive), thanks @messense: https://github.com/m4b/goblin/pull/342
### Fixed
elf: fix is_lib detection, thanks @m-hilgendorf: https://github.com/m4b/goblin/pull/366
pe: fix out of bounds access while parsing AttributeCertificate, thanks @anfedotoff: https://github.com/m4b/goblin/pull/368
### Added
pe: support basic certificates enumeration, thanks @RaitoBezarius: https://github.com/m4b/goblin/pull/354
pe: fix certificate tables parsing, thanks @baloo: https://github.com/m4b/goblin/pull/359
pe: add pe authenticode support, thanks @baloo: https://github.com/m4b/goblin/pull/362
mach: implement `LC_FILESET_ENTRY`, thanks @mmaekr: https://github.com/m4b/goblin/pull/369
build: add afl fuzzing support, thanks @anfedotoff: https://github.com/m4b/goblin/pull/351
## [0.6.1] - 2023-2-26
### Fixed
elf.section_header: additional workaround for 0-length sections, thanks @Jhynjhiruu: https://github.com/m4b/goblin/pull/347
pe.utils: file alignment check, thanks @anfedotoff: https://github.com/m4b/goblin/pull/340
### Added
elf: Add basic GNU PROPERTY note support, thanks @x64k: https://github.com/m4b/goblin/pull/352
mach: Implement `LC_BUILD_VERSION`, thanks @messense: https://github.com/m4b/goblin/pull/341
## [0.6.0] - 2022-10-23
### Breaking


@ -11,9 +11,9 @@
[package]
edition = "2021"
rust-version = "1.56.0"
rust-version = "1.60.0"
name = "goblin"
version = "0.6.0"
version = "0.7.1"
authors = [
"m4b <m4b.github.io@gmail.com>",
"seu <seu@panopticon.re>",
@ -44,7 +44,6 @@ categories = [
]
license = "MIT"
repository = "https://github.com/m4b/goblin"
resolver = "2"
[dependencies.log]
version = "0.4"


@ -20,13 +20,13 @@ https://docs.rs/goblin/
### Usage
Goblin requires `rustc` 1.56.0 (Rust 2021 edition).
Goblin requires `rustc` 1.60.0 (Rust 2021 edition).
Add to your `Cargo.toml`
```toml
[dependencies]
goblin = "0.6"
goblin = "0.7"
```
### Features
@ -112,6 +112,7 @@ In lexicographic order:
- [@amanieu]
- [@anfedotoff]
- [@apalm]
- [@baloo]
- [@burjui]
- [@connorkuehl]
- [@dancrossnyc]
@ -125,6 +126,7 @@ In lexicographic order:
- [@jan-auer]
- [@jessehui]
- [@jdub]
- [@Jhynjhiruu]
- [@johannst]
- [@jrmuizel]
- [@jsgf]
@ -138,6 +140,8 @@ In lexicographic order:
- [@lumag]
- [@lzutao]
- [@lzybkr]
- [@m-hilgendorf]
- [@mmaekr]
- [@m4b]
- [@messense]
- [@mitsuhiko]
@ -154,6 +158,7 @@ In lexicographic order:
- [@Pzixel]
- [@quake]
- [@raindev]
- [@RaitoBezarius]
- [@rocallahan]
- [@sanxiyn]
- [@skdltmxn]
@ -173,6 +178,7 @@ In lexicographic order:
- [@woodruffw]
- [@wyxloading]
- [@xcoldhandsx]
- [@x64k]
<!-- Contributors -->
@ -181,6 +187,7 @@ In lexicographic order:
[@amanieu]: https://github.com/amanieu
[@anfedotoff]: https://github.com/anfedotoff
[@apalm]: https://github.com/apalm
[@baloo]: https://github.com/baloo
[@burjui]: https://github.com/burjui
[@connorkuehl]: https://github.com/connorkuehl
[@dancrossnyc]: https://github.com/dancrossnyc
@ -193,6 +200,7 @@ In lexicographic order:
[@jackcmay]: https://github.com/jackcmay
[@jan-auer]: https://github.com/jan-auer
[@jessehui]: https://github.com/jessehui
[@Jhynjhiruu]: https://github.com/Jhynjhiruu
[@johannst]: https://github.com/johannst
[@jdub]: https://github.com/jdub
[@jrmuizel]: https://github.com/jrmuizel
@ -207,6 +215,8 @@ In lexicographic order:
[@lumag]: https://github.com/lumag
[@lzutao]: https://github.com/lzutao
[@lzybkr]: https://github.com/lzybkr
[@m-hilgendorf]: https://github.com/m-hilgendorf
[@mmaekr]: https://github.com/mmaekr
[@m4b]: https://github.com/m4b
[@messense]: https://github.com/messense
[@mitsuhiko]: https://github.com/mitsuhiko
@ -223,6 +233,7 @@ In lexicographic order:
[@Pzixel]: https://github.com/Pzixel
[@quake]: https://github.com/quake
[@raindev]: https://github.com/raindev
[@RaitoBezarius]: https://github.com/RaitoBezarius
[@rocallahan]: https://github.com/rocallahan
[@sanxiyn]: https://github.com/sanxiyn
[@skdltmxn]: https://github.com/skdltmxn
@ -242,6 +253,7 @@ In lexicographic order:
[@woodruffw]: https://github.com/woodruffw
[@wyxloading]: https://github.com/wyxloading
[@xcoldhandsx]: https://github.com/xcoldhandsx
[@x64k]: https://github.com/x64k
## Contributing


@ -307,6 +307,7 @@ if_sylvan! {
strtab = get_strtab(&section_headers, shdr.sh_link as usize)?;
}
let mut is_pie = false;
let mut soname = None;
let mut libraries = vec![];
let mut rpaths = vec![];
@ -319,6 +320,8 @@ if_sylvan! {
let dynamic = Dynamic::parse(bytes, &program_headers, ctx)?;
if let Some(ref dynamic) = dynamic {
let dyn_info = &dynamic.info;
is_pie = dyn_info.flags_1 & dynamic::DF_1_PIE != 0;
dynstrtab = Strtab::parse(bytes,
dyn_info.strtab,
dyn_info.strsz,
@ -379,6 +382,8 @@ if_sylvan! {
let verdef = symver::VerdefSection::parse(bytes, &section_headers, ctx)?;
let verneed = symver::VerneedSection::parse(bytes, &section_headers, ctx)?;
let is_lib = misc.is_lib && !is_pie;
Ok(Elf {
header,
program_headers,
@ -399,7 +404,7 @@ if_sylvan! {
rpaths,
runpaths,
is_64: misc.is_64,
is_lib: misc.is_lib,
is_lib,
entry: misc.entry,
little_endian: misc.little_endian,
ctx: ctx,
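The hunk above changes how goblin classifies position-independent executables: is_lib is now true only for ET_DYN objects that do not set DF_1_PIE in DT_FLAGS_1, so PIE binaries are no longer reported as shared libraries. A short sketch of how a caller observes the new behaviour (function name and labels are illustrative):

use goblin::elf::{dynamic::DF_1_PIE, Elf};

fn classify(bytes: &[u8]) -> goblin::error::Result<&'static str> {
    let elf = Elf::parse(bytes)?;
    // With goblin 0.7 a PIE sets DF_1_PIE and is reported with is_lib == false.
    let pie = elf
        .dynamic
        .as_ref()
        .map_or(false, |d| d.info.flags_1 & DF_1_PIE != 0);
    Ok(if elf.is_lib {
        "shared library"
    } else if pie {
        "PIE executable"
    } else {
        "executable (or other)"
    })
}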


@ -42,6 +42,9 @@ pub const NT_GNU_BUILD_ID: u32 = 3;
/// Version note generated by GNU gold containing a version string.
pub const NT_GNU_GOLD_VERSION: u32 = 4;
/// Program property note
pub const NT_GNU_PROPERTY_TYPE_0: u32 = 5;
///Contains copy of prstatus struct.
pub const NT_PRSTATUS: u32 = 1;
@ -190,6 +193,7 @@ if_alloc! {
NT_GNU_HWCAP => "NT_GNU_HWCAP",
NT_GNU_BUILD_ID => "NT_GNU_BUILD_ID",
NT_GNU_GOLD_VERSION => "NT_GNU_GOLD_VERSION",
NT_GNU_PROPERTY_TYPE_0 => "NT_GNU_PROPERTY_0",
_ => "NT_UNKNOWN"
}
}
@ -232,15 +236,23 @@ if_alloc! {
mod tests {
use super::*;
static NOTE_DATA: [u8; 68] = [0x04, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x00, 0x47, 0x4e, 0x55, 0x00,
0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
0x06, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
0x04, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00,
0x03, 0x00, 0x00, 0x00, 0x47, 0x4e, 0x55, 0x00,
0xbc, 0xfc, 0x66, 0xcd, 0xc7, 0xd5, 0x14, 0x7b,
0x53, 0xb1, 0x10, 0x11, 0x94, 0x86, 0x8e, 0xf9,
0x4f, 0xe8, 0xdd, 0xdb];
static NOTE_DATA: [u8; 132] = [0x04, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x00, 0x47, 0x4e, 0x55, 0x00,
0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
0x06, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
0x04, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00,
0x03, 0x00, 0x00, 0x00, 0x47, 0x4e, 0x55, 0x00,
0xbc, 0xfc, 0x66, 0xcd, 0xc7, 0xd5, 0x14, 0x7b,
0x53, 0xb1, 0x10, 0x11, 0x94, 0x86, 0x8e, 0xf9,
0x4f, 0xe8, 0xdd, 0xdb, 0x04, 0x00, 0x00, 0x00,
0x30, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00,
0x47, 0x4E, 0x55, 0x00, 0x02, 0x80, 0x00, 0xC0,
0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0xC0,
0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x01, 0xC0,
0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00];
static CONTEXT: (usize, container::Ctx) = (4, container::Ctx {
container: container::Container::Big,
@ -259,24 +271,26 @@ if_alloc! {
#[test]
fn iter_single_section() {
let mut notes = NoteIterator {
iters: vec![make_note_iter(0, 68)],
iters: vec![make_note_iter(0, 132)],
index: 0,
};
assert_eq!(notes.next().unwrap().unwrap().n_type, NT_GNU_ABI_TAG);
assert_eq!(notes.next().unwrap().unwrap().n_type, NT_GNU_BUILD_ID);
assert_eq!(notes.next().unwrap().unwrap().n_type, NT_GNU_PROPERTY_TYPE_0);
assert!(notes.next().is_none());
}
#[test]
fn iter_multiple_sections() {
let mut notes = NoteIterator {
iters: vec![make_note_iter(0, 32), make_note_iter(32, 68)],
iters: vec![make_note_iter(0, 32), make_note_iter(32, 68), make_note_iter(68, 132)],
index: 0,
};
assert_eq!(notes.next().unwrap().unwrap().n_type, NT_GNU_ABI_TAG);
assert_eq!(notes.next().unwrap().unwrap().n_type, NT_GNU_BUILD_ID);
assert_eq!(notes.next().unwrap().unwrap().n_type, NT_GNU_PROPERTY_TYPE_0);
assert!(notes.next().is_none());
}
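The larger NOTE_DATA blob and the extra assertions above cover the new NT_GNU_PROPERTY_TYPE_0 note type (.note.gnu.property). A hedged sketch of scanning a binary's notes for it, assuming goblin's usual iter_note_headers accessor (which is not part of this diff):

use goblin::elf::{note::NT_GNU_PROPERTY_TYPE_0, Elf};

fn has_gnu_property_note(bytes: &[u8]) -> goblin::error::Result<bool> {
    let elf = Elf::parse(bytes)?;
    // iter_note_headers walks the PT_NOTE program headers, if there are any.
    if let Some(notes) = elf.iter_note_headers(bytes) {
        for note in notes {
            if note?.n_type == NT_GNU_PROPERTY_TYPE_0 {
                return Ok(true);
            }
        }
    }
    Ok(false)
}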


@ -22,6 +22,8 @@ pub const PT_NUM: u32 = 8;
pub const PT_LOOS: u32 = 0x6000_0000;
/// GCC .eh_frame_hdr segment
pub const PT_GNU_EH_FRAME: u32 = 0x6474_e550;
/// GNU property notes for linker and run-time loaders
pub const PT_GNU_PROPERTY: u32 = 0x6474_e553;
/// Indicates stack executability
pub const PT_GNU_STACK: u32 = 0x6474_e551;
/// Read-only after relocation
@ -69,6 +71,7 @@ pub fn pt_to_str(pt: u32) -> &'static str {
PT_NUM => "PT_NUM",
PT_LOOS => "PT_LOOS",
PT_GNU_EH_FRAME => "PT_GNU_EH_FRAME",
PT_GNU_PROPERTY => "PT_GNU_PROPERTY",
PT_GNU_STACK => "PT_GNU_STACK",
PT_GNU_RELRO => "PT_GNU_RELRO",
PT_SUNWBSS => "PT_SUNWBSS",


@ -415,7 +415,11 @@ if_alloc! {
/// Parse a REL or RELA section of size `filesz` from `offset`.
pub fn parse(bytes: &'a [u8], offset: usize, filesz: usize, is_rela: bool, ctx: Ctx) -> crate::error::Result<RelocSection<'a>> {
// TODO: better error message when too large (see symtab implementation)
let bytes = bytes.pread_with(offset, filesz)?;
let bytes = if filesz != 0 {
bytes.pread_with::<&'a [u8]>(offset, filesz)?
} else {
&[]
};
Ok(RelocSection {
bytes: bytes,


@ -470,7 +470,7 @@ if_alloc! {
Ok(section_headers)
}
pub fn check_size(&self, size: usize) -> error::Result<()> {
if self.sh_type == SHT_NOBITS {
if self.sh_type == SHT_NOBITS || self.sh_size == 0 {
return Ok(());
}
let (end, overflow) = self.sh_offset.overflowing_add(self.sh_size);


@ -139,6 +139,8 @@ pub const MH_DYLIB_STUB: u32 = 0x9;
pub const MH_DSYM: u32 = 0xa;
/// x86_64 kexts
pub const MH_KEXT_BUNDLE: u32 = 0xb;
/// set of mach-o's
pub const MH_FILESET: u32 = 0xc;
pub fn filetype_to_str(filetype: u32) -> &'static str {
match filetype {
@ -153,6 +155,7 @@ pub fn filetype_to_str(filetype: u32) -> &'static str {
MH_DYLIB_STUB => "DYLIB_STUB",
MH_DSYM => "DSYM",
MH_KEXT_BUNDLE => "KEXT_BUNDLE",
MH_FILESET => "FILESET",
_ => "UNKNOWN FILETYPE",
}
}


@ -1185,6 +1185,53 @@ pub struct EntryPointCommand {
pub const SIZEOF_ENTRY_POINT_COMMAND: usize = 24;
/// The build_version_command contains the min OS version on which this
/// binary was built to run for its platform. The list of known platforms and
/// tool values following it.
#[repr(C)]
#[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)]
pub struct BuildVersionCommand {
/// LC_BUILD_VERSION
pub cmd: u32,
pub cmdsize: u32,
/// platform
pub platform: u32,
/// X.Y.Z is encoded in nibbles xxxx.yy.zz
pub minos: u32,
/// X.Y.Z is encoded in nibbles xxxx.yy.zz
pub sdk: u32,
/// number of tool entries following this
pub ntools: u32,
}
/// Build tool version
#[repr(C)]
#[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)]
pub struct BuildToolVersion {
/// enum for the tool
pub tool: u32,
/// version number of the tool
pub version: u32,
}
/// The LC_FILESET_ENTRY command is used for Mach-O filesets which contain
/// multiple Mach-O's, such as the dyld shared cache and kernelcache
#[repr(C)]
#[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)]
pub struct FilesetEntryCommand {
/// LC_FILSET_ENTRY
pub cmd: u32,
pub cmdsize: u32,
/// memory address of the dylib
pub vmaddr: u64,
/// file offset of the dylib
pub fileoff: u64,
/// contained entry id
pub entry_id: LcStr,
/// reserved
pub reserved: u32,
}
/// The source_version_command is an optional load command containing
/// the version of the sources used to build the binary.
#[repr(C)]
@ -1211,6 +1258,22 @@ pub struct DataInCodeEntry {
pub kind: u16,
}
/// LC_NOTE commands describe a region of arbitrary data included in a Mach-O
/// file. Its initial use is to record extra data in MH_CORE files.
#[repr(C)]
#[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)]
pub struct NoteCommand {
/// LC_NOTE
pub cmd: u32,
pub cmdsize: u32,
/// owner name for this LC_NOTE
pub data_owner: [u8; 16],
/// file offset of this data
pub offset: u64,
/// length of data region
pub size: u64,
}
///////////////////////////////////////
// Constants, et. al
///////////////////////////////////////
@ -1269,6 +1332,20 @@ pub const LC_VERSION_MIN_TVOS: u32 = 0x2F;
pub const LC_VERSION_MIN_WATCHOS: u32 = 0x30;
pub const LC_NOTE: u32 = 0x31;
pub const LC_BUILD_VERSION: u32 = 0x32;
pub const LC_FILESET_ENTRY: u32 = 0x35 | LC_REQ_DYLD;
pub const PLATFORM_MACOS: u32 = 1;
pub const PLATFORM_IOS: u32 = 2;
pub const PLATFORM_TVOS: u32 = 3;
pub const PLATFORM_WATCHOS: u32 = 4;
pub const PLATFORM_BRIDGEOS: u32 = 5;
pub const PLATFORM_MACCATALYST: u32 = 6;
pub const PLATFORM_IOSSIMULATOR: u32 = 7;
pub const PLATFORM_TVOSSIMULATOR: u32 = 8;
pub const PLATFORM_WATCHOSSIMULATOR: u32 = 9;
pub const PLATFORM_DRIVERKIT: u32 = 10;
pub const TOOL_CLANG: u32 = 1;
pub const TOOL_SWIFT: u32 = 2;
pub const TOOL_LD: u32 = 3;
pub fn cmd_to_str(cmd: u32) -> &'static str {
match cmd {
@ -1323,6 +1400,7 @@ pub fn cmd_to_str(cmd: u32) -> &'static str {
LC_VERSION_MIN_WATCHOS => "LC_VERSION_MIN_WATCHOS",
LC_NOTE => "LC_NOTE",
LC_BUILD_VERSION => "LC_BUILD_VERSION",
LC_FILESET_ENTRY => "LC_FILESET_ENTRY",
LC_DYLD_EXPORTS_TRIE => "LC_DYLD_EXPORTS_TRIE",
LC_DYLD_CHAINED_FIXUPS => "LC_DYLD_CHAINED_FIXUPS",
_ => "LC_UNKNOWN",
@ -1335,6 +1413,7 @@ pub fn cmd_to_str(cmd: u32) -> &'static str {
#[derive(Debug)]
#[allow(clippy::large_enum_variant)]
#[non_exhaustive]
/// The various load commands as a cast-free variant/enum
pub enum CommandVariant {
Segment32(SegmentCommand32),
@ -1380,6 +1459,8 @@ pub enum CommandVariant {
DyldEnvironment(DylinkerCommand),
Main(EntryPointCommand),
DataInCode(LinkeditDataCommand),
BuildVersion(BuildVersionCommand),
FilesetEntry(FilesetEntryCommand),
SourceVersion(SourceVersionCommand),
DylibCodeSignDrs(LinkeditDataCommand),
LinkerOption(LinkeditDataCommand),
@ -1388,6 +1469,7 @@ pub enum CommandVariant {
VersionMinWatchos(VersionMinCommand),
DyldExportsTrie(LinkeditDataCommand),
DyldChainedFixups(LinkeditDataCommand),
Note(NoteCommand),
Unimplemented(LoadCommandHeader),
}
@ -1578,6 +1660,14 @@ impl<'a> ctx::TryFromCtx<'a, Endian> for CommandVariant {
let comm = bytes.pread_with::<LinkeditDataCommand>(0, le)?;
Ok((DataInCode(comm), size))
}
LC_BUILD_VERSION => {
let comm = bytes.pread_with::<BuildVersionCommand>(0, le)?;
Ok((BuildVersion(comm), size))
}
LC_FILESET_ENTRY => {
let comm = bytes.pread_with::<FilesetEntryCommand>(0, le)?;
Ok((FilesetEntry(comm), size))
}
LC_SOURCE_VERSION => {
let comm = bytes.pread_with::<SourceVersionCommand>(0, le)?;
Ok((SourceVersion(comm), size))
@ -1610,9 +1700,11 @@ impl<'a> ctx::TryFromCtx<'a, Endian> for CommandVariant {
let comm = bytes.pread_with::<LinkeditDataCommand>(0, le)?;
Ok((DyldChainedFixups(comm), size))
}
// TODO: LC_NOTE (NoteCommand) and LC_BUILD_VERSION (BuildVersionCommand)
// are unimplemented.
LC_NOTE | LC_BUILD_VERSION | _ => Ok((Unimplemented(lc), size)),
LC_NOTE => {
let comm = bytes.pread_with::<NoteCommand>(0, le)?;
Ok((Note(comm), size))
}
_ => Ok((Unimplemented(lc), size)),
}
}
}
@ -1664,6 +1756,8 @@ impl CommandVariant {
DyldEnvironment(comm) => comm.cmdsize,
Main(comm) => comm.cmdsize,
DataInCode(comm) => comm.cmdsize,
BuildVersion(comm) => comm.cmdsize,
FilesetEntry(comm) => comm.cmdsize,
SourceVersion(comm) => comm.cmdsize,
DylibCodeSignDrs(comm) => comm.cmdsize,
LinkerOption(comm) => comm.cmdsize,
@ -1672,6 +1766,7 @@ impl CommandVariant {
VersionMinWatchos(comm) => comm.cmdsize,
DyldExportsTrie(comm) => comm.cmdsize,
DyldChainedFixups(comm) => comm.cmdsize,
Note(comm) => comm.cmdsize,
Unimplemented(comm) => comm.cmdsize,
};
cmdsize as usize
@ -1722,6 +1817,8 @@ impl CommandVariant {
DyldEnvironment(comm) => comm.cmd,
Main(comm) => comm.cmd,
DataInCode(comm) => comm.cmd,
BuildVersion(comm) => comm.cmd,
FilesetEntry(comm) => comm.cmd,
SourceVersion(comm) => comm.cmd,
DylibCodeSignDrs(comm) => comm.cmd,
LinkerOption(comm) => comm.cmd,
@ -1730,6 +1827,7 @@ impl CommandVariant {
VersionMinWatchos(comm) => comm.cmd,
DyldExportsTrie(comm) => comm.cmd,
DyldChainedFixups(comm) => comm.cmd,
Note(comm) => comm.cmd,
Unimplemented(comm) => comm.cmd,
}
}
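With LC_BUILD_VERSION, LC_FILESET_ENTRY and LC_NOTE now parsed into dedicated CommandVariant arms rather than falling through to Unimplemented, callers can match on them directly. A hedged sketch (the printed fields are just an illustration):

use goblin::mach::{load_command::CommandVariant, MachO};

fn print_new_load_commands(bytes: &[u8]) -> goblin::error::Result<()> {
    let macho = MachO::parse(bytes, 0)?;
    for lc in &macho.load_commands {
        match &lc.command {
            // These used to come back as CommandVariant::Unimplemented before goblin 0.7.
            CommandVariant::BuildVersion(cmd) => {
                println!("LC_BUILD_VERSION: platform {} minos {:#x}", cmd.platform, cmd.minos)
            }
            CommandVariant::FilesetEntry(cmd) => {
                println!("LC_FILESET_ENTRY: dylib at {:#x}", cmd.vmaddr)
            }
            CommandVariant::Note(cmd) => println!("LC_NOTE: {} bytes of data", cmd.size),
            _ => {}
        }
    }
    Ok(())
}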


@ -0,0 +1,116 @@
// Reference:
// https://learn.microsoft.com/en-us/windows-hardware/drivers/install/authenticode
// https://download.microsoft.com/download/9/c/5/9c5b2167-8017-4bae-9fde-d599bac8184a/Authenticode_PE.docx
// Authenticode works by omitting sections of the PE binary from the digest
// those sections are:
// - checksum
// - data directory entry for certtable
// - certtable
use core::ops::Range;
use super::PE;
impl PE<'_> {
/// [`authenticode_ranges`] returns the various ranges of the binary that are relevant for
/// signature.
pub fn authenticode_ranges(&self) -> ExcludedSectionsIter<'_> {
ExcludedSectionsIter {
pe: self,
state: IterState::default(),
}
}
}
/// [`ExcludedSections`] holds the various ranges of the binary that are expected to be
/// excluded from the authenticode computation.
#[derive(Debug, Clone, Default)]
pub(super) struct ExcludedSections {
checksum: Range<usize>,
datadir_entry_certtable: Range<usize>,
certtable: Option<Range<usize>>,
}
impl ExcludedSections {
pub(super) fn new(
checksum: Range<usize>,
datadir_entry_certtable: Range<usize>,
certtable: Option<Range<usize>>,
) -> Self {
Self {
checksum,
datadir_entry_certtable,
certtable,
}
}
}
pub struct ExcludedSectionsIter<'s> {
pe: &'s PE<'s>,
state: IterState,
}
#[derive(Debug, PartialEq)]
enum IterState {
Initial,
DatadirEntry(usize),
CertTable(usize),
Final(usize),
Done,
}
impl Default for IterState {
fn default() -> Self {
Self::Initial
}
}
impl<'s> Iterator for ExcludedSectionsIter<'s> {
type Item = &'s [u8];
fn next(&mut self) -> Option<Self::Item> {
let bytes = &self.pe.bytes;
if let Some(sections) = self.pe.authenticode_excluded_sections.as_ref() {
loop {
match self.state {
IterState::Initial => {
self.state = IterState::DatadirEntry(sections.checksum.end);
return Some(&bytes[..sections.checksum.start]);
}
IterState::DatadirEntry(start) => {
self.state = IterState::CertTable(sections.datadir_entry_certtable.end);
return Some(&bytes[start..sections.datadir_entry_certtable.start]);
}
IterState::CertTable(start) => {
if let Some(certtable) = sections.certtable.as_ref() {
self.state = IterState::Final(certtable.end);
return Some(&bytes[start..certtable.start]);
} else {
self.state = IterState::Final(start)
}
}
IterState::Final(start) => {
self.state = IterState::Done;
return Some(&bytes[start..]);
}
IterState::Done => return None,
}
}
} else {
loop {
match self.state {
IterState::Initial => {
self.state = IterState::Done;
return Some(bytes);
}
IterState::Done => return None,
_ => {
self.state = IterState::Done;
}
}
}
}
}
}
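The new authenticode_ranges() iterator yields the slices of a PE image that participate in the Authenticode digest, i.e. everything except the checksum field, the certificate-table data-directory entry and the certificate table itself. A short sketch, not from this patch, that only measures how much data a verifier would hash (a real verifier would feed each slice, in order, into SHA-1 or SHA-256):

use goblin::pe::PE;

// How many bytes of `bytes` are covered by the Authenticode digest.
fn authenticode_input_len(bytes: &[u8]) -> goblin::error::Result<usize> {
    let pe = PE::parse(bytes)?;
    // Each item is a contiguous &[u8] slice of the original image, in file order,
    // with the excluded sections already skipped.
    Ok(pe.authenticode_ranges().map(|chunk| chunk.len()).sum())
}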


@ -0,0 +1,164 @@
/// Implements parsing of pe32's Attribute Certificate Table
/// See reference:
/// https://learn.microsoft.com/en-us/windows/win32/debug/pe-format#the-attribute-certificate-table-image-only
/// https://learn.microsoft.com/en-us/windows/win32/api/wintrust/ns-wintrust-win_certificate
use crate::error;
use scroll::Pread;
use alloc::string::ToString;
use alloc::vec::Vec;
#[repr(u16)]
#[derive(Debug, PartialEq, Copy, Clone)]
pub enum AttributeCertificateRevision {
/// WIN_CERT_REVISION_1_0
Revision1_0 = 0x0100,
/// WIN_CERT_REVISION_2_0
Revision2_0 = 0x0200,
}
impl TryFrom<u16> for AttributeCertificateRevision {
type Error = error::Error;
fn try_from(value: u16) -> Result<Self, Self::Error> {
Ok(match value {
x if x == AttributeCertificateRevision::Revision1_0 as u16 => {
AttributeCertificateRevision::Revision1_0
}
x if x == AttributeCertificateRevision::Revision2_0 as u16 => {
AttributeCertificateRevision::Revision2_0
}
_ => {
return Err(error::Error::Malformed(
"Invalid certificate attribute revision".to_string(),
))
}
})
}
}
#[repr(u16)]
#[derive(Debug)]
pub enum AttributeCertificateType {
/// WIN_CERT_TYPE_X509
X509 = 0x0001,
/// WIN_CERT_TYPE_PKCS_SIGNED_DATA
PkcsSignedData = 0x0002,
/// WIN_CERT_TYPE_RESERVED_1
Reserved1 = 0x0003,
/// WIN_CERT_TYPE_TS_STACK_SIGNED
TsStackSigned = 0x0004,
}
impl TryFrom<u16> for AttributeCertificateType {
type Error = error::Error;
fn try_from(value: u16) -> Result<Self, Self::Error> {
Ok(match value {
x if x == AttributeCertificateType::X509 as u16 => AttributeCertificateType::X509,
x if x == AttributeCertificateType::PkcsSignedData as u16 => {
AttributeCertificateType::PkcsSignedData
}
x if x == AttributeCertificateType::Reserved1 as u16 => {
AttributeCertificateType::Reserved1
}
x if x == AttributeCertificateType::TsStackSigned as u16 => {
AttributeCertificateType::TsStackSigned
}
_ => {
return Err(error::Error::Malformed(
"Invalid attribute certificate type".to_string(),
))
}
})
}
}
#[derive(Clone, Pread)]
struct AttributeCertificateHeader {
/// dwLength
length: u32,
revision: u16,
certificate_type: u16,
}
const CERTIFICATE_DATA_OFFSET: u32 = 8;
#[derive(Debug)]
pub struct AttributeCertificate<'a> {
pub length: u32,
pub revision: AttributeCertificateRevision,
pub certificate_type: AttributeCertificateType,
pub certificate: &'a [u8],
}
impl<'a> AttributeCertificate<'a> {
pub fn parse(
bytes: &'a [u8],
current_offset: &mut usize,
) -> Result<AttributeCertificate<'a>, error::Error> {
// `current_offset` is moved sizeof(AttributeCertificateHeader) = 8 bytes further.
let header: AttributeCertificateHeader = bytes.gread_with(current_offset, scroll::LE)?;
let cert_size = usize::try_from(header.length.saturating_sub(CERTIFICATE_DATA_OFFSET))
.map_err(|_err| {
error::Error::Malformed(
"Attribute certificate size do not fit in usize".to_string(),
)
})?;
if let Some(bytes) = bytes.get(*current_offset..(*current_offset + cert_size)) {
let attr = Self {
length: header.length,
revision: header.revision.try_into()?,
certificate_type: header.certificate_type.try_into()?,
certificate: bytes,
};
// Moving past the certificate data.
// Prevent the current_offset to wrap and ensure current_offset is strictly increasing.
*current_offset = current_offset.saturating_add(cert_size);
// Round to the next 8-bytes.
*current_offset = (*current_offset + 7) & !7;
Ok(attr)
} else {
Err(error::Error::Malformed(format!(
"Unable to extract certificate. Probably cert_size:{} is malformed",
cert_size
)))
}
}
}
pub type CertificateDirectoryTable<'a> = Vec<AttributeCertificate<'a>>;
pub(crate) fn enumerate_certificates(
bytes: &[u8],
table_virtual_address: u32,
table_size: u32,
) -> Result<CertificateDirectoryTable, error::Error> {
let table_start_offset = usize::try_from(table_virtual_address).map_err(|_err| {
error::Error::Malformed("Certificate table RVA do not fit in a usize".to_string())
})?;
// Here, we do not want wrapping semantics as it means that a too big table size or table start
// offset will provide table_end_offset such that table_end_offset < table_start_offset, which
// is not desirable at all.
let table_end_offset =
table_start_offset.saturating_add(usize::try_from(table_size).map_err(|_err| {
error::Error::Malformed("Certificate table size do not fit in a usize".to_string())
})?);
let mut current_offset = table_start_offset;
let mut attrs = vec![];
// End offset cannot be further than the binary we have at hand.
if table_end_offset > bytes.len() {
return Err(error::Error::Malformed(
"End of attribute certificates table is after the end of the PE binary".to_string(),
));
}
// This is guaranteed to terminate, either by a malformed error being returned
// or because current_offset >= table_end_offset by virtue of current_offset being strictly
// increasing through `AttributeCertificate::parse`.
while current_offset < table_end_offset {
attrs.push(AttributeCertificate::parse(bytes, &mut current_offset)?);
}
Ok(attrs)
}
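Certificates recovered from the Attribute Certificate Table are exposed through the new public certificates field on PE (a Vec<AttributeCertificate>), each entry keeping its raw, usually PKCS#7, blob. A short sketch of listing them (the printed format is illustrative):

use goblin::pe::PE;

fn list_certificates(bytes: &[u8]) -> goblin::error::Result<()> {
    let pe = PE::parse(bytes)?;
    // Empty when the binary has no certificate table data directory.
    for cert in &pe.certificates {
        println!(
            "revision {:?}, type {:?}, {} bytes of certificate data",
            cert.revision,
            cert.certificate_type,
            cert.certificate.len()
        );
    }
    Ok(())
}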


@ -5,6 +5,8 @@
use alloc::vec::Vec;
pub mod authenticode;
pub mod certificate_table;
pub mod characteristic;
pub mod data_directories;
pub mod debug;
@ -28,6 +30,9 @@ use log::debug;
#[derive(Debug)]
/// An analyzed PE32/PE32+ binary
pub struct PE<'a> {
/// Underlying bytes
bytes: &'a [u8],
authenticode_excluded_sections: Option<authenticode::ExcludedSections>,
/// The PE header
pub header: header::Header,
/// A list of the sections in this PE binary
@ -58,6 +63,8 @@ pub struct PE<'a> {
pub debug_data: Option<debug::DebugData<'a>>,
/// Exception handling and stack unwind information, if any, contained in the PE header
pub exception_data: Option<exception::ExceptionData<'a>>,
/// Certificates present, if any, described by the Certificate Table
pub certificates: certificate_table::CertificateDirectoryTable<'a>,
}
impl<'a> PE<'a> {
@ -69,11 +76,15 @@ impl<'a> PE<'a> {
/// Reads a PE binary from the underlying `bytes`
pub fn parse_with_opts(bytes: &'a [u8], opts: &options::ParseOptions) -> error::Result<Self> {
let header = header::Header::parse(bytes)?;
let mut authenticode_excluded_sections = None;
debug!("{:#?}", header);
let offset = &mut (header.dos_header.pe_pointer as usize
let optional_header_offset = header.dos_header.pe_pointer as usize
+ header::SIZEOF_PE_MAGIC
+ header::SIZEOF_COFF_HEADER
+ header.coff_header.size_of_optional_header as usize);
+ header::SIZEOF_COFF_HEADER;
let offset =
&mut (optional_header_offset + header.coff_header.size_of_optional_header as usize);
let sections = header.coff_header.sections(bytes, offset)?;
let is_lib = characteristic::is_dll(header.coff_header.characteristics);
let mut entry = 0;
@ -86,8 +97,41 @@ impl<'a> PE<'a> {
let mut libraries = vec![];
let mut debug_data = None;
let mut exception_data = None;
let mut certificates = Default::default();
let mut is_64 = false;
if let Some(optional_header) = header.optional_header {
// Sections we are assembling through the parsing, eventually, it will be passed
// to the authenticode_sections attribute of `PE`.
let (checksum, datadir_entry_certtable) = match optional_header.standard_fields.magic {
optional_header::MAGIC_32 => {
let standard_field_offset =
optional_header_offset + optional_header::SIZEOF_STANDARD_FIELDS_32;
let checksum_field_offset =
standard_field_offset + optional_header::OFFSET_WINDOWS_FIELDS_32_CHECKSUM;
(
checksum_field_offset..checksum_field_offset + 4,
optional_header_offset + 128..optional_header_offset + 136,
)
}
optional_header::MAGIC_64 => {
let standard_field_offset =
optional_header_offset + optional_header::SIZEOF_STANDARD_FIELDS_64;
let checksum_field_offset =
standard_field_offset + optional_header::OFFSET_WINDOWS_FIELDS_64_CHECKSUM;
(
checksum_field_offset..checksum_field_offset + 4,
optional_header_offset + 144..optional_header_offset + 152,
)
}
magic => {
return Err(error::Error::Malformed(format!(
"Unsupported header magic ({:#x})",
magic
)))
}
};
entry = optional_header.standard_fields.address_of_entry_point as usize;
image_base = optional_header.windows_fields.image_base as usize;
is_64 = optional_header.container()? == container::Container::Big;
@ -177,8 +221,32 @@ impl<'a> PE<'a> {
)?);
}
}
let certtable = if let Some(certificate_table) =
*optional_header.data_directories.get_certificate_table()
{
certificates = certificate_table::enumerate_certificates(
bytes,
certificate_table.virtual_address,
certificate_table.size,
)?;
let start = certificate_table.virtual_address as usize;
let end = start + certificate_table.size as usize;
Some(start..end)
} else {
None
};
authenticode_excluded_sections = Some(authenticode::ExcludedSections::new(
checksum,
datadir_entry_certtable,
certtable,
));
}
Ok(PE {
bytes,
authenticode_excluded_sections,
header,
sections,
size: 0,
@ -194,6 +262,7 @@ impl<'a> PE<'a> {
libraries,
debug_data,
exception_data,
certificates,
})
}
}
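A minimal consumer sketch for the new public `certificates` field (not part of the patch; the input file name is a placeholder):
// Editorial sketch: count the attribute certificates exposed by the new field.
use goblin::pe::PE;

fn count_certificates(bytes: &[u8]) -> Result<usize, goblin::error::Error> {
    let pe = PE::parse(bytes)?;
    // `pe.certificates` is the CertificateDirectoryTable populated from the
    // certificate table data directory, one entry per certificate blob.
    Ok(pe.certificates.len())
}

fn main() {
    // "example.exe" is a placeholder input, not a file shipped with the patch.
    let data = std::fs::read("example.exe").expect("failed to read input");
    match count_certificates(&data) {
        Ok(n) => println!("{n} attribute certificate(s) found"),
        Err(e) => eprintln!("parse error: {e}"),
    }
}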

View File

@ -120,6 +120,8 @@ pub struct WindowsFields32 {
}
pub const SIZEOF_WINDOWS_FIELDS_32: usize = 68;
/// Offset of the `check_sum` field in [`WindowsFields32`]
pub const OFFSET_WINDOWS_FIELDS_32_CHECKSUM: usize = 36;
/// 64-bit Windows specific fields
#[repr(C)]
@ -149,6 +151,8 @@ pub struct WindowsFields64 {
}
pub const SIZEOF_WINDOWS_FIELDS_64: usize = 88;
/// Offset of the `check_sum` field in [`WindowsFields64`]
pub const OFFSET_WINDOWS_FIELDS_64_CHECKSUM: usize = 40;
// /// Generic 32/64-bit Windows specific fields
// #[derive(Debug, PartialEq, Copy, Clone, Default)]

View File

@ -89,6 +89,9 @@ pub fn find_offset(
opts: &options::ParseOptions,
) -> Option<usize> {
if opts.resolve_rva {
if file_alignment == 0 || file_alignment & (file_alignment - 1) != 0 {
return None;
}
for (i, section) in sections.iter().enumerate() {
debug!(
"Checking {} for {:#x} ∈ {:#x}..{:#x}",

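The added check makes `find_offset` bail out when `file_alignment` is zero or not a power of two before resolving any RVA. The bit trick it uses, as a standalone sketch (not part of the patch):
// A non-zero value is a power of two exactly when clearing its lowest set bit
// (`x & (x - 1)`) leaves zero.
fn is_valid_file_alignment(x: u32) -> bool {
    x != 0 && x & (x - 1) == 0
}

fn main() {
    assert!(is_valid_file_alignment(0x200)); // a typical PE file alignment
    assert!(!is_valid_file_alignment(0)); // rejected: find_offset returns None
    assert!(!is_valid_file_alignment(0x201)); // rejected: not a power of two
}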
View File

@ -1 +1 @@
{"files":{"Cargo.toml":"d1193e8d228ceb5aa5792b8170c0cec8802489d66eb590bae693ae0a009c3bb9","LICENSE":"3234ac55816264ee7b6c7ee27efd61cf0a1fe775806870e3d9b4c41ea73c5cb1","README.md":"a673f0b4b5ac46034590a670572bd1a87837fdedb5170dabbea08d392e6cfa4b","build.rs":"df34c830dbb08eba3474304eed481bc2c8a29e897bc50f46d37b5dbb6e443a2b","src/lib.rs":"cc7f53556da6f53e5818e31330b488ad0de8d58096edf05f9f27e7f1159d1bfe","src/offset_of.rs":"9a2f9e8a7739a615df214738302bb74df584a53485a7f3536c0aca17ce936db3","src/raw_field.rs":"ef54087d5f507c2b639a4f61f2881eb1e41a46e22191ffd0e23b2fe9e3f17c25","src/span_of.rs":"b900faef2b852b52c37c55a172c05c9144bfff7d84dbc06e943fb0453d68adfc"},"package":"d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1"}
{"files":{"Cargo.toml":"9039cd9c621a44a65a2fe19c2e883a7b06c6801c64bb9d2255c94adc2051880f","LICENSE":"3234ac55816264ee7b6c7ee27efd61cf0a1fe775806870e3d9b4c41ea73c5cb1","README.md":"a673f0b4b5ac46034590a670572bd1a87837fdedb5170dabbea08d392e6cfa4b","build.rs":"df34c830dbb08eba3474304eed481bc2c8a29e897bc50f46d37b5dbb6e443a2b","src/lib.rs":"a91af41fef48edc5295a99cf90b14273e04db201ed65d88dea101496112c8cd5","src/offset_of.rs":"501f3eb9ec2ada6bd9cd18c73eafdd8ac75bfbfe7e7b543428d9bb97e1cbc478","src/raw_field.rs":"ef54087d5f507c2b639a4f61f2881eb1e41a46e22191ffd0e23b2fe9e3f17c25","src/span_of.rs":"b900faef2b852b52c37c55a172c05c9144bfff7d84dbc06e943fb0453d68adfc"},"package":"5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c"}

View File

@ -11,7 +11,7 @@
[package]
name = "memoffset"
version = "0.8.0"
version = "0.9.0"
authors = ["Gilad Naaman <gilad.naaman@gmail.com>"]
description = "offset_of functionality for Rust structs."
readme = "README.md"
@ -34,3 +34,4 @@ version = "1"
[features]
default = []
unstable_const = []
unstable_offset_of = []

View File

@ -61,6 +61,7 @@
feature(const_ptr_offset_from)
)]
#![cfg_attr(feature = "unstable_const", feature(const_refs_to_cell))]
#![cfg_attr(feature = "unstable_offset_of", feature(allow_internal_unstable))]
#[macro_use]
#[cfg(doctests)]

View File

@ -67,6 +67,28 @@ macro_rules! _memoffset_offset_from_unsafe {
($field as usize) - ($base as usize)
};
}
#[cfg(not(feature = "unstable_offset_of"))]
#[macro_export(local_inner_macros)]
#[doc(hidden)]
macro_rules! _memoffset__offset_of_impl {
($parent:path, $field:tt) => {{
// Get a base pointer (non-dangling if rustc supports `MaybeUninit`).
_memoffset__let_base_ptr!(base_ptr, $parent);
// Get field pointer.
let field_ptr = raw_field!(base_ptr, $parent, $field);
// Compute offset.
_memoffset_offset_from_unsafe!(field_ptr, base_ptr)
}};
}
#[cfg(feature = "unstable_offset_of")]
#[macro_export]
#[doc(hidden)]
#[allow_internal_unstable(offset_of)]
macro_rules! _memoffset__offset_of_impl {
($parent:path, $field:tt) => {{
$crate::__priv::mem::offset_of!($parent, $field)
}};
}
/// Calculates the offset of the specified field from the start of the named struct.
///
@ -98,14 +120,9 @@ macro_rules! _memoffset_offset_from_unsafe {
/// As a result, the value should not be retained and used between different compilations.
#[macro_export(local_inner_macros)]
macro_rules! offset_of {
($parent:path, $field:tt) => {{
// Get a base pointer (non-dangling if rustc supports `MaybeUninit`).
_memoffset__let_base_ptr!(base_ptr, $parent);
// Get field pointer.
let field_ptr = raw_field!(base_ptr, $parent, $field);
// Compute offset.
_memoffset_offset_from_unsafe!(field_ptr, base_ptr)
}};
($parent:path, $field:tt) => {
_memoffset__offset_of_impl!($parent, $field)
};
}
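Both branches of `_memoffset__offset_of_impl` produce the same value, so `offset_of!` behaves identically with or without the `unstable_offset_of` feature. A minimal usage sketch (not part of the patch):
use memoffset::offset_of;

#[repr(C)]
struct Header {
    magic: u32,
    length: u32,
    payload: [u8; 8],
}

fn main() {
    // With `unstable_offset_of` enabled on nightly this goes through
    // `core::mem::offset_of!`; otherwise the pointer-based fallback is used.
    assert_eq!(offset_of!(Header, magic), 0);
    assert_eq!(offset_of!(Header, length), 4);
    assert_eq!(offset_of!(Header, payload), 8);
}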
/// Calculates the offset of the specified field from the start of the tuple.
@ -131,6 +148,30 @@ macro_rules! offset_of_tuple {
}};
}
#[cfg(not(feature = "unstable_offset_of"))]
#[macro_export(local_inner_macros)]
#[doc(hidden)]
macro_rules! _memoffset__offset_of_union_impl {
($parent:path, $field:tt) => {{
// Get a base pointer (non-dangling if rustc supports `MaybeUninit`).
_memoffset__let_base_ptr!(base_ptr, $parent);
// Get field pointer.
let field_ptr = raw_field_union!(base_ptr, $parent, $field);
// Compute offset.
_memoffset_offset_from_unsafe!(field_ptr, base_ptr)
}};
}
#[cfg(feature = "unstable_offset_of")]
#[macro_export(local_inner_macros)]
#[doc(hidden)]
#[allow_internal_unstable(offset_of)]
macro_rules! _memoffset__offset_of_union_impl {
($parent:path, $field:tt) => {{
$crate::__priv::mem::offset_of!($parent, $field)
}};
}
/// Calculates the offset of the specified union member from the start of the union.
///
/// ## Examples
@ -155,12 +196,7 @@ macro_rules! offset_of_tuple {
#[macro_export(local_inner_macros)]
macro_rules! offset_of_union {
($parent:path, $field:tt) => {{
// Get a base pointer (non-dangling if rustc supports `MaybeUninit`).
_memoffset__let_base_ptr!(base_ptr, $parent);
// Get field pointer.
let field_ptr = raw_field_union!(base_ptr, $parent, $field);
// Compute offset.
_memoffset_offset_from_unsafe!(field_ptr, base_ptr)
_memoffset__offset_of_union_impl!($parent, $field)
}};
}
@ -312,7 +348,11 @@ mod tests {
assert_eq!(f_ptr as usize + 0, raw_field_union!(f_ptr, Foo, c) as usize);
}
#[cfg(any(feature = "unstable_const", stable_const))]
#[cfg(any(
feature = "unstable_const",
feature = "unstable_offset_of",
stable_const
))]
#[test]
fn const_offset() {
#[repr(C)]
@ -337,7 +377,11 @@ mod tests {
assert_eq!([0; offset_of!(Foo, b)].len(), 4);
}
#[cfg(any(feature = "unstable_const", stable_const))]
#[cfg(any(
feature = "unstable_const",
feature = "unstable_offset_of",
stable_const
))]
#[test]
fn const_fn_offset() {
const fn test_fn() -> usize {

View File

@ -1 +1 @@
{"files":{"Cargo.toml":"2e69a8cb1899e3681e9a9fc5dbdb3687b34e7a3a8e05039f2a96133d8f6a5dd6","README.md":"4c2a1448aab9177fd5f033faaf704af7bb222bf0804079fd3cff90fa1df4b812","src/errors/linux.rs":"daa23869d1ad317a7a20691a9e2712881952e1de6b2ae5c7e3c03c2775befd98","src/errors/macos.rs":"0283269623bacb90e15a9de90347a6a8cfaa4ac0784b516f9c6ff639e08f5060","src/errors/mod.rs":"f224af66124fd31a040c8da11bbab7b7795b48e4edea76e01c1f4dee537ea38a","src/errors/windows.rs":"15be6e938421f36eb082c1c6db8312c936a80b5d414925748f665f3c7a6245e5","src/format.rs":"7a243a4d9a5acc9853e53b0e6a97ee01416b8de6ef910c87a5c7f850c0ccccc7","src/lib.rs":"0900c00594b3c386b86127055889006f0d7d0004b08455fadb0e60d55a469cab","src/traits.rs":"93127ad69a849325ed66a0626e0bdae05868488f81c539d35c71a7bfbb9e51ac","src/utils.rs":"17e8777b05998a8149fc5168af3bca1e0f9aeffe28cb3d6dbfb89c546f75e5ed"},"package":"694717103b2c15f8c16ddfaec1333fe15673bc22b10ffa6164427415701974ba"}
{"files":{"Cargo.toml":"bb451578b35529f9bb5dd3543b3a29b99a29e1f849b72ce4a6a640642000b49a","LICENSE":"06de63df29199a394442b57a28e886059ddc940973e10646877a0793fd53e2c9","README.md":"4c2a1448aab9177fd5f033faaf704af7bb222bf0804079fd3cff90fa1df4b812","src/errors/linux.rs":"daa23869d1ad317a7a20691a9e2712881952e1de6b2ae5c7e3c03c2775befd98","src/errors/macos.rs":"0283269623bacb90e15a9de90347a6a8cfaa4ac0784b516f9c6ff639e08f5060","src/errors/mod.rs":"f224af66124fd31a040c8da11bbab7b7795b48e4edea76e01c1f4dee537ea38a","src/errors/windows.rs":"15be6e938421f36eb082c1c6db8312c936a80b5d414925748f665f3c7a6245e5","src/format.rs":"8d2447593f3a15228323389af31fb889759b5d2fb649811ad6ad0dfddbd75339","src/lib.rs":"0900c00594b3c386b86127055889006f0d7d0004b08455fadb0e60d55a469cab","src/traits.rs":"93127ad69a849325ed66a0626e0bdae05868488f81c539d35c71a7bfbb9e51ac","src/utils.rs":"17e8777b05998a8149fc5168af3bca1e0f9aeffe28cb3d6dbfb89c546f75e5ed"},"package":"9114b15d86ee5e5c3e3b4d05821d17237adbf98c11dd07fc8f5a9b037a010ee5"}

View File

@ -12,7 +12,7 @@
[package]
edition = "2018"
name = "minidump-common"
version = "0.15.2"
version = "0.17.0"
authors = ["Ted Mielczarek <ted@mielczarek.org>"]
description = "Some common types for working with minidump files."
homepage = "https://github.com/rust-minidump/rust-minidump"

View File

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2015-2023 rust-minidump contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -372,7 +372,7 @@ pub struct MINIDUMP_MODULE {
/// This struct matches the [Microsoft struct][msdn] of the same name.
///
/// [msdn]: https://docs.microsoft.com/en-us/windows/win32/api/minidumpapiset/ns-minidumpapiset-minidump_unloaded_module
#[derive(Debug, Clone, Default, Pread, SizeWith)]
#[derive(Debug, Clone, Default, Pread, Pwrite, SizeWith)]
pub struct MINIDUMP_UNLOADED_MODULE {
/// The base address of the executable image in memory (when it was loaded).
pub base_of_image: u64,
@ -659,7 +659,7 @@ impl<'a> scroll::ctx::TryFromCtx<'a, Endian> for CV_INFO_ELF {
}
/// Obsolete debug record type defined in WinNT.h.
#[derive(Debug, Clone, Pread, SizeWith)]
#[derive(Debug, Clone, Pread, Pwrite, SizeWith)]
pub struct IMAGE_DEBUG_MISC {
pub data_type: u32,
pub length: u32,
@ -921,7 +921,7 @@ pub struct XMM_SAVE_AREA32 {
///
/// This is defined as an anonymous struct inside an anonymous union in
/// the x86-64 CONTEXT struct in WinNT.h.
#[derive(Debug, Clone, Pread, SizeWith)]
#[derive(Debug, Clone, Pread, Pwrite, SizeWith)]
pub struct SSE_REGISTERS {
pub header: [u128; 2],
pub legacy: [u128; 8],
@ -1120,7 +1120,7 @@ pub struct CONTEXT_ARM64_OLD {
/// NOTE: if you ever decide to try to make this repr(C) and get really clever,
/// note that microsoft aligns this to 16 (and as of this writing, rust does
/// not consistently align u128 as such).
#[derive(Debug, Default, Clone, Pread, SizeWith)]
#[derive(Debug, Default, Clone, Pread, Pwrite, SizeWith)]
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
pub struct CONTEXT_ARM64 {
pub context_flags: u32,
@ -1159,7 +1159,7 @@ impl Arm64RegisterNumbers {
}
/// MIPS floating point state
#[derive(Debug, Default, Clone, Pread, SizeWith)]
#[derive(Debug, Default, Clone, Pread, Pwrite, SizeWith)]
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
pub struct FLOATING_SAVE_AREA_MIPS {
pub regs: [u64; 32],
@ -1170,7 +1170,7 @@ pub struct FLOATING_SAVE_AREA_MIPS {
/// A MIPS CPU context
///
/// This is a Breakpad extension, as there is no definition of `CONTEXT` for MIPS in WinNT.h.
#[derive(Debug, Default, Clone, Pread, SizeWith)]
#[derive(Debug, Default, Clone, Pread, Pwrite, SizeWith)]
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
pub struct CONTEXT_MIPS {
pub context_flags: u32,
@ -1227,7 +1227,7 @@ impl MipsRegisterNumbers {
}
/// PPC floating point state
#[derive(Debug, Clone, Pread, SizeWith)]
#[derive(Debug, Clone, Pread, Pwrite, SizeWith)]
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
pub struct FLOATING_SAVE_AREA_PPC {
pub fpregs: [u64; 32],
@ -1236,7 +1236,7 @@ pub struct FLOATING_SAVE_AREA_PPC {
}
/// PPC vector state
#[derive(Debug, Clone, Pread, SizeWith)]
#[derive(Debug, Clone, Pread, Pwrite, SizeWith)]
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
pub struct VECTOR_SAVE_AREA_PPC {
pub save_vr: [u128; 32],
@ -1249,7 +1249,7 @@ pub struct VECTOR_SAVE_AREA_PPC {
/// A PPC CPU context
///
/// This is a Breakpad extension, as there is no definition of `CONTEXT` for PPC in WinNT.h.
#[derive(Debug, Clone, Pread, SizeWith)]
#[derive(Debug, Clone, Pread, Pwrite, SizeWith)]
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
pub struct CONTEXT_PPC {
pub context_flags: u32,
@ -1276,7 +1276,7 @@ pub enum PpcRegisterNumbers {
/// A PPC64 CPU context
///
/// This is a Breakpad extension, as there is no definition of `CONTEXT` for PPC64 in WinNT.h.
#[derive(Debug, Clone, Pread, SizeWith)]
#[derive(Debug, Clone, Pread, Pwrite, SizeWith)]
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
pub struct CONTEXT_PPC64 {
pub context_flags: u64,
@ -1300,7 +1300,7 @@ pub enum Ppc64RegisterNumbers {
}
/// SPARC floating point state
#[derive(Debug, Clone, Pread, SizeWith)]
#[derive(Debug, Clone, Pread, Pwrite, SizeWith)]
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
pub struct FLOATING_SAVE_AREA_SPARC {
pub regs: [u64; 32],
@ -1311,7 +1311,7 @@ pub struct FLOATING_SAVE_AREA_SPARC {
/// A SPARC CPU context
///
/// This is a Breakpad extension, as there is no definition of `CONTEXT` for SPARC in WinNT.h.
#[derive(Debug, Clone, Pread, SizeWith)]
#[derive(Debug, Clone, Pread, Pwrite, SizeWith)]
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
pub struct CONTEXT_SPARC {
pub context_flags: u32,
@ -1403,7 +1403,7 @@ pub struct CPU_INFORMATION {
///
/// This struct matches the definition of the struct of the same name from minidumpapiset.h,
/// which is contained within the [`CPU_INFORMATION`] union.
#[derive(Debug, Clone, Pread, SizeWith)]
#[derive(Debug, Clone, Pread, Pwrite, SizeWith)]
pub struct X86CpuInfo {
pub vendor_id: [u32; 3],
pub version_information: u32,
@ -1412,7 +1412,7 @@ pub struct X86CpuInfo {
}
/// Arm-specific CPU information (Breakpad extension)
#[derive(Debug, Clone, Pread, SizeWith)]
#[derive(Debug, Clone, Pread, Pwrite, SizeWith)]
pub struct ARMCpuInfo {
pub cpuid: u32,
/// Hardware capabilities
@ -1425,7 +1425,7 @@ pub struct ARMCpuInfo {
///
/// This struct matches the definition of the struct of the same name from minidumpapiset.h,
/// which is contained within the [`CPU_INFORMATION`] union.
#[derive(Debug, Clone, Pread, SizeWith)]
#[derive(Debug, Clone, Pread, Pwrite, SizeWith)]
pub struct OtherCpuInfo {
pub processor_features: [u64; 2],
}
@ -1816,7 +1816,7 @@ bitflags! {
/// This struct matches the [Microsoft struct][msdn] of the same name.
///
/// [msdn]: https://docs.microsoft.com/en-us/windows/win32/api/minidumpapiset/ns-minidumpapiset-minidump_memory_info_list
#[derive(Debug, Clone, Pread, SizeWith)]
#[derive(Debug, Clone, Pread, Pwrite, SizeWith)]
pub struct MINIDUMP_MEMORY_INFO_LIST {
/// The size of this header
pub size_of_header: u32,
@ -1831,7 +1831,7 @@ pub struct MINIDUMP_MEMORY_INFO_LIST {
/// This struct matches the [Microsoft struct][msdn] of the same name.
///
/// [msdn]: https://docs.microsoft.com/en-us/windows/win32/api/minidumpapiset/ns-minidumpapiset-minidump_memory_info
#[derive(Debug, Clone, PartialEq, Eq, Pread, SizeWith)]
#[derive(Debug, Clone, PartialEq, Eq, Pread, Pwrite, SizeWith)]
pub struct MINIDUMP_MEMORY_INFO {
/// The base address of the region of pages
pub base_address: u64,
@ -1930,7 +1930,7 @@ bitflags! {
/// Taken from the definition in Breakpad's [minidump_format.h][fmt].
///
/// [fmt]: https://chromium.googlesource.com/breakpad/breakpad/+/88d8114fda3e4a7292654bd6ac0c34d6c88a8121/src/google_breakpad/common/minidump_format.h#998
#[derive(Debug, Clone, Pread, SizeWith)]
#[derive(Debug, Clone, Pread, Pwrite, SizeWith)]
pub struct MINIDUMP_ASSERTION_INFO {
/// The assertion that failed, as a 0-terminated UTF16-LE string
pub expression: [u16; 128],
@ -2055,7 +2055,7 @@ impl<'a> scroll::ctx::TryFromCtx<'a, Endian> for MINIDUMP_UTF8_STRING {
/// A key-value pair.
///
/// See <https://crashpad.chromium.org/doxygen/structcrashpad_1_1MinidumpSimpleStringDictionaryEntry.html>
#[derive(Clone, Debug, Pread, SizeWith)]
#[derive(Clone, Debug, Pread, Pwrite, SizeWith)]
pub struct MINIDUMP_SIMPLE_STRING_DICTIONARY_ENTRY {
/// RVA of a MinidumpUTF8String containing the key of a key-value pair.
pub key: RVA,
@ -2066,7 +2066,7 @@ pub struct MINIDUMP_SIMPLE_STRING_DICTIONARY_ENTRY {
/// A list of key-value pairs.
///
/// See <https://crashpad.chromium.org/doxygen/structcrashpad_1_1MinidumpSimpleStringDictionary.html>
#[derive(Clone, Debug, Pread)]
#[derive(Clone, Debug, Pread, Pwrite, SizeWith)]
pub struct MINIDUMP_SIMPLE_STRING_DICTIONARY {
/// The number of key-value pairs present.
pub count: u32,
@ -2075,7 +2075,7 @@ pub struct MINIDUMP_SIMPLE_STRING_DICTIONARY {
/// A list of RVA pointers.
///
/// See <https://crashpad.chromium.org/doxygen/structcrashpad_1_1MinidumpRVAList.html>
#[derive(Clone, Debug, Pread)]
#[derive(Clone, Debug, Pread, Pwrite, SizeWith)]
pub struct MINIDUMP_RVA_LIST {
/// The number of pointers present.
pub count: u32,
@ -2084,7 +2084,7 @@ pub struct MINIDUMP_RVA_LIST {
/// A typed annotation object.
///
/// See <https://crashpad.chromium.org/doxygen/structcrashpad_1_1MinidumpAnnotation.html>
#[derive(Clone, Debug, Pread)]
#[derive(Clone, Debug, Pread, Pwrite, SizeWith)]
pub struct MINIDUMP_ANNOTATION {
/// RVA of a MinidumpUTF8String containing the name of the annotation.
pub name: RVA,
@ -2125,7 +2125,7 @@ impl MINIDUMP_ANNOTATION {
/// or not.
///
/// See <https://crashpad.chromium.org/doxygen/structcrashpad_1_1MinidumpModuleCrashpadInfo.html>
#[derive(Clone, Debug, Pread)]
#[derive(Clone, Debug, Pread, Pwrite, SizeWith)]
pub struct MINIDUMP_MODULE_CRASHPAD_INFO {
/// The structures version number.
///
@ -2172,7 +2172,7 @@ impl MINIDUMP_MODULE_CRASHPAD_INFO {
/// module carried within a minidump file.
///
/// See <https://crashpad.chromium.org/doxygen/structcrashpad_1_1MinidumpModuleCrashpadInfoLink.html>
#[derive(Clone, Debug, Pread, SizeWith)]
#[derive(Clone, Debug, Pread, Pwrite, SizeWith)]
pub struct MINIDUMP_MODULE_CRASHPAD_INFO_LINK {
/// A link to a MINIDUMP_MODULE structure in the module list stream.
///
@ -2200,7 +2200,7 @@ pub struct MINIDUMP_MODULE_CRASHPAD_INFO_LINK {
/// `MinidumpModuleCrashpadInfo` structure.
///
/// See <https://crashpad.chromium.org/doxygen/structcrashpad_1_1MinidumpModuleCrashpadInfoList.html>
#[derive(Clone, Debug, Pread)]
#[derive(Clone, Debug, Pread, Pwrite, SizeWith)]
pub struct MINIDUMP_MODULE_CRASHPAD_INFO_LIST {
/// The number of key-value pairs present.
pub count: u32,
@ -2215,7 +2215,7 @@ pub struct MINIDUMP_MODULE_CRASHPAD_INFO_LIST {
/// or not.
///
/// See <https://crashpad.chromium.org/doxygen/structcrashpad_1_1MinidumpCrashpadInfo.html>
#[derive(Clone, Debug, Pread, SizeWith)]
#[derive(Clone, Debug, Pread, Pwrite, SizeWith)]
pub struct MINIDUMP_CRASHPAD_INFO {
/// The structures version number.
///
@ -2258,7 +2258,7 @@ impl MINIDUMP_CRASHPAD_INFO {
///
/// This is the format of the [`MINIDUMP_STREAM_TYPE::MozMacosCrashInfoStream`]. The individual
/// [`MINIDUMP_MAC_CRASH_INFO_RECORD`] entries follow this header in the stream.
#[derive(Debug, Pread, SizeWith)]
#[derive(Debug, Pread, Pwrite, SizeWith)]
pub struct MINIDUMP_MAC_CRASH_INFO {
pub stream_type: u32,
/// The number of [`MINIDUMP_MAC_CRASH_INFO_RECORD`]s.

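These structures gain a `Pwrite` derive alongside the existing `Pread`, so scroll can serialize them as well as parse them. A round-trip sketch with a stand-in struct (not part of the patch; assumes scroll's `derive` feature is enabled):
use scroll::{Pread, Pwrite, SizeWith, LE};

// A stand-in struct; the real minidump structures gain the same derives.
#[derive(Debug, Clone, Copy, PartialEq, Pread, Pwrite, SizeWith)]
struct Sample {
    base_address: u64,
    region_size: u32,
}

fn main() -> Result<(), scroll::Error> {
    let s = Sample { base_address: 0x7efd_96bc_4000, region_size: 0x26000 };
    let mut buf = [0u8; 12];
    // `Pwrite` (derived above) provides the serialization half...
    buf.pwrite_with(s, 0, LE)?;
    // ...matching the `Pread` deserialization that already existed.
    let back: Sample = buf.pread_with(0, LE)?;
    assert_eq!(back, s);
    Ok(())
}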
File diff suppressed because one or more lines are too long

View File

@ -0,0 +1 @@
* @Jake-Shadle

View File

@ -0,0 +1,22 @@
name: Security audit
on:
schedule:
# Runs at 00:00 UTC every day
- cron: "0 0 * * *"
push:
paths:
- "**/Cargo.toml"
- "**/Cargo.lock"
- "**/audit.toml"
jobs:
audit:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: deny audit
uses: EmbarkStudios/cargo-deny-action@v1
with:
command: check advisories

View File

@ -0,0 +1,86 @@
name: Continuous Integration
on:
push:
branches:
- main
- github-actions
pull_request:
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
lint:
name: Lint
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
with:
components: rustfmt,clippy
- name: rustfmt
run: cargo fmt --all -- --check
- name: clippy
run: cargo clippy --all-features --all-targets -- -D warnings
test:
name: Test
runs-on: ${{ matrix.job.os }}
strategy:
matrix:
job:
- { os: ubuntu-22.04, target: x86_64-unknown-linux-gnu }
- { os: ubuntu-22.04, target: x86_64-unknown-linux-musl }
- { os: windows-2022, target: x86_64-pc-windows-msvc }
- { os: macos-12, target: x86_64-apple-darwin }
# TODO: Add macos aarch64 here once it becomes available as a runner
steps:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
with:
target: ${{ matrix.job.target }}
- name: Fetch
run: cargo fetch --target ${{ matrix.job.target }}
- name: Build
run: cargo test --target ${{ matrix.job.target }} --no-run
- name: Test
run: cargo test --target ${{ matrix.job.target }}
# This job builds non-tier1 targets that aren't already tested
build_lower_tier:
name: Build sources
runs-on: ${{ matrix.job.os }}
strategy:
matrix:
job:
- { os: ubuntu-22.04, target: i686-unknown-linux-gnu, use-cross: true }
#- { os: ubuntu-latest, target: i686-unknown-linux-musl, use-cross: true }
- { os: ubuntu-22.04, target: aarch64-unknown-linux-gnu, use-cross: true }
- { os: ubuntu-22.04, target: aarch64-unknown-linux-musl, use-cross: true }
#- { os: ubuntu-22.04, target: aarch64-linux-android, use-cross: true }
- { os: ubuntu-22.04, target: arm-unknown-linux-gnueabi, use-cross: true }
- { os: ubuntu-22.04, target: arm-unknown-linux-musleabi, use-cross: true }
- { os: ubuntu-22.04, target: arm-linux-androideabi, use-cross: true }
- { os: ubuntu-22.04, target: arm-unknown-linux-gnueabihf, use-cross: true }
# TODO: Remove this when aarch64 macs can be used as runners
- { os: macos-12, target: aarch64-apple-darwin, use-cross: false }
steps:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
with:
target: ${{ matrix.job.target }}
#- name: Unit tests
- name: Build
uses: actions-rs/cargo@v1
with:
#command: test
command: build
use-cross: ${{ matrix.job.use-cross }}
args: --target ${{ matrix.job.target }} --verbose --all-targets
#args: --target ${{ matrix.job.target }} --verbose -- --nocapture

View File

@ -8,6 +8,19 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
<!-- next-header -->
## [Unreleased] - ReleaseDate
## [0.8.1] - 2023-06-21
### Added
- [PR#70](https://github.com/rust-minidump/minidump-writer/pull/70) resolved [#8](https://github.com/rust-minidump/minidump-writer/issues/8) by adding support for writing `MemoryInfoListStream` on Linux/Android targets; this allows minidump consumers to process minidumps more easily without needing to parse and understand Linux-specific information. Thanks [@afranchuk](https://github.com/afranchuk)!
- [PR#81](https://github.com/rust-minidump/minidump-writer/pull/81) stabilized `arm` and `aarch64` support for `unknown-linux` and `linux-android`, and added support for `x86_64-linux-android`.
### Changed
- [PR#70](https://github.com/rust-minidump/minidump-writer/pull/70) replaced the custom procfs parsing used when generating a minidump on Linux with the `procfs` crate, removing a bunch of code.
- [PR#80](https://github.com/rust-minidump/minidump-writer/pull/80) along with [PR#84](https://github.com/rust-minidump/minidump-writer/pull/84) replaced `procfs` with `procfs-core`, removing unneeded dependencies such as `windows-sys`.
### Fixed
- [PR#78](https://github.com/rust-minidump/minidump-writer/pull/78) resolved [#24](https://github.com/rust-minidump/minidump-writer/issues/24) by ignoring guard pages when dumping the stack to the minidump in the event of a stack overflow.
- [PR#83](https://github.com/rust-minidump/minidump-writer/pull/83) resolved [#82](https://github.com/rust-minidump/minidump-writer/issues/82) by correctly aligning a structure.
## [0.8.0] - 2023-04-03
### Removed
- [PR#77](https://github.com/rust-minidump/minidump-writer/pull/77) removed the dependency on `winapi`, all bindings are either part of `minidump-writer` or `crash-context` now.
@ -71,7 +84,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Initial release, including basic support for `x86_64-unknown-linux-gnu/musl` and `x86_64-pc-windows-msvc`
<!-- next-url -->
[Unreleased]: https://github.com/rust-minidump/minidump-writer/compare/0.8.0...HEAD
[Unreleased]: https://github.com/rust-minidump/minidump-writer/compare/0.8.1...HEAD
[0.8.1]: https://github.com/rust-minidump/minidump-writer/compare/0.8.0...0.8.1
[0.8.0]: https://github.com/rust-minidump/minidump-writer/compare/0.7.0...0.8.0
[0.7.0]: https://github.com/rust-minidump/minidump-writer/compare/0.6.0...0.7.0
[0.6.0]: https://github.com/rust-minidump/minidump-writer/compare/0.5.0...0.6.0

File diff suppressed because it is too large Load Diff

View File

@ -12,7 +12,7 @@
[package]
edition = "2021"
name = "minidump-writer"
version = "0.8.0"
version = "0.8.1"
authors = ["Martin Sirringhaus"]
description = "Rust rewrite of Breakpad's minidump_writer"
homepage = "https://github.com/rust-minidump/minidump-writer"
@ -20,40 +20,25 @@ readme = "README.md"
license = "MIT"
repository = "https://github.com/rust-minidump/minidump-writer"
[dependencies.byteorder]
version = "1.3.2"
[dependencies]
bitflags = "2.0"
byteorder = "1.3.2"
cfg-if = "1.0"
crash-context = "0.6.1"
memoffset = "0.9"
minidump-common = "0.17.0"
scroll = "0.11"
tempfile = "3.1.0"
thiserror = "1.0.21"
[dependencies.cfg-if]
version = "1.0"
[dependencies.crash-context]
version = "0.6"
[dependencies.memoffset]
version = "0.8"
[dependencies.minidump-common]
version = "0.15"
[dependencies.scroll]
version = "0.11"
[dependencies.tempfile]
version = "3.1.0"
[dependencies.thiserror]
version = "1.0.21"
[dev-dependencies]
memmap2 = "0.5"
minidump = "0.17.0"
[dev-dependencies.futures]
version = "0.3"
features = ["executor"]
[dev-dependencies.memmap2]
version = "0.5"
[dev-dependencies.minidump]
version = "0.15"
[target."cfg(any(target_os = \"linux\", target_os = \"android\"))".dependencies.nix]
version = "0.26"
features = [
@ -64,31 +49,33 @@ features = [
]
default-features = false
[target."cfg(target_os = \"macos\")".dependencies.mach2]
version = "0.4"
[target."cfg(any(target_os = \"linux\", target_os = \"android\"))".dependencies.procfs-core]
version = "0.16.0-RC1"
default-features = false
[target."cfg(target_os = \"macos\")".dependencies]
mach2 = "0.4"
[target."cfg(target_os = \"macos\")".dev-dependencies]
similar-asserts = "1.2"
uuid = "1.0"
[target."cfg(target_os = \"macos\")".dev-dependencies.dump_syms]
version = "2.0.0"
default-features = false
[target."cfg(target_os = \"macos\")".dev-dependencies.minidump-processor]
version = "0.15"
version = "0.17.0"
default-features = false
[target."cfg(target_os = \"macos\")".dev-dependencies.similar-asserts]
version = "1.2"
[target."cfg(target_os = \"macos\")".dev-dependencies.minidump-unwind]
version = "0.17"
features = ["debuginfo"]
[target."cfg(target_os = \"macos\")".dev-dependencies.uuid]
version = "1.0"
[target."cfg(target_os = \"windows\")".dependencies]
bitflags = "2.0"
[target."cfg(target_os = \"windows\")".dependencies.bitflags]
version = "2.0"
[target."cfg(unix)".dependencies.goblin]
version = "0.6"
[target."cfg(unix)".dependencies.libc]
version = "0.2.74"
[target."cfg(unix)".dependencies.memmap2]
version = "0.5"
[target."cfg(unix)".dependencies]
goblin = "0.7"
libc = "0.2.74"
memmap2 = "0.5"

View File

@ -109,7 +109,11 @@ mod linux {
let dumper = PtraceDumper::new(getppid().as_raw())?;
let mut mapping_count = 0;
for map in &dumper.mappings {
if map.name == Some(path.clone()) {
if map
.name
.as_ref()
.map_or(false, |name| name.to_string_lossy().starts_with(&path))
{
mapping_count += 1;
// This mapping should encompass the entire original mapped
// range.
@ -127,7 +131,7 @@ mod linux {
let mut dumper = PtraceDumper::new(ppid)?;
let mut found_linux_gate = false;
for mut mapping in dumper.mappings.clone() {
if mapping.name.as_deref() == Some(LINUX_GATE_LIBRARY_NAME) {
if mapping.name == Some(LINUX_GATE_LIBRARY_NAME.into()) {
found_linux_gate = true;
dumper.suspend_threads()?;
let id = PtraceDumper::elf_identifier_for_mapping(&mut mapping, ppid)?;
@ -148,7 +152,7 @@ mod linux {
test!(linux_gate_loc != 0, "linux_gate_loc == 0")?;
let mut found_linux_gate = false;
for mapping in &dumper.mappings {
if mapping.name.as_deref() == Some(LINUX_GATE_LIBRARY_NAME) {
if mapping.name == Some(LINUX_GATE_LIBRARY_NAME.into()) {
found_linux_gate = true;
test!(
linux_gate_loc == mapping.start_address.try_into()?,

View File

@ -116,9 +116,9 @@ fn parse_loaded_elf_program_headers(
pub fn late_process_mappings(pid: Pid, mappings: &mut [MappingInfo]) -> Result<()> {
// Only consider exec mappings that indicate a file path was mapped, and
// where the ELF header indicates a mapped shared library.
for mut map in mappings
for map in mappings
.iter_mut()
.filter(|m| m.executable && m.name.as_ref().map_or(false, |n| n.starts_with("/")))
.filter(|m| m.is_executable() && m.name_is_path())
{
let ehdr_opt = PtraceDumper::copy_from_process(
pid,

View File

@ -25,7 +25,7 @@ pub struct AuxvPair {
pub value: AuxvType,
}
/// An iterator across auxv pairs froom procfs.
/// An iterator across auxv pairs from procfs.
pub struct ProcfsAuxvIter {
pair_size: usize,
buf: Vec<u8>,
@ -85,11 +85,11 @@ impl Iterator for ProcfsAuxvIter {
};
let at_null;
#[cfg(target_arch = "arm")]
#[cfg(any(target_arch = "arm", all(target_os = "android", target_arch = "x86")))]
{
at_null = 0;
}
#[cfg(not(target_arch = "arm"))]
#[cfg(not(any(target_arch = "arm", all(target_os = "android", target_arch = "x86"))))]
{
at_null = libc::AT_NULL;
}

View File

@ -41,7 +41,7 @@ impl CrashContext {
{
let fs = &self.inner.float_state;
let mut out = &mut out.float_save;
let out = &mut out.float_save;
out.control_word = fs.cw;
out.status_word = fs.sw;
out.tag_word = fs.tag;

View File

@ -81,12 +81,12 @@ pub fn write_dso_debug_stream(
) -> Result<MDRawDirectory> {
let at_phnum;
let at_phdr;
#[cfg(target_arch = "arm")]
#[cfg(any(target_arch = "arm", all(target_os = "android", target_arch = "x86")))]
{
at_phdr = 3;
at_phnum = 5;
}
#[cfg(not(target_arch = "arm"))]
#[cfg(not(any(target_arch = "arm", all(target_os = "android", target_arch = "x86"))))]
{
at_phdr = libc::AT_PHDR;
at_phnum = libc::AT_PHNUM;

View File

@ -67,7 +67,7 @@ pub fn write_cpu_information(sys_info: &mut MDRawSystemInfo) -> Result<()> {
};
let mut is_first_entry = true;
for mut entry in cpu_info_table.iter_mut() {
for entry in cpu_info_table.iter_mut() {
if !is_first_entry && entry.found {
// except for the 'processor' field, ignore repeated values.
continue;

View File

@ -3,6 +3,8 @@ use crate::maps_reader::MappingInfo;
use crate::mem_writer::MemoryWriterError;
use crate::thread_info::Pid;
use goblin;
use nix::errno::Errno;
use std::ffi::OsString;
use thiserror::Error;
#[derive(Debug, Error)]
@ -15,6 +17,8 @@ pub enum InitError {
PrincipalMappingNotReferenced,
#[error("Failed Android specific late init")]
AndroidLateInitError(#[from] AndroidError),
#[error("Failed to read the page size")]
PageSizeError(#[from] Errno),
}
#[derive(Error, Debug)]
@ -28,20 +32,22 @@ pub enum MapsReaderError {
LinuxGateNotConvertable(#[from] std::num::TryFromIntError),
// get_mmap()
#[error("Not safe to open mapping {0}")]
NotSafeToOpenMapping(String),
#[error("Not safe to open mapping {}", .0.to_string_lossy())]
NotSafeToOpenMapping(OsString),
#[error("IO Error")]
FileError(#[from] std::io::Error),
#[error("Mmapped file empty or not an ELF file")]
MmapSanityCheckFailed,
#[error("Symlink does not match ({0} vs. {1}")]
#[error("Symlink does not match ({0} vs. {1})")]
SymlinkError(std::path::PathBuf, std::path::PathBuf),
// handle_deleted_file_in_mapping()
// fixup_deleted_file()
#[error("Couldn't parse as ELF file")]
ELFParsingFailed(#[from] goblin::error::Error),
#[error("No soname found (filename: {0}")]
NoSoName(String),
#[error("An anonymous mapping has no associated file")]
AnonymousMapping,
#[error("No soname found (filename: {})", .0.to_string_lossy())]
NoSoName(OsString),
}
#[derive(Debug, Error)]
@ -114,8 +120,8 @@ pub enum DumperError {
ELFParsingFailed(#[from] goblin::error::Error),
#[error("No build-id found")]
NoBuildIDFound,
#[error("Not safe to open mapping: {0}")]
NotSafeToOpenMapping(String),
#[error("Not safe to open mapping: {}", .0.to_string_lossy())]
NotSafeToOpenMapping(OsString),
#[error("Failed integer conversion")]
TryFromIntError(#[from] std::num::TryFromIntError),
#[error("Maps reader error")]
@ -144,6 +150,14 @@ pub enum SectionMappingsError {
GetEffectivePathError(MappingInfo, #[source] MapsReaderError),
}
#[derive(Debug, Error)]
pub enum SectionMemInfoListError {
#[error("Failed to write to memory")]
MemoryWriterError(#[from] MemoryWriterError),
#[error("Failed to read from procfs")]
ProcfsError(#[from] procfs_core::ProcError),
}
#[derive(Debug, Error)]
pub enum SectionMemListError {
#[error("Failed to write to memory")]
@ -210,6 +224,8 @@ pub enum WriterError {
SectionMemListError(#[from] SectionMemListError),
#[error("Failed when writing section SystemInfo")]
SectionSystemInfoError(#[from] SectionSystemInfoError),
#[error("Failed when writing section MemoryInfoList")]
SectionMemoryInfoListError(#[from] SectionMemInfoListError),
#[error("Failed when writing section ThreadList")]
SectionThreadListError(#[from] SectionThreadListError),
#[error("Failed when writing section ThreadNameList")]

View File

@ -4,11 +4,13 @@ use crate::thread_info::Pid;
use byteorder::{NativeEndian, ReadBytesExt};
use goblin::elf;
use memmap2::{Mmap, MmapOptions};
use procfs_core::process::{MMPermissions, MMapPath, MemoryMaps};
use std::ffi::{OsStr, OsString};
use std::os::unix::ffi::OsStrExt;
use std::{fs::File, mem::size_of, path::PathBuf};
pub const LINUX_GATE_LIBRARY_NAME: &str = "linux-gate.so";
pub const DELETED_SUFFIX: &str = " (deleted)";
pub const RESERVED_FLAGS: &str = "---p";
pub const DELETED_SUFFIX: &[u8] = b" (deleted)";
type Result<T> = std::result::Result<T, MapsReaderError>;
@ -34,9 +36,9 @@ pub struct MappingInfo {
// address range. The following structure holds the original mapping
// address range as reported by the operating system.
pub system_mapping_info: SystemMappingInfo,
pub offset: usize, // offset into the backed file.
pub executable: bool, // true if the mapping has the execute bit set.
pub name: Option<String>,
pub offset: usize, // offset into the backed file.
pub permissions: MMPermissions, // read, write and execute permissions.
pub name: Option<OsString>,
// pub elf_obj: Option<elf::Elf>,
}
@ -55,133 +57,94 @@ pub enum MappingInfoParsingResult {
Success(MappingInfo),
}
fn is_mapping_a_path(pathname: Option<&str>) -> bool {
fn is_mapping_a_path(pathname: Option<&OsStr>) -> bool {
match pathname {
Some(x) => x.contains('/'),
Some(x) => x.as_bytes().contains(&b'/'),
None => false,
}
}
impl MappingInfo {
pub fn parse_from_line(
line: &str,
linux_gate_loc: AuxvType,
last_mapping: Option<&mut MappingInfo>,
) -> Result<MappingInfoParsingResult> {
let mut last_whitespace = false;
// There is no `line.splitn_whitespace(6)`, so we have to do it somewhat manually
// Split at the first whitespace, trim of the rest.
let mut splits = line
.trim()
.splitn(6, |c: char| {
if c.is_whitespace() {
if last_whitespace {
return false;
}
last_whitespace = true;
true
} else {
last_whitespace = false;
false
}
})
.map(str::trim);
let address = splits
.next()
.ok_or(MapsReaderError::MapEntryMalformed("address"))?;
let perms = splits
.next()
.ok_or(MapsReaderError::MapEntryMalformed("permissions"))?;
let mut offset = usize::from_str_radix(
splits
.next()
.ok_or(MapsReaderError::MapEntryMalformed("offset"))?,
16,
)?;
let _dev = splits
.next()
.ok_or(MapsReaderError::MapEntryMalformed("dev"))?;
let _inode = splits
.next()
.ok_or(MapsReaderError::MapEntryMalformed("inode"))?;
let mut pathname = splits.next(); // Optional
// Due to our ugly `splitn_whitespace()` hack from above, we might have
// only trailing whitespaces as the name, so we it might still be "Some()"
if let Some(x) = pathname {
if x.is_empty() {
pathname = None;
}
}
let mut addresses = address.split('-');
let start_address = usize::from_str_radix(addresses.next().unwrap(), 16)?;
let end_address = usize::from_str_radix(addresses.next().unwrap(), 16)?;
let executable = perms.contains('x');
// Only copy name if the name is a valid path name, or if
// it's the VDSO image.
let is_path = is_mapping_a_path(pathname);
if !is_path && linux_gate_loc != 0 && start_address == linux_gate_loc.try_into()? {
pathname = Some(LINUX_GATE_LIBRARY_NAME);
offset = 0;
}
match (pathname, last_mapping) {
(Some(_name), Some(module)) => {
// Merge adjacent mappings into one module, assuming they're a single
// library mapped by the dynamic linker.
if (start_address == module.start_address + module.size)
&& (pathname == module.name.as_deref())
{
module.system_mapping_info.end_address = end_address;
module.size = end_address - module.start_address;
module.executable |= executable;
return Ok(MappingInfoParsingResult::SkipLine);
}
}
(None, Some(module)) => {
// Also merge mappings that result from address ranges that the
// linker reserved but which a loaded library did not use. These
// appear as an anonymous private mapping with no access flags set
// and which directly follow an executable mapping.
let module_end_address = module.start_address + module.size;
if (start_address == module_end_address)
&& module.executable
&& is_mapping_a_path(module.name.as_deref())
&& (offset == 0 || offset == module_end_address)
&& perms == RESERVED_FLAGS
{
module.size = end_address - module.start_address;
return Ok(MappingInfoParsingResult::SkipLine);
}
}
_ => (),
}
let name = pathname.map(ToOwned::to_owned);
let info = MappingInfo {
start_address,
size: end_address - start_address,
system_mapping_info: SystemMappingInfo {
start_address,
end_address,
},
offset,
executable,
name,
// elf_obj,
};
Ok(MappingInfoParsingResult::Success(info))
/// Return whether the `name` field is a path (contains a `/`).
pub fn name_is_path(&self) -> bool {
is_mapping_a_path(self.name.as_deref())
}
pub fn get_mmap(name: &Option<String>, offset: usize) -> Result<Mmap> {
pub fn aggregate(memory_maps: MemoryMaps, linux_gate_loc: AuxvType) -> Result<Vec<Self>> {
let mut infos = Vec::<Self>::new();
for mm in memory_maps {
let start_address: usize = mm.address.0.try_into()?;
let end_address: usize = mm.address.1.try_into()?;
let mut offset: usize = mm.offset.try_into()?;
let mut pathname: Option<OsString> = match mm.pathname {
MMapPath::Path(p) => Some(p.into()),
MMapPath::Heap => Some("[heap]".into()),
MMapPath::Stack => Some("[stack]".into()),
MMapPath::TStack(i) => Some(format!("[stack:{i}]").into()),
MMapPath::Vdso => Some("[vdso]".into()),
MMapPath::Vvar => Some("[vvar]".into()),
MMapPath::Vsyscall => Some("[vsyscall]".into()),
MMapPath::Rollup => Some("[rollup]".into()),
MMapPath::Vsys(i) => Some(format!("/SYSV{i:x}").into()),
MMapPath::Other(n) => Some(format!("[{n}]").into()),
MMapPath::Anonymous => None,
};
let is_path = is_mapping_a_path(pathname.as_deref());
if !is_path && linux_gate_loc != 0 && start_address == linux_gate_loc.try_into()? {
pathname = Some(LINUX_GATE_LIBRARY_NAME.into());
offset = 0;
}
// Merge adjacent mappings into one module, assuming they're a single
// library mapped by the dynamic linker.
if let Some(module) = infos.last_mut() {
if pathname.is_some() {
if (start_address == module.start_address + module.size)
&& (pathname == module.name)
{
module.system_mapping_info.end_address = end_address;
module.size = end_address - module.start_address;
module.permissions |= mm.perms;
continue;
}
} else {
// Also merge mappings that result from address ranges that the
// linker reserved but which a loaded library did not use. These
// appear as anonymous private mappings with no access flags set
// that directly follow an executable mapping.
let module_end_address = module.start_address + module.size;
if (start_address == module_end_address)
&& module.is_executable()
&& is_mapping_a_path(module.name.as_deref())
&& (offset == 0 || offset == module_end_address)
&& mm.perms == MMPermissions::PRIVATE
{
module.size = end_address - module.start_address;
continue;
}
}
}
infos.push(MappingInfo {
start_address,
size: end_address - start_address,
system_mapping_info: SystemMappingInfo {
start_address,
end_address,
},
offset,
permissions: mm.perms,
name: pathname,
});
}
Ok(infos)
}
pub fn get_mmap(name: &Option<OsString>, offset: usize) -> Result<Mmap> {
if !MappingInfo::is_mapped_file_safe_to_open(name) {
return Err(MapsReaderError::NotSafeToOpenMapping(
name.clone().unwrap_or_default(),
@ -203,12 +166,25 @@ impl MappingInfo {
Ok(mapped_file)
}
pub fn handle_deleted_file_in_mapping(path: &str, pid: Pid) -> Result<String> {
/// Check whether the mapping refers to a deleted file, and if so try to find the file
/// elsewhere and return that path.
///
/// Currently this only supports fixing a deleted file that was the main exe of the given
/// `pid`.
///
/// Returns a tuple whose first element is the file path (possibly different from
/// `self.name`) and whose second element is the original file path if a different path was
/// used. If no mapping name exists, returns an error.
pub fn fixup_deleted_file(&self, pid: Pid) -> Result<(OsString, Option<&OsStr>)> {
// Check for ' (deleted)' in |path|.
// |path| has to be at least as long as "/x (deleted)".
if !path.ends_with(DELETED_SUFFIX) {
return Ok(path.to_string());
}
let Some(path) = &self.name else {
return Err(MapsReaderError::AnonymousMapping);
};
let Some(old_path) = path.as_bytes().strip_suffix(DELETED_SUFFIX) else {
return Ok((path.clone(), None));
};
// Check |path| against the /proc/pid/exe 'symlink'.
let exe_link = format!("/proc/{}/exe", pid);
@ -218,7 +194,7 @@ impl MappingInfo {
// if (!GetMappingAbsolutePath(new_mapping, new_path))
// return false;
if link_path != PathBuf::from(path) {
if &link_path != path {
return Err(MapsReaderError::SymlinkError(
PathBuf::from(path),
link_path,
@ -233,7 +209,7 @@ impl MappingInfo {
// return Err("".into());
// }
// }
Ok(exe_link)
Ok((exe_link.into(), Some(OsStr::from_bytes(old_path))))
}
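A standalone sketch of the `DELETED_SUFFIX` handling described in the doc comment above (a hypothetical helper, not the patch's implementation):
use std::ffi::{OsStr, OsString};
use std::os::unix::ffi::OsStrExt;

// Strip the kernel's " (deleted)" marker from a mapping name, returning the
// original path when the marker is present.
fn strip_deleted_suffix(name: &OsStr) -> Option<&OsStr> {
    name.as_bytes()
        .strip_suffix(b" (deleted)")
        .map(OsStr::from_bytes)
}

fn main() {
    let name = OsString::from("/usr/bin/some-exe (deleted)");
    assert_eq!(
        strip_deleted_suffix(&name),
        Some(OsStr::new("/usr/bin/some-exe"))
    );
    assert_eq!(strip_deleted_suffix(OsStr::new("/usr/bin/cat")), None);
}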
pub fn stack_has_pointer_to_mapping(&self, stack_copy: &[u8], sp_offset: usize) -> bool {
@ -269,13 +245,13 @@ impl MappingInfo {
false
}
pub fn is_mapped_file_safe_to_open(name: &Option<String>) -> bool {
pub fn is_mapped_file_safe_to_open(name: &Option<OsString>) -> bool {
// It is unsafe to attempt to open a mapped file that lives under /dev,
// because the semantics of the open may be driver-specific so we'd risk
// hanging the crash dumper. And a file in /dev/ almost certainly has no
// ELF file identifier anyways.
if let Some(name) = name {
if name.starts_with("/dev/") {
if name.as_bytes().starts_with(b"/dev/") {
return false;
}
}
@ -291,13 +267,13 @@ impl MappingInfo {
let elf_obj = elf::Elf::parse(&mapped_file)?;
let soname = elf_obj.soname.ok_or_else(|| {
MapsReaderError::NoSoName(self.name.clone().unwrap_or_else(|| "None".to_string()))
MapsReaderError::NoSoName(self.name.clone().unwrap_or_else(|| "None".into()))
})?;
Ok(soname.to_string())
}
pub fn get_mapping_effective_name_and_path(&self) -> Result<(String, String)> {
let mut file_path = self.name.clone().unwrap_or_default();
pub fn get_mapping_effective_path_and_name(&self) -> Result<(PathBuf, String)> {
let mut file_path = PathBuf::from(self.name.clone().unwrap_or_default());
// Tools such as minidump_stackwalk use the name of the module to look up
// symbols produced by dump_syms. dump_syms will prefer to use a module's
@ -310,28 +286,24 @@ impl MappingInfo {
} else {
// file_path := /path/to/libname.so
// file_name := libname.so
// SAFETY: The unwrap is safe as rsplit always returns at least one item
let file_name = file_path.rsplit('/').next().unwrap().to_owned();
let file_name = file_path
.file_name()
.map(|s| s.to_string_lossy().into_owned())
.unwrap_or_default();
return Ok((file_path, file_name));
};
if self.executable && self.offset != 0 {
if self.is_executable() && self.offset != 0 {
// If an executable is mapped from a non-zero offset, this is likely because
// the executable was loaded directly from inside an archive file (e.g., an
// apk on Android).
// In this case, we append the file_name to the mapped archive path:
// file_name := libname.so
// file_path := /path/to/ARCHIVE.APK/libname.so
file_path = format!("{}/{}", file_path, file_name);
file_path.push(&file_name);
} else {
// Otherwise, replace the basename with the SONAME.
let split: Vec<_> = file_path.rsplitn(2, '/').collect();
if split.len() == 2 {
// NOTE: rsplitn reverses the order, so the remainder is the last item
file_path = format!("{}/{}", split[1], file_name);
} else {
file_path = file_name.clone();
}
file_path.set_file_name(&file_name);
}
Ok((file_path, file_name))
@ -356,7 +328,7 @@ impl MappingInfo {
self.name.is_some() &&
// Only want to include one mapping per shared lib.
// Avoid filtering executable mappings.
(self.offset == 0 || self.executable) &&
(self.offset == 0 || self.is_executable()) &&
// big enough to get a signature for.
self.size >= 4096
}
@ -365,84 +337,92 @@ impl MappingInfo {
self.system_mapping_info.start_address <= address
&& address < self.system_mapping_info.end_address
}
pub fn is_executable(&self) -> bool {
self.permissions.contains(MMPermissions::EXECUTE)
}
pub fn is_readable(&self) -> bool {
self.permissions.contains(MMPermissions::READ)
}
pub fn is_writable(&self) -> bool {
self.permissions.contains(MMPermissions::WRITE)
}
}
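With the switch to procfs-core, the single `executable` flag becomes a full `MMPermissions` bit set, and the helpers above recover the old boolean checks. A standalone sketch (not part of the patch):
use procfs_core::process::MMPermissions;

fn main() {
    // Permissions as parsed from an "r-xp" maps entry.
    let perms = MMPermissions::READ | MMPermissions::EXECUTE | MMPermissions::PRIVATE;
    // Equivalent of the former `mapping.executable` field.
    assert!(perms.contains(MMPermissions::EXECUTE));
    assert!(!perms.contains(MMPermissions::WRITE));
}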
#[cfg(test)]
#[cfg(target_pointer_width = "64")] // All addresses are 64 bit and I'm currently too lazy to adjust it to work for both
mod tests {
use super::*;
use procfs_core::FromRead;
fn get_lines_and_loc() -> (Vec<&'static str>, u64) {
(vec![
"5597483fc000-5597483fe000 r--p 00000000 00:31 4750073 /usr/bin/cat",
"5597483fe000-559748402000 r-xp 00002000 00:31 4750073 /usr/bin/cat",
"559748402000-559748404000 r--p 00006000 00:31 4750073 /usr/bin/cat",
"559748404000-559748405000 r--p 00007000 00:31 4750073 /usr/bin/cat",
"559748405000-559748406000 rw-p 00008000 00:31 4750073 /usr/bin/cat",
"559749b0e000-559749b2f000 rw-p 00000000 00:00 0 [heap]",
"7efd968d3000-7efd968f5000 rw-p 00000000 00:00 0",
"7efd968f5000-7efd9694a000 r--p 00000000 00:31 5004638 /usr/lib/locale/en_US.utf8/LC_CTYPE",
"7efd9694a000-7efd96bc2000 r--p 00000000 00:31 5004373 /usr/lib/locale/en_US.utf8/LC_COLLATE",
"7efd96bc2000-7efd96bc4000 rw-p 00000000 00:00 0",
"7efd96bc4000-7efd96bea000 r--p 00000000 00:31 4996104 /lib64/libc-2.32.so",
"7efd96bea000-7efd96d39000 r-xp 00026000 00:31 4996104 /lib64/libc-2.32.so",
"7efd96d39000-7efd96d85000 r--p 00175000 00:31 4996104 /lib64/libc-2.32.so",
"7efd96d85000-7efd96d86000 ---p 001c1000 00:31 4996104 /lib64/libc-2.32.so",
"7efd96d86000-7efd96d89000 r--p 001c1000 00:31 4996104 /lib64/libc-2.32.so",
"7efd96d89000-7efd96d8c000 rw-p 001c4000 00:31 4996104 /lib64/libc-2.32.so",
"7efd96d8c000-7efd96d92000 ---p 00000000 00:00 0",
"7efd96da0000-7efd96da1000 r--p 00000000 00:31 5004379 /usr/lib/locale/en_US.utf8/LC_NUMERIC",
"7efd96da1000-7efd96da2000 r--p 00000000 00:31 5004382 /usr/lib/locale/en_US.utf8/LC_TIME",
"7efd96da2000-7efd96da3000 r--p 00000000 00:31 5004377 /usr/lib/locale/en_US.utf8/LC_MONETARY",
"7efd96da3000-7efd96da4000 r--p 00000000 00:31 5004376 /usr/lib/locale/en_US.utf8/LC_MESSAGES/SYS_LC_MESSAGES",
"7efd96da4000-7efd96da5000 r--p 00000000 00:31 5004380 /usr/lib/locale/en_US.utf8/LC_PAPER",
"7efd96da5000-7efd96da6000 r--p 00000000 00:31 5004378 /usr/lib/locale/en_US.utf8/LC_NAME",
"7efd96da6000-7efd96da7000 r--p 00000000 00:31 5004372 /usr/lib/locale/en_US.utf8/LC_ADDRESS",
"7efd96da7000-7efd96da8000 r--p 00000000 00:31 5004381 /usr/lib/locale/en_US.utf8/LC_TELEPHONE",
"7efd96da8000-7efd96da9000 r--p 00000000 00:31 5004375 /usr/lib/locale/en_US.utf8/LC_MEASUREMENT",
"7efd96da9000-7efd96db0000 r--s 00000000 00:31 5004639 /usr/lib64/gconv/gconv-modules.cache",
"7efd96db0000-7efd96db1000 r--p 00000000 00:31 5004374 /usr/lib/locale/en_US.utf8/LC_IDENTIFICATION",
"7efd96db1000-7efd96db2000 r--p 00000000 00:31 4996100 /lib64/ld-2.32.so",
"7efd96db2000-7efd96dd3000 r-xp 00001000 00:31 4996100 /lib64/ld-2.32.so",
"7efd96dd3000-7efd96ddc000 r--p 00022000 00:31 4996100 /lib64/ld-2.32.so",
"7efd96ddc000-7efd96ddd000 r--p 0002a000 00:31 4996100 /lib64/ld-2.32.so",
"7efd96ddd000-7efd96ddf000 rw-p 0002b000 00:31 4996100 /lib64/ld-2.32.so",
"7ffc6dfda000-7ffc6dffb000 rw-p 00000000 00:00 0 [stack]",
"7ffc6e0f3000-7ffc6e0f7000 r--p 00000000 00:00 0 [vvar]",
"7ffc6e0f7000-7ffc6e0f9000 r-xp 00000000 00:00 0 [vdso]",
"ffffffffff600000-ffffffffff601000 --xp 00000000 00:00 0 [vsyscall]"
], 0x7ffc6e0f7000)
fn get_mappings_for(map: &str, linux_gate_loc: u64) -> Vec<MappingInfo> {
MappingInfo::aggregate(
MemoryMaps::from_read(map.as_bytes()).expect("failed to read mapping info"),
linux_gate_loc,
)
.unwrap_or_default()
}
const LINES: &str = "\
5597483fc000-5597483fe000 r--p 00000000 00:31 4750073 /usr/bin/cat
5597483fe000-559748402000 r-xp 00002000 00:31 4750073 /usr/bin/cat
559748402000-559748404000 r--p 00006000 00:31 4750073 /usr/bin/cat
559748404000-559748405000 r--p 00007000 00:31 4750073 /usr/bin/cat
559748405000-559748406000 rw-p 00008000 00:31 4750073 /usr/bin/cat
559749b0e000-559749b2f000 rw-p 00000000 00:00 0 [heap]
7efd968d3000-7efd968f5000 rw-p 00000000 00:00 0
7efd968f5000-7efd9694a000 r--p 00000000 00:31 5004638 /usr/lib/locale/en_US.utf8/LC_CTYPE
7efd9694a000-7efd96bc2000 r--p 00000000 00:31 5004373 /usr/lib/locale/en_US.utf8/LC_COLLATE
7efd96bc2000-7efd96bc4000 rw-p 00000000 00:00 0
7efd96bc4000-7efd96bea000 r--p 00000000 00:31 4996104 /lib64/libc-2.32.so
7efd96bea000-7efd96d39000 r-xp 00026000 00:31 4996104 /lib64/libc-2.32.so
7efd96d39000-7efd96d85000 r--p 00175000 00:31 4996104 /lib64/libc-2.32.so
7efd96d85000-7efd96d86000 ---p 001c1000 00:31 4996104 /lib64/libc-2.32.so
7efd96d86000-7efd96d89000 r--p 001c1000 00:31 4996104 /lib64/libc-2.32.so
7efd96d89000-7efd96d8c000 rw-p 001c4000 00:31 4996104 /lib64/libc-2.32.so
7efd96d8c000-7efd96d92000 ---p 00000000 00:00 0
7efd96da0000-7efd96da1000 r--p 00000000 00:31 5004379 /usr/lib/locale/en_US.utf8/LC_NUMERIC
7efd96da1000-7efd96da2000 r--p 00000000 00:31 5004382 /usr/lib/locale/en_US.utf8/LC_TIME
7efd96da2000-7efd96da3000 r--p 00000000 00:31 5004377 /usr/lib/locale/en_US.utf8/LC_MONETARY
7efd96da3000-7efd96da4000 r--p 00000000 00:31 5004376 /usr/lib/locale/en_US.utf8/LC_MESSAGES/SYS_LC_MESSAGES
7efd96da4000-7efd96da5000 r--p 00000000 00:31 5004380 /usr/lib/locale/en_US.utf8/LC_PAPER
7efd96da5000-7efd96da6000 r--p 00000000 00:31 5004378 /usr/lib/locale/en_US.utf8/LC_NAME
7efd96da6000-7efd96da7000 r--p 00000000 00:31 5004372 /usr/lib/locale/en_US.utf8/LC_ADDRESS
7efd96da7000-7efd96da8000 r--p 00000000 00:31 5004381 /usr/lib/locale/en_US.utf8/LC_TELEPHONE
7efd96da8000-7efd96da9000 r--p 00000000 00:31 5004375 /usr/lib/locale/en_US.utf8/LC_MEASUREMENT
7efd96da9000-7efd96db0000 r--s 00000000 00:31 5004639 /usr/lib64/gconv/gconv-modules.cache
7efd96db0000-7efd96db1000 r--p 00000000 00:31 5004374 /usr/lib/locale/en_US.utf8/LC_IDENTIFICATION
7efd96db1000-7efd96db2000 r--p 00000000 00:31 4996100 /lib64/ld-2.32.so
7efd96db2000-7efd96dd3000 r-xp 00001000 00:31 4996100 /lib64/ld-2.32.so
7efd96dd3000-7efd96ddc000 r--p 00022000 00:31 4996100 /lib64/ld-2.32.so
7efd96ddc000-7efd96ddd000 r--p 0002a000 00:31 4996100 /lib64/ld-2.32.so
7efd96ddd000-7efd96ddf000 rw-p 0002b000 00:31 4996100 /lib64/ld-2.32.so
7ffc6dfda000-7ffc6dffb000 rw-p 00000000 00:00 0 [stack]
7ffc6e0f3000-7ffc6e0f7000 r--p 00000000 00:00 0 [vvar]
7ffc6e0f7000-7ffc6e0f9000 r-xp 00000000 00:00 0 [vdso]
ffffffffff600000-ffffffffff601000 --xp 00000000 00:00 0 [vsyscall]";
const LINUX_GATE_LOC: u64 = 0x7ffc6e0f7000;
fn get_all_mappings() -> Vec<MappingInfo> {
let mut mappings: Vec<MappingInfo> = Vec::new();
let (lines, linux_gate_loc) = get_lines_and_loc();
// Only /usr/bin/cat and [heap]
for line in lines {
match MappingInfo::parse_from_line(line, linux_gate_loc, mappings.last_mut())
.expect("failed to read mapping info")
{
MappingInfoParsingResult::Success(map) => mappings.push(map),
MappingInfoParsingResult::SkipLine => continue,
}
}
assert_eq!(mappings.len(), 23);
mappings
get_mappings_for(LINES, LINUX_GATE_LOC)
}
#[test]
fn test_merged() {
let mut mappings: Vec<MappingInfo> = Vec::new();
let (lines, linux_gate_loc) = get_lines_and_loc();
// Only /usr/bin/cat and [heap]
for line in lines[0..=6].iter() {
match MappingInfo::parse_from_line(line, linux_gate_loc, mappings.last_mut())
.expect("failed to read mapping info")
{
MappingInfoParsingResult::Success(map) => mappings.push(map),
MappingInfoParsingResult::SkipLine => continue,
}
}
let mappings = get_mappings_for(
"\
5597483fc000-5597483fe000 r--p 00000000 00:31 4750073 /usr/bin/cat
5597483fe000-559748402000 r-xp 00002000 00:31 4750073 /usr/bin/cat
559748402000-559748404000 r--p 00006000 00:31 4750073 /usr/bin/cat
559748404000-559748405000 r--p 00007000 00:31 4750073 /usr/bin/cat
559748405000-559748406000 rw-p 00008000 00:31 4750073 /usr/bin/cat
559749b0e000-559749b2f000 rw-p 00000000 00:00 0 [heap]
7efd968d3000-7efd968f5000 rw-p 00000000 00:00 0 ",
0x7ffc6e0f7000,
);
assert_eq!(mappings.len(), 3);
let cat_map = MappingInfo {
@ -453,8 +433,11 @@ mod tests {
end_address: 0x559748406000,
},
offset: 0,
executable: true,
name: Some("/usr/bin/cat".to_string()),
permissions: MMPermissions::READ
| MMPermissions::WRITE
| MMPermissions::EXECUTE
| MMPermissions::PRIVATE,
name: Some("/usr/bin/cat".into()),
};
assert_eq!(mappings[0], cat_map);
@ -467,8 +450,8 @@ mod tests {
end_address: 0x559749b2f000,
},
offset: 0,
executable: false,
name: Some("[heap]".to_string()),
permissions: MMPermissions::READ | MMPermissions::WRITE | MMPermissions::PRIVATE,
name: Some("[heap]".into()),
};
assert_eq!(mappings[1], heap_map);
@ -481,7 +464,7 @@ mod tests {
end_address: 0x7efd968f5000,
},
offset: 0,
executable: false,
permissions: MMPermissions::READ | MMPermissions::WRITE | MMPermissions::PRIVATE,
name: None,
};
@ -500,8 +483,8 @@ mod tests {
end_address: 0x7ffc6e0f9000,
},
offset: 0,
executable: true,
name: Some("linux-gate.so".to_string()),
permissions: MMPermissions::READ | MMPermissions::EXECUTE | MMPermissions::PRIVATE,
name: Some("linux-gate.so".into()),
};
assert_eq!(mappings[21], gate_map);
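Because MappingInfo now stores the full procfs permission bitflags instead of a single executable: bool, the expected mappings above spell out READ/WRITE/EXECUTE/PRIVATE explicitly. A tiny illustration of the flag arithmetic, using nothing beyond standard bitflags operations:

use procfs_core::process::MMPermissions;

// "r-xp" in /proc/<pid>/maps corresponds to READ | EXECUTE | PRIVATE.
let perms = MMPermissions::READ | MMPermissions::EXECUTE | MMPermissions::PRIVATE;
assert!(perms.contains(MMPermissions::EXECUTE));
assert!(!perms.contains(MMPermissions::WRITE));
// The is_executable() accessor used later in the patch presumably reduces to
// a check of this kind.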
@ -511,35 +494,35 @@ mod tests {
fn test_reading_all() {
let mappings = get_all_mappings();
let found_items = vec![
Some("/usr/bin/cat".to_string()),
Some("[heap]".to_string()),
let found_items: Vec<Option<OsString>> = vec![
Some("/usr/bin/cat".into()),
Some("[heap]".into()),
None,
Some("/usr/lib/locale/en_US.utf8/LC_CTYPE".to_string()),
Some("/usr/lib/locale/en_US.utf8/LC_COLLATE".to_string()),
Some("/usr/lib/locale/en_US.utf8/LC_CTYPE".into()),
Some("/usr/lib/locale/en_US.utf8/LC_COLLATE".into()),
None,
Some("/lib64/libc-2.32.so".to_string()),
Some("/lib64/libc-2.32.so".into()),
// The original shows a None here, but this is an address range that the
// linker reserved but which a loaded library did not use. Such ranges
// appear as anonymous private mappings with no access flags set,
// directly following an executable mapping.
Some("/usr/lib/locale/en_US.utf8/LC_NUMERIC".to_string()),
Some("/usr/lib/locale/en_US.utf8/LC_TIME".to_string()),
Some("/usr/lib/locale/en_US.utf8/LC_MONETARY".to_string()),
Some("/usr/lib/locale/en_US.utf8/LC_MESSAGES/SYS_LC_MESSAGES".to_string()),
Some("/usr/lib/locale/en_US.utf8/LC_PAPER".to_string()),
Some("/usr/lib/locale/en_US.utf8/LC_NAME".to_string()),
Some("/usr/lib/locale/en_US.utf8/LC_ADDRESS".to_string()),
Some("/usr/lib/locale/en_US.utf8/LC_TELEPHONE".to_string()),
Some("/usr/lib/locale/en_US.utf8/LC_MEASUREMENT".to_string()),
Some("/usr/lib64/gconv/gconv-modules.cache".to_string()),
Some("/usr/lib/locale/en_US.utf8/LC_IDENTIFICATION".to_string()),
Some("/lib64/ld-2.32.so".to_string()),
Some("[stack]".to_string()),
Some("[vvar]".to_string()),
Some("/usr/lib/locale/en_US.utf8/LC_NUMERIC".into()),
Some("/usr/lib/locale/en_US.utf8/LC_TIME".into()),
Some("/usr/lib/locale/en_US.utf8/LC_MONETARY".into()),
Some("/usr/lib/locale/en_US.utf8/LC_MESSAGES/SYS_LC_MESSAGES".into()),
Some("/usr/lib/locale/en_US.utf8/LC_PAPER".into()),
Some("/usr/lib/locale/en_US.utf8/LC_NAME".into()),
Some("/usr/lib/locale/en_US.utf8/LC_ADDRESS".into()),
Some("/usr/lib/locale/en_US.utf8/LC_TELEPHONE".into()),
Some("/usr/lib/locale/en_US.utf8/LC_MEASUREMENT".into()),
Some("/usr/lib64/gconv/gconv-modules.cache".into()),
Some("/usr/lib/locale/en_US.utf8/LC_IDENTIFICATION".into()),
Some("/lib64/ld-2.32.so".into()),
Some("[stack]".into()),
Some("[vvar]".into()),
// This is rewritten from [vdso] to linux-gate.so
Some("linux-gate.so".to_string()),
Some("[vsyscall]".to_string()),
Some("linux-gate.so".into()),
Some("[vsyscall]".into()),
];
assert_eq!(
@ -560,8 +543,11 @@ mod tests {
end_address: 0x7efd96d8c000, // ..but this is not visible here
},
offset: 0,
executable: true,
name: Some("/lib64/libc-2.32.so".to_string()),
permissions: MMPermissions::READ
| MMPermissions::WRITE
| MMPermissions::EXECUTE
| MMPermissions::PRIVATE,
name: Some("/lib64/libc-2.32.so".into()),
};
assert_eq!(mappings[6], gate_map);
@ -569,89 +555,40 @@ mod tests {
#[test]
fn test_get_mapping_effective_name() {
let lines = vec![
"7f0b97b6f000-7f0b97b70000 r--p 00000000 00:3e 27136458 /home/martin/Documents/mozilla/devel/mozilla-central/obj/widget/gtk/mozgtk/gtk3/libmozgtk.so",
"7f0b97b70000-7f0b97b71000 r-xp 00000000 00:3e 27136458 /home/martin/Documents/mozilla/devel/mozilla-central/obj/widget/gtk/mozgtk/gtk3/libmozgtk.so",
"7f0b97b71000-7f0b97b73000 r--p 00000000 00:3e 27136458 /home/martin/Documents/mozilla/devel/mozilla-central/obj/widget/gtk/mozgtk/gtk3/libmozgtk.so",
"7f0b97b73000-7f0b97b74000 rw-p 00001000 00:3e 27136458 /home/martin/Documents/mozilla/devel/mozilla-central/obj/widget/gtk/mozgtk/gtk3/libmozgtk.so",
];
let linux_gate_loc = 0x7ffe091bf000;
let mut mappings: Vec<MappingInfo> = Vec::new();
for line in lines {
match MappingInfo::parse_from_line(line, linux_gate_loc, mappings.last_mut())
.expect("failed to read mapping info")
{
MappingInfoParsingResult::Success(map) => mappings.push(map),
MappingInfoParsingResult::SkipLine => continue,
}
}
let mappings = get_mappings_for(
"\
7f0b97b6f000-7f0b97b70000 r--p 00000000 00:3e 27136458 /home/martin/Documents/mozilla/devel/mozilla-central/obj/widget/gtk/mozgtk/gtk3/libmozgtk.so
7f0b97b70000-7f0b97b71000 r-xp 00000000 00:3e 27136458 /home/martin/Documents/mozilla/devel/mozilla-central/obj/widget/gtk/mozgtk/gtk3/libmozgtk.so
7f0b97b71000-7f0b97b73000 r--p 00000000 00:3e 27136458 /home/martin/Documents/mozilla/devel/mozilla-central/obj/widget/gtk/mozgtk/gtk3/libmozgtk.so
7f0b97b73000-7f0b97b74000 rw-p 00001000 00:3e 27136458 /home/martin/Documents/mozilla/devel/mozilla-central/obj/widget/gtk/mozgtk/gtk3/libmozgtk.so",
0x7ffe091bf000,
);
assert_eq!(mappings.len(), 1);
let (file_path, file_name) = mappings[0]
.get_mapping_effective_name_and_path()
.get_mapping_effective_path_and_name()
.expect("Couldn't get effective name for mapping");
assert_eq!(file_name, "libmozgtk.so");
assert_eq!(file_path, "/home/martin/Documents/mozilla/devel/mozilla-central/obj/widget/gtk/mozgtk/gtk3/libmozgtk.so");
}
#[test]
fn test_whitespaces_in_maps() {
let lines = vec![
" 7f0b97b6f000-7f0b97b70000 r--p 00000000 00:3e 27136458 libmozgtk.so",
"7f0b97b70000-7f0b97b71000 r-xp 00000000 00:3e 27136458 libmozgtk.so ",
"7f0b97b71000-7f0b97b73000 r--p 00000000 00:3e 27136458\t\t\tlibmozgtk.so",
];
let linux_gate_loc = 0x7ffe091bf000;
let mut mappings: Vec<MappingInfo> = Vec::new();
for line in lines {
match MappingInfo::parse_from_line(line, linux_gate_loc, mappings.last_mut())
.expect("failed to read mapping info")
{
MappingInfoParsingResult::Success(map) => mappings.push(map),
MappingInfoParsingResult::SkipLine => continue,
}
}
assert_eq!(mappings.len(), 1);
let expected_map = MappingInfo {
start_address: 0x7f0b97b6f000,
size: 16384,
system_mapping_info: SystemMappingInfo {
start_address: 0x7f0b97b6f000,
end_address: 0x7f0b97b73000,
},
offset: 0,
executable: true,
name: Some("libmozgtk.so".to_string()),
};
assert_eq!(expected_map, mappings[0]);
assert_eq!(file_path, PathBuf::from("/home/martin/Documents/mozilla/devel/mozilla-central/obj/widget/gtk/mozgtk/gtk3/libmozgtk.so"));
}
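The accessor was renamed from get_mapping_effective_name_and_path to get_mapping_effective_path_and_name and now returns the path as a PathBuf, which is why the assertion compares against PathBuf::from(..). A short sketch of how the two return values relate; the split is inferred from the assertions, not from the accessor itself:

use std::path::PathBuf;

let file_path = PathBuf::from(
    "/home/martin/Documents/mozilla/devel/mozilla-central/obj/widget/gtk/mozgtk/gtk3/libmozgtk.so",
);
// The effective name is just the final path component.
let file_name = file_path.file_name().and_then(|n| n.to_str()).unwrap_or_default();
assert_eq!(file_name, "libmozgtk.so");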
#[test]
fn test_whitespaces_in_name() {
let lines = vec![
"10000000-20000000 r--p 00000000 00:3e 27136458 libmoz gtk.so",
"20000000-30000000 r--p 00000000 00:3e 27136458 libmozgtk.so (deleted)",
"30000000-40000000 r--p 00000000 00:3e 27136458 \"libmoz gtk.so (deleted)\"",
"30000000-40000000 r--p 00000000 00:3e 27136458 ",
];
let linux_gate_loc = 0x7ffe091bf000;
let mut mappings: Vec<MappingInfo> = Vec::new();
for line in lines {
match MappingInfo::parse_from_line(line, linux_gate_loc, mappings.last_mut())
.expect("failed to read mapping info")
{
MappingInfoParsingResult::Success(map) => mappings.push(map),
MappingInfoParsingResult::SkipLine => continue,
}
}
let mappings = get_mappings_for(
"\
10000000-20000000 r--p 00000000 00:3e 27136458 libmoz gtk.so
20000000-30000000 r--p 00000000 00:3e 27136458 libmozgtk.so (deleted)
30000000-40000000 r--p 00000000 00:3e 27136458 \"libmoz gtk.so (deleted)\"
30000000-40000000 r--p 00000000 00:3e 27136458 ",
0x7ffe091bf000,
);
assert_eq!(mappings.len(), 4);
assert_eq!(mappings[0].name, Some("libmoz gtk.so".to_string()));
assert_eq!(mappings[1].name, Some("libmozgtk.so (deleted)".to_string()));
assert_eq!(mappings[0].name, Some("libmoz gtk.so".into()));
assert_eq!(mappings[1].name, Some("libmozgtk.so (deleted)".into()));
assert_eq!(
mappings[2].name,
Some("\"libmoz gtk.so (deleted)\"".to_string())
Some("\"libmoz gtk.so (deleted)\"".into())
);
assert_eq!(mappings[3].name, None);
}

View File

@ -156,15 +156,16 @@ impl MinidumpWriter {
return true;
}
let (stack_ptr, stack_len) = match dumper.get_stack_info(stack_pointer) {
let (valid_stack_pointer, stack_len) = match dumper.get_stack_info(stack_pointer) {
Ok(x) => x,
Err(_) => {
return false;
}
};
let stack_copy = match PtraceDumper::copy_from_process(
self.blamed_thread,
stack_ptr as *mut libc::c_void,
valid_stack_pointer as *mut libc::c_void,
stack_len,
) {
Ok(x) => x,
@ -173,7 +174,7 @@ impl MinidumpWriter {
}
};
let sp_offset = stack_pointer - stack_ptr;
let sp_offset = stack_pointer.saturating_sub(valid_stack_pointer);
self.principal_mapping
.as_ref()
.unwrap()
@ -187,8 +188,8 @@ impl MinidumpWriter {
destination: &mut (impl Write + Seek),
) -> Result<()> {
// A minidump file contains a number of tagged streams. This is the number
// of stream which we write.
let num_writers = 14u32;
// of streams which we write.
let num_writers = 15u32;
let mut header_section = MemoryWriter::<MDRawHeader>::alloc(buffer)?;
@ -237,6 +238,10 @@ impl MinidumpWriter {
// Write section to file
dir_section.write_to_file(buffer, Some(dirent))?;
let dirent = memory_info_list_stream::write(self, buffer)?;
// Write section to file
dir_section.write_to_file(buffer, Some(dirent))?;
let dirent = match self.write_file(buffer, "/proc/cpuinfo") {
Ok(location) => MDRawDirectory {
stream_type: MDStreamType::LinuxCpuInfo as u32,
@ -322,8 +327,7 @@ impl MinidumpWriter {
// Write section to file
dir_section.write_to_file(buffer, Some(dirent))?;
// If you add more directory entries, don't forget to update kNumWriters,
// above.
// If you add more directory entries, don't forget to update num_writers, above.
Ok(())
}

View File

@ -1,10 +1,12 @@
#[cfg(target_os = "android")]
use crate::linux::android::late_process_mappings;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use crate::thread_info;
use crate::{
linux::{
auxv_reader::{AuxvType, ProcfsAuxvIter},
errors::{DumperError, InitError, ThreadInfoError},
maps_reader::{MappingInfo, MappingInfoParsingResult, DELETED_SUFFIX},
maps_reader::MappingInfo,
thread_info::{Pid, ThreadInfo},
LINUX_GATE_LIBRARY_NAME,
},
@ -15,13 +17,8 @@ use nix::{
errno::Errno,
sys::{ptrace, wait},
};
use std::{
collections::HashMap,
ffi::c_void,
io::{BufRead, BufReader},
path,
result::Result,
};
use procfs_core::process::MMPermissions;
use std::{collections::HashMap, ffi::c_void, io::BufReader, path, result::Result};
#[derive(Debug, Clone)]
pub struct Thread {
@ -36,6 +33,7 @@ pub struct PtraceDumper {
pub threads: Vec<Thread>,
pub auxv: HashMap<AuxvType, AuxvType>,
pub mappings: Vec<MappingInfo>,
pub page_size: usize,
}
#[cfg(target_pointer_width = "32")]
@ -76,6 +74,7 @@ impl PtraceDumper {
threads: Vec::new(),
auxv: HashMap::new(),
mappings: Vec::new(),
page_size: 0,
};
dumper.init()?;
Ok(dumper)
@ -86,6 +85,10 @@ impl PtraceDumper {
self.read_auxv()?;
self.enumerate_threads()?;
self.enumerate_mappings()?;
self.page_size = nix::unistd::sysconf(nix::unistd::SysconfVar::PAGE_SIZE)?
.expect("page size apparently unlimited: doesn't make sense.")
as usize;
Ok(())
}
@ -146,7 +149,7 @@ impl PtraceDumper {
// We thus test the stack pointer and exclude any threads that are part of
// the seccomp sandbox's trusted code.
let skip_thread;
let regs = ptrace::getregs(pid);
let regs = thread_info::ThreadInfo::getregs(pid.into());
if let Ok(regs) = regs {
#[cfg(target_arch = "x86_64")]
{
@ -265,11 +268,11 @@ impl PtraceDumper {
// guaranteed (see http://crosbug.com/25355); therefore, try to use the
// actual entry point to find the mapping.
let at_entry;
#[cfg(target_arch = "arm")]
#[cfg(any(target_arch = "arm", all(target_os = "android", target_arch = "x86")))]
{
at_entry = 9;
}
#[cfg(not(target_arch = "arm"))]
#[cfg(not(any(target_arch = "arm", all(target_os = "android", target_arch = "x86"))))]
{
at_entry = libc::AT_ENTRY;
}
@ -280,15 +283,11 @@ impl PtraceDumper {
let maps_path = path::PathBuf::from(&filename);
let maps_file = std::fs::File::open(maps_path).map_err(errmap)?;
for line in BufReader::new(maps_file).lines() {
// /proc/<pid>/maps looks like this
// 7fe34a863000-7fe34a864000 rw-p 00009000 00:31 4746408 /usr/lib64/libogg.so.0.8.4
let line = line.map_err(errmap)?;
match MappingInfo::parse_from_line(&line, linux_gate_loc, self.mappings.last_mut()) {
Ok(MappingInfoParsingResult::Success(map)) => self.mappings.push(map),
Ok(MappingInfoParsingResult::SkipLine) | Err(_) => continue,
}
}
use procfs_core::FromRead;
self.mappings = procfs_core::process::MemoryMaps::from_read(maps_file)
.ok()
.and_then(|maps| MappingInfo::aggregate(maps, linux_gate_loc).ok())
.unwrap_or_default();
if entry_point_loc != 0 {
let mut swap_idx = None;
@ -324,28 +323,52 @@ impl PtraceDumper {
ThreadInfo::create(self.pid, self.threads[index].tid)
}
// Get information about the stack, given the stack pointer. We don't try to
// walk the stack since we might not have all the information needed to do
// unwind. So we just grab, up to, 32k of stack.
// Returns a valid stack pointer and the mapping that contains the stack.
// The stack pointer will usually point within this mapping, but it might
// not in the case of a stack overflow; hence the returned pointer might
// differ from the one that was passed in.
pub fn get_stack_info(&self, int_stack_pointer: usize) -> Result<(usize, usize), DumperError> {
// Move the stack pointer to the bottom of the page that it's in.
// NOTE: original code uses getpagesize(), which a) isn't there in Rust and
// b) shouldn't be used, as its not portable (see man getpagesize)
let page_size = nix::unistd::sysconf(nix::unistd::SysconfVar::PAGE_SIZE)?
.expect("page size apparently unlimited: doesn't make sense.");
let stack_pointer = int_stack_pointer & !(page_size as usize - 1);
// Round the stack pointer down to the page boundary; this will cause us to
// capture data below the stack pointer which might still be relevant.
let mut stack_pointer = int_stack_pointer & !(self.page_size - 1);
let mut mapping = self.find_mapping(stack_pointer);
// The number of bytes of stack which we try to capture.
let stack_to_capture = 32 * 1024;
// The guard page has been 1 MiB in size since kernel 4.12; older
// kernels used a 4 KiB one instead.
let guard_page_max_addr = stack_pointer + (1024 * 1024);
let mapping = self
.find_mapping(stack_pointer)
.ok_or(DumperError::NoStackPointerMapping)?;
let offset = stack_pointer - mapping.start_address;
let distance_to_end = mapping.size - offset;
let stack_len = std::cmp::min(distance_to_end, stack_to_capture);
// If we found no mapping, or the mapping we found has no permissions,
// then we might have hit a guard page; try looking for a mapping at
// addresses past the stack pointer. The stack grows towards lower addresses
// on the platforms we care about, so the stack should appear after the
// guard page.
while !Self::may_be_stack(mapping) && (stack_pointer <= guard_page_max_addr) {
stack_pointer += self.page_size;
mapping = self.find_mapping(stack_pointer);
}
Ok((stack_pointer, stack_len))
mapping
.map(|mapping| {
let valid_stack_pointer = if mapping.contains_address(stack_pointer) {
stack_pointer
} else {
mapping.start_address
};
let stack_len = mapping.size - (valid_stack_pointer - mapping.start_address);
(valid_stack_pointer, stack_len)
})
.ok_or(DumperError::NoStackPointerMapping)
}
fn may_be_stack(mapping: Option<&MappingInfo>) -> bool {
if let Some(mapping) = mapping {
return mapping
.permissions
.intersects(MMPermissions::READ | MMPermissions::WRITE);
}
false
}
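The page rounding and the guard-page cap above are plain pointer arithmetic. A stand-alone sketch with made-up numbers (4 KiB pages, an arbitrary stack pointer) shows what the mask and the 1 MiB bound do:

// Illustrative values only.
let page_size: usize = 4096;
let int_stack_pointer: usize = 0x7ffc_6dfd_a123;

// Clearing the low bits rounds down to the page containing the pointer.
let stack_pointer = int_stack_pointer & !(page_size - 1);
assert_eq!(stack_pointer, 0x7ffc_6dfd_a000);

// The scan for a readable/writable mapping never walks more than 1 MiB
// (the post-4.12 guard-gap size) past that page.
let guard_page_max_addr = stack_pointer + 1024 * 1024;
assert_eq!(guard_page_max_addr - stack_pointer, 0x10_0000);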
pub fn sanitize_stack_copy(
@ -374,7 +397,7 @@ impl PtraceDumper {
// the bitfield length is 2^test_bits long.
let test_bits = 11;
// byte length of the corresponding array.
let array_size = 1 << (test_bits - 3);
let array_size: usize = 1 << (test_bits - 3);
let array_mask = array_size - 1;
// The amount to right shift pointers by. This captures the top bits
// on 32 bit architectures. On 64 bit architectures this would be
@ -394,7 +417,7 @@ impl PtraceDumper {
// bit, modulo the bitfield size, is not set then there does not
// exist a mapping in mappings that would contain that pointer.
for mapping in &self.mappings {
if !mapping.executable {
if !mapping.is_executable() {
continue;
}
// For each mapping, work out the (unmodulo'ed) range of bits to
@ -441,7 +464,7 @@ impl PtraceDumper {
let test = addr >> shift;
if could_hit_mapping[(test >> 3) & array_mask] & (1 << (test & 7)) != 0 {
if let Some(hit_mapping) = self.find_mapping_no_bias(addr) {
if hit_mapping.executable {
if hit_mapping.is_executable() {
last_hit_mapping = Some(hit_mapping);
continue;
}
@ -553,7 +576,7 @@ impl PtraceDumper {
}
// Special-case linux-gate because it's not a real file.
if mapping.name.as_deref() == Some(LINUX_GATE_LIBRARY_NAME) {
if mapping.name.as_deref() == Some(LINUX_GATE_LIBRARY_NAME.as_ref()) {
if pid == std::process::id().try_into()? {
let mem_slice = unsafe {
std::slice::from_raw_parts(mapping.start_address as *const u8, mapping.size)
@ -568,26 +591,16 @@ impl PtraceDumper {
return Self::elf_file_identifier_from_mapped_file(&mem_slice);
}
}
let new_name = MappingInfo::handle_deleted_file_in_mapping(
mapping.name.as_deref().unwrap_or_default(),
pid,
)?;
let mem_slice = MappingInfo::get_mmap(&Some(new_name.clone()), mapping.offset)?;
let (filename, old_name) = mapping.fixup_deleted_file(pid)?;
let mem_slice = MappingInfo::get_mmap(&Some(filename), mapping.offset)?;
let build_id = Self::elf_file_identifier_from_mapped_file(&mem_slice)?;
// This means we switched from "/my/binary" to "/proc/1234/exe", because /my/binary
// was deleted and thus has a "/my/binary (deleted)" entry. We found the mapping anyway
// so we remove the "(deleted)".
if let Some(old_name) = &mapping.name {
if &new_name != old_name {
mapping.name = Some(
old_name
.trim_end_matches(DELETED_SUFFIX)
.trim_end()
.to_string(),
);
}
// This means we switched from "/my/binary" to "/proc/1234/exe", so change the mapping
// to remove the " (deleted)" portion.
if let Some(old_name) = old_name {
mapping.name = Some(old_name.into());
}
Ok(build_id)
}

View File

@ -1,6 +1,7 @@
pub mod app_memory;
pub mod exception_stream;
pub mod mappings;
pub mod memory_info_list_stream;
pub mod memory_list_stream;
pub mod systeminfo_stream;
pub mod thread_list_stream;

View File

@ -84,9 +84,9 @@ fn fill_raw_module(
};
let (file_path, _) = mapping
.get_mapping_effective_name_and_path()
.get_mapping_effective_path_and_name()
.map_err(|e| errors::SectionMappingsError::GetEffectivePathError(mapping.clone(), e))?;
let name_header = write_string_to_location(buffer, &file_path)?;
let name_header = write_string_to_location(buffer, file_path.to_string_lossy().as_ref())?;
Ok(MDRawModule {
base_of_image: mapping.start_address as u64,

View File

@ -0,0 +1,68 @@
use super::*;
use minidump_common::format::{MemoryProtection, MemoryState, MemoryType};
use procfs_core::{process::MMPermissions, FromRead};
/// Write a MemoryInfoListStream using information from procfs.
pub fn write(
config: &mut MinidumpWriter,
buffer: &mut DumpBuf,
) -> Result<MDRawDirectory, errors::SectionMemInfoListError> {
let maps = procfs_core::process::MemoryMaps::from_file(std::path::PathBuf::from(format!(
"/proc/{}/maps",
config.blamed_thread
)))?;
let list_header = MemoryWriter::alloc_with_val(
buffer,
MDMemoryInfoList {
size_of_header: std::mem::size_of::<MDMemoryInfoList>() as u32,
size_of_entry: std::mem::size_of::<MDMemoryInfo>() as u32,
number_of_entries: maps.len() as u64,
},
)?;
let mut dirent = MDRawDirectory {
stream_type: MDStreamType::MemoryInfoListStream as u32,
location: list_header.location(),
};
let block_list = MemoryArrayWriter::<MDMemoryInfo>::alloc_from_iter(
buffer,
maps.iter().map(|mm| MDMemoryInfo {
base_address: mm.address.0,
allocation_base: mm.address.0,
allocation_protection: get_memory_protection(mm.perms).bits(),
__alignment1: 0,
region_size: mm.address.1 - mm.address.0,
state: MemoryState::MEM_COMMIT.bits(),
protection: get_memory_protection(mm.perms).bits(),
_type: if mm.perms.contains(MMPermissions::PRIVATE) {
MemoryType::MEM_PRIVATE
} else {
MemoryType::MEM_MAPPED
}
.bits(),
__alignment2: 0,
}),
)?;
dirent.location.data_size += block_list.location().data_size;
Ok(dirent)
}
fn get_memory_protection(permissions: MMPermissions) -> MemoryProtection {
let read = permissions.contains(MMPermissions::READ);
let write = permissions.contains(MMPermissions::WRITE);
let exec = permissions.contains(MMPermissions::EXECUTE);
match (read, write, exec) {
(false, false, false) => MemoryProtection::PAGE_NOACCESS,
(false, false, true) => MemoryProtection::PAGE_EXECUTE,
(true, false, false) => MemoryProtection::PAGE_READONLY,
(true, false, true) => MemoryProtection::PAGE_EXECUTE_READ,
// No support for write-only
(true | false, true, false) => MemoryProtection::PAGE_READWRITE,
// No support for execute+write-only
(true | false, true, true) => MemoryProtection::PAGE_EXECUTE_READWRITE,
}
}
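For orientation, this is how a few common /proc/<pid>/maps permission strings travel through get_memory_protection; a small check using the same types. Note that PRIVATE/SHARED only influences the MEM_PRIVATE/MEM_MAPPED type above, not the protection value:

use minidump_common::format::MemoryProtection;
use procfs_core::process::MMPermissions;

// "r-xp": e.g. a library's .text segment.
let text = MMPermissions::READ | MMPermissions::EXECUTE | MMPermissions::PRIVATE;
assert_eq!(get_memory_protection(text), MemoryProtection::PAGE_EXECUTE_READ);

// "rw-p": e.g. the heap or a data segment.
let data = MMPermissions::READ | MMPermissions::WRITE | MMPermissions::PRIVATE;
assert_eq!(get_memory_protection(data), MemoryProtection::PAGE_READWRITE);

// "---p": a guard page or otherwise reserved range.
assert_eq!(get_memory_protection(MMPermissions::PRIVATE), MemoryProtection::PAGE_NOACCESS);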

View File

@ -1,3 +1,5 @@
use std::cmp::min;
use super::*;
use crate::{minidump_cpu::RawContextCPU, minidump_writer::CrashingThreadContext};
@ -185,27 +187,19 @@ fn fill_thread_stack(
thread.stack.memory.data_size = 0;
thread.stack.memory.rva = buffer.position() as u32;
if let Ok((mut stack, mut stack_len)) = dumper.get_stack_info(stack_ptr) {
if let MaxStackLen::Len(max_stack_len) = max_stack_len {
if stack_len > max_stack_len {
stack_len = max_stack_len;
// Skip empty chunks of length max_stack_len.
// Meaning != 0
if stack_len > 0 {
while stack + stack_len < stack_ptr {
stack += stack_len;
}
}
}
}
if let Ok((valid_stack_ptr, stack_len)) = dumper.get_stack_info(stack_ptr) {
let stack_len = if let MaxStackLen::Len(max_stack_len) = max_stack_len {
min(stack_len, max_stack_len)
} else {
stack_len
};
let mut stack_bytes = PtraceDumper::copy_from_process(
thread.thread_id.try_into()?,
stack as *mut libc::c_void,
valid_stack_ptr as *mut libc::c_void,
stack_len,
)?;
let stack_pointer_offset = stack_ptr - stack;
let stack_pointer_offset = stack_ptr.saturating_sub(valid_stack_ptr);
if config.skip_stacks_if_mapping_unreferenced {
if let Some(principal_mapping) = &config.principal_mapping {
let low_addr = principal_mapping.system_mapping_info.start_address;
@ -230,7 +224,7 @@ fn fill_thread_stack(
rva: buffer.position() as u32,
};
buffer.write_all(&stack_bytes);
thread.stack.start_of_memory_range = stack as u64;
thread.stack.start_of_memory_range = valid_stack_ptr as u64;
thread.stack.memory = stack_location;
config.memory_blocks.push(thread.stack);
}
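The new clamping logic is easier to read in isolation: the captured length is bounded by the configured maximum, and the offset of the original stack pointer inside the captured block can no longer underflow. A tiny sketch with made-up values:

use std::cmp::min;

// Illustrative values only.
let stack_ptr: usize = 0x7000_2000;       // the thread's stack pointer
let valid_stack_ptr: usize = 0x7000_1000; // returned by get_stack_info()
let stack_len: usize = 64 * 1024;
let max_stack_len: usize = 32 * 1024;

let stack_len = min(stack_len, max_stack_len);
assert_eq!(stack_len, 32 * 1024);

// saturating_sub() yields 0 instead of wrapping if the valid pointer ever
// ends up above the original one.
assert_eq!(stack_ptr.saturating_sub(valid_stack_ptr), 0x1000);
assert_eq!(valid_stack_ptr.saturating_sub(stack_ptr), 0);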

View File

@ -34,6 +34,7 @@ enum NT_Elf {
//NT_PRPSINFO = 3,
//NT_TASKSTRUCT = 4,
//NT_AUXV = 6,
NT_ARM_VFP = 0x400, // ARM VFP/NEON registers
}
#[inline]
@ -96,14 +97,14 @@ trait CommonThreadInfo {
/// and therefore use the data field to return values. This function handles these
/// requests.
fn ptrace_get_data<T>(
request: ptrace::Request,
request: ptrace::RequestType,
flag: Option<NT_Elf>,
pid: nix::unistd::Pid,
) -> Result<T> {
let mut data = std::mem::MaybeUninit::uninit();
let res = unsafe {
libc::ptrace(
request as ptrace::RequestType,
request,
libc::pid_t::from(pid),
flag.unwrap_or(NT_Elf::NT_NONE),
data.as_mut_ptr(),
@ -119,7 +120,7 @@ trait CommonThreadInfo {
/// and therefore use the data field to return values. This function handles these
/// requests.
fn ptrace_get_data_via_io<T>(
request: ptrace::Request,
request: ptrace::RequestType,
flag: Option<NT_Elf>,
pid: nix::unistd::Pid,
) -> Result<T> {
@ -130,7 +131,7 @@ trait CommonThreadInfo {
};
let res = unsafe {
libc::ptrace(
request as ptrace::RequestType,
request,
libc::pid_t::from(pid),
flag.unwrap_or(NT_Elf::NT_NONE),
&io as *const _,
@ -142,19 +143,14 @@ trait CommonThreadInfo {
/// COPY FROM CRATE nix BECAUSE ITS NOT PUBLIC
fn ptrace_peek(
request: ptrace::Request,
request: ptrace::RequestType,
pid: unistd::Pid,
addr: ptrace::AddressType,
data: *mut libc::c_void,
) -> nix::Result<libc::c_long> {
let ret = unsafe {
Errno::clear();
libc::ptrace(
request as ptrace::RequestType,
libc::pid_t::from(pid),
addr,
data,
)
libc::ptrace(request, libc::pid_t::from(pid), addr, data)
};
match Errno::result(ret) {
Ok(..) | Err(Errno::UnknownErrno) => Ok(ret),

View File

@ -1,9 +1,8 @@
use super::{CommonThreadInfo, Pid};
use super::{CommonThreadInfo, NT_Elf, Pid};
use crate::{
errors::ThreadInfoError,
minidump_cpu::{RawContextCPU, FP_REG_COUNT, GP_REG_COUNT},
};
#[cfg(not(target_os = "android"))]
use nix::sys::ptrace;
/// https://github.com/rust-lang/libc/pull/2719
@ -34,55 +33,40 @@ impl ThreadInfoAarch64 {
self.regs.pc as usize
}
// nix currently doesn't support PTRACE_GETFPREGS, so we have to do it ourselves
fn getfpregs(pid: Pid) -> Result<user_fpsimd_struct> {
cfg_if::cfg_if! {
if #[cfg(target_os = "android")] {
// TODO: nix restricts PTRACE_GETFPREGS to arm android for some reason
let mut data = std::mem::MaybeUninit::<user_fpsimd_struct>::uninit();
let res = unsafe {
libc::ptrace(
14,
libc::pid_t::from(pid),
super::NT_Elf::NT_NONE,
data.as_mut_ptr(),
)
};
nix::errno::Errno::result(res)?;
Ok(unsafe { data.assume_init() })
} else {
Self::ptrace_get_data_via_io::<user_fpsimd_struct>(
ptrace::Request::PTRACE_GETREGSET,
Some(super::NT_Elf::NT_PRFPREGSET),
nix::unistd::Pid::from_raw(pid),
)
}
}
// nix currently doesn't support PTRACE_GETREGSET, so we have to do it ourselves
fn getregset(pid: Pid) -> Result<libc::user_regs_struct> {
Self::ptrace_get_data_via_io(
0x4204 as ptrace::RequestType, // PTRACE_GETREGSET
Some(NT_Elf::NT_PRSTATUS),
nix::unistd::Pid::from_raw(pid),
)
}
fn getregs(pid: Pid) -> Result<libc::user_regs_struct> {
cfg_if::cfg_if! {
if #[cfg(target_os = "android")] {
// TODO: nix restricts PTRACE_GETREGS to arm android for some reason
let mut data = std::mem::MaybeUninit::<libc::user_regs_struct>::uninit();
let res = unsafe {
libc::ptrace(
12,
libc::pid_t::from(pid),
super::NT_Elf::NT_NONE,
data.as_mut_ptr(),
)
};
nix::errno::Errno::result(res)?;
Ok(unsafe { data.assume_init() })
} else {
Self::ptrace_get_data_via_io::<libc::user_regs_struct>(
ptrace::Request::PTRACE_GETREGSET,
Some(super::NT_Elf::NT_PRSTATUS),
nix::unistd::Pid::from_raw(pid),
)
}
}
// TODO: nix restricts PTRACE_GETREGS to arm android for some reason
Self::ptrace_get_data(
12 as ptrace::RequestType, // PTRACE_GETREGS
None,
nix::unistd::Pid::from_raw(pid),
)
}
// nix currently doesn't support PTRACE_GETREGSET, so we have to do it ourselves
fn getfpregset(pid: Pid) -> Result<user_fpsimd_struct> {
Self::ptrace_get_data_via_io(
0x4204 as ptrace::RequestType, // PTRACE_GETREGSET
Some(NT_Elf::NT_PRFPREGSET),
nix::unistd::Pid::from_raw(pid),
)
}
// nix currently doesn't support PTRACE_GETFPREGS, so we have to do it ourselves
fn getfpregs(pid: Pid) -> Result<user_fpsimd_struct> {
Self::ptrace_get_data(
14 as ptrace::RequestType, // PTRACE_GETFPREGS
None,
nix::unistd::Pid::from_raw(pid),
)
}
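The raw numbers passed to ptrace_get_data and ptrace_get_data_via_io across these helpers are the classic ptrace request codes, which nix does not expose uniformly on every target. Collected here for reference as constants; the values are the ones used in this patch, and the usual nix::sys::ptrace import is assumed:

use nix::sys::ptrace;

const PTRACE_GETREGS: ptrace::RequestType = 12;
const PTRACE_GETFPREGS: ptrace::RequestType = 14;
const PTRACE_GETFPXREGS: ptrace::RequestType = 18; // x86 only
const PTRACE_GETREGSET: ptrace::RequestType = 0x4204; // takes an NT_* selector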
pub fn fill_cpu_context(&self, out: &mut RawContextCPU) {
@ -104,8 +88,8 @@ impl ThreadInfoAarch64 {
pub fn create_impl(_pid: Pid, tid: Pid) -> Result<Self> {
let (ppid, tgid) = Self::get_ppid_and_tgid(tid)?;
let regs = Self::getregs(tid)?;
let fpregs = Self::getfpregs(tid)?;
let regs = Self::getregset(tid).or_else(|_| Self::getregs(tid))?;
let fpregs = Self::getfpregset(tid).or_else(|_| Self::getfpregs(tid))?;
let stack_pointer = regs.sp as usize;

View File

@ -1,43 +1,21 @@
use super::{CommonThreadInfo, Pid};
use super::{CommonThreadInfo, NT_Elf, Pid};
use crate::{errors::ThreadInfoError, minidump_cpu::RawContextCPU};
use nix::sys::ptrace;
type Result<T> = std::result::Result<T, ThreadInfoError>;
// These are not (yet) part of the libc-crate
// #[repr(C)]
// #[derive(Debug, Eq, Hash, PartialEq, Copy, Clone, Default)]
// pub struct fp_reg {
// // TODO: No bitfields at the moment, just the next best integer-type
// sign1: u8,
// unused: u16,
// sign2: u8,
// exponent: u16,
// j: u8,
// mantissa1: u32,
// mantissa2: u32,
// // unsigned int sign1:1;
// // unsigned int unused:15;
// // unsigned int sign2:1;
// // unsigned int exponent:14;
// // unsigned int j:1;
// // unsigned int mantissa1:31;
// // unsigned int mantissa0:32;
// }
// Not defined by libc because this works only on cores that support VFP
#[allow(non_camel_case_types)]
#[repr(C)]
#[derive(Debug, Eq, Hash, PartialEq, Copy, Clone, Default)]
pub struct user_fpregs {
// fpregs: [fp_reg; 8],
fpregs: [u32; 8 * 3], // Fields not used, so shortening the struct to 3 x u32
fpsr: u32,
fpcr: u32,
ftype: [u8; 8],
init_flag: u32,
pub struct user_fpregs_struct {
pub fpregs: [u64; 32],
pub fpscr: u32,
}
#[repr(C)]
#[derive(Debug, Eq, Hash, PartialEq, Copy, Clone, Default)]
pub struct user_regs {
pub struct user_regs_struct {
uregs: [u32; 18],
}
@ -46,26 +24,26 @@ pub struct ThreadInfoArm {
pub stack_pointer: usize,
pub tgid: Pid, // thread group id
pub ppid: Pid, // parent process
pub regs: user_regs,
pub fpregs: user_fpregs,
pub regs: user_regs_struct,
pub fpregs: user_fpregs_struct,
}
impl CommonThreadInfo for ThreadInfoArm {}
impl ThreadInfoArm {
// nix currently doesn't support PTRACE_GETFPREGS, so we have to do it ourselves
fn getfpregs(pid: Pid) -> Result<user_fpregs> {
Self::ptrace_get_data::<user_fpregs>(
ptrace::Request::PTRACE_GETFPREGS,
None,
fn getfpregs(pid: Pid) -> Result<user_fpregs_struct> {
Self::ptrace_get_data_via_io(
0x4204 as ptrace::RequestType, // PTRACE_GETREGSET
Some(NT_Elf::NT_ARM_VFP),
nix::unistd::Pid::from_raw(pid),
)
}
// nix currently doesn't support PTRACE_GETFPREGS, so we have to do it ourselves
fn getregs(pid: Pid) -> Result<user_regs> {
Self::ptrace_get_data::<user_regs>(
ptrace::Request::PTRACE_GETFPREGS,
// nix currently doesn't support PTRACE_GETREGS, so we have to do it ourselves
fn getregs(pid: Pid) -> Result<user_regs_struct> {
Self::ptrace_get_data::<user_regs_struct>(
ptrace::Request::PTRACE_GETREGS as ptrace::RequestType,
None,
nix::unistd::Pid::from_raw(pid),
)
@ -80,19 +58,15 @@ impl ThreadInfoArm {
crate::minidump_format::format::ContextFlagsArm::CONTEXT_ARM_FULL.bits();
out.iregs.copy_from_slice(&self.regs.uregs[..16]);
// No CPSR register in ThreadInfo(it's not accessible via ptrace)
out.cpsr = 0;
#[cfg(not(target_os = "android"))]
{
out.float_save.fpscr = self.fpregs.fpsr as u64 | ((self.fpregs.fpcr as u64) << 32);
}
out.cpsr = self.regs.uregs[16];
out.float_save.fpscr = self.fpregs.fpscr as u64;
out.float_save.regs = self.fpregs.fpregs;
}
pub fn create_impl(_pid: Pid, tid: Pid) -> Result<Self> {
let (ppid, tgid) = Self::get_ppid_and_tgid(tid)?;
let regs = Self::getregs(tid)?;
let fpregs = Self::getfpregs(tid)?;
let fpregs = Self::getfpregs(tid).unwrap_or(Default::default());
let stack_pointer = regs.uregs[13] as usize;

View File

@ -1,53 +1,145 @@
use super::{CommonThreadInfo, NT_Elf, Pid};
use crate::{errors::ThreadInfoError, minidump_cpu::RawContextCPU, minidump_format::format};
use core::mem::size_of_val;
use libc::user;
use nix::{sys::ptrace, unistd};
#[cfg(all(not(target_os = "android"), target_arch = "x86"))]
use libc::user_fpxregs_struct;
#[cfg(not(all(target_os = "android", target_arch = "x86")))]
use libc::{user, user_fpregs_struct, user_regs_struct};
use nix::sys::ptrace;
use scroll::Pwrite;
type Result<T> = std::result::Result<T, ThreadInfoError>;
// Not defined by libc on Android
#[cfg(all(target_os = "android", target_arch = "x86"))]
#[allow(non_camel_case_types)]
#[repr(C)]
pub struct user_regs_struct {
pub ebx: libc::c_long,
pub ecx: libc::c_long,
pub edx: libc::c_long,
pub esi: libc::c_long,
pub edi: libc::c_long,
pub ebp: libc::c_long,
pub eax: libc::c_long,
pub xds: libc::c_long,
pub xes: libc::c_long,
pub xfs: libc::c_long,
pub xgs: libc::c_long,
pub orig_eax: libc::c_long,
pub eip: libc::c_long,
pub xcs: libc::c_long,
pub eflags: libc::c_long,
pub esp: libc::c_long,
pub xss: libc::c_long,
}
// Not defined by libc on Android
#[cfg(all(target_os = "android", target_arch = "x86"))]
#[allow(non_camel_case_types)]
#[repr(C)]
pub struct user_fpxregs_struct {
pub cwd: libc::c_ushort,
pub swd: libc::c_ushort,
pub twd: libc::c_ushort,
pub fop: libc::c_ushort,
pub fip: libc::c_long,
pub fcs: libc::c_long,
pub foo: libc::c_long,
pub fos: libc::c_long,
pub mxcsr: libc::c_long,
__reserved: libc::c_long,
pub st_space: [libc::c_long; 32],
pub xmm_space: [libc::c_long; 32],
padding: [libc::c_long; 56],
}
// Not defined by libc on Android
#[cfg(all(target_os = "android", target_arch = "x86"))]
#[allow(non_camel_case_types)]
#[repr(C)]
pub struct user_fpregs_struct {
pub cwd: libc::c_long,
pub swd: libc::c_long,
pub twd: libc::c_long,
pub fip: libc::c_long,
pub fcs: libc::c_long,
pub foo: libc::c_long,
pub fos: libc::c_long,
pub st_space: [libc::c_long; 20],
}
#[cfg(all(target_os = "android", target_arch = "x86"))]
#[allow(non_camel_case_types)]
#[repr(C)]
pub struct user {
pub regs: user_regs_struct,
pub u_fpvalid: libc::c_long,
pub i387: user_fpregs_struct,
pub u_tsize: libc::c_ulong,
pub u_dsize: libc::c_ulong,
pub u_ssize: libc::c_ulong,
pub start_code: libc::c_ulong,
pub start_stack: libc::c_ulong,
pub signal: libc::c_long,
__reserved: libc::c_int,
pub u_ar0: *mut user_regs_struct,
pub u_fpstate: *mut user_fpregs_struct,
pub magic: libc::c_ulong,
pub u_comm: [libc::c_char; 32],
pub u_debugreg: [libc::c_int; 8],
}
const NUM_DEBUG_REGISTERS: usize = 8;
pub struct ThreadInfoX86 {
pub stack_pointer: usize,
pub tgid: Pid, // thread group id
pub ppid: Pid, // parent process
pub regs: libc::user_regs_struct,
pub fpregs: libc::user_fpregs_struct,
pub regs: user_regs_struct,
pub fpregs: user_fpregs_struct,
#[cfg(target_arch = "x86_64")]
pub dregs: [libc::c_ulonglong; NUM_DEBUG_REGISTERS],
#[cfg(target_arch = "x86")]
pub dregs: [libc::c_int; NUM_DEBUG_REGISTERS],
#[cfg(target_arch = "x86")]
pub fpxregs: libc::user_fpxregs_struct,
pub fpxregs: user_fpxregs_struct,
}
impl CommonThreadInfo for ThreadInfoX86 {}
impl ThreadInfoX86 {
// nix currently doesn't support PTRACE_GETREGSET, so we have to do it ourselves
fn getregset(pid: Pid) -> Result<libc::user_regs_struct> {
Self::ptrace_get_data_via_io::<libc::user_regs_struct>(
ptrace::Request::PTRACE_GETREGSET,
fn getregset(pid: Pid) -> Result<user_regs_struct> {
Self::ptrace_get_data_via_io(
0x4204 as ptrace::RequestType, // PTRACE_GETREGSET
Some(NT_Elf::NT_PRSTATUS),
nix::unistd::Pid::from_raw(pid),
)
}
pub fn getregs(pid: Pid) -> Result<user_regs_struct> {
// TODO: nix restricts PTRACE_GETREGS to arm android for some reason
Self::ptrace_get_data(
12 as ptrace::RequestType, // PTRACE_GETREGS
None,
nix::unistd::Pid::from_raw(pid),
)
}
// nix currently doesn't support PTRACE_GETREGSET, so we have to do it ourselves
fn getfpregset(pid: Pid) -> Result<libc::user_fpregs_struct> {
Self::ptrace_get_data_via_io::<libc::user_fpregs_struct>(
ptrace::Request::PTRACE_GETREGSET,
fn getfpregset(pid: Pid) -> Result<user_fpregs_struct> {
Self::ptrace_get_data_via_io(
0x4204 as ptrace::RequestType, // PTRACE_GETREGSET
Some(NT_Elf::NT_PRFPREGSET),
nix::unistd::Pid::from_raw(pid),
)
}
// nix currently doesn't support PTRACE_GETFPREGS, so we have to do it ourselves
fn getfpregs(pid: Pid) -> Result<libc::user_fpregs_struct> {
Self::ptrace_get_data::<libc::user_fpregs_struct>(
ptrace::Request::PTRACE_GETFPREGS,
fn getfpregs(pid: Pid) -> Result<user_fpregs_struct> {
Self::ptrace_get_data(
14 as ptrace::RequestType, // PTRACE_GETFPREGS
None,
nix::unistd::Pid::from_raw(pid),
)
@ -55,9 +147,9 @@ impl ThreadInfoX86 {
// nix currently doesn't support PTRACE_GETFPXREGS, so we have to do it ourselves
#[cfg(target_arch = "x86")]
fn getfpxregs(pid: Pid) -> Result<libc::user_fpxregs_struct> {
Self::ptrace_get_data::<libc::user_fpxregs_struct>(
ptrace::Request::PTRACE_GETFPXREGS,
fn getfpxregs(pid: Pid) -> Result<user_fpxregs_struct> {
Self::ptrace_get_data(
18 as ptrace::RequestType, // PTRACE_GETFPXREGS
None,
nix::unistd::Pid::from_raw(pid),
)
@ -65,7 +157,7 @@ impl ThreadInfoX86 {
fn peek_user(pid: Pid, addr: ptrace::AddressType) -> nix::Result<libc::c_long> {
Self::ptrace_peek(
ptrace::Request::PTRACE_PEEKUSER,
ptrace::Request::PTRACE_PEEKUSER as ptrace::RequestType,
nix::unistd::Pid::from_raw(pid),
addr,
std::ptr::null_mut(),
@ -74,10 +166,10 @@ impl ThreadInfoX86 {
pub fn create_impl(_pid: Pid, tid: Pid) -> Result<Self> {
let (ppid, tgid) = Self::get_ppid_and_tgid(tid)?;
let regs = Self::getregset(tid).or_else(|_| ptrace::getregs(unistd::Pid::from_raw(tid)))?;
let regs = Self::getregset(tid).or_else(|_| Self::getregs(tid))?;
let fpregs = Self::getfpregset(tid).or_else(|_| Self::getfpregs(tid))?;
#[cfg(target_arch = "x86")]
let fpxregs: libc::user_fpxregs_struct;
let fpxregs: user_fpxregs_struct;
#[cfg(target_arch = "x86")]
{
if cfg!(target_feature = "fxsr") {

View File

@ -234,6 +234,7 @@ cfg_if::cfg_if! {
}
}
#[repr(C, align(8))]
pub struct ThreadState {
pub state: [u32; THREAD_STATE_MAX],
pub state_size: u32,

View File

@ -3,7 +3,8 @@ pub use minidump_common::format::{
ProcessorArchitecture as MDCPUArchitecture, GUID, MINIDUMP_DIRECTORY as MDRawDirectory,
MINIDUMP_EXCEPTION as MDException, MINIDUMP_EXCEPTION_STREAM as MDRawExceptionStream,
MINIDUMP_HEADER as MDRawHeader, MINIDUMP_LOCATION_DESCRIPTOR as MDLocationDescriptor,
MINIDUMP_MEMORY_DESCRIPTOR as MDMemoryDescriptor, MINIDUMP_MODULE as MDRawModule,
MINIDUMP_MEMORY_DESCRIPTOR as MDMemoryDescriptor, MINIDUMP_MEMORY_INFO as MDMemoryInfo,
MINIDUMP_MEMORY_INFO_LIST as MDMemoryInfoList, MINIDUMP_MODULE as MDRawModule,
MINIDUMP_SIGNATURE as MD_HEADER_SIGNATURE, MINIDUMP_STREAM_TYPE as MDStreamType,
MINIDUMP_SYSTEM_INFO as MDRawSystemInfo, MINIDUMP_THREAD as MDRawThread,
MINIDUMP_THREAD_NAME as MDRawThreadName, MINIDUMP_VERSION as MD_HEADER_VERSION,

View File

@ -13,6 +13,7 @@ use minidump_writer::{
thread_info::Pid,
};
use nix::{errno::Errno, sys::signal::Signal};
use procfs_core::process::MMPermissions;
use std::collections::HashSet;
use std::{
@ -26,12 +27,23 @@ use common::*;
#[derive(Debug, PartialEq)]
enum Context {
#[cfg(not(any(target_arch = "mips", target_arch = "arm")))]
With,
Without,
}
#[cfg(not(any(target_arch = "mips", target_arch = "arm")))]
impl Context {
pub fn minidump_writer(&self, pid: Pid) -> MinidumpWriter {
let mut mw = MinidumpWriter::new(pid, pid);
#[cfg(not(target_arch = "mips"))]
if self == &Context::With {
let crash_context = get_crash_context(pid);
mw.set_crash_context(crash_context);
}
mw
}
}
#[cfg(not(target_arch = "mips"))]
fn get_ucontext() -> Result<crash_context::ucontext_t> {
let mut context = std::mem::MaybeUninit::uninit();
unsafe {
@ -42,10 +54,11 @@ fn get_ucontext() -> Result<crash_context::ucontext_t> {
}
}
#[cfg(not(any(target_arch = "mips", target_arch = "arm")))]
#[cfg(not(target_arch = "mips"))]
fn get_crash_context(tid: Pid) -> CrashContext {
let siginfo: libc::signalfd_siginfo = unsafe { std::mem::zeroed() };
let context = get_ucontext().expect("Failed to get ucontext");
#[cfg(not(target_arch = "arm"))]
let float_state = unsafe { std::mem::zeroed() };
CrashContext {
inner: crash_context::CrashContext {
@ -53,248 +66,414 @@ fn get_crash_context(tid: Pid) -> CrashContext {
pid: std::process::id() as _,
tid,
context,
#[cfg(not(target_arch = "arm"))]
float_state,
},
}
}
fn test_write_dump_helper(context: Context) {
let num_of_threads = 3;
let mut child = start_child_and_wait_for_threads(num_of_threads);
let pid = child.id() as i32;
macro_rules! contextual_tests {
() => {};
( fn $name:ident ($ctx:ident : Context) $body:block $($rest:tt)* ) => {
mod $name {
use super::*;
let mut tmpfile = tempfile::Builder::new()
.prefix("write_dump")
.tempfile()
.unwrap();
fn test($ctx: Context) $body
let mut tmp = MinidumpWriter::new(pid, pid);
#[cfg(not(any(target_arch = "mips", target_arch = "arm")))]
if context == Context::With {
let crash_context = get_crash_context(pid);
tmp.set_crash_context(crash_context);
#[test]
fn run() {
test(Context::Without)
}
#[cfg(not(target_arch = "mips"))]
#[test]
fn run_with_context() {
test(Context::With)
}
}
contextual_tests! { $($rest)* }
}
let in_memory_buffer = tmp.dump(&mut tmpfile).expect("Could not write minidump");
child.kill().expect("Failed to kill process");
// Reap child
let waitres = child.wait().expect("Failed to wait for child");
let status = waitres.signal().expect("Child did not die due to signal");
assert_eq!(waitres.code(), None);
assert_eq!(status, Signal::SIGKILL as i32);
let meta = std::fs::metadata(tmpfile.path()).expect("Couldn't get metadata for tempfile");
assert!(meta.len() > 0);
let mem_slice = std::fs::read(tmpfile.path()).expect("Failed to minidump");
assert_eq!(mem_slice.len(), in_memory_buffer.len());
assert_eq!(mem_slice, in_memory_buffer);
}
#[test]
fn test_write_dump() {
test_write_dump_helper(Context::Without)
}
contextual_tests! {
fn test_write_dump(context: Context) {
let num_of_threads = 3;
let mut child = start_child_and_wait_for_threads(num_of_threads);
let pid = child.id() as i32;
#[cfg(not(any(target_arch = "mips", target_arch = "arm")))]
#[test]
fn test_write_dump_with_context() {
test_write_dump_helper(Context::With)
}
let mut tmpfile = tempfile::Builder::new()
.prefix("write_dump")
.tempfile()
.unwrap();
fn test_write_and_read_dump_from_parent_helper(context: Context) {
let mut child = start_child_and_return(&["spawn_mmap_wait"]);
let pid = child.id() as i32;
let mut tmp = context.minidump_writer(pid);
let in_memory_buffer = tmp.dump(&mut tmpfile).expect("Could not write minidump");
child.kill().expect("Failed to kill process");
let mut tmpfile = tempfile::Builder::new()
.prefix("write_and_read_dump")
.tempfile()
.unwrap();
// Reap child
let waitres = child.wait().expect("Failed to wait for child");
let status = waitres.signal().expect("Child did not die due to signal");
assert_eq!(waitres.code(), None);
assert_eq!(status, Signal::SIGKILL as i32);
let mut f = BufReader::new(child.stdout.as_mut().expect("Can't open stdout"));
let mut buf = String::new();
let _ = f
.read_line(&mut buf)
.expect("Couldn't read address provided by child");
let mut output = buf.split_whitespace();
let mmap_addr = output
.next()
.unwrap()
.parse()
.expect("unable to parse mmap_addr");
let memory_size = output
.next()
.unwrap()
.parse()
.expect("unable to parse memory_size");
// Add information about the mapped memory.
let mapping = MappingInfo {
start_address: mmap_addr,
size: memory_size,
offset: 0,
executable: false,
name: Some("a fake mapping".to_string()),
system_mapping_info: SystemMappingInfo {
let meta = std::fs::metadata(tmpfile.path()).expect("Couldn't get metadata for tempfile");
assert!(meta.len() > 0);
let mem_slice = std::fs::read(tmpfile.path()).expect("Failed to read minidump");
assert_eq!(mem_slice.len(), in_memory_buffer.len());
assert_eq!(mem_slice, in_memory_buffer);
}
fn test_write_and_read_dump_from_parent(context: Context) {
let mut child = start_child_and_return(&["spawn_mmap_wait"]);
let pid = child.id() as i32;
let mut tmpfile = tempfile::Builder::new()
.prefix("write_and_read_dump")
.tempfile()
.unwrap();
let mut f = BufReader::new(child.stdout.as_mut().expect("Can't open stdout"));
let mut buf = String::new();
let _ = f
.read_line(&mut buf)
.expect("Couldn't read address provided by child");
let mut output = buf.split_whitespace();
let mmap_addr = output
.next()
.unwrap()
.parse()
.expect("unable to parse mmap_addr");
let memory_size = output
.next()
.unwrap()
.parse()
.expect("unable to parse memory_size");
// Add information about the mapped memory.
let mapping = MappingInfo {
start_address: mmap_addr,
end_address: mmap_addr + memory_size,
},
};
size: memory_size,
offset: 0,
permissions: MMPermissions::READ | MMPermissions::WRITE,
name: Some("a fake mapping".into()),
system_mapping_info: SystemMappingInfo {
start_address: mmap_addr,
end_address: mmap_addr + memory_size,
},
};
let identifier = vec![
0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE,
0xFF,
];
let entry = MappingEntry {
mapping,
identifier,
};
let identifier = vec![
0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE,
0xFF,
];
let entry = MappingEntry {
mapping,
identifier,
};
let mut tmp = MinidumpWriter::new(pid, pid);
#[cfg(not(any(target_arch = "mips", target_arch = "arm")))]
if context == Context::With {
let crash_context = get_crash_context(pid);
tmp.set_crash_context(crash_context);
let mut tmp = context.minidump_writer(pid);
tmp.set_user_mapping_list(vec![entry])
.dump(&mut tmpfile)
.expect("Could not write minidump");
child.kill().expect("Failed to kill process");
// Reap child
let waitres = child.wait().expect("Failed to wait for child");
let status = waitres.signal().expect("Child did not die due to signal");
assert_eq!(waitres.code(), None);
assert_eq!(status, Signal::SIGKILL as i32);
let dump = Minidump::read_path(tmpfile.path()).expect("Failed to read minidump");
let module_list: MinidumpModuleList = dump
.get_stream()
.expect("Couldn't find stream MinidumpModuleList");
let module = module_list
.module_at_address(mmap_addr as u64)
.expect("Couldn't find user mapping module");
assert_eq!(module.base_address(), mmap_addr as u64);
assert_eq!(module.size(), memory_size as u64);
assert_eq!(module.code_file(), "a fake mapping");
assert_eq!(
module.debug_identifier(),
Some("33221100554477668899AABBCCDDEEFF0".parse().unwrap())
);
let _: MinidumpException = dump.get_stream().expect("Couldn't find MinidumpException");
let _: MinidumpThreadList = dump.get_stream().expect("Couldn't find MinidumpThreadList");
let _: MinidumpMemoryList = dump.get_stream().expect("Couldn't find MinidumpMemoryList");
let _: MinidumpSystemInfo = dump.get_stream().expect("Couldn't find MinidumpSystemInfo");
let _ = dump
.get_raw_stream(LinuxCpuInfo as u32)
.expect("Couldn't find LinuxCpuInfo");
let _ = dump
.get_raw_stream(LinuxProcStatus as u32)
.expect("Couldn't find LinuxProcStatus");
let _ = dump
.get_raw_stream(LinuxCmdLine as u32)
.expect("Couldn't find LinuxCmdLine");
let _ = dump
.get_raw_stream(LinuxEnviron as u32)
.expect("Couldn't find LinuxEnviron");
let _ = dump
.get_raw_stream(LinuxAuxv as u32)
.expect("Couldn't find LinuxAuxv");
let _ = dump
.get_raw_stream(LinuxMaps as u32)
.expect("Couldn't find LinuxMaps");
let _ = dump
.get_raw_stream(LinuxDsoDebug as u32)
.expect("Couldn't find LinuxDsoDebug");
}
tmp.set_user_mapping_list(vec![entry])
.dump(&mut tmpfile)
.expect("Could not write minidump");
fn test_write_with_additional_memory(context: Context) {
let mut child = start_child_and_return(&["spawn_alloc_wait"]);
let pid = child.id() as i32;
child.kill().expect("Failed to kill process");
// Reap child
let waitres = child.wait().expect("Failed to wait for child");
let status = waitres.signal().expect("Child did not die due to signal");
assert_eq!(waitres.code(), None);
assert_eq!(status, Signal::SIGKILL as i32);
let mut tmpfile = tempfile::Builder::new()
.prefix("additional_memory")
.tempfile()
.unwrap();
let dump = Minidump::read_path(tmpfile.path()).expect("Failed to read minidump");
let module_list: MinidumpModuleList = dump
.get_stream()
.expect("Couldn't find stream MinidumpModuleList");
let module = module_list
.module_at_address(mmap_addr as u64)
.expect("Couldn't find user mapping module");
assert_eq!(module.base_address(), mmap_addr as u64);
assert_eq!(module.size(), memory_size as u64);
assert_eq!(module.code_file(), "a fake mapping");
assert_eq!(
module.debug_identifier(),
Some("33221100554477668899AABBCCDDEEFF0".parse().unwrap())
);
let mut f = BufReader::new(child.stdout.as_mut().expect("Can't open stdout"));
let mut buf = String::new();
let _ = f
.read_line(&mut buf)
.expect("Couldn't read address provided by child");
let mut output = buf.split_whitespace();
let memory_addr = usize::from_str_radix(output.next().unwrap().trim_start_matches("0x"), 16)
.expect("unable to parse mmap_addr");
let memory_size = output
.next()
.unwrap()
.parse()
.expect("unable to parse memory_size");
let _: MinidumpException = dump.get_stream().expect("Couldn't find MinidumpException");
let _: MinidumpThreadList = dump.get_stream().expect("Couldn't find MinidumpThreadList");
let _: MinidumpMemoryList = dump.get_stream().expect("Couldn't find MinidumpMemoryList");
let _: MinidumpSystemInfo = dump.get_stream().expect("Couldn't find MinidumpSystemInfo");
let _ = dump
.get_raw_stream(LinuxCpuInfo as u32)
.expect("Couldn't find LinuxCpuInfo");
let _ = dump
.get_raw_stream(LinuxProcStatus as u32)
.expect("Couldn't find LinuxProcStatus");
let _ = dump
.get_raw_stream(LinuxCmdLine as u32)
.expect("Couldn't find LinuxCmdLine");
let _ = dump
.get_raw_stream(LinuxEnviron as u32)
.expect("Couldn't find LinuxEnviron");
let _ = dump
.get_raw_stream(LinuxAuxv as u32)
.expect("Couldn't find LinuxAuxv");
let _ = dump
.get_raw_stream(LinuxMaps as u32)
.expect("Couldn't find LinuxMaps");
let _ = dump
.get_raw_stream(LinuxDsoDebug as u32)
.expect("Couldn't find LinuxDsoDebug");
}
let app_memory = AppMemory {
ptr: memory_addr,
length: memory_size,
};
#[test]
fn test_write_and_read_dump_from_parent() {
test_write_and_read_dump_from_parent_helper(Context::Without)
}
let mut tmp = context.minidump_writer(pid);
#[cfg(not(any(target_arch = "mips", target_arch = "arm")))]
#[test]
fn test_write_and_read_dump_from_parent_with_context() {
test_write_and_read_dump_from_parent_helper(Context::With)
}
tmp.set_app_memory(vec![app_memory])
.dump(&mut tmpfile)
.expect("Could not write minidump");
fn test_write_with_additional_memory_helper(context: Context) {
let mut child = start_child_and_return(&["spawn_alloc_wait"]);
let pid = child.id() as i32;
child.kill().expect("Failed to kill process");
// Reap child
let waitres = child.wait().expect("Failed to wait for child");
let status = waitres.signal().expect("Child did not die due to signal");
assert_eq!(waitres.code(), None);
assert_eq!(status, Signal::SIGKILL as i32);
let mut tmpfile = tempfile::Builder::new()
.prefix("additional_memory")
.tempfile()
.unwrap();
// Read dump file and check its contents
let dump = Minidump::read_path(tmpfile.path()).expect("Failed to read minidump");
let mut f = BufReader::new(child.stdout.as_mut().expect("Can't open stdout"));
let mut buf = String::new();
let _ = f
.read_line(&mut buf)
.expect("Couldn't read address provided by child");
let mut output = buf.split_whitespace();
let memory_addr = usize::from_str_radix(output.next().unwrap().trim_start_matches("0x"), 16)
.expect("unable to parse mmap_addr");
let memory_size = output
.next()
.unwrap()
.parse()
.expect("unable to parse memory_size");
let section: MinidumpMemoryList = dump.get_stream().expect("Couldn't find MinidumpMemoryList");
let region = section
.memory_at_address(memory_addr as u64)
.expect("Couldn't find memory region");
let app_memory = AppMemory {
ptr: memory_addr,
length: memory_size,
};
assert_eq!(region.base_address, memory_addr as u64);
assert_eq!(region.size, memory_size as u64);
let mut tmp = MinidumpWriter::new(pid, pid);
#[cfg(not(any(target_arch = "mips", target_arch = "arm")))]
if context == Context::With {
let crash_context = get_crash_context(pid);
tmp.set_crash_context(crash_context);
let mut values = Vec::<u8>::with_capacity(memory_size);
for idx in 0..memory_size {
values.push((idx % 255) as u8);
}
// Verify memory contents.
assert_eq!(region.bytes, values);
}
tmp.set_app_memory(vec![app_memory])
.dump(&mut tmpfile)
.expect("Could not write minidump");
fn test_skip_if_requested(context: Context) {
let num_of_threads = 1;
let mut child = start_child_and_wait_for_threads(num_of_threads);
let pid = child.id() as i32;
child.kill().expect("Failed to kill process");
// Reap child
let waitres = child.wait().expect("Failed to wait for child");
let status = waitres.signal().expect("Child did not die due to signal");
assert_eq!(waitres.code(), None);
assert_eq!(status, Signal::SIGKILL as i32);
let mut tmpfile = tempfile::Builder::new()
.prefix("skip_if_requested")
.tempfile()
.unwrap();
// Read dump file and check its contents
let dump = Minidump::read_path(tmpfile.path()).expect("Failed to read minidump");
let mut tmp = context.minidump_writer(pid);
let section: MinidumpMemoryList = dump.get_stream().expect("Couldn't find MinidumpMemoryList");
let region = section
.memory_at_address(memory_addr as u64)
.expect("Couldn't find memory region");
let pr_mapping_addr;
#[cfg(target_pointer_width = "64")]
{
pr_mapping_addr = 0x0102030405060708;
}
#[cfg(target_pointer_width = "32")]
{
pr_mapping_addr = 0x010203040;
};
let res = tmp
.skip_stacks_if_mapping_unreferenced()
.set_principal_mapping_address(pr_mapping_addr)
.dump(&mut tmpfile);
child.kill().expect("Failed to kill process");
assert_eq!(region.base_address, memory_addr as u64);
assert_eq!(region.size, memory_size as u64);
// Reap child
let waitres = child.wait().expect("Failed to wait for child");
let status = waitres.signal().expect("Child did not die due to signal");
assert_eq!(waitres.code(), None);
assert_eq!(status, Signal::SIGKILL as i32);
let mut values = Vec::<u8>::with_capacity(memory_size);
for idx in 0..memory_size {
values.push((idx % 255) as u8);
assert!(res.is_err());
}
// Verify memory contents.
assert_eq!(region.bytes, values);
}
fn test_sanitized_stacks(context: Context) {
if context == Context::With {
// FIXME the context's stack pointer very often doesn't lie in mapped memory, resulting
// in the stack memory having 0 size (so no slice will match `defaced` in the
// assertion).
return;
}
let num_of_threads = 1;
let mut child = start_child_and_wait_for_threads(num_of_threads);
let pid = child.id() as i32;
#[test]
fn test_write_with_additional_memory() {
test_write_with_additional_memory_helper(Context::Without)
}
let mut tmpfile = tempfile::Builder::new()
.prefix("sanitized_stacks")
.tempfile()
.unwrap();
#[cfg(not(any(target_arch = "mips", target_arch = "arm")))]
#[test]
fn test_write_with_additional_memory_with_context() {
test_write_with_additional_memory_helper(Context::With)
let mut tmp = context.minidump_writer(pid);
tmp.sanitize_stack()
.dump(&mut tmpfile)
.expect("Faild to dump minidump");
child.kill().expect("Failed to kill process");
// Reap child
let waitres = child.wait().expect("Failed to wait for child");
let status = waitres.signal().expect("Child did not die due to signal");
assert_eq!(waitres.code(), None);
assert_eq!(status, Signal::SIGKILL as i32);
// Read dump file and check its contents
let dump = Minidump::read_path(tmpfile.path()).expect("Failed to read minidump");
let dump_array = std::fs::read(tmpfile.path()).expect("Failed to read minidump as vec");
let thread_list: MinidumpThreadList =
dump.get_stream().expect("Couldn't find MinidumpThreadList");
let defaced;
#[cfg(target_pointer_width = "64")]
{
defaced = 0x0defaced0defacedusize.to_ne_bytes();
}
#[cfg(target_pointer_width = "32")]
{
defaced = 0x0defacedusize.to_ne_bytes()
};
for thread in thread_list.threads {
let mem = thread.raw.stack.memory;
let start = mem.rva as usize;
let end = (mem.rva + mem.data_size) as usize;
let slice = &dump_array.as_slice()[start..end];
assert!(slice.windows(defaced.len()).any(|window| window == defaced));
}
}
fn test_write_early_abort(context: Context) {
let mut child = start_child_and_return(&["spawn_alloc_wait"]);
let pid = child.id() as i32;
let mut tmpfile = tempfile::Builder::new()
.prefix("additional_memory")
.tempfile()
.unwrap();
let mut f = BufReader::new(child.stdout.as_mut().expect("Can't open stdout"));
let mut buf = String::new();
let _ = f
.read_line(&mut buf)
.expect("Couldn't read address provided by child");
let mut output = buf.split_whitespace();
// We do not read the actual memory_address, but use NULL, which
// should create an error during dumping and lead to a truncated minidump.
let _ = usize::from_str_radix(output.next().unwrap().trim_start_matches("0x"), 16)
.expect("unable to parse mmap_addr");
let memory_addr = 0;
let memory_size = output
.next()
.unwrap()
.parse()
.expect("unable to parse memory_size");
let app_memory = AppMemory {
ptr: memory_addr,
length: memory_size,
};
let mut tmp = context.minidump_writer(pid);
// This should fail, because during the dump an error is detected (try_from fails)
match tmp.set_app_memory(vec![app_memory]).dump(&mut tmpfile) {
Err(WriterError::SectionAppMemoryError(_)) => (),
_ => panic!("Wrong kind of error returned"),
}
child.kill().expect("Failed to kill process");
// Reap child
let waitres = child.wait().expect("Failed to wait for child");
let status = waitres.signal().expect("Child did not die due to signal");
assert_eq!(waitres.code(), None);
assert_eq!(status, Signal::SIGKILL as i32);
// Read dump file and check its contents. There should be a truncated minidump available
let dump = Minidump::read_path(tmpfile.path()).expect("Failed to read minidump");
// Should be there
let _: MinidumpThreadList = dump.get_stream().expect("Couldn't find MinidumpThreadList");
let _: MinidumpModuleList = dump.get_stream().expect("Couldn't find MinidumpModuleList");
// Should be missing:
assert!(dump.get_stream::<MinidumpMemoryList>().is_err());
}
fn test_named_threads(context: Context) {
let num_of_threads = 5;
let mut child = start_child_and_wait_for_named_threads(num_of_threads);
let pid = child.id() as i32;
let mut tmpfile = tempfile::Builder::new()
.prefix("named_threads")
.tempfile()
.unwrap();
let mut tmp = context.minidump_writer(pid);
let _ = tmp.dump(&mut tmpfile).expect("Could not write minidump");
child.kill().expect("Failed to kill process");
// Reap child
let waitres = child.wait().expect("Failed to wait for child");
let status = waitres.signal().expect("Child did not die due to signal");
assert_eq!(waitres.code(), None);
assert_eq!(status, Signal::SIGKILL as i32);
// Read dump file and check its contents
let dump = Minidump::read_path(tmpfile.path()).expect("Failed to read minidump");
let threads: MinidumpThreadList = dump.get_stream().expect("Couldn't find MinidumpThreadList");
let thread_names: MinidumpThreadNames = dump
.get_stream()
.expect("Couldn't find MinidumpThreadNames");
let thread_ids: Vec<_> = threads.threads.iter().map(|t| t.raw.thread_id).collect();
let names: HashSet<_> = thread_ids
.iter()
.map(|id| thread_names.get_name(*id).unwrap_or_default())
.map(|cow| cow.into_owned())
.collect();
let mut expected = HashSet::new();
expected.insert("test".to_string());
for id in 1..num_of_threads {
expected.insert(format!("thread_{}", id));
}
assert_eq!(expected, names);
}
}
#[test]
@ -356,14 +535,14 @@ fn test_minidump_size_limit() {
// large enough value -- the limit-checking code in minidump_writer.rs
// does just a rough estimate.
// TODO: Fix this properly
// There are occasionally CI failures where the sizes are off by 1 due to
// some minor difference in (probably) a string somewhere in the dump
// since the state capture is not going to be 100% the same
//assert_eq!(meta.len(), normal_file_size);
let min = std::cmp::min(meta.len(), normal_file_size);
let max = std::cmp::max(meta.len(), normal_file_size);
assert!(max - min < 10);
// Setting a stack limit limits the size of non-main stacks even before
// the limit is reached. This will cause slight variations in size
// between a limited and an unlimited minidump.
assert!(max - min < 1024, "max = {max:} min = {min:}");
}
// Third, write a minidump with a size limit small enough to be triggered.
@ -528,248 +707,24 @@ fn test_with_deleted_binary() {
assert_eq!(main_module.debug_identifier(), filtered.parse().ok());
}
fn test_skip_if_requested_helper(context: Context) {
let num_of_threads = 1;
let mut child = start_child_and_wait_for_threads(num_of_threads);
#[test]
fn test_memory_info_list_stream() {
let mut child = start_child_and_wait_for_threads(1);
let pid = child.id() as i32;
let mut tmpfile = tempfile::Builder::new()
.prefix("skip_if_requested")
.prefix("memory_info_list_stream")
.tempfile()
.unwrap();
let mut tmp = MinidumpWriter::new(pid, pid);
#[cfg(not(any(target_arch = "mips", target_arch = "arm")))]
if context == Context::With {
let crash_context = get_crash_context(pid);
tmp.set_crash_context(crash_context);
}
let pr_mapping_addr;
#[cfg(target_pointer_width = "64")]
{
pr_mapping_addr = 0x0102030405060708;
}
#[cfg(target_pointer_width = "32")]
{
pr_mapping_addr = 0x01020304;
};
let res = tmp
.skip_stacks_if_mapping_unreferenced()
.set_principal_mapping_address(pr_mapping_addr)
.dump(&mut tmpfile);
child.kill().expect("Failed to kill process");
// Reap child
let waitres = child.wait().expect("Failed to wait for child");
let status = waitres.signal().expect("Child did not die due to signal");
assert_eq!(waitres.code(), None);
assert_eq!(status, Signal::SIGKILL as i32);
assert!(res.is_err());
}
#[test]
fn test_skip_if_requested() {
test_skip_if_requested_helper(Context::Without)
}
#[cfg(not(any(target_arch = "mips", target_arch = "arm")))]
#[test]
fn test_skip_if_requested_with_context() {
test_skip_if_requested_helper(Context::With)
}
fn test_sanitized_stacks_helper(context: Context) {
let num_of_threads = 1;
let mut child = start_child_and_wait_for_threads(num_of_threads);
let pid = child.id() as i32;
let mut tmpfile = tempfile::Builder::new()
.prefix("skip_if_requested")
.tempfile()
.unwrap();
let mut tmp = MinidumpWriter::new(pid, pid);
#[cfg(not(any(target_arch = "mips", target_arch = "arm")))]
if context == Context::With {
let crash_context = get_crash_context(pid);
tmp.set_crash_context(crash_context);
}
tmp.sanitize_stack()
// Write a minidump
MinidumpWriter::new(pid, pid)
.dump(&mut tmpfile)
.expect("Faild to dump minidump");
.expect("cound not write minidump");
child.kill().expect("Failed to kill process");
// Reap child
let waitres = child.wait().expect("Failed to wait for child");
let status = waitres.signal().expect("Child did not die due to signal");
assert_eq!(waitres.code(), None);
assert_eq!(status, Signal::SIGKILL as i32);
// Read dump file and check its contents
let dump = Minidump::read_path(tmpfile.path()).expect("Failed to read minidump");
let dump_array = std::fs::read(tmpfile.path()).expect("Failed to read minidump as vec");
let thread_list: MinidumpThreadList =
dump.get_stream().expect("Couldn't find MinidumpThreadList");
let defaced;
#[cfg(target_pointer_width = "64")]
{
defaced = 0x0defaced0defacedusize.to_ne_bytes();
}
#[cfg(target_pointer_width = "32")]
{
defaced = 0x0defacedusize.to_ne_bytes()
};
for thread in thread_list.threads {
let mem = thread.raw.stack.memory;
let start = mem.rva as usize;
let end = (mem.rva + mem.data_size) as usize;
let slice = &dump_array.as_slice()[start..end];
assert!(slice.windows(defaced.len()).any(|window| window == defaced));
}
}
#[test]
fn test_sanitized_stacks() {
test_sanitized_stacks_helper(Context::Without)
}
#[cfg(not(any(target_arch = "mips", target_arch = "arm")))]
#[test]
fn test_sanitized_stacks_with_context() {
test_sanitized_stacks_helper(Context::With)
}
fn test_write_early_abort_helper(context: Context) {
let mut child = start_child_and_return(&["spawn_alloc_wait"]);
let pid = child.id() as i32;
let mut tmpfile = tempfile::Builder::new()
.prefix("additional_memory")
.tempfile()
.unwrap();
let mut f = BufReader::new(child.stdout.as_mut().expect("Can't open stdout"));
let mut buf = String::new();
let _ = f
.read_line(&mut buf)
.expect("Couldn't read address provided by child");
let mut output = buf.split_whitespace();
// We do not read the actual memory_address, but use NULL, which
// should create an error during dumping and lead to a truncated minidump.
let _ = usize::from_str_radix(output.next().unwrap().trim_start_matches("0x"), 16)
.expect("unable to parse mmap_addr");
let memory_addr = 0;
let memory_size = output
.next()
.unwrap()
.parse()
.expect("unable to parse memory_size");
let app_memory = AppMemory {
ptr: memory_addr,
length: memory_size,
};
let mut tmp = MinidumpWriter::new(pid, pid);
#[cfg(not(any(target_arch = "mips", target_arch = "arm")))]
if context == Context::With {
let crash_context = get_crash_context(pid);
tmp.set_crash_context(crash_context);
}
// This should fail, because during the dump an error is detected (try_from fails)
match tmp.set_app_memory(vec![app_memory]).dump(&mut tmpfile) {
Err(WriterError::SectionAppMemoryError(_)) => (),
_ => panic!("Wrong kind of error returned"),
}
child.kill().expect("Failed to kill process");
// Reap child
let waitres = child.wait().expect("Failed to wait for child");
let status = waitres.signal().expect("Child did not die due to signal");
assert_eq!(waitres.code(), None);
assert_eq!(status, Signal::SIGKILL as i32);
// Read dump file and check its contents. There should be a truncated minidump available
let dump = Minidump::read_path(tmpfile.path()).expect("Failed to read minidump");
// Should be there
let _: MinidumpThreadList = dump.get_stream().expect("Couldn't find MinidumpThreadList");
let _: MinidumpModuleList = dump.get_stream().expect("Couldn't find MinidumpModuleList");
// Should be missing:
assert!(dump.get_stream::<MinidumpMemoryList>().is_err());
}
#[test]
fn test_write_early_abort() {
test_write_early_abort_helper(Context::Without)
}
#[cfg(not(any(target_arch = "mips", target_arch = "arm")))]
#[test]
fn test_write_early_abort_with_context() {
test_write_early_abort_helper(Context::With)
}
fn test_named_threads_helper(context: Context) {
let num_of_threads = 5;
let mut child = start_child_and_wait_for_named_threads(num_of_threads);
let pid = child.id() as i32;
let mut tmpfile = tempfile::Builder::new()
.prefix("named_threads")
.tempfile()
.unwrap();
let mut tmp = MinidumpWriter::new(pid, pid);
#[cfg(not(any(target_arch = "mips", target_arch = "arm")))]
if context == Context::With {
let crash_context = get_crash_context(pid);
tmp.set_crash_context(crash_context);
}
let _ = tmp.dump(&mut tmpfile).expect("Could not write minidump");
child.kill().expect("Failed to kill process");
// Reap child
let waitres = child.wait().expect("Failed to wait for child");
let status = waitres.signal().expect("Child did not die due to signal");
assert_eq!(waitres.code(), None);
assert_eq!(status, Signal::SIGKILL as i32);
// Read dump file and check its contents
let dump = Minidump::read_path(tmpfile.path()).expect("Failed to read minidump");
let threads: MinidumpThreadList = dump.get_stream().expect("Couldn't find MinidumpThreadList");
let thread_names: MinidumpThreadNames = dump
.get_stream()
.expect("Couldn't find MinidumpThreadNames");
let thread_ids: Vec<_> = threads.threads.iter().map(|t| t.raw.thread_id).collect();
let names: HashSet<_> = thread_ids
.iter()
.map(|id| thread_names.get_name(*id).unwrap_or_default())
.map(|cow| cow.into_owned())
.collect();
let mut expected = HashSet::new();
expected.insert("test".to_string());
for id in 1..num_of_threads {
expected.insert(format!("thread_{}", id));
}
assert_eq!(expected, names);
}
#[test]
fn test_named_threads() {
test_named_threads_helper(Context::Without)
}
#[cfg(not(any(target_arch = "mips", target_arch = "arm")))]
#[test]
fn test_named_threads_with_context() {
test_named_threads_helper(Context::With)
// Ensure the minidump has a MemoryInfoListStream present with more than one entry.
let dump = Minidump::read_path(tmpfile.path()).expect("failed to read minidump");
let list: MinidumpMemoryInfoList = dump.get_stream().expect("no memory info list");
assert!(list.iter().count() > 1);
}

View File

@ -182,10 +182,9 @@ fn stackwalks() {
)
.expect("failed to dump symbols");
let provider =
minidump_processor::Symbolizer::new(minidump_processor::simple_symbol_supplier(vec![
".test-symbols".into(),
]));
let provider = minidump_unwind::Symbolizer::new(minidump_unwind::simple_symbol_supplier(vec![
".test-symbols".into(),
]));
let state = futures::executor::block_on(async {
minidump_processor::process_minidump(&md.minidump, &provider).await

View File

@ -39,9 +39,10 @@ fn test_thread_list_from_parent() {
let info = dumper
.get_thread_info_by_index(idx)
.expect("Could not get thread info by index");
let (_stack_ptr, stack_len) = dumper
let (_valid_stack_ptr, stack_len) = dumper
.get_stack_info(info.stack_pointer)
.expect("Could not get stack_pointer");
assert!(stack_len > 0);
// TODO: I currently know of no way to write the thread_id into the registers using Rust,
@ -259,7 +260,7 @@ fn test_sanitize_stack_copy() {
let mapping_info = dumper
.find_mapping_no_bias(instr_ptr)
.expect("Failed to find mapping info");
assert!(mapping_info.executable);
assert!(mapping_info.is_executable());
// Pointers to code shouldn't be sanitized.
simulated_stack = vec![0u8; 2 * size_of::<usize>()];

View File

@ -0,0 +1 @@
{"files":{"Cargo.toml":"8c6372d8759fa1ab13b1187712e43dbf3065ab32f8bf5340b6bc24b90f94748e","README.md":"ac6e93e07291e148ba9e1913ceabee01d8882e0b7e263ee9287b20a2e500f599","src/cgroups.rs":"fda4941006913801ba0653d3b04924fd6e7cbebf5759814eef4e1176a94b4e91","src/cpuinfo.rs":"2ae02c7183d3ac50625a1958c447fa6d7da351c94e43fec388517a8d01718059","src/diskstats.rs":"e554dbaa8772a6e7cb857a052bd69191058a19b5c0aee4340f260ad8d53a6fba","src/iomem.rs":"2abfd4428ad6f4354ebcca210a8b765dc8b07d5e30f060dcf77cf8296f1e4d38","src/keyring.rs":"a7c156895c70454e453b7701b7036820e673aefe08d5a63ee517b65a4e66bf67","src/lib.rs":"c61c522b23743a601c609682a2f95d7db01bfa712cf9843a19b64f6beb6f06da","src/locks.rs":"5c26a0b39a4dae5951ef79b013856db3c69d8b279981065c71100a9e216a566a","src/meminfo.rs":"5bf226a7773c94db47bd53cbf642b15e72d67292429d20d7fbcd14c891c32622","src/mounts.rs":"181ae262cdd3cf08203fe73f2cd397da8b3db009c0a0c8acb6b22da13149fa67","src/net.rs":"4a465e3c0134286234811bac8f0b138eae52ee98a14d3eaa4ed16b9e60a97a79","src/pressure.rs":"33a22c29d7f5c01603ad515e102d594235d01d176213668da547ee5634558992","src/process/clear_refs.rs":"59c885bfb839b72e6c2234c148a977146da02af8aadc6d7d24b5936aa42e4ebe","src/process/limit.rs":"3f449b0b266418099f3003c65b04c468a4c92845350aa35dd28cc13d7b7f656e","src/process/mod.rs":"2bd84aadb16a04b7c3ff48b9395ba36b85de979543a90d136c8d63aef0bd004d","src/process/mount.rs":"4ed4c468bd0f37c012b17154d69c214a46e52f059dd9121e07d2d29561fe52ee","src/process/namespaces.rs":"36b65fce5e1554dcce9019acb4cdc0dcc47420ef4336a7d9d97d8fc455ab3297","src/process/pagemap.rs":"0f1a7908dba734a26fbe044510f61c0df7ad00dbfcba6a2279064d2ca7aa8278","src/process/schedstat.rs":"cebbd598e03a765bcf59a22989e846fa78d660c4cb87623d07e594849cf0cf66","src/process/smaps_rollup.rs":"8d7d2c69fc6d54856d240a14ed6dfa4dcc92b8b29c3139fe651d18f2011a7575","src/process/stat.rs":"13ead27893f6371e38a859a1d1f101dcded0d074092fac4e871bcb7b625bc4c8","src/process/status.rs":"81de4bd29df76aef0792e23f4f802fff1134ada059f2f4293d9a771a23aa5f02","src/sys/kernel/mod.rs":"54437f0a32eba1e6eaffa11ad041a2e914a16726cb2904af61131dbaa4bae31c","src/sys/mod.rs":"a7b744630e859005307046558f5e827e07b73cb0b5f1dd89acb02f21059e775c","src/sysvipc_shm.rs":"794b2ed9d2b20d25c6da220d6c7ac0b1b54219c9ebb0a1bf6ff9a11dd091f2a7","src/uptime.rs":"ffea251a2ba4da2a311b6a51606102771e55b2db1e33ced5b3569dde319b264b"},"package":"8ee00a90a41543fce203e6a8771bad043bfd6d88de8fd4e3118435a233d0c3c4"}

63
third_party/rust/procfs-core/Cargo.toml vendored Normal file
View File

@ -0,0 +1,63 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2018"
rust-version = "1.48"
name = "procfs-core"
version = "0.16.0-RC1"
authors = ["Andrew Chin <achin@eminence32.net>"]
description = "Data structures and parsing for the linux procfs pseudo-filesystem"
documentation = "https://docs.rs/procfs-core/"
readme = "README.md"
keywords = [
"procfs",
"proc",
"linux",
"process",
]
categories = [
"os::unix-apis",
"filesystem",
]
license = "MIT OR Apache-2.0"
repository = "https://github.com/eminence/procfs"
[package.metadata.docs.rs]
all-features = true
[dependencies.backtrace]
version = "0.3"
optional = true
[dependencies.bitflags]
version = "2"
[dependencies.chrono]
version = "0.4.20"
features = ["clock"]
optional = true
default-features = false
[dependencies.hex]
version = "0.4"
[dependencies.serde]
version = "1.0"
features = ["derive"]
optional = true
[features]
default = ["chrono"]
serde1 = [
"serde",
"bitflags/serde",
]

109
third_party/rust/procfs-core/README.md vendored Normal file
View File

@ -0,0 +1,109 @@
procfs
======
[![Crate](https://img.shields.io/crates/v/procfs.svg)](https://crates.io/crates/procfs)
[![Docs](https://docs.rs/procfs/badge.svg)](https://docs.rs/procfs)
[![Minimum rustc version](https://img.shields.io/badge/rustc-1.48+-lightgray.svg)](https://github.com/eminence/procfs#minimum-rust-version)
This crate is an interface to the `proc` pseudo-filesystem on linux, which is normally mounted as `/proc`.
Long-term, this crate aims to be fairly feature complete, but at the moment not all files are exposed.
See the docs for info on what's supported, or view the [support.md](https://github.com/eminence/procfs/blob/master/support.md)
file in the code repository.
## Examples
There are several examples in the docs and in the [examples folder](https://github.com/eminence/procfs/tree/master/procfs/examples)
of the code repository.
Here's a small example that prints out all processes that are running on the same tty as the calling
process. This is very similar to what "ps" does in its default mode:
```rust
fn main() {
let me = procfs::process::Process::myself().unwrap();
let me_stat = me.stat().unwrap();
let tps = procfs::ticks_per_second().unwrap();
println!("{: >5} {: <8} {: >8} {}", "PID", "TTY", "TIME", "CMD");
let tty = format!("pty/{}", me_stat.tty_nr().1);
for prc in procfs::process::all_processes().unwrap() {
let prc = prc.unwrap();
let stat = prc.stat().unwrap();
if stat.tty_nr == me_stat.tty_nr {
// total_time is in seconds
let total_time =
(stat.utime + stat.stime) as f32 / (tps as f32);
println!(
"{: >5} {: <8} {: >8} {}",
stat.pid, tty, total_time, stat.comm
);
}
}
}
```
Here's another example that shows how to get the current memory usage of the current process:
```rust
use procfs::process::Process;
fn main() {
let me = Process::myself().unwrap();
let me_stat = me.stat().unwrap();
println!("PID: {}", me.pid);
let page_size = procfs::page_size();
println!("Memory page size: {}", page_size);
println!("== Data from /proc/self/stat:");
println!("Total virtual memory used: {} bytes", me_stat.vsize);
println!(
"Total resident set: {} pages ({} bytes)",
me_stat.rss,
me_stat.rss * page_size
);
}
```
There are a few ways to get this data, so also checkout the longer
[self_memory](https://github.com/eminence/procfs/blob/master/procfs/examples/self_memory.rs) example for more
details.
## Cargo features
The following cargo features are available:
* `chrono` -- Default. Optional. This feature enables a few methods that return values as `DateTime` objects.
* `flate2` -- Default. Optional. This feature enables parsing gzip compressed `/proc/config.gz` file via the `procfs::kernel_config` method.
* `backtrace` -- Optional. This feature lets you get a stack trace whenever an `InternalError` is raised.
* `serde1` -- Optional. This feature allows most structs to be serialized and deserialized using serde 1.0. Note, this
feature requires a version of rust newer than 1.48.0 (which is the MSRV for procfs). The exact version required is not
specified here, since serde does not have an MSRV policy. See the example manifest snippet below.
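As an illustrative sketch (not taken from the upstream documentation), a dependent crate might enable the optional `serde1` feature like this in its `Cargo.toml`; the version number is a placeholder:
```toml
[dependencies]
procfs = { version = "0.16", features = ["serde1"] }
```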
## Minimum Rust Version
This crate is only tested against the latest stable rustc compiler, but may
work with older compilers. See [msrv.md](msrv.md) for more details.
## License
The procfs library is licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
For additional copyright information regarding documentation, please also see the COPYRIGHT.txt file.
### Contribution
Contributions are welcome, especially in the areas of documentation and testing on older kernels.
Unless you explicitly state otherwise, any contribution intentionally
submitted for inclusion in the work by you, as defined in the Apache-2.0
license, shall be dual licensed as above, without any additional terms or
conditions.

View File

@ -0,0 +1,133 @@
use crate::ProcResult;
#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
use std::io::BufRead;
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
/// Container group controller information.
pub struct CGroupController {
/// The name of the controller.
pub name: String,
/// The unique ID of the cgroup hierarchy on which this controller is mounted.
///
/// If multiple cgroups v1 controllers are bound to the same hierarchy, then each will show
/// the same hierarchy ID in this field. The value in this field will be 0 if:
///
/// * the controller is not mounted on a cgroups v1 hierarchy;
/// * the controller is bound to the cgroups v2 single unified hierarchy; or
/// * the controller is disabled (see below).
pub hierarchy: u32,
/// The number of control groups in this hierarchy using this controller.
pub num_cgroups: u32,
/// This field contains the value `true` if this controller is enabled, or `false` if it has been disabled
pub enabled: bool,
}
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
/// Container group controller information.
// This contains a vector, but if each subsystem name is unique, maybe this can be a
// hashmap instead
pub struct CGroupControllers(pub Vec<CGroupController>);
impl crate::FromBufRead for CGroupControllers {
fn from_buf_read<R: BufRead>(reader: R) -> ProcResult<Self> {
let mut vec = Vec::new();
for line in reader.lines() {
let line = line?;
if line.starts_with('#') {
continue;
}
let mut s = line.split_whitespace();
let name = expect!(s.next(), "name").to_owned();
let hierarchy = from_str!(u32, expect!(s.next(), "hierarchy"));
let num_cgroups = from_str!(u32, expect!(s.next(), "num_cgroups"));
let enabled = expect!(s.next(), "enabled") == "1";
vec.push(CGroupController {
name,
hierarchy,
num_cgroups,
enabled,
});
}
Ok(CGroupControllers(vec))
}
}
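// Illustrative sketch (not part of the upstream crate): parsing a small,
// made-up /proc/cgroups-style buffer with the `FromBufRead` implementation above.
//
//     use crate::FromBufRead;
//     use std::io::Cursor;
//
//     let data = "#subsys_name\thierarchy\tnum_cgroups\tenabled\ncpuset\t12\t35\t1\n";
//     let controllers = CGroupControllers::from_buf_read(Cursor::new(data)).unwrap();
//     assert_eq!(controllers.0[0].name, "cpuset");
//     assert!(controllers.0[0].enabled);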
/// Information about a process cgroup
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct ProcessCGroup {
/// For cgroups version 1 hierarchies, this field contains a unique hierarchy ID number
/// that can be matched to a hierarchy ID in /proc/cgroups. For the cgroups version 2
/// hierarchy, this field contains the value 0.
pub hierarchy: u32,
/// For cgroups version 1 hierarchies, this field contains a comma-separated list of the
/// controllers bound to the hierarchy.
///
/// For the cgroups version 2 hierarchy, this field is empty.
pub controllers: Vec<String>,
/// This field contains the pathname of the control group in the hierarchy to which the process
/// belongs.
///
/// This pathname is relative to the mount point of the hierarchy.
pub pathname: String,
}
/// Information about process cgroups.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct ProcessCGroups(pub Vec<ProcessCGroup>);
impl crate::FromBufRead for ProcessCGroups {
fn from_buf_read<R: BufRead>(reader: R) -> ProcResult<Self> {
let mut vec = Vec::new();
for line in reader.lines() {
let line = line?;
if line.starts_with('#') {
continue;
}
let mut s = line.splitn(3, ':');
let hierarchy = from_str!(u32, expect!(s.next(), "hierarchy"));
let controllers = expect!(s.next(), "controllers")
.split(',')
.map(|s| s.to_owned())
.collect();
let pathname = expect!(s.next(), "path").to_owned();
vec.push(ProcessCGroup {
hierarchy,
controllers,
pathname,
});
}
Ok(ProcessCGroups(vec))
}
}
impl IntoIterator for ProcessCGroups {
type IntoIter = std::vec::IntoIter<ProcessCGroup>;
type Item = ProcessCGroup;
fn into_iter(self) -> Self::IntoIter {
self.0.into_iter()
}
}
impl<'a> IntoIterator for &'a ProcessCGroups {
type IntoIter = std::slice::Iter<'a, ProcessCGroup>;
type Item = &'a ProcessCGroup;
fn into_iter(self) -> Self::IntoIter {
self.0.iter()
}
}

View File

@ -0,0 +1,211 @@
use crate::{expect, ProcResult};
#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
use std::{collections::HashMap, io::BufRead};
/// Represents the data from `/proc/cpuinfo`.
///
/// The `fields` field stores the fields that are common among all CPUs. The `cpus` field stores
/// CPU-specific info.
///
/// For common fields, there are methods that will return the data, converted to a more appropriate
/// data type. These methods will all return `None` if the field doesn't exist, or is in some
/// unexpected format (in that case, you'll have to access the string data directly).
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct CpuInfo {
/// This stores fields that are common among all CPUs
pub fields: HashMap<String, String>,
pub cpus: Vec<HashMap<String, String>>,
}
impl crate::FromBufRead for CpuInfo {
fn from_buf_read<R: BufRead>(r: R) -> ProcResult<Self> {
let mut list = Vec::new();
let mut map = Some(HashMap::new());
// the first line of a cpu block must start with "processor"
let mut found_first = false;
for line in r.lines().flatten() {
if !line.is_empty() {
let mut s = line.split(':');
let key = expect!(s.next());
if !found_first && key.trim() == "processor" {
found_first = true;
}
if !found_first {
continue;
}
if let Some(value) = s.next() {
let key = key.trim().to_owned();
let value = value.trim().to_owned();
map.get_or_insert(HashMap::new()).insert(key, value);
}
} else if let Some(map) = map.take() {
list.push(map);
found_first = false;
}
}
if let Some(map) = map.take() {
list.push(map);
}
// find properties that are the same for all cpus
assert!(!list.is_empty());
let common_fields: Vec<String> = list[0]
.iter()
.filter_map(|(key, val)| {
if list.iter().all(|map| map.get(key).map_or(false, |v| v == val)) {
Some(key.clone())
} else {
None
}
})
.collect();
let mut common_map = HashMap::new();
for (k, v) in &list[0] {
if common_fields.contains(k) {
common_map.insert(k.clone(), v.clone());
}
}
for map in &mut list {
map.retain(|k, _| !common_fields.contains(k));
}
Ok(CpuInfo {
fields: common_map,
cpus: list,
})
}
}
impl CpuInfo {
/// Get the total number of cpu cores.
///
/// This is the number of entries in the `/proc/cpuinfo` file.
pub fn num_cores(&self) -> usize {
self.cpus.len()
}
/// Get info for a specific cpu.
///
/// This will merge the common fields with the cpu-specific fields.
///
/// Returns None if the requested cpu index is not found.
pub fn get_info(&self, cpu_num: usize) -> Option<HashMap<&str, &str>> {
self.cpus.get(cpu_num).map(|info| {
self.fields
.iter()
.chain(info.iter())
.map(|(k, v)| (k.as_ref(), v.as_ref()))
.collect()
})
}
/// Get the content of a specific field associated to a CPU
///
/// Returns None if the requested cpu index is not found.
pub fn get_field(&self, cpu_num: usize, field_name: &str) -> Option<&str> {
self.cpus.get(cpu_num).and_then(|cpu_fields| {
cpu_fields
.get(field_name)
.or_else(|| self.fields.get(field_name))
.map(|s| s.as_ref())
})
}
pub fn model_name(&self, cpu_num: usize) -> Option<&str> {
self.get_field(cpu_num, "model name")
}
pub fn vendor_id(&self, cpu_num: usize) -> Option<&str> {
self.get_field(cpu_num, "vendor_id")
}
/// May not be available on some older 2.6 kernels
pub fn physical_id(&self, cpu_num: usize) -> Option<u32> {
self.get_field(cpu_num, "physical id").and_then(|s| s.parse().ok())
}
pub fn flags(&self, cpu_num: usize) -> Option<Vec<&str>> {
self.get_field(cpu_num, "flags")
.map(|flags| flags.split_whitespace().collect())
}
}
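// Illustrative sketch (not part of the upstream crate): reading the live
// /proc/cpuinfo through `FromRead` and using the accessors above. Error
// handling is elided for brevity.
//
//     use crate::FromRead;
//
//     let info = CpuInfo::from_read(std::fs::File::open("/proc/cpuinfo").unwrap()).unwrap();
//     for cpu in 0..info.num_cores() {
//         println!("cpu{}: model = {:?}", cpu, info.model_name(cpu));
//     }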
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_cpuinfo_rpi() {
// My rpi system includes some stuff at the end of /proc/cpuinfo that we shouldn't parse
let data = r#"processor : 0
model name : ARMv7 Processor rev 4 (v7l)
BogoMIPS : 38.40
Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae evtstrm crc32
CPU implementer : 0x41
CPU architecture: 7
CPU variant : 0x0
CPU part : 0xd03
CPU revision : 4
processor : 1
model name : ARMv7 Processor rev 4 (v7l)
BogoMIPS : 38.40
Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae evtstrm crc32
CPU implementer : 0x41
CPU architecture: 7
CPU variant : 0x0
CPU part : 0xd03
CPU revision : 4
processor : 2
model name : ARMv7 Processor rev 4 (v7l)
BogoMIPS : 38.40
Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae evtstrm crc32
CPU implementer : 0x41
CPU architecture: 7
CPU variant : 0x0
CPU part : 0xd03
CPU revision : 4
processor : 3
model name : ARMv7 Processor rev 4 (v7l)
BogoMIPS : 38.40
Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae evtstrm crc32
CPU implementer : 0x41
CPU architecture: 7
CPU variant : 0x0
CPU part : 0xd03
CPU revision : 4
Hardware : BCM2835
Revision : a020d3
Serial : 0000000012345678
Model : Raspberry Pi 3 Model B Plus Rev 1.3
"#;
let r = std::io::Cursor::new(data.as_bytes());
use crate::FromRead;
let info = CpuInfo::from_read(r).unwrap();
assert_eq!(info.num_cores(), 4);
let info = info.get_info(0).unwrap();
assert!(info.get("model name").is_some());
assert!(info.get("BogoMIPS").is_some());
assert!(info.get("Features").is_some());
assert!(info.get("CPU implementer").is_some());
assert!(info.get("CPU architecture").is_some());
assert!(info.get("CPU variant").is_some());
assert!(info.get("CPU part").is_some());
assert!(info.get("CPU revision").is_some());
}
}

View File

@ -0,0 +1,156 @@
use crate::{expect, from_str, ProcResult};
#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
use std::io::BufRead;
/// Disk IO stat information
///
/// To fully understand these fields, please see the [iostats.txt](https://www.kernel.org/doc/Documentation/iostats.txt)
/// kernel documentation.
///
/// For an example, see the [diskstats.rs](https://github.com/eminence/procfs/tree/master/examples)
/// example in the source repo.
// Doc reference: https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats
// Doc reference: https://www.kernel.org/doc/Documentation/iostats.txt
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct DiskStat {
/// The device major number
pub major: i32,
/// The device minor number
pub minor: i32,
/// Device name
pub name: String,
/// Reads completed successfully
///
/// This is the total number of reads completed successfully
pub reads: u64,
/// Reads merged
///
/// The number of adjacent reads that have been merged for efficiency.
pub merged: u64,
/// Sectors read successfully
///
/// This is the total number of sectors read successfully.
pub sectors_read: u64,
/// Time spent reading (ms)
pub time_reading: u64,
/// writes completed
pub writes: u64,
/// writes merged
///
/// The number of adjacent writes that have been merged for efficiency.
pub writes_merged: u64,
/// Sectors written successfully
pub sectors_written: u64,
/// Time spent writing (ms)
pub time_writing: u64,
/// I/Os currently in progress
pub in_progress: u64,
/// Time spent doing I/Os (ms)
pub time_in_progress: u64,
/// Weighted time spent doing I/Os (ms)
pub weighted_time_in_progress: u64,
/// Discards completed successfully
///
/// (since kernel 4.18)
pub discards: Option<u64>,
/// Discards merged
pub discards_merged: Option<u64>,
/// Sectors discarded
pub sectors_discarded: Option<u64>,
/// Time spent discarding
pub time_discarding: Option<u64>,
/// Flush requests completed successfully
///
/// (since kernel 5.5)
pub flushes: Option<u64>,
/// Time spent flushing
pub time_flushing: Option<u64>,
}
/// A list of disk stats.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct DiskStats(pub Vec<DiskStat>);
impl crate::FromBufRead for DiskStats {
fn from_buf_read<R: BufRead>(r: R) -> ProcResult<Self> {
let mut v = Vec::new();
for line in r.lines() {
let line = line?;
v.push(DiskStat::from_line(&line)?);
}
Ok(DiskStats(v))
}
}
impl DiskStat {
pub fn from_line(line: &str) -> ProcResult<DiskStat> {
let mut s = line.split_whitespace();
let major = from_str!(i32, expect!(s.next()));
let minor = from_str!(i32, expect!(s.next()));
let name = expect!(s.next()).to_string();
let reads = from_str!(u64, expect!(s.next()));
let merged = from_str!(u64, expect!(s.next()));
let sectors_read = from_str!(u64, expect!(s.next()));
let time_reading = from_str!(u64, expect!(s.next()));
let writes = from_str!(u64, expect!(s.next()));
let writes_merged = from_str!(u64, expect!(s.next()));
let sectors_written = from_str!(u64, expect!(s.next()));
let time_writing = from_str!(u64, expect!(s.next()));
let in_progress = from_str!(u64, expect!(s.next()));
let time_in_progress = from_str!(u64, expect!(s.next()));
let weighted_time_in_progress = from_str!(u64, expect!(s.next()));
let discards = s.next().and_then(|s| u64::from_str_radix(s, 10).ok());
let discards_merged = s.next().and_then(|s| u64::from_str_radix(s, 10).ok());
let sectors_discarded = s.next().and_then(|s| u64::from_str_radix(s, 10).ok());
let time_discarding = s.next().and_then(|s| u64::from_str_radix(s, 10).ok());
let flushes = s.next().and_then(|s| u64::from_str_radix(s, 10).ok());
let time_flushing = s.next().and_then(|s| u64::from_str_radix(s, 10).ok());
Ok(DiskStat {
major,
minor,
name,
reads,
merged,
sectors_read,
time_reading,
writes,
writes_merged,
sectors_written,
time_writing,
in_progress,
time_in_progress,
weighted_time_in_progress,
discards,
discards_merged,
sectors_discarded,
time_discarding,
flushes,
time_flushing,
})
}
}
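// Illustrative sketch (not part of the upstream crate): parsing a single,
// made-up /proc/diskstats line with `DiskStat::from_line`. The trailing
// discard/flush columns are absent here, so those fields come back as `None`.
//
//     let line = "8 0 sda 1000 10 2000 30 500 5 1500 20 0 40 60";
//     let stat = DiskStat::from_line(line).unwrap();
//     assert_eq!(stat.name, "sda");
//     assert_eq!(stat.reads, 1000);
//     assert_eq!(stat.discards, None);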

View File

@ -0,0 +1,65 @@
#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
use std::io::BufRead;
use super::ProcResult;
use crate::{process::Pfn, split_into_num};
#[derive(Debug, PartialEq, Eq, Clone, Hash)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct Iomem(pub Vec<(usize, PhysicalMemoryMap)>);
impl crate::FromBufRead for Iomem {
fn from_buf_read<R: BufRead>(r: R) -> ProcResult<Self> {
let mut vec = Vec::new();
for line in r.lines() {
let line = expect!(line);
let (indent, map) = PhysicalMemoryMap::from_line(&line)?;
vec.push((indent, map));
}
Ok(Iomem(vec))
}
}
#[derive(Debug, PartialEq, Eq, Clone, Hash)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct PhysicalMemoryMap {
/// The address space in the process that the mapping occupies.
pub address: (u64, u64),
pub name: String,
}
impl PhysicalMemoryMap {
fn from_line(line: &str) -> ProcResult<(usize, PhysicalMemoryMap)> {
let indent = line.chars().take_while(|c| *c == ' ').count() / 2;
let line = line.trim();
let mut s = line.split(" : ");
let address = expect!(s.next());
let name = expect!(s.next());
Ok((
indent,
PhysicalMemoryMap {
address: split_into_num(address, '-', 16)?,
name: String::from(name),
},
))
}
/// Get the PFN range for the mapping
///
/// First element of the tuple (start) is included.
/// Second element (end) is excluded
pub fn get_range(&self) -> impl crate::WithSystemInfo<Output = (Pfn, Pfn)> {
move |si: &crate::SystemInfo| {
let start = self.address.0 / si.page_size();
let end = (self.address.1 + 1) / si.page_size();
(Pfn(start), Pfn(end))
}
}
}

View File

@ -0,0 +1,423 @@
//! Functions related to the in-kernel key management and retention facility
//!
//! For more details on this facility, see the `keyrings(7)` man page.
use crate::{build_internal_error, expect, from_str, ProcResult};
use bitflags::bitflags;
#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
use std::{collections::HashMap, io::BufRead, time::Duration};
bitflags! {
/// Various key flags
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, PartialOrd, Ord)]
pub struct KeyFlags: u32 {
/// The key has been instantiated
const INSTANTIATED = 0x01;
/// The key has been revoked
const REVOKED = 0x02;
/// The key is dead
///
/// I.e. the key type has been unregistered. A key may be briefly in this state during garbage collection.
const DEAD = 0x04;
/// The key contributes to the user's quota
const QUOTA = 0x08;
/// The key is under construction via a callback to user space
const UNDER_CONSTRUCTION = 0x10;
/// The key is negatively instantiated
const NEGATIVE = 0x20;
/// The key has been invalidated
const INVALID = 0x40;
}
}
bitflags! {
/// Bitflags that represent the permissions for a key
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, PartialOrd, Ord)]
pub struct PermissionFlags: u32 {
/// The attributes of the key may be read
///
/// This includes the type, description, and access rights (excluding the security label)
const VIEW = 0x01;
/// For a key: the payload of the key may be read. For a keyring: the list of serial numbers (keys) to which the keyring has links may be read.
const READ = 0x02;
/// The payload of the key may be updated and the key may be revoked.
///
/// For a keyring, links may be added to or removed from the keyring, and the keyring
/// may be cleared completely (all links are removed).
const WRITE = 0x04;
/// The key may be found by a search.
///
/// For keyrings: keys and keyrings that are linked to by the keyring may be searched.
const SEARCH = 0x08;
/// Links may be created from keyrings to the key.
///
/// The initial link to a key that is established when the key is created doesn't require this permission.
const LINK = 0x10;
/// The ownership details and security label of the key may be changed, the key's expiration
/// time may be set, and the key may be revoked.
const SETATTR = 0x20;
const ALL = Self::VIEW.bits() | Self::READ.bits() | Self::WRITE.bits() | Self::SEARCH.bits() | Self::LINK.bits() | Self::SETATTR.bits();
}
}
impl KeyFlags {
fn from_str(s: &str) -> KeyFlags {
let mut me = KeyFlags::empty();
let mut chars = s.chars();
match chars.next() {
Some(c) if c == 'I' => me.insert(KeyFlags::INSTANTIATED),
_ => {}
}
match chars.next() {
Some(c) if c == 'R' => me.insert(KeyFlags::REVOKED),
_ => {}
}
match chars.next() {
Some(c) if c == 'D' => me.insert(KeyFlags::DEAD),
_ => {}
}
match chars.next() {
Some(c) if c == 'Q' => me.insert(KeyFlags::QUOTA),
_ => {}
}
match chars.next() {
Some(c) if c == 'U' => me.insert(KeyFlags::UNDER_CONSTRUCTION),
_ => {}
}
match chars.next() {
Some(c) if c == 'N' => me.insert(KeyFlags::NEGATIVE),
_ => {}
}
match chars.next() {
Some(c) if c == 'i' => me.insert(KeyFlags::INVALID),
_ => {}
}
me
}
}
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct Permissions {
pub possessor: PermissionFlags,
pub user: PermissionFlags,
pub group: PermissionFlags,
pub other: PermissionFlags,
}
impl Permissions {
fn from_str(s: &str) -> ProcResult<Permissions> {
let possessor = PermissionFlags::from_bits(from_str!(u32, &s[0..2], 16))
.ok_or_else(|| build_internal_error!(format!("Unable to parse {:?} as PermissionFlags", s)))?;
let user = PermissionFlags::from_bits(from_str!(u32, &s[2..4], 16))
.ok_or_else(|| build_internal_error!(format!("Unable to parse {:?} as PermissionFlags", s)))?;
let group = PermissionFlags::from_bits(from_str!(u32, &s[4..6], 16))
.ok_or_else(|| build_internal_error!(format!("Unable to parse {:?} as PermissionFlags", s)))?;
let other = PermissionFlags::from_bits(from_str!(u32, &s[6..8], 16))
.ok_or_else(|| build_internal_error!(format!("Unable to parse {:?} as PermissionFlags", s)))?;
Ok(Permissions {
possessor,
user,
group,
other,
})
}
}
#[derive(Debug, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub enum KeyTimeout {
Permanent,
Expired,
Timeout(Duration),
}
impl KeyTimeout {
fn from_str(s: &str) -> ProcResult<KeyTimeout> {
if s == "perm" {
Ok(KeyTimeout::Permanent)
} else if s == "expd" {
Ok(KeyTimeout::Expired)
} else {
let (val, unit) = s.split_at(s.len() - 1);
let val = from_str!(u64, val);
match unit {
"s" => Ok(KeyTimeout::Timeout(Duration::from_secs(val))),
"m" => Ok(KeyTimeout::Timeout(Duration::from_secs(val * 60))),
"h" => Ok(KeyTimeout::Timeout(Duration::from_secs(val * 60 * 60))),
"d" => Ok(KeyTimeout::Timeout(Duration::from_secs(val * 60 * 60 * 24))),
"w" => Ok(KeyTimeout::Timeout(Duration::from_secs(val * 60 * 60 * 24 * 7))),
_ => Err(build_internal_error!(format!("Unable to parse keytimeout of {:?}", s))),
}
}
}
}
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub enum KeyType {
/// This is a general-purpose key type.
///
/// The key is kept entirely within kernel memory. The payload may be read and updated by
/// user-space applications. The payload for keys of this type is a blob of arbitrary
/// data of up to 32,767 bytes.
/// The description may be any valid string, though it is preferred that it start
/// with a colon-delimited prefix representing the service to which the key is of
/// interest (for instance "afs:mykey").
User,
/// Keyrings are special keys which store a set of links to other keys (including
/// other keyrings), analogous to a directory holding links to files. The main
/// purpose of a keyring is to prevent other keys from being garbage collected
/// because nothing refers to them.
///
/// Keyrings with descriptions (names) that begin with a period ('.') are
/// reserved to the implementation.
Keyring,
/// This key type is essentially the same as "user", but it does not provide
/// reading (i.e., the keyctl(2) KEYCTL_READ operation), meaning that the key
/// payload is never visible from user space. This is suitable for storing user
/// name-password pairs that should not be readable from user space.
///
/// The description of a "logon" key must start with a non-empty colon-delimited
/// prefix whose purpose is to identify the service to which the key belongs.
/// (Note that this differs from keys of the "user" type, where the inclusion of
/// a prefix is recommended but is not enforced.)
Logon,
/// This key type is similar to the "user" key type, but it may hold a payload of
/// up to 1 MiB in size. This key type is useful for purposes such as holding
/// Kerberos ticket caches.
///
/// The payload data may be stored in a tmpfs filesystem, rather than in kernel
/// memory, if the data size exceeds the overhead of storing the data in the
/// filesystem. (Storing the data in a filesystem requires filesystem structures
/// to be allocated in the kernel. The size of these structures determines the
/// size threshold above which the tmpfs storage method is used.) Since Linux
/// 4.8, the payload data is encrypted when stored in tmpfs, thereby preventing
/// it from being written unencrypted into swap space.
BigKey,
/// Other specialized, but rare keys types
Other(String),
}
impl KeyType {
fn from_str(s: &str) -> KeyType {
match s {
"keyring" => KeyType::Keyring,
"user" => KeyType::User,
"logon" => KeyType::Logon,
"big_key" => KeyType::BigKey,
other => KeyType::Other(other.to_string()),
}
}
}
/// A key
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct Key {
/// The ID (serial number) of the key
pub id: u64,
/// A set of flags describing the state of the key
pub flags: KeyFlags,
/// Count of the number of kernel credential structures that are
/// pinning the key (approximately: the number of threads and open file
/// references that refer to this key).
pub usage: u32,
/// Key timeout
pub timeout: KeyTimeout,
/// Key permissions
pub permissions: Permissions,
/// The user ID of the key owner
pub uid: u32,
/// The group ID of the key.
///
/// The value of `None` here means that the key has no group ID; this can occur in certain circumstances for
/// keys created by the kernel.
pub gid: Option<u32>,
/// The type of key
pub key_type: KeyType,
/// The key description
pub description: String,
}
impl Key {
fn from_line(s: &str) -> ProcResult<Key> {
let mut s = s.split_whitespace();
let id = from_str!(u64, expect!(s.next()), 16);
let s_flags = expect!(s.next());
let usage = from_str!(u32, expect!(s.next()));
let s_timeout = expect!(s.next());
let s_perms = expect!(s.next());
let uid = from_str!(u32, expect!(s.next()));
let s_gid = expect!(s.next());
let s_type = expect!(s.next());
let desc: Vec<_> = s.collect();
Ok(Key {
id,
flags: KeyFlags::from_str(s_flags),
usage,
timeout: KeyTimeout::from_str(s_timeout)?,
permissions: Permissions::from_str(s_perms)?,
uid,
gid: if s_gid == "-1" {
None
} else {
Some(from_str!(u32, s_gid))
},
key_type: KeyType::from_str(s_type),
description: desc.join(" "),
})
}
}
/// A set of keys.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct Keys(pub Vec<Key>);
impl crate::FromBufRead for Keys {
fn from_buf_read<R: BufRead>(r: R) -> ProcResult<Self> {
let mut v = Vec::new();
for line in r.lines() {
let line = line?;
v.push(Key::from_line(&line)?);
}
Ok(Keys(v))
}
}
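// Illustrative sketch (not part of the upstream crate): loading /proc/keys
// through the `FromBufRead` implementation above. Requires read access to
// /proc/keys; error handling is elided for brevity.
//
//     use crate::FromBufRead;
//     use std::{fs::File, io::BufReader};
//
//     let keys = Keys::from_buf_read(BufReader::new(File::open("/proc/keys").unwrap())).unwrap();
//     for key in &keys.0 {
//         println!("{:x} {:?} {}", key.id, key.key_type, key.description);
//     }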
/// Information about a user with at least one key
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct KeyUser {
/// The user that owns the key
pub uid: u32,
/// The kernel-internal usage count for the kernel structure used to record key users
pub usage: u32,
/// The total number of keys owned by the user
pub nkeys: u32,
/// The number of keys that have been instantiated
pub nikeys: u32,
/// The number of keys owned by the user
pub qnkeys: u32,
/// The maximum number of keys that the user may own
pub maxkeys: u32,
/// The number of bytes consumed in payloads of the keys owned by this user
pub qnbytes: u32,
/// The upper limit on the number of bytes in key payloads for this user
pub maxbytes: u32,
}
impl KeyUser {
fn from_str(s: &str) -> ProcResult<KeyUser> {
let mut s = s.split_whitespace();
let uid = expect!(s.next());
let usage = from_str!(u32, expect!(s.next()));
let keys = expect!(s.next());
let qkeys = expect!(s.next());
let qbytes = expect!(s.next());
let (nkeys, nikeys) = {
let mut s = keys.split('/');
(from_str!(u32, expect!(s.next())), from_str!(u32, expect!(s.next())))
};
let (qnkeys, maxkeys) = {
let mut s = qkeys.split('/');
(from_str!(u32, expect!(s.next())), from_str!(u32, expect!(s.next())))
};
let (qnbytes, maxbytes) = {
let mut s = qbytes.split('/');
(from_str!(u32, expect!(s.next())), from_str!(u32, expect!(s.next())))
};
Ok(KeyUser {
uid: from_str!(u32, &uid[0..uid.len() - 1]),
usage,
nkeys,
nikeys,
qnkeys,
maxkeys,
qnbytes,
maxbytes,
})
}
}
/// Information about a set of users with at least one key.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct KeyUsers(pub HashMap<u32, KeyUser>);
impl crate::FromBufRead for KeyUsers {
fn from_buf_read<R: BufRead>(r: R) -> ProcResult<Self> {
let mut map = HashMap::new();
for line in r.lines() {
let line = line?;
let user = KeyUser::from_str(&line)?;
map.insert(user.uid, user);
}
Ok(KeyUsers(map))
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn key_flags() {
assert_eq!(KeyFlags::from_str("I------"), KeyFlags::INSTANTIATED);
assert_eq!(KeyFlags::from_str("IR"), KeyFlags::INSTANTIATED | KeyFlags::REVOKED);
assert_eq!(KeyFlags::from_str("IRDQUNi"), KeyFlags::all());
}
#[test]
fn timeout() {
assert_eq!(KeyTimeout::from_str("perm").unwrap(), KeyTimeout::Permanent);
assert_eq!(KeyTimeout::from_str("expd").unwrap(), KeyTimeout::Expired);
assert_eq!(
KeyTimeout::from_str("2w").unwrap(),
KeyTimeout::Timeout(Duration::from_secs(1209600))
);
assert_eq!(
KeyTimeout::from_str("14d").unwrap(),
KeyTimeout::Timeout(Duration::from_secs(1209600))
);
assert_eq!(
KeyTimeout::from_str("336h").unwrap(),
KeyTimeout::Timeout(Duration::from_secs(1209600))
);
assert_eq!(
KeyTimeout::from_str("20160m").unwrap(),
KeyTimeout::Timeout(Duration::from_secs(1209600))
);
assert_eq!(
KeyTimeout::from_str("1209600s").unwrap(),
KeyTimeout::Timeout(Duration::from_secs(1209600))
);
}
}

1190
third_party/rust/procfs-core/src/lib.rs vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,237 @@
use crate::{expect, from_str, ProcResult};
use std::io::BufRead;
#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
/// The type of a file lock
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub enum LockType {
/// A BSD file lock created using `flock`
FLock,
/// A POSIX byte-range lock created with `fcntl`
Posix,
/// An Open File Description (OFD) lock created with `fcntl`
ODF,
/// Some other unknown lock type
Other(String),
}
impl LockType {
pub fn as_str(&self) -> &str {
match self {
LockType::FLock => "FLOCK",
LockType::Posix => "POSIX",
LockType::ODF => "ODF",
LockType::Other(s) => s.as_ref(),
}
}
}
impl From<&str> for LockType {
fn from(s: &str) -> LockType {
match s {
"FLOCK" => LockType::FLock,
"OFDLCK" => LockType::ODF,
"POSIX" => LockType::Posix,
x => LockType::Other(x.to_string()),
}
}
}
/// The mode of a lock (advisory or mandatory)
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub enum LockMode {
Advisory,
Mandatory,
/// Some other unknown lock mode
Other(String),
}
impl LockMode {
pub fn as_str(&self) -> &str {
match self {
LockMode::Advisory => "ADVISORY",
LockMode::Mandatory => "MANDATORY",
LockMode::Other(s) => s.as_ref(),
}
}
}
impl From<&str> for LockMode {
fn from(s: &str) -> LockMode {
match s {
"ADVISORY" => LockMode::Advisory,
"MANDATORY" => LockMode::Mandatory,
x => LockMode::Other(x.to_string()),
}
}
}
/// The kind of a lock (read or write)
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub enum LockKind {
/// A read lock (or BSD shared lock)
Read,
/// A write lock (or a BSD exclusive lock)
Write,
/// Some other unknown lock kind
Other(String),
}
impl LockKind {
pub fn as_str(&self) -> &str {
match self {
LockKind::Read => "READ",
LockKind::Write => "WRITE",
LockKind::Other(s) => s.as_ref(),
}
}
}
impl From<&str> for LockKind {
fn from(s: &str) -> LockKind {
match s {
"READ" => LockKind::Read,
"WRITE" => LockKind::Write,
x => LockKind::Other(x.to_string()),
}
}
}
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
/// Details about an individual file lock
///
/// For an example, see the [lslocks.rs](https://github.com/eminence/procfs/tree/master/examples)
/// example in the source repo.
pub struct Lock {
/// The type of lock
pub lock_type: LockType,
/// The lock mode (advisory or mandatory)
pub mode: LockMode,
/// The kind of lock (read or write)
pub kind: LockKind,
/// The process that owns the lock
///
/// Because OFD locks are not owned by a single process (since multiple processes
/// may have file descriptors that refer to the same FD), this field may be `None`.
///
/// Before kernel 4.14 a bug meant that the PID of the process that initially
/// acquired the lock was displayed instead of `None`.
pub pid: Option<i32>,
/// The major ID of the device containing the FS that contains this lock
pub devmaj: u32,
/// The minor ID of the device containing the FS that contains this lock
pub devmin: u32,
/// The inode of the locked file
pub inode: u64,
/// The offset (in bytes) of the first byte of the lock.
///
/// For BSD locks, this value is always 0.
pub offset_first: u64,
/// The offset (in bytes) of the last byte of the lock.
///
/// `None` means the lock extends to the end of the file. For BSD locks,
/// the value is always `None`.
pub offset_last: Option<u64>,
}
impl Lock {
fn from_line(line: &str) -> ProcResult<Lock> {
let mut s = line.split_whitespace();
let _ = expect!(s.next());
let typ = {
let t = expect!(s.next());
if t == "->" {
// some locks start with a "->" which apparently means they are "blocked" (but I'm not sure what that actually means)
From::from(expect!(s.next()))
} else {
From::from(t)
}
};
let mode = From::from(expect!(s.next()));
let kind = From::from(expect!(s.next()));
let pid = expect!(s.next());
let disk_inode = expect!(s.next());
let offset_first = from_str!(u64, expect!(s.next()));
let offset_last = expect!(s.next());
let mut dis = disk_inode.split(':');
let devmaj = from_str!(u32, expect!(dis.next()), 16);
let devmin = from_str!(u32, expect!(dis.next()), 16);
let inode = from_str!(u64, expect!(dis.next()));
Ok(Lock {
lock_type: typ,
mode,
kind,
pid: if pid == "-1" { None } else { Some(from_str!(i32, pid)) },
devmaj,
devmin,
inode,
offset_first,
offset_last: if offset_last == "EOF" {
None
} else {
Some(from_str!(u64, offset_last))
},
})
}
}
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
/// Details about file locks
pub struct Locks(pub Vec<Lock>);
impl super::FromBufRead for Locks {
fn from_buf_read<R: BufRead>(r: R) -> ProcResult<Self> {
let mut v = Vec::new();
for line in r.lines() {
let line = line?;
v.push(Lock::from_line(&line)?);
}
Ok(Locks(v))
}
}
#[cfg(test)]
mod tests {
#[test]
fn test_blocked() {
let data = r#"1: POSIX ADVISORY WRITE 723 00:14:16845 0 EOF
2: FLOCK ADVISORY WRITE 652 00:14:16763 0 EOF
3: FLOCK ADVISORY WRITE 1594 fd:00:396528 0 EOF
4: FLOCK ADVISORY WRITE 1594 fd:00:396527 0 EOF
5: FLOCK ADVISORY WRITE 2851 fd:00:529372 0 EOF
6: POSIX ADVISORY WRITE 1280 00:14:16200 0 0
6: -> POSIX ADVISORY WRITE 1281 00:14:16200 0 0
6: -> POSIX ADVISORY WRITE 1279 00:14:16200 0 0
6: -> POSIX ADVISORY WRITE 1282 00:14:16200 0 0
6: -> POSIX ADVISORY WRITE 1283 00:14:16200 0 0
7: OFDLCK ADVISORY READ -1 00:06:1028 0 EOF
8: FLOCK ADVISORY WRITE 6471 fd:00:529426 0 EOF
9: FLOCK ADVISORY WRITE 6471 fd:00:529424 0 EOF
10: FLOCK ADVISORY WRITE 6471 fd:00:529420 0 EOF
11: FLOCK ADVISORY WRITE 6471 fd:00:529418 0 EOF
12: POSIX ADVISORY WRITE 1279 00:14:23553 0 EOF
13: FLOCK ADVISORY WRITE 6471 fd:00:393838 0 EOF
14: POSIX ADVISORY WRITE 655 00:14:16146 0 EOF"#;
for line in data.lines() {
super::Lock::from_line(line.trim()).unwrap();
}
}
}

View File

@ -0,0 +1,407 @@
use super::{expect, from_str, ProcResult};
#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
use std::{collections::HashMap, io};
fn convert_to_kibibytes(num: u64, unit: &str) -> ProcResult<u64> {
match unit {
"B" => Ok(num),
"KiB" | "kiB" | "kB" | "KB" => Ok(num * 1024),
"MiB" | "miB" | "MB" | "mB" => Ok(num * 1024 * 1024),
"GiB" | "giB" | "GB" | "gB" => Ok(num * 1024 * 1024 * 1024),
unknown => Err(build_internal_error!(format!("Unknown unit type {}", unknown))),
}
}
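// For example (illustrative, not part of the upstream crate): an entry such as
// "SwapTotal: 4 MB" would be normalized as
// convert_to_kibibytes(4, "MB") == Ok(4 * 1024 * 1024), i.e. the result is
// expressed in bytes, matching the struct documentation below.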
/// This struct reports statistics about memory usage on the system, based on
/// the `/proc/meminfo` file.
///
/// It is used by `free(1)` to report the amount of free and used memory (both
/// physical and swap) on the system as well as the shared memory and
/// buffers used by the kernel. Each struct member is generally reported in
/// bytes, but a few are unitless values.
///
/// Except as noted below, all of the fields have been present since at least
/// Linux 2.6.0. Some fields are optional and are present only if the kernel
/// was configured with various options; those dependencies are noted in the list.
///
/// **Notes**
///
/// While the file shows kilobytes (kB; 1 kB equals 1000 B),
/// it is actually kibibytes (KiB; 1 KiB equals 1024 B).
///
/// All sizes are converted to bytes. Unitless values, like `hugepages_total`, are not affected.
///
/// This imprecision in /proc/meminfo is known,
/// but is not corrected due to legacy concerns -
/// programs rely on /proc/meminfo to specify size with the "kB" string.
///
/// New fields to this struct may be added at any time (even without a major or minor semver bump).
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
#[allow(non_snake_case)]
#[non_exhaustive]
pub struct Meminfo {
/// Total usable RAM (i.e., physical RAM minus a few reserved bits and the kernel binary code).
pub mem_total: u64,
/// The sum of [LowFree](#structfield.low_free) + [HighFree](#structfield.high_free).
pub mem_free: u64,
/// An estimate of how much memory is available for starting new applications, without swapping.
///
/// (since Linux 3.14)
pub mem_available: Option<u64>,
/// Relatively temporary storage for raw disk blocks that shouldn't get tremendously large (20MB or so).
pub buffers: u64,
/// In-memory cache for files read from the disk (the page cache). Doesn't include SwapCached.
pub cached: u64,
/// Memory that once was swapped out, is swapped back in but still also is in the swap
/// file.
///
/// (If memory pressure is high, these pages don't need to be swapped out again
/// because they are already in the swap file. This saves I/O.)
pub swap_cached: u64,
/// Memory that has been used more recently and usually not reclaimed unless absolutely
/// necessary.
pub active: u64,
/// Memory which has been less recently used. It is more eligible to be reclaimed for other
/// purposes.
pub inactive: u64,
/// [To be documented.]
///
/// (since Linux 2.6.28)
pub active_anon: Option<u64>,
/// [To be documented.]
///
/// (since Linux 2.6.28)
pub inactive_anon: Option<u64>,
/// [To be documented.]
///
/// (since Linux 2.6.28)
pub active_file: Option<u64>,
/// [To be documented.]
///
/// (since Linux 2.6.28)
pub inactive_file: Option<u64>,
/// [To be documented.]
///
/// (From Linux 2.6.28 to 2.6.30, CONFIG_UNEVICTABLE_LRU was required.)
pub unevictable: Option<u64>,
/// [To be documented.]
///
/// (From Linux 2.6.28 to 2.6.30, CONFIG_UNEVICTABLE_LRU was required.)
pub mlocked: Option<u64>,
/// Total amount of highmem.
///
/// Highmem is all memory above ~860MB of physical memory. Highmem areas are for use by
/// user-space programs, or for the page cache. The kernel must use tricks to access this
/// memory, making it slower to access than lowmem.
///
/// (Starting with Linux 2.6.19, CONFIG_HIGHMEM is required.)
pub high_total: Option<u64>,
/// Amount of free highmem.
///
/// (Starting with Linux 2.6.19, CONFIG_HIGHMEM is required.)
pub high_free: Option<u64>,
/// Total amount of lowmem.
///
/// Lowmem is memory which can be used for everything that highmem can be used for,
/// but it is also available for the kernel's use for its own data structures.
/// Among many other things, it is where everything from Slab is allocated.
/// Bad things happen when you're out of lowmem.
///
/// (Starting with Linux 2.6.19, CONFIG_HIGHMEM is required.)
pub low_total: Option<u64>,
/// Amount of free lowmem.
///
/// (Starting with Linux 2.6.19, CONFIG_HIGHMEM is required.)
pub low_free: Option<u64>,
/// [To be documented.]
///
/// (since Linux 2.6.29. CONFIG_MMU is required.)
pub mmap_copy: Option<u64>,
/// Total amount of swap space available.
pub swap_total: u64,
/// Amount of swap space that is currently unused.
pub swap_free: u64,
/// Memory which is waiting to get written back to the disk.
pub dirty: u64,
/// Memory which is actively being written back to the disk.
pub writeback: u64,
/// Non-file backed pages mapped into user-space page tables.
///
/// (since Linux 2.6.18)
pub anon_pages: Option<u64>,
/// Files which have been mapped into memory (with mmap(2)), such as libraries.
pub mapped: u64,
/// Amount of memory consumed in tmpfs(5) filesystems.
///
/// (since Linux 2.6.32)
pub shmem: Option<u64>,
/// In-kernel data structures cache.
pub slab: u64,
/// Part of Slab, that might be reclaimed, such as caches.
///
/// (since Linux 2.6.19)
pub s_reclaimable: Option<u64>,
/// Part of Slab, that cannot be reclaimed on memory pressure.
///
/// (since Linux 2.6.19)
pub s_unreclaim: Option<u64>,
/// Amount of memory allocated to kernel stacks.
///
/// (since Linux 2.6.32)
pub kernel_stack: Option<u64>,
/// Amount of memory dedicated to the lowest level of page tables.
///
/// (since Linux 2.6.18)
pub page_tables: Option<u64>,
/// Amount of memory allocated for secondary page tables. This currently includes KVM mmu
/// allocations on x86 and arm64.
///
/// (since Linux 6.1)
pub secondary_page_tables: Option<u64>,
/// [To be documented.]
///
/// (CONFIG_QUICKLIST is required. Since Linux 2.6.27)
pub quicklists: Option<u64>,
/// NFS pages sent to the server, but not yet committed to stable storage.
///
/// (since Linux 2.6.18)
pub nfs_unstable: Option<u64>,
/// Memory used for block device "bounce buffers".
///
/// (since Linux 2.6.18)
pub bounce: Option<u64>,
/// Memory used by FUSE for temporary writeback buffers.
///
/// (since Linux 2.6.26)
pub writeback_tmp: Option<u64>,
/// This is the total amount of memory currently available to be allocated on the system,
/// expressed in bytes.
///
/// This limit is adhered to only if strict overcommit
/// accounting is enabled (mode 2 in /proc/sys/vm/overcommit_memory). The limit is calculated
/// according to the formula described under /proc/sys/vm/overcommit_memory. For further
/// details, see the kernel source file
/// [Documentation/vm/overcommit-accounting](https://www.kernel.org/doc/Documentation/vm/overcommit-accounting).
///
/// (since Linux 2.6.10)
pub commit_limit: Option<u64>,
/// The amount of memory presently allocated on the system.
///
/// The committed memory is a sum of all of the memory which has been allocated
/// by processes, even if it has not been "used" by them as of yet. A process which allocates 1GB of memory (using malloc(3)
/// or similar), but touches only 300MB of that memory will show up as using only 300MB of memory even if it has the address space
/// allocated for the entire 1GB.
///
/// This 1GB is memory which has been "committed" to by the VM and can be used at any time by the allocating application. With
/// strict overcommit enabled on the system (mode 2 in /proc/sys/vm/overcommit_memory), allocations which would exceed the
/// CommitLimit will not be permitted. This is useful if one needs to guarantee that processes will not fail due to lack of memory once
/// that memory has been successfully allocated.
pub committed_as: u64,
/// Total size of vmalloc memory area.
pub vmalloc_total: u64,
/// Amount of vmalloc area which is used.
pub vmalloc_used: u64,
/// Largest contiguous block of vmalloc area which is free.
pub vmalloc_chunk: u64,
/// [To be documented.]
///
/// (CONFIG_MEMORY_FAILURE is required. Since Linux 2.6.32)
pub hardware_corrupted: Option<u64>,
/// Non-file backed huge pages mapped into user-space page tables.
///
/// (CONFIG_TRANSPARENT_HUGEPAGE is required. Since Linux 2.6.38)
pub anon_hugepages: Option<u64>,
/// Memory used by shared memory (shmem) and tmpfs(5) allocated with huge pages.
///
/// (CONFIG_TRANSPARENT_HUGEPAGE is required. Since Linux 4.8)
pub shmem_hugepages: Option<u64>,
/// Shared memory mapped into user space with huge pages.
///
/// (CONFIG_TRANSPARENT_HUGEPAGE is required. Since Linux 4.8)
pub shmem_pmd_mapped: Option<u64>,
/// Total CMA (Contiguous Memory Allocator) pages.
///
/// (CONFIG_CMA is required. Since Linux 3.1)
pub cma_total: Option<u64>,
/// Free CMA (Contiguous Memory Allocator) pages.
///
/// (CONFIG_CMA is required. Since Linux 3.1)
pub cma_free: Option<u64>,
/// The size of the pool of huge pages.
///
/// (CONFIG_HUGETLB_PAGE is required.)
pub hugepages_total: Option<u64>,
/// The number of huge pages in the pool that are not yet allocated.
///
/// (CONFIG_HUGETLB_PAGE is required.)
pub hugepages_free: Option<u64>,
/// This is the number of huge pages for which a commitment to allocate from the pool has been
/// made, but no allocation has yet been made.
///
/// These reserved huge pages guarantee that an application will be able to allocate a
/// huge page from the pool of huge pages at fault time.
///
/// (CONFIG_HUGETLB_PAGE is required. Since Linux 2.6.17)
pub hugepages_rsvd: Option<u64>,
/// This is the number of huge pages in the pool above the value in /proc/sys/vm/nr_hugepages.
///
/// The maximum number of surplus huge pages is controlled by /proc/sys/vm/nr_overcommit_hugepages.
///
/// (CONFIG_HUGETLB_PAGE is required. Since Linux 2.6.24)
pub hugepages_surp: Option<u64>,
/// The size of huge pages.
///
/// (CONFIG_HUGETLB_PAGE is required.)
pub hugepagesize: Option<u64>,
/// Number of bytes of RAM linearly mapped by kernel in 4kB pages. (x86.)
///
/// (since Linux 2.6.27)
pub direct_map_4k: Option<u64>,
/// Number of bytes of RAM linearly mapped by kernel in 4MB pages.
///
/// (x86 with CONFIG_X86_64 or CONFIG_X86_PAE enabled. Since Linux 2.6.27)
pub direct_map_4M: Option<u64>,
/// Number of bytes of RAM linearly mapped by kernel in 2MB pages.
///
/// (x86 with neither CONFIG_X86_64 nor CONFIG_X86_PAE enabled. Since Linux 2.6.27)
pub direct_map_2M: Option<u64>,
/// Number of bytes of RAM linearly mapped by kernel in 1GB pages.
///
/// (x86 with CONFIG_X86_64 and CONFIG_X86_DIRECT_GBPAGES enabled. Since Linux 2.6.27)
pub direct_map_1G: Option<u64>,
/// [To be documented.]
pub hugetlb: Option<u64>,
/// Memory allocated to the per-cpu allocator used to back per-cpu allocations.
///
/// This stat excludes the cost of metadata.
pub per_cpu: Option<u64>,
/// Kernel allocations that the kernel will attempt to reclaim under memory pressure.
///
/// Includes s_reclaimable, and other direct allocations with a shrinker.
pub k_reclaimable: Option<u64>,
/// Undocumented field
///
/// (CONFIG_TRANSPARENT_HUGEPAGE is required. Since Linux 5.4)
pub file_pmd_mapped: Option<u64>,
/// Undocumented field
///
/// (CONFIG_TRANSPARENT_HUGEPAGE is required. Since Linux 5.4)
pub file_huge_pages: Option<u64>,
/// Memory consumed by the zswap backend (compressed size).
///
/// (CONFIG_ZSWAP is required. Since Linux 5.19)
pub z_swap: Option<u64>,
/// Amount of anonymous memory stored in zswap (original size).
///
/// (CONFIG_ZSWAP is required. Since Linux 5.19)
pub z_swapped: Option<u64>,
}
impl super::FromBufRead for Meminfo {
fn from_buf_read<R: io::BufRead>(r: R) -> ProcResult<Self> {
let mut map = HashMap::new();
for line in r.lines() {
let line = expect!(line);
if line.is_empty() {
continue;
}
let mut s = line.split_whitespace();
let field = expect!(s.next(), "no field");
let value = expect!(s.next(), "no value");
let unit = s.next(); // optional
let value = from_str!(u64, value);
let value = if let Some(unit) = unit {
convert_to_kibibytes(value, unit)?
} else {
value
};
map.insert(field[..field.len() - 1].to_string(), value);
}
// use 'remove' to move the value out of the hashmap
// if there's anything still left in the map at the end, that
// means we probably have a bug/typo, or are out-of-date
let meminfo = Meminfo {
mem_total: expect!(map.remove("MemTotal")),
mem_free: expect!(map.remove("MemFree")),
mem_available: map.remove("MemAvailable"),
buffers: expect!(map.remove("Buffers")),
cached: expect!(map.remove("Cached")),
swap_cached: expect!(map.remove("SwapCached")),
active: expect!(map.remove("Active")),
inactive: expect!(map.remove("Inactive")),
active_anon: map.remove("Active(anon)"),
inactive_anon: map.remove("Inactive(anon)"),
active_file: map.remove("Active(file)"),
inactive_file: map.remove("Inactive(file)"),
unevictable: map.remove("Unevictable"),
mlocked: map.remove("Mlocked"),
high_total: map.remove("HighTotal"),
high_free: map.remove("HighFree"),
low_total: map.remove("LowTotal"),
low_free: map.remove("LowFree"),
mmap_copy: map.remove("MmapCopy"),
swap_total: expect!(map.remove("SwapTotal")),
swap_free: expect!(map.remove("SwapFree")),
dirty: expect!(map.remove("Dirty")),
writeback: expect!(map.remove("Writeback")),
anon_pages: map.remove("AnonPages"),
mapped: expect!(map.remove("Mapped")),
shmem: map.remove("Shmem"),
slab: expect!(map.remove("Slab")),
s_reclaimable: map.remove("SReclaimable"),
s_unreclaim: map.remove("SUnreclaim"),
kernel_stack: map.remove("KernelStack"),
page_tables: map.remove("PageTables"),
secondary_page_tables: map.remove("SecPageTables"),
quicklists: map.remove("Quicklists"),
nfs_unstable: map.remove("NFS_Unstable"),
bounce: map.remove("Bounce"),
writeback_tmp: map.remove("WritebackTmp"),
commit_limit: map.remove("CommitLimit"),
committed_as: expect!(map.remove("Committed_AS")),
vmalloc_total: expect!(map.remove("VmallocTotal")),
vmalloc_used: expect!(map.remove("VmallocUsed")),
vmalloc_chunk: expect!(map.remove("VmallocChunk")),
hardware_corrupted: map.remove("HardwareCorrupted"),
anon_hugepages: map.remove("AnonHugePages"),
shmem_hugepages: map.remove("ShmemHugePages"),
shmem_pmd_mapped: map.remove("ShmemPmdMapped"),
cma_total: map.remove("CmaTotal"),
cma_free: map.remove("CmaFree"),
hugepages_total: map.remove("HugePages_Total"),
hugepages_free: map.remove("HugePages_Free"),
hugepages_rsvd: map.remove("HugePages_Rsvd"),
hugepages_surp: map.remove("HugePages_Surp"),
hugepagesize: map.remove("Hugepagesize"),
direct_map_4k: map.remove("DirectMap4k"),
direct_map_4M: map.remove("DirectMap4M"),
direct_map_2M: map.remove("DirectMap2M"),
direct_map_1G: map.remove("DirectMap1G"),
k_reclaimable: map.remove("KReclaimable"),
per_cpu: map.remove("Percpu"),
hugetlb: map.remove("Hugetlb"),
file_pmd_mapped: map.remove("FilePmdMapped"),
file_huge_pages: map.remove("FileHugePages"),
z_swap: map.remove("Zswap"),
z_swapped: map.remove("Zswapped"),
};
if cfg!(test) {
assert!(map.is_empty(), "meminfo map is not empty: {:#?}", map);
}
Ok(meminfo)
}
}
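A usage sketch in the same style as the crate's other tests (the sample values below are made up, and only the non-`Option` fields are strictly required by the parser):

```rust
#[test]
fn test_meminfo_sketch() {
    use crate::FromBufRead;
    use std::io::Cursor;

    // Hypothetical /proc/meminfo excerpt containing every mandatory field.
    let s = "MemTotal:       16384000 kB
MemFree:         8192000 kB
Buffers:          123456 kB
Cached:          2345678 kB
SwapCached:            0 kB
Active:          4567890 kB
Inactive:        1234567 kB
SwapTotal:       2097148 kB
SwapFree:        2097148 kB
Dirty:               128 kB
Writeback:             0 kB
Mapped:           345678 kB
Slab:             456789 kB
Committed_AS:    7890123 kB
VmallocTotal:   34359738367 kB
VmallocUsed:       45678 kB
VmallocChunk:          0 kB";

    let meminfo = Meminfo::from_buf_read(Cursor::new(s)).unwrap();
    // Per the doc comment above, the "kB" values end up converted to bytes.
    assert!(meminfo.mem_free <= meminfo.mem_total);
    // Fields missing from the input simply stay None.
    assert!(meminfo.mem_available.is_none());
}
```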

View File

@ -0,0 +1,107 @@
use std::{collections::HashMap, io::BufRead};
use super::ProcResult;
use std::str::FromStr;
#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
/// A mountpoint entry under `/proc/mounts`
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
#[allow(non_snake_case)]
pub struct MountEntry {
/// Device
pub fs_spec: String,
/// Mountpoint
pub fs_file: String,
/// FS type
pub fs_vfstype: String,
/// Mount options
pub fs_mntops: HashMap<String, Option<String>>,
/// Dump
pub fs_freq: u8,
/// Check
pub fs_passno: u8,
}
impl super::FromBufRead for Vec<MountEntry> {
fn from_buf_read<R: BufRead>(r: R) -> ProcResult<Self> {
let mut vec = Vec::new();
for line in r.lines() {
let line = expect!(line);
let mut s = line.split_whitespace();
let fs_spec = unmangle_octal(expect!(s.next()));
let fs_file = unmangle_octal(expect!(s.next()));
let fs_vfstype = unmangle_octal(expect!(s.next()));
let fs_mntops = unmangle_octal(expect!(s.next()));
let fs_mntops: HashMap<String, Option<String>> = fs_mntops
.split(',')
.map(|s| {
let mut split = s.splitn(2, '=');
let k = split.next().unwrap().to_string(); // can not fail, splitn will always return at least 1 element
let v = split.next().map(|s| s.to_string());
(k, v)
})
.collect();
let fs_freq = expect!(u8::from_str(expect!(s.next())));
let fs_passno = expect!(u8::from_str(expect!(s.next())));
let mount_entry = MountEntry {
fs_spec,
fs_file,
fs_vfstype,
fs_mntops,
fs_freq,
fs_passno,
};
vec.push(mount_entry);
}
Ok(vec)
}
}
/// Unmangle spaces ' ', tabs '\t', line breaks '\n', backslashes '\\', and hashes '#'
///
/// See <https://elixir.bootlin.com/linux/v6.2.8/source/fs/proc_namespace.c#L89>
pub(crate) fn unmangle_octal(input: &str) -> String {
let mut input = input.to_string();
for (octal, c) in [(r"\011", "\t"), (r"\012", "\n"), (r"\134", "\\"), (r"\043", "#")] {
input = input.replace(octal, c);
}
input
}
#[test]
fn test_unmangle_octal() {
let tests = [
(r"a\134b\011c\012d\043e", "a\\b\tc\nd#e"), // all escaped chars with abcde in between
(r"abcd", r"abcd"), // do nothing
];
for (input, expected) in tests {
assert_eq!(unmangle_octal(input), expected);
}
}
#[test]
fn test_mounts() {
use crate::FromBufRead;
use std::io::Cursor;
let s = "proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0
/dev/mapper/ol-root / xfs rw,relatime,attr2,inode64,logbufs=8,logbsize=32k,noquota 0 0
Downloads /media/sf_downloads vboxsf rw,nodev,relatime,iocharset=utf8,uid=0,gid=977,dmode=0770,fmode=0770,tag=VBoxAutomounter 0 0";
let cursor = Cursor::new(s);
let mounts = Vec::<MountEntry>::from_buf_read(cursor).unwrap();
assert_eq!(mounts.len(), 4);
}
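For illustration, a hypothetical extra test showing how `fs_mntops` is shaped: flag-style options map to `None`, while `key=value` options keep their value as a string.

```rust
#[test]
fn test_mount_options_sketch() {
    use crate::FromBufRead;
    use std::io::Cursor;

    let s = "tmpfs /tmp tmpfs rw,nosuid,size=4194304k,uid=1000 0 0";
    let mounts = Vec::<MountEntry>::from_buf_read(Cursor::new(s)).unwrap();
    let opts = &mounts[0].fs_mntops;

    // Flag-style options carry no value...
    assert_eq!(opts.get("rw"), Some(&None));
    // ...while key=value options keep the value.
    assert_eq!(opts.get("size"), Some(&Some("4194304k".to_string())));
    assert_eq!(opts.get("uid"), Some(&Some("1000".to_string())));
}
```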

712
third_party/rust/procfs-core/src/net.rs vendored Normal file
View File

@ -0,0 +1,712 @@
// Don't throw clippy warnings for manual string stripping.
// The suggested fix with `strip_prefix` removes support for Rust 1.33 and 1.38
#![allow(clippy::manual_strip)]
//! Information about the networking layer.
//!
//! This module corresponds to the `/proc/net` directory and contains various information about the
//! networking layer.
use crate::ProcResult;
use crate::{build_internal_error, expect, from_iter, from_str};
use std::collections::HashMap;
use bitflags::bitflags;
use std::io::BufRead;
use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6};
use std::{path::PathBuf, str::FromStr};
#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub enum TcpState {
Established = 1,
SynSent,
SynRecv,
FinWait1,
FinWait2,
TimeWait,
Close,
CloseWait,
LastAck,
Listen,
Closing,
NewSynRecv,
}
impl TcpState {
pub fn from_u8(num: u8) -> Option<TcpState> {
match num {
0x01 => Some(TcpState::Established),
0x02 => Some(TcpState::SynSent),
0x03 => Some(TcpState::SynRecv),
0x04 => Some(TcpState::FinWait1),
0x05 => Some(TcpState::FinWait2),
0x06 => Some(TcpState::TimeWait),
0x07 => Some(TcpState::Close),
0x08 => Some(TcpState::CloseWait),
0x09 => Some(TcpState::LastAck),
0x0A => Some(TcpState::Listen),
0x0B => Some(TcpState::Closing),
0x0C => Some(TcpState::NewSynRecv),
_ => None,
}
}
pub fn to_u8(&self) -> u8 {
match self {
TcpState::Established => 0x01,
TcpState::SynSent => 0x02,
TcpState::SynRecv => 0x03,
TcpState::FinWait1 => 0x04,
TcpState::FinWait2 => 0x05,
TcpState::TimeWait => 0x06,
TcpState::Close => 0x07,
TcpState::CloseWait => 0x08,
TcpState::LastAck => 0x09,
TcpState::Listen => 0x0A,
TcpState::Closing => 0x0B,
TcpState::NewSynRecv => 0x0C,
}
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub enum UdpState {
Established = 1,
Close = 7,
}
impl UdpState {
pub fn from_u8(num: u8) -> Option<UdpState> {
match num {
0x01 => Some(UdpState::Established),
0x07 => Some(UdpState::Close),
_ => None,
}
}
pub fn to_u8(&self) -> u8 {
match self {
UdpState::Established => 0x01,
UdpState::Close => 0x07,
}
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub enum UnixState {
UNCONNECTED = 1,
CONNECTING = 2,
CONNECTED = 3,
DISCONNECTING = 4,
}
impl UnixState {
pub fn from_u8(num: u8) -> Option<UnixState> {
match num {
0x01 => Some(UnixState::UNCONNECTED),
0x02 => Some(UnixState::CONNECTING),
0x03 => Some(UnixState::CONNECTED),
0x04 => Some(UnixState::DISCONNECTING),
_ => None,
}
}
pub fn to_u8(&self) -> u8 {
match self {
UnixState::UNCONNECTED => 0x01,
UnixState::CONNECTING => 0x02,
UnixState::CONNECTED => 0x03,
UnixState::DISCONNECTING => 0x04,
}
}
}
/// An entry in the TCP socket table
#[derive(Debug, Clone)]
#[non_exhaustive]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct TcpNetEntry {
pub local_address: SocketAddr,
pub remote_address: SocketAddr,
pub state: TcpState,
pub rx_queue: u32,
pub tx_queue: u32,
pub uid: u32,
pub inode: u64,
}
/// An entry in the UDP socket table
#[derive(Debug, Clone)]
#[non_exhaustive]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct UdpNetEntry {
pub local_address: SocketAddr,
pub remote_address: SocketAddr,
pub state: UdpState,
pub rx_queue: u32,
pub tx_queue: u32,
pub uid: u32,
pub inode: u64,
}
/// An entry in the Unix socket table
#[derive(Debug, Clone)]
#[non_exhaustive]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct UnixNetEntry {
/// The number of users of the socket
pub ref_count: u32,
/// The socket type.
///
/// Possible values are `SOCK_STREAM`, `SOCK_DGRAM`, or `SOCK_SEQPACKET`. These constants can
/// be found in the libc crate.
pub socket_type: u16,
/// The state of the socket
pub state: UnixState,
/// The inode number of the socket
pub inode: u64,
/// The bound pathname (if any) of the socket.
///
/// Sockets in the abstract namespace are included, and are shown with a path that commences
/// with the '@' character.
pub path: Option<PathBuf>,
}
/// Parses an address in the form 00010203:1234
///
/// Also supports IPv6
fn parse_addressport_str(s: &str, little_endian: bool) -> ProcResult<SocketAddr> {
let mut las = s.split(':');
let ip_part = expect!(las.next(), "ip_part");
let port = expect!(las.next(), "port");
let port = from_str!(u16, port, 16);
use std::convert::TryInto;
let read_u32 = if little_endian {
u32::from_le_bytes
} else {
u32::from_be_bytes
};
if ip_part.len() == 8 {
let bytes = expect!(hex::decode(ip_part));
let ip_u32 = read_u32(bytes[..4].try_into().unwrap());
let ip = Ipv4Addr::from(ip_u32);
Ok(SocketAddr::V4(SocketAddrV4::new(ip, port)))
} else if ip_part.len() == 32 {
let bytes = expect!(hex::decode(ip_part));
let ip_a = read_u32(bytes[0..4].try_into().unwrap());
let ip_b = read_u32(bytes[4..8].try_into().unwrap());
let ip_c = read_u32(bytes[8..12].try_into().unwrap());
let ip_d = read_u32(bytes[12..16].try_into().unwrap());
let ip = Ipv6Addr::new(
((ip_a >> 16) & 0xffff) as u16,
(ip_a & 0xffff) as u16,
((ip_b >> 16) & 0xffff) as u16,
(ip_b & 0xffff) as u16,
((ip_c >> 16) & 0xffff) as u16,
(ip_c & 0xffff) as u16,
((ip_d >> 16) & 0xffff) as u16,
(ip_d & 0xffff) as u16,
);
Ok(SocketAddr::V6(SocketAddrV6::new(ip, port, 0, 0)))
} else {
Err(build_internal_error!(format!(
"Unable to parse {:?} as an address:port",
s
)))
}
}
/// TCP socket entries.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct TcpNetEntries(pub Vec<TcpNetEntry>);
impl super::FromBufReadSI for TcpNetEntries {
fn from_buf_read<R: BufRead>(r: R, system_info: &crate::SystemInfo) -> ProcResult<Self> {
let mut vec = Vec::new();
// first line is a header we need to skip
for line in r.lines().skip(1) {
let line = line?;
let mut s = line.split_whitespace();
s.next();
let local_address = expect!(s.next(), "tcp::local_address");
let rem_address = expect!(s.next(), "tcp::rem_address");
let state = expect!(s.next(), "tcp::st");
let mut tx_rx_queue = expect!(s.next(), "tcp::tx_queue:rx_queue").splitn(2, ':');
let tx_queue = from_str!(u32, expect!(tx_rx_queue.next(), "tcp::tx_queue"), 16);
let rx_queue = from_str!(u32, expect!(tx_rx_queue.next(), "tcp::rx_queue"), 16);
s.next(); // skip tr and tm->when
s.next(); // skip retrnsmt
let uid = from_str!(u32, expect!(s.next(), "tcp::uid"));
s.next(); // skip timeout
let inode = expect!(s.next(), "tcp::inode");
vec.push(TcpNetEntry {
local_address: parse_addressport_str(local_address, system_info.is_little_endian())?,
remote_address: parse_addressport_str(rem_address, system_info.is_little_endian())?,
rx_queue,
tx_queue,
state: expect!(TcpState::from_u8(from_str!(u8, state, 16))),
uid,
inode: from_str!(u64, inode),
});
}
Ok(TcpNetEntries(vec))
}
}
/// UDP socket entries.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct UdpNetEntries(pub Vec<UdpNetEntry>);
impl super::FromBufReadSI for UdpNetEntries {
fn from_buf_read<R: BufRead>(r: R, system_info: &crate::SystemInfo) -> ProcResult<Self> {
let mut vec = Vec::new();
// first line is a header we need to skip
for line in r.lines().skip(1) {
let line = line?;
let mut s = line.split_whitespace();
s.next();
let local_address = expect!(s.next(), "udp::local_address");
let rem_address = expect!(s.next(), "udp::rem_address");
let state = expect!(s.next(), "udp::st");
let mut tx_rx_queue = expect!(s.next(), "udp::tx_queue:rx_queue").splitn(2, ':');
let tx_queue: u32 = from_str!(u32, expect!(tx_rx_queue.next(), "udp::tx_queue"), 16);
let rx_queue: u32 = from_str!(u32, expect!(tx_rx_queue.next(), "udp::rx_queue"), 16);
s.next(); // skip tr and tm->when
s.next(); // skip retrnsmt
let uid = from_str!(u32, expect!(s.next(), "udp::uid"));
s.next(); // skip timeout
let inode = expect!(s.next(), "udp::inode");
vec.push(UdpNetEntry {
local_address: parse_addressport_str(local_address, system_info.is_little_endian())?,
remote_address: parse_addressport_str(rem_address, system_info.is_little_endian())?,
rx_queue,
tx_queue,
state: expect!(UdpState::from_u8(from_str!(u8, state, 16))),
uid,
inode: from_str!(u64, inode),
});
}
Ok(UdpNetEntries(vec))
}
}
/// Unix socket entries.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct UnixNetEntries(pub Vec<UnixNetEntry>);
impl super::FromBufRead for UnixNetEntries {
fn from_buf_read<R: BufRead>(r: R) -> ProcResult<Self> {
let mut vec = Vec::new();
// first line is a header we need to skip
for line in r.lines().skip(1) {
let line = line?;
let mut s = line.split_whitespace();
s.next(); // skip table slot number
let ref_count = from_str!(u32, expect!(s.next()), 16);
s.next(); // skip protocol, always zero
s.next(); // skip internal kernel flags
let socket_type = from_str!(u16, expect!(s.next()), 16);
let state = from_str!(u8, expect!(s.next()), 16);
let inode = from_str!(u64, expect!(s.next()));
let path = s.next().map(PathBuf::from);
vec.push(UnixNetEntry {
ref_count,
socket_type,
inode,
state: expect!(UnixState::from_u8(state)),
path,
});
}
Ok(UnixNetEntries(vec))
}
}
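A hypothetical test sketching the Unix socket parser; the sample follows the `/proc/net/unix` column layout the code expects (slot, ref count, protocol, flags, type, state, inode, optional path):

```rust
#[test]
fn test_unix_entries_sketch() {
    use crate::FromBufRead;
    use std::io::Cursor;
    use std::path::Path;

    let s = "Num       RefCount Protocol Flags    Type St Inode Path
0000000000000000: 00000002 00000000 00010000 0001 03 23456 /run/example.sock
0000000000000000: 00000002 00000000 00000000 0002 01 23457";

    let entries = UnixNetEntries::from_buf_read(Cursor::new(s)).unwrap();
    assert_eq!(entries.0.len(), 2);
    // 0x0001 is SOCK_STREAM; state 0x03 is CONNECTED.
    assert_eq!(entries.0[0].socket_type, 1);
    assert_eq!(entries.0[0].state, UnixState::CONNECTED);
    assert_eq!(entries.0[0].path.as_deref(), Some(Path::new("/run/example.sock")));
    // An unbound socket has no path column.
    assert!(entries.0[1].path.is_none());
}
```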
/// An entry in the ARP table
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct ARPEntry {
/// IPv4 address
pub ip_address: Ipv4Addr,
/// Hardware type
///
/// This will almost always be ETHER (or maybe INFINIBAND)
pub hw_type: ARPHardware,
/// Internal kernel flags
pub flags: ARPFlags,
/// MAC Address
pub hw_address: Option<[u8; 6]>,
/// Device name
pub device: String,
}
bitflags! {
/// Hardware type for an ARP table entry.
// source: include/uapi/linux/if_arp.h
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, PartialOrd, Ord)]
pub struct ARPHardware: u32 {
/// NET/ROM pseudo
const NETROM = 0;
/// Ethernet
const ETHER = 1;
/// Experimental ethernet
const EETHER = 2;
/// AX.25 Level 2
const AX25 = 3;
/// PROnet token ring
const PRONET = 4;
/// Chaosnet
const CHAOS = 5;
/// IEEE 802.2 Ethernet/TR/TB
const IEEE802 = 6;
/// Arcnet
const ARCNET = 7;
/// APPLEtalk
const APPLETLK = 8;
/// Frame Relay DLCI
const DLCI = 15;
/// ATM
const ATM = 19;
/// Metricom STRIP
const METRICOM = 23;
/// IEEE 1394 IPv4 - RFC 2734
const IEEE1394 = 24;
/// EUI-64
const EUI64 = 27;
/// InfiniBand
const INFINIBAND = 32;
}
}
bitflags! {
/// Flags for ARP entries
// source: include/uapi/linux/if_arp.h
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, PartialOrd, Ord)]
pub struct ARPFlags: u32 {
/// Completed entry
const COM = 0x02;
/// Permanent entry
const PERM = 0x04;
/// Publish entry
const PUBL = 0x08;
/// Has requested trailers
const USETRAILERS = 0x10;
/// Want to use a netmask (only for proxy entries)
const NETMASK = 0x20;
/// Don't answer this address
const DONTPUB = 0x40;
}
}
/// ARP table entries.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct ArpEntries(pub Vec<ARPEntry>);
impl super::FromBufRead for ArpEntries {
fn from_buf_read<R: BufRead>(r: R) -> ProcResult<Self> {
let mut vec = Vec::new();
// First line is a header we need to skip
for line in r.lines().skip(1) {
// Check if there might have been an IO error.
let line = line?;
let mut line = line.split_whitespace();
let ip_address = expect!(Ipv4Addr::from_str(expect!(line.next())));
let hw = from_str!(u32, &expect!(line.next())[2..], 16);
let hw = ARPHardware::from_bits_truncate(hw);
let flags = from_str!(u32, &expect!(line.next())[2..], 16);
let flags = ARPFlags::from_bits_truncate(flags);
let mac = expect!(line.next());
let mut mac: Vec<Result<u8, _>> = mac.split(':').map(|s| Ok(from_str!(u8, s, 16))).collect();
let mac = if mac.len() == 6 {
let mac_block_f = mac.pop().unwrap()?;
let mac_block_e = mac.pop().unwrap()?;
let mac_block_d = mac.pop().unwrap()?;
let mac_block_c = mac.pop().unwrap()?;
let mac_block_b = mac.pop().unwrap()?;
let mac_block_a = mac.pop().unwrap()?;
if mac_block_a == 0
&& mac_block_b == 0
&& mac_block_c == 0
&& mac_block_d == 0
&& mac_block_e == 0
&& mac_block_f == 0
{
None
} else {
Some([
mac_block_a,
mac_block_b,
mac_block_c,
mac_block_d,
mac_block_e,
mac_block_f,
])
}
} else {
None
};
// mask is always "*"
let _mask = expect!(line.next());
let dev = expect!(line.next());
vec.push(ARPEntry {
ip_address,
hw_type: hw,
flags,
hw_address: mac,
device: dev.to_string(),
})
}
Ok(ArpEntries(vec))
}
}
/// General statistics for a network interface/device
///
/// For an example, see the [interface_stats.rs](https://github.com/eminence/procfs/tree/master/examples)
/// example in the source repo.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct DeviceStatus {
/// Name of the interface
pub name: String,
/// Total bytes received
pub recv_bytes: u64,
/// Total packets received
pub recv_packets: u64,
/// Bad packets received
pub recv_errs: u64,
/// Packets dropped
pub recv_drop: u64,
/// Fifo overrun
pub recv_fifo: u64,
/// Frame alignment errors
pub recv_frame: u64,
/// Number of compressed packets received
pub recv_compressed: u64,
/// Number of multicast packets received
pub recv_multicast: u64,
/// Total bytes transmitted
pub sent_bytes: u64,
/// Total packets transmitted
pub sent_packets: u64,
/// Number of transmission errors
pub sent_errs: u64,
/// Number of packets dropped during transmission
pub sent_drop: u64,
pub sent_fifo: u64,
/// Number of collisions
pub sent_colls: u64,
/// Number of packets not sent due to carrier errors
pub sent_carrier: u64,
/// Number of compressed packets transmitted
pub sent_compressed: u64,
}
impl DeviceStatus {
fn from_str(s: &str) -> ProcResult<DeviceStatus> {
let mut split = s.split_whitespace();
let name: String = expect!(from_iter(&mut split));
let recv_bytes = expect!(from_iter(&mut split));
let recv_packets = expect!(from_iter(&mut split));
let recv_errs = expect!(from_iter(&mut split));
let recv_drop = expect!(from_iter(&mut split));
let recv_fifo = expect!(from_iter(&mut split));
let recv_frame = expect!(from_iter(&mut split));
let recv_compressed = expect!(from_iter(&mut split));
let recv_multicast = expect!(from_iter(&mut split));
let sent_bytes = expect!(from_iter(&mut split));
let sent_packets = expect!(from_iter(&mut split));
let sent_errs = expect!(from_iter(&mut split));
let sent_drop = expect!(from_iter(&mut split));
let sent_fifo = expect!(from_iter(&mut split));
let sent_colls = expect!(from_iter(&mut split));
let sent_carrier = expect!(from_iter(&mut split));
let sent_compressed = expect!(from_iter(&mut split));
Ok(DeviceStatus {
name: name.trim_end_matches(':').to_owned(),
recv_bytes,
recv_packets,
recv_errs,
recv_drop,
recv_fifo,
recv_frame,
recv_compressed,
recv_multicast,
sent_bytes,
sent_packets,
sent_errs,
sent_drop,
sent_fifo,
sent_colls,
sent_carrier,
sent_compressed,
})
}
}
/// Device status information for all network interfaces.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct InterfaceDeviceStatus(pub HashMap<String, DeviceStatus>);
impl super::FromBufRead for InterfaceDeviceStatus {
fn from_buf_read<R: BufRead>(r: R) -> ProcResult<Self> {
let mut map = HashMap::new();
// the first two lines are headers, so skip them
for line in r.lines().skip(2) {
let dev = DeviceStatus::from_str(&line?)?;
map.insert(dev.name.clone(), dev);
}
Ok(InterfaceDeviceStatus(map))
}
}
/// An entry in the ipv4 route table
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct RouteEntry {
/// Interface to which packets for this route will be sent
pub iface: String,
/// The destination network or destination host
pub destination: Ipv4Addr,
pub gateway: Ipv4Addr,
pub flags: u16,
/// Number of references to this route
pub refcnt: u16,
/// Count of lookups for the route
pub in_use: u16,
/// The 'distance' to the target (usually counted in hops)
pub metrics: u32,
pub mask: Ipv4Addr,
/// Default maximum transmission unit for TCP connections over this route
pub mtu: u32,
/// Default window size for TCP connections over this route
pub window: u32,
/// Initial RTT (Round Trip Time)
pub irtt: u32,
}
/// A set of ipv4 routes.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct RouteEntries(pub Vec<RouteEntry>);
impl super::FromBufRead for RouteEntries {
fn from_buf_read<R: BufRead>(r: R) -> ProcResult<Self> {
let mut vec = Vec::new();
// First line is a header we need to skip
for line in r.lines().skip(1) {
// Check if there might have been an IO error.
let line = line?;
let mut line = line.split_whitespace();
// network interface name, e.g. eth0
let iface = expect!(line.next());
let destination = from_str!(u32, expect!(line.next()), 16).to_ne_bytes().into();
let gateway = from_str!(u32, expect!(line.next()), 16).to_ne_bytes().into();
let flags = from_str!(u16, expect!(line.next()), 16);
let refcnt = from_str!(u16, expect!(line.next()), 10);
let in_use = from_str!(u16, expect!(line.next()), 10);
let metrics = from_str!(u32, expect!(line.next()), 10);
let mask = from_str!(u32, expect!(line.next()), 16).to_ne_bytes().into();
let mtu = from_str!(u32, expect!(line.next()), 10);
let window = from_str!(u32, expect!(line.next()), 10);
let irtt = from_str!(u32, expect!(line.next()), 10);
vec.push(RouteEntry {
iface: iface.to_string(),
destination,
gateway,
flags,
refcnt,
in_use,
metrics,
mask,
mtu,
window,
irtt,
});
}
Ok(RouteEntries(vec))
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::net::IpAddr;
#[test]
fn test_parse_ipaddr() {
use std::str::FromStr;
let addr = parse_addressport_str("0100007F:1234", true).unwrap();
assert_eq!(addr.port(), 0x1234);
match addr.ip() {
IpAddr::V4(addr) => assert_eq!(addr, Ipv4Addr::new(127, 0, 0, 1)),
_ => panic!("Not IPv4"),
}
// When you connect to [2a00:1450:4001:814::200e]:80 (ipv6.google.com) the entry with
// 5014002A14080140000000000E200000:0050 remote endpoint is created in /proc/net/tcp6
// on Linux 4.19.
let addr = parse_addressport_str("5014002A14080140000000000E200000:0050", true).unwrap();
assert_eq!(addr.port(), 80);
match addr.ip() {
IpAddr::V6(addr) => assert_eq!(addr, Ipv6Addr::from_str("2a00:1450:4001:814::200e").unwrap()),
_ => panic!("Not IPv6"),
}
// IPv6 test case from https://stackoverflow.com/questions/41940483/parse-ipv6-addresses-from-proc-net-tcp6-python-2-7/41948004#41948004
let addr = parse_addressport_str("B80D01200000000067452301EFCDAB89:0", true).unwrap();
assert_eq!(addr.port(), 0);
match addr.ip() {
IpAddr::V6(addr) => assert_eq!(addr, Ipv6Addr::from_str("2001:db8::123:4567:89ab:cdef").unwrap()),
_ => panic!("Not IPv6"),
}
let addr = parse_addressport_str("1234:1234", true);
assert!(addr.is_err());
}
#[test]
fn test_tcpstate_from() {
assert_eq!(TcpState::from_u8(0xA).unwrap(), TcpState::Listen);
}
}

View File

@ -0,0 +1,161 @@
//! Pressure stall information retrieved from `/proc/pressure/cpu`,
//! `/proc/pressure/memory` and `/proc/pressure/io`.
//! These files may not be available on kernels older than 4.20.0.
//! For reference: <https://lwn.net/Articles/759781/>
//!
//! See also: <https://www.kernel.org/doc/Documentation/accounting/psi.txt>
use crate::{ProcError, ProcResult};
use std::collections::HashMap;
#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
/// Pressure stall information for either CPU, memory, or IO.
///
/// See also: <https://www.kernel.org/doc/Documentation/accounting/psi.txt>
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct PressureRecord {
/// 10 second window
///
/// The percentage of time, over a 10 second window, that either some or all tasks were stalled
/// waiting for a resource.
pub avg10: f32,
/// 60 second window
///
/// The percentage of time, over a 60 second window, that either some or all tasks were stalled
/// waiting for a resource.
pub avg60: f32,
/// 300 second window
///
/// The percentage of time, over a 300 second window, that either some or all tasks were stalled
/// waiting for a resource.
pub avg300: f32,
/// Total stall time (in microseconds).
pub total: u64,
}
/// CPU pressure information
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct CpuPressure {
pub some: PressureRecord,
}
impl super::FromBufRead for CpuPressure {
fn from_buf_read<R: std::io::BufRead>(mut r: R) -> ProcResult<Self> {
let mut some = String::new();
r.read_line(&mut some)?;
Ok(CpuPressure {
some: parse_pressure_record(&some)?,
})
}
}
/// Memory pressure information
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct MemoryPressure {
/// This record indicates the share of time in which at least some tasks are stalled
pub some: PressureRecord,
/// This record indicates the share of time in which all non-idle tasks are stalled
/// simultaneously.
pub full: PressureRecord,
}
impl super::FromBufRead for MemoryPressure {
fn from_buf_read<R: std::io::BufRead>(r: R) -> ProcResult<Self> {
let (some, full) = get_pressure(r)?;
Ok(MemoryPressure { some, full })
}
}
/// IO pressure information
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct IoPressure {
/// This record indicates the share of time in which at least some tasks are stalled
pub some: PressureRecord,
/// This record indicates the share of time in which all non-idle tasks are stalled
/// simultaneously.
pub full: PressureRecord,
}
impl super::FromBufRead for IoPressure {
fn from_buf_read<R: std::io::BufRead>(r: R) -> ProcResult<Self> {
let (some, full) = get_pressure(r)?;
Ok(IoPressure { some, full })
}
}
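A usage sketch (the crate paths are an assumption, and on kernels older than 4.20 the file may simply not exist): since these types only implement `FromBufRead`, reading live data is a matter of handing them a buffered reader over the corresponding `/proc/pressure` file.

```rust
// Sketch only; assumes `MemoryPressure` and `FromBufRead` are reachable from
// the procfs_core crate root and that /proc/pressure/memory exists.
use procfs_core::{FromBufRead, MemoryPressure};
use std::fs::File;
use std::io::BufReader;

fn main() -> std::io::Result<()> {
    let file = File::open("/proc/pressure/memory")?;
    match MemoryPressure::from_buf_read(BufReader::new(file)) {
        Ok(p) => println!(
            "some avg10={}% full avg10={}% (totals: {} us / {} us)",
            p.some.avg10, p.full.avg10, p.some.total, p.full.total
        ),
        Err(e) => eprintln!("could not parse pressure data: {:?}", e),
    }
    Ok(())
}
```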
fn get_f32(map: &HashMap<&str, &str>, value: &str) -> ProcResult<f32> {
map.get(value).map_or_else(
|| Err(ProcError::Incomplete(None)),
|v| v.parse::<f32>().map_err(|_| ProcError::Incomplete(None)),
)
}
fn get_total(map: &HashMap<&str, &str>) -> ProcResult<u64> {
map.get("total").map_or_else(
|| Err(ProcError::Incomplete(None)),
|v| v.parse::<u64>().map_err(|_| ProcError::Incomplete(None)),
)
}
fn parse_pressure_record(line: &str) -> ProcResult<PressureRecord> {
let mut parsed = HashMap::new();
if !line.starts_with("some") && !line.starts_with("full") {
return Err(ProcError::Incomplete(None));
}
let values = &line[5..];
for kv_str in values.split_whitespace() {
let kv_split = kv_str.split('=');
let vec: Vec<&str> = kv_split.collect();
if vec.len() == 2 {
parsed.insert(vec[0], vec[1]);
}
}
Ok(PressureRecord {
avg10: get_f32(&parsed, "avg10")?,
avg60: get_f32(&parsed, "avg60")?,
avg300: get_f32(&parsed, "avg300")?,
total: get_total(&parsed)?,
})
}
fn get_pressure<R: std::io::BufRead>(mut r: R) -> ProcResult<(PressureRecord, PressureRecord)> {
let mut some = String::new();
r.read_line(&mut some)?;
let mut full = String::new();
r.read_line(&mut full)?;
Ok((parse_pressure_record(&some)?, parse_pressure_record(&full)?))
}
#[cfg(test)]
mod test {
use super::*;
use std::f32::EPSILON;
#[test]
fn test_parse_pressure_record() {
let record = parse_pressure_record("full avg10=2.10 avg60=0.12 avg300=0.00 total=391926").unwrap();
assert!((record.avg10 - 2.10).abs() < EPSILON);
assert!((record.avg60 - 0.12).abs() < EPSILON);
assert!((record.avg300 - 0.00).abs() < EPSILON);
assert_eq!(record.total, 391_926);
}
#[test]
fn test_parse_pressure_record_errs() {
assert!(parse_pressure_record("avg10=2.10 avg60=0.12 avg300=0.00 total=391926").is_err());
assert!(parse_pressure_record("some avg10=2.10 avg300=0.00 total=391926").is_err());
assert!(parse_pressure_record("some avg10=2.10 avg60=0.00 avg300=0.00").is_err());
}
}

View File

@ -0,0 +1,90 @@
use std::fmt;
/// Clearing the PG_Referenced and ACCESSED/YOUNG bits
/// provides a method to measure approximately how much memory
/// a process is using. One first inspects the values in the
/// "Referenced" fields for the VMAs shown in
/// `/proc/[pid]/smaps` to get an idea of the memory footprint
/// of the process. One then clears the PG_Referenced and
/// ACCESSED/YOUNG bits and, after some measured time
/// interval, once again inspects the values in the
/// "Referenced" fields to get an idea of the change in memory
/// footprint of the process during the measured interval. If
/// one is interested only in inspecting the selected mapping
/// types, then the value 2 or 3 can be used instead of 1.
///
/// The `/proc/[pid]/clear_refs` file is present only if the
/// CONFIG_PROC_PAGE_MONITOR kernel configuration option is
/// enabled.
///
/// Only writable by the owner of the process
///
/// See `procfs::Process::clear_refs()` and `procfs::Process::pagemap()`
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum ClearRefs {
/// (since Linux 2.6.22)
///
/// Reset the PG_Referenced and ACCESSED/YOUNG bits for
/// all the pages associated with the process. (Before
/// kernel 2.6.32, writing any nonzero value to this
/// file had this effect.)
PGReferencedAll = 1,
/// (since Linux 2.6.32)
///
/// Reset the PG_Referenced and ACCESSED/YOUNG bits for
/// all anonymous pages associated with the process.
PGReferencedAnonymous = 2,
/// (since Linux 2.6.32)
///
/// Reset the PG_Referenced and ACCESSED/YOUNG bits for
/// all file-mapped pages associated with the process.
PGReferencedFile = 3,
/// (since Linux 3.11)
///
/// Clear the soft-dirty bit for all the pages
/// associated with the process. This is used (in
/// conjunction with `/proc/[pid]/pagemap`) by the
/// checkpoint restore system to discover which pages of a
/// process have been dirtied since the file
/// `/proc/[pid]/clear_refs` was written to.
SoftDirty = 4,
/// (since Linux 4.0)
///
/// Reset the peak resident set size ("high water
/// mark") to the process's current resident set size
/// value.
PeakRSS = 5,
}
impl fmt::Display for ClearRefs {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"{}",
match self {
ClearRefs::PGReferencedAll => 1,
ClearRefs::PGReferencedAnonymous => 2,
ClearRefs::PGReferencedFile => 3,
ClearRefs::SoftDirty => 4,
ClearRefs::PeakRSS => 5,
}
)
}
}
impl std::str::FromStr for ClearRefs {
type Err = &'static str;
fn from_str(s: &str) -> Result<Self, Self::Err> {
s.parse()
.map_err(|_| "Failed to parse clear refs value")
.and_then(|n| match n {
1 => Ok(ClearRefs::PGReferencedAll),
2 => Ok(ClearRefs::PGReferencedAnonymous),
3 => Ok(ClearRefs::PGReferencedFile),
4 => Ok(ClearRefs::SoftDirty),
5 => Ok(ClearRefs::PeakRSS),
_ => Err("Unknown clear refs value"),
})
}
}
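Because `Display` emits the numeric code the kernel expects, the workflow described in the doc comment boils down to writing that value into `clear_refs`; a minimal sketch (the standalone helper below is hypothetical, and the write requires owning the target process):

```rust
use std::fs;
use std::io;

// Hypothetical helper: clear the soft-dirty bits of `pid` so that later reads
// of /proc/<pid>/pagemap show which pages were dirtied afterwards.
fn clear_soft_dirty(pid: u32) -> io::Result<()> {
    // `ClearRefs::SoftDirty` renders as "4" via its Display impl.
    fs::write(format!("/proc/{}/clear_refs", pid), ClearRefs::SoftDirty.to_string())
}
```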

View File

@ -0,0 +1,197 @@
use crate::{ProcError, ProcResult};
use std::collections::HashMap;
use std::io::BufRead;
use std::str::FromStr;
#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
/// Process limits
///
/// For more details about each of these limits, see the `getrlimit` man page.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct Limits {
/// Max Cpu Time
///
/// This is a limit, in seconds, on the amount of CPU time that the process can consume.
pub max_cpu_time: Limit,
/// Max file size
///
/// This is the maximum size in bytes of files that the process may create.
pub max_file_size: Limit,
/// Max data size
///
/// This is the maximum size of the process's data segment (initialized data, uninitialized
/// data, and heap).
pub max_data_size: Limit,
/// Max stack size
///
/// This is the maximum size of the process stack, in bytes.
pub max_stack_size: Limit,
/// Max core file size
///
/// This is the maximum size of a *core* file in bytes that the process may dump.
pub max_core_file_size: Limit,
/// Max resident set
///
/// This is a limit (in bytes) on the process's resident set (the number of virtual pages
/// resident in RAM).
pub max_resident_set: Limit,
/// Max processes
///
/// This is a limit on the number of extant process (or, more precisely on Linux, threads) for
/// the real user ID of the calling process.
pub max_processes: Limit,
/// Max open files
///
/// This specifies a value one greater than the maximum file descriptor number that can be
/// opened by this process.
pub max_open_files: Limit,
/// Max locked memory
///
/// This is the maximum number of bytes of memory that may be locked into RAM.
pub max_locked_memory: Limit,
/// Max address space
///
/// This is the maximum size of the process's virtual memory (address space).
pub max_address_space: Limit,
/// Max file locks
///
/// This is a limit on the combined number of flock locks and fcntl leases that this process
/// may establish.
pub max_file_locks: Limit,
/// Max pending signals
///
/// This is a limit on the number of signals that may be queued for the real user ID of the
/// calling process.
pub max_pending_signals: Limit,
/// Max msgqueue size
///
/// This is a limit on the number of bytes that can be allocated for POSIX message queues for
/// the real user ID of the calling process.
pub max_msgqueue_size: Limit,
/// Max nice priority
///
/// This specifies a ceiling to which the process's nice value can be raised using
/// `setpriority` or `nice`.
pub max_nice_priority: Limit,
/// Max realtime priority
///
/// This specifies a ceiling on the real-time priority that may be set for this process using
/// `sched_setscheduler` and `sched_setparam`.
pub max_realtime_priority: Limit,
/// Max realtime timeout
///
/// This is a limit (in microseconds) on the amount of CPU time that a process scheduled under
/// a real-time scheduling policy may consume without making a blocking system call.
pub max_realtime_timeout: Limit,
}
impl crate::FromBufRead for Limits {
fn from_buf_read<R: BufRead>(r: R) -> ProcResult<Self> {
let mut lines = r.lines();
let mut map = HashMap::new();
while let Some(Ok(line)) = lines.next() {
let line = line.trim();
if line.starts_with("Limit") {
continue;
}
let s: Vec<_> = line.split_whitespace().collect();
let l = s.len();
let (hard_limit, soft_limit, name) =
if line.starts_with("Max nice priority") || line.starts_with("Max realtime priority") {
// these two limits don't have units, and so need different offsets:
let hard_limit = expect!(s.get(l - 1)).to_owned();
let soft_limit = expect!(s.get(l - 2)).to_owned();
let name = s[0..l - 2].join(" ");
(hard_limit, soft_limit, name)
} else {
let hard_limit = expect!(s.get(l - 2)).to_owned();
let soft_limit = expect!(s.get(l - 3)).to_owned();
let name = s[0..l - 3].join(" ");
(hard_limit, soft_limit, name)
};
let _units = expect!(s.get(l - 1));
map.insert(name.to_owned(), (soft_limit.to_owned(), hard_limit.to_owned()));
}
let limits = Limits {
max_cpu_time: Limit::from_pair(expect!(map.remove("Max cpu time")))?,
max_file_size: Limit::from_pair(expect!(map.remove("Max file size")))?,
max_data_size: Limit::from_pair(expect!(map.remove("Max data size")))?,
max_stack_size: Limit::from_pair(expect!(map.remove("Max stack size")))?,
max_core_file_size: Limit::from_pair(expect!(map.remove("Max core file size")))?,
max_resident_set: Limit::from_pair(expect!(map.remove("Max resident set")))?,
max_processes: Limit::from_pair(expect!(map.remove("Max processes")))?,
max_open_files: Limit::from_pair(expect!(map.remove("Max open files")))?,
max_locked_memory: Limit::from_pair(expect!(map.remove("Max locked memory")))?,
max_address_space: Limit::from_pair(expect!(map.remove("Max address space")))?,
max_file_locks: Limit::from_pair(expect!(map.remove("Max file locks")))?,
max_pending_signals: Limit::from_pair(expect!(map.remove("Max pending signals")))?,
max_msgqueue_size: Limit::from_pair(expect!(map.remove("Max msgqueue size")))?,
max_nice_priority: Limit::from_pair(expect!(map.remove("Max nice priority")))?,
max_realtime_priority: Limit::from_pair(expect!(map.remove("Max realtime priority")))?,
max_realtime_timeout: Limit::from_pair(expect!(map.remove("Max realtime timeout")))?,
};
if cfg!(test) {
assert!(map.is_empty(), "Map isn't empty: {:?}", map);
}
Ok(limits)
}
}
#[derive(Debug, Copy, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct Limit {
pub soft_limit: LimitValue,
pub hard_limit: LimitValue,
}
impl Limit {
fn from_pair(l: (String, String)) -> ProcResult<Limit> {
let (soft, hard) = l;
Ok(Limit {
soft_limit: LimitValue::from_str(&soft)?,
hard_limit: LimitValue::from_str(&hard)?,
})
}
}
#[derive(Debug, Copy, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub enum LimitValue {
Unlimited,
Value(u64),
}
impl FromStr for LimitValue {
type Err = ProcError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
if s == "unlimited" {
Ok(LimitValue::Unlimited)
} else {
Ok(LimitValue::Value(from_str!(u64, s)))
}
}
}
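For instance, rendering a parsed `Limit` back into the familiar soft/hard form is just a match on the two `LimitValue` variants; a small hypothetical helper:

```rust
// Hypothetical helper over the types defined above.
fn describe(limit: &Limit) -> String {
    let show = |v: &LimitValue| match v {
        LimitValue::Unlimited => "unlimited".to_string(),
        LimitValue::Value(n) => n.to_string(),
    };
    format!("soft={} hard={}", show(&limit.soft_limit), show(&limit.hard_limit))
}
```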

View File

@ -0,0 +1,829 @@
//! Functions and structs related to process information
//!
//! The primary source of data for functions in this module is the files in a `/proc/<pid>/`
//! directory.
use super::*;
use crate::from_iter;
#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
use std::io::Read;
use std::path::PathBuf;
use std::str::FromStr;
mod limit;
pub use limit::*;
mod stat;
pub use stat::*;
mod mount;
pub use mount::*;
mod namespaces;
pub use namespaces::*;
mod status;
pub use status::*;
mod schedstat;
pub use schedstat::*;
mod smaps_rollup;
pub use smaps_rollup::*;
mod pagemap;
pub use pagemap::*;
mod clear_refs;
pub use clear_refs::*;
bitflags! {
/// Kernel flags for a process
///
/// See also the [Stat::flags()] method.
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, PartialOrd, Ord)]
pub struct StatFlags: u32 {
/// I am an IDLE thread
const PF_IDLE = 0x0000_0002;
/// Getting shut down
const PF_EXITING = 0x0000_0004;
/// PI exit done on shut down
const PF_EXITPIDONE = 0x0000_0008;
/// I'm a virtual CPU
const PF_VCPU = 0x0000_0010;
/// I'm a workqueue worker
const PF_WQ_WORKER = 0x0000_0020;
/// Forked but didn't exec
const PF_FORKNOEXEC = 0x0000_0040;
/// Process policy on mce errors;
const PF_MCE_PROCESS = 0x0000_0080;
/// Used super-user privileges
const PF_SUPERPRIV = 0x0000_0100;
/// Dumped core
const PF_DUMPCORE = 0x0000_0200;
/// Killed by a signal
const PF_SIGNALED = 0x0000_0400;
/// Allocating memory
const PF_MEMALLOC = 0x0000_0800;
/// set_user() noticed that RLIMIT_NPROC was exceeded
const PF_NPROC_EXCEEDED = 0x0000_1000;
/// If unset the fpu must be initialized before use
const PF_USED_MATH = 0x0000_2000;
/// Used async_schedule*(), used by module init
const PF_USED_ASYNC = 0x0000_4000;
/// This thread should not be frozen
const PF_NOFREEZE = 0x0000_8000;
/// Frozen for system suspend
const PF_FROZEN = 0x0001_0000;
/// I am kswapd
const PF_KSWAPD = 0x0002_0000;
/// All allocation requests will inherit GFP_NOFS
const PF_MEMALLOC_NOFS = 0x0004_0000;
/// All allocation requests will inherit GFP_NOIO
const PF_MEMALLOC_NOIO = 0x0008_0000;
/// Throttle me less: I clean memory
const PF_LESS_THROTTLE = 0x0010_0000;
/// I am a kernel thread
const PF_KTHREAD = 0x0020_0000;
/// Randomize virtual address space
const PF_RANDOMIZE = 0x0040_0000;
/// Allowed to write to swap
const PF_SWAPWRITE = 0x0080_0000;
/// Stalled due to lack of memory
const PF_MEMSTALL = 0x0100_0000;
/// I'm an Usermodehelper process
const PF_UMH = 0x0200_0000;
/// Userland is not allowed to meddle with cpus_allowed
const PF_NO_SETAFFINITY = 0x0400_0000;
/// Early kill for mce process policy
const PF_MCE_EARLY = 0x0800_0000;
/// All allocation request will have _GFP_MOVABLE cleared
const PF_MEMALLOC_NOCMA = 0x1000_0000;
/// Thread belongs to the rt mutex tester
const PF_MUTEX_TESTER = 0x2000_0000;
/// Freezer should not count it as freezable
const PF_FREEZER_SKIP = 0x4000_0000;
/// This thread called freeze_processes() and should not be frozen
const PF_SUSPEND_TASK = 0x8000_0000;
}
}
bitflags! {
/// See the [coredump_filter()](struct.Process.html#method.coredump_filter) method.
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, PartialOrd, Ord)]
pub struct CoredumpFlags: u32 {
const ANONYMOUS_PRIVATE_MAPPINGS = 0x01;
const ANONYMOUS_SHARED_MAPPINGS = 0x02;
const FILEBACKED_PRIVATE_MAPPINGS = 0x04;
const FILEBACKED_SHARED_MAPPINGS = 0x08;
const ELF_HEADERS = 0x10;
const PROVATE_HUGEPAGES = 0x20;
const SHARED_HUGEPAGES = 0x40;
const PRIVATE_DAX_PAGES = 0x80;
const SHARED_DAX_PAGES = 0x100;
}
}
bitflags! {
/// The permissions a process has on memory map entries.
///
/// Note that the `SHARED` and `PRIVATE` are mutually exclusive, so while you can
/// use `MMPermissions::all()` to construct an instance that has all bits set,
/// this particular value would never be seen in procfs.
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, PartialOrd, Ord, Default)]
pub struct MMPermissions: u8 {
/// No permissions
const NONE = 0;
/// Read permission
const READ = 1 << 0;
/// Write permission
const WRITE = 1 << 1;
/// Execute permission
const EXECUTE = 1 << 2;
/// Memory is shared with another process.
///
/// Mutually exclusive with PRIVATE.
const SHARED = 1 << 3;
/// Memory is private (and copy-on-write)
///
/// Mutually exclusive with SHARED.
const PRIVATE = 1 << 4;
}
}
impl MMPermissions {
fn from_ascii_char(b: u8) -> Self {
match b {
b'r' => Self::READ,
b'w' => Self::WRITE,
b'x' => Self::EXECUTE,
b's' => Self::SHARED,
b'p' => Self::PRIVATE,
_ => Self::NONE,
}
}
/// Returns this permission map as a 4-character string, similar to what you
/// might see in `/proc/\<pid\>/maps`.
///
/// Note that the SHARED and PRIVATE bits are mutually exclusive, so this
/// string is 4 characters long, not 5.
pub fn as_str(&self) -> String {
let mut s = String::with_capacity(4);
s.push(if self.contains(Self::READ) { 'r' } else { '-' });
s.push(if self.contains(Self::WRITE) { 'w' } else { '-' });
s.push(if self.contains(Self::EXECUTE) { 'x' } else { '-' });
s.push(if self.contains(Self::SHARED) {
's'
} else if self.contains(Self::PRIVATE) {
'p'
} else {
'-'
});
s
}
}
impl FromStr for MMPermissions {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> Result<Self, Self::Err> {
// Only operate on ASCII (byte) values
Ok(s.bytes()
.map(Self::from_ascii_char)
.fold(Self::default(), std::ops::BitOr::bitor))
}
}
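Since the `FromStr` impl is infallible, permission strings from `/proc/<pid>/maps` round-trip cleanly; a quick hypothetical test:

```rust
#[test]
fn test_mmpermissions_roundtrip_sketch() {
    // "rw-p" is the typical private read-write mapping string.
    let perms: MMPermissions = "rw-p".parse().unwrap();
    assert!(perms.contains(MMPermissions::READ | MMPermissions::WRITE | MMPermissions::PRIVATE));
    assert!(!perms.contains(MMPermissions::EXECUTE));
    assert_eq!(perms.as_str(), "rw-p");
}
```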
bitflags! {
/// Represents the kernel flags associated with the virtual memory area.
/// The names of these flags are just those you'll find in the man page, but in upper case.
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, PartialOrd, Ord, Default)]
pub struct VmFlags: u32 {
/// No flags
const NONE = 0;
/// Readable
const RD = 1 << 0;
/// Writable
const WR = 1 << 1;
/// Executable
const EX = 1 << 2;
/// Shared
const SH = 1 << 3;
/// May read
const MR = 1 << 4;
/// May write
const MW = 1 << 5;
/// May execute
const ME = 1 << 6;
/// May share
const MS = 1 << 7;
/// Stack segment grows down
const GD = 1 << 8;
/// Pure PFN range
const PF = 1 << 9;
/// Disable write to the mapped file
const DW = 1 << 10;
/// Pages are locked in memory
const LO = 1 << 11;
/// Memory mapped I/O area
const IO = 1 << 12;
/// Sequential read advise provided
const SR = 1 << 13;
/// Random read advise provided
const RR = 1 << 14;
/// Do not copy area on fork
const DC = 1 << 15;
/// Do not expand area on remapping
const DE = 1 << 16;
/// Area is accountable
const AC = 1 << 17;
/// Swap space is not reserved for the area
const NR = 1 << 18;
/// Area uses huge TLB pages
const HT = 1 << 19;
/// Perform synchronous page faults (since Linux 4.15)
const SF = 1 << 20;
/// Non-linear mapping (removed in Linux 4.0)
const NL = 1 << 21;
/// Architecture specific flag
const AR = 1 << 22;
/// Wipe on fork (since Linux 4.14)
const WF = 1 << 23;
/// Do not include area into core dump
const DD = 1 << 24;
/// Soft-dirty flag (since Linux 3.13)
const SD = 1 << 25;
/// Mixed map area
const MM = 1 << 26;
/// Huge page advise flag
const HG = 1 << 27;
/// No-huge page advise flag
const NH = 1 << 28;
/// Mergeable advise flag
const MG = 1 << 29;
/// Userfaultfd missing pages tracking (since Linux 4.3)
const UM = 1 << 30;
/// Userfaultfd wprotect pages tracking (since Linux 4.3)
const UW = 1 << 31;
}
}
impl VmFlags {
fn from_str(flag: &str) -> Self {
if flag.len() != 2 {
return VmFlags::NONE;
}
match flag {
"rd" => VmFlags::RD,
"wr" => VmFlags::WR,
"ex" => VmFlags::EX,
"sh" => VmFlags::SH,
"mr" => VmFlags::MR,
"mw" => VmFlags::MW,
"me" => VmFlags::ME,
"ms" => VmFlags::MS,
"gd" => VmFlags::GD,
"pf" => VmFlags::PF,
"dw" => VmFlags::DW,
"lo" => VmFlags::LO,
"io" => VmFlags::IO,
"sr" => VmFlags::SR,
"rr" => VmFlags::RR,
"dc" => VmFlags::DC,
"de" => VmFlags::DE,
"ac" => VmFlags::AC,
"nr" => VmFlags::NR,
"ht" => VmFlags::HT,
"sf" => VmFlags::SF,
"nl" => VmFlags::NL,
"ar" => VmFlags::AR,
"wf" => VmFlags::WF,
"dd" => VmFlags::DD,
"sd" => VmFlags::SD,
"mm" => VmFlags::MM,
"hg" => VmFlags::HG,
"nh" => VmFlags::NH,
"mg" => VmFlags::MG,
"um" => VmFlags::UM,
"uw" => VmFlags::UW,
_ => VmFlags::NONE,
}
}
}
/// Represents the state of a process.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum ProcState {
/// Running (R)
Running,
/// Sleeping in an interruptible wait (S)
Sleeping,
/// Waiting in uninterruptible disk sleep (D)
Waiting,
/// Zombie (Z)
Zombie,
/// Stopped (on a signal) (T)
///
/// Or before Linux 2.6.33, trace stopped
Stopped,
/// Tracing stop (t) (Linux 2.6.33 onward)
Tracing,
/// Dead (X)
Dead,
/// Wakekill (K) (Linux 2.6.33 to 3.13 only)
Wakekill,
/// Waking (W) (Linux 2.6.33 to 3.13 only)
Waking,
/// Parked (P) (Linux 3.9 to 3.13 only)
Parked,
/// Idle (I)
Idle,
}
impl ProcState {
pub fn from_char(c: char) -> Option<ProcState> {
match c {
'R' => Some(ProcState::Running),
'S' => Some(ProcState::Sleeping),
'D' => Some(ProcState::Waiting),
'Z' => Some(ProcState::Zombie),
'T' => Some(ProcState::Stopped),
't' => Some(ProcState::Tracing),
'X' | 'x' => Some(ProcState::Dead),
'K' => Some(ProcState::Wakekill),
'W' => Some(ProcState::Waking),
'P' => Some(ProcState::Parked),
'I' => Some(ProcState::Idle),
_ => None,
}
}
}
impl FromStr for ProcState {
type Err = ProcError;
fn from_str(s: &str) -> Result<ProcState, ProcError> {
ProcState::from_char(expect!(s.chars().next(), "empty string"))
.ok_or_else(|| build_internal_error!("failed to convert"))
}
}
/// This struct contains I/O statistics for the process, built from `/proc/<pid>/io`
///
/// # Note
///
/// In the current implementation, things are a bit racy on 32-bit systems: if process A
/// reads process B's `/proc/<pid>/io` while process B is updating one of these 64-bit
/// counters, process A could see an intermediate result.
#[derive(Debug, Copy, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct Io {
/// Characters read
///
/// The number of bytes which this task has caused to be read from storage. This is simply the
/// sum of bytes which this process passed to read(2) and similar system calls. It includes
/// things such as terminal I/O and is unaffected by whether or not actual physical disk I/O
/// was required (the read might have been satisfied from pagecache).
pub rchar: u64,
/// characters written
///
/// The number of bytes which this task has caused, or shall cause to be written to disk.
/// Similar caveats apply here as with rchar.
pub wchar: u64,
/// read syscalls
///
/// Attempt to count the number of read I/O operations, that is, system calls such as read(2)
/// and pread(2).
pub syscr: u64,
/// write syscalls
///
/// Attempt to count the number of write I/O operations—that is, system calls such as write(2)
/// and pwrite(2).
pub syscw: u64,
/// bytes read
///
/// Attempt to count the number of bytes which this process really did cause to be fetched from
/// the storage layer. This is accurate for block-backed filesystems.
pub read_bytes: u64,
/// bytes written
///
/// Attempt to count the number of bytes which this process caused to be sent to the storage layer.
pub write_bytes: u64,
/// Cancelled write bytes.
///
/// The big inaccuracy here is truncate. If a process writes 1MB to a file and then deletes
/// the file, it will in fact perform no write out. But it will have been accounted as having
/// caused 1MB of write. In other words: this field represents the number of bytes which this
/// process caused to not happen, by truncating pagecache. A task can cause "negative" I/O too.
/// If this task truncates some dirty pagecache, some I/O which another task has been accounted
/// for (in its write_bytes) will not be happening.
pub cancelled_write_bytes: u64,
}
#[derive(Debug, PartialEq, Eq, Clone, Hash)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub enum MMapPath {
/// The file that is backing the mapping.
Path(PathBuf),
/// The process's heap.
Heap,
/// The initial process's (also known as the main thread's) stack.
Stack,
/// A thread's stack (where the `<tid>` is a thread ID). It corresponds to the
/// `/proc/<pid>/task/<tid>/` path.
///
/// (since Linux 3.4)
TStack(u32),
/// The virtual dynamically linked shared object.
Vdso,
/// Shared kernel variables
Vvar,
/// obsolete virtual syscalls, succeeded by vdso
Vsyscall,
/// rollup memory mappings, from `/proc/<pid>/smaps_rollup`
Rollup,
/// An anonymous mapping as obtained via mmap(2).
Anonymous,
/// Shared memory segment. The i32 value corresponds to [Shm.key](Shm::key), while [MemoryMap.inode](MemoryMap::inode) corresponds to [Shm.shmid](Shm::shmid)
Vsys(i32),
/// Some other pseudo-path
Other(String),
}
impl MMapPath {
pub fn from(path: &str) -> ProcResult<MMapPath> {
Ok(match path.trim() {
"" => MMapPath::Anonymous,
"[heap]" => MMapPath::Heap,
"[stack]" => MMapPath::Stack,
"[vdso]" => MMapPath::Vdso,
"[vvar]" => MMapPath::Vvar,
"[vsyscall]" => MMapPath::Vsyscall,
"[rollup]" => MMapPath::Rollup,
x if x.starts_with("[stack:") => {
let mut s = x[1..x.len() - 1].split(':');
let tid = from_str!(u32, expect!(s.nth(1)));
MMapPath::TStack(tid)
}
x if x.starts_with('[') && x.ends_with(']') => MMapPath::Other(x[1..x.len() - 1].to_string()),
x if x.starts_with("/SYSV") => MMapPath::Vsys(u32::from_str_radix(&x[5..13], 16)? as i32), // 32bits signed hex. /SYSVaabbccdd (deleted)
x => MMapPath::Path(PathBuf::from(x)),
})
}
}
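// A sketch of how `MMapPath::from` classifies the pathname column of
// `/proc/<pid>/maps`; the concrete paths are made up for illustration.
#[test]
fn mmap_path_sketch() {
    assert_eq!(MMapPath::from("[heap]").unwrap(), MMapPath::Heap);
    assert_eq!(MMapPath::from("[stack:1234]").unwrap(), MMapPath::TStack(1234));
    assert_eq!(MMapPath::from("").unwrap(), MMapPath::Anonymous);
    assert_eq!(
        MMapPath::from("/usr/lib/libc.so.6").unwrap(),
        MMapPath::Path(PathBuf::from("/usr/lib/libc.so.6"))
    );
}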
/// Represents all entries in a `/proc/<pid>/maps` or `/proc/<pid>/smaps` file.
#[derive(Debug, PartialEq, Eq, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
#[non_exhaustive]
pub struct MemoryMaps(pub Vec<MemoryMap>);
impl crate::FromBufRead for MemoryMaps {
/// The data should be formatted according to procfs /proc/pid/{maps,smaps,smaps_rollup}.
fn from_buf_read<R: BufRead>(reader: R) -> ProcResult<Self> {
let mut memory_maps = Vec::new();
let mut line_iter = reader.lines().map(|r| r.map_err(|_| ProcError::Incomplete(None)));
let mut current_memory_map: Option<MemoryMap> = None;
while let Some(line) = line_iter.next().transpose()? {
// Assumes all extension fields (in `/proc/<pid>/smaps`) start with a capital letter,
// which seems to be the case.
if line.starts_with(|c: char| c.is_ascii_uppercase()) {
match current_memory_map.as_mut() {
None => return Err(ProcError::Incomplete(None)),
Some(mm) => {
// This is probably an attribute
if line.starts_with("VmFlags") {
let flags = line.split_ascii_whitespace();
let flags = flags.skip(1); // Skips the `VmFlags:` part since we don't need it.
let flags = flags
.map(VmFlags::from_str)
// FUTURE: use `Iterator::reduce`
.fold(VmFlags::NONE, std::ops::BitOr::bitor);
mm.extension.vm_flags = flags;
} else {
let mut parts = line.split_ascii_whitespace();
let key = parts.next();
let value = parts.next();
if let (Some(k), Some(v)) = (key, value) {
// While most entries do have one, not all of them do.
let size_suffix = parts.next();
// Limited poking at /proc/<pid>/smaps, plus checking whether "MB", "GB", or "TB" appear in the
// kernel C file that generates smaps, has led me to believe that the only size suffix we'll ever
// encounter is "kB" (most likely kibibytes). Checking whether the suffix really is one of the
// above would future-proof the code, but I am not sure it is worth doing.
let size_multiplier = if size_suffix.is_some() { 1024 } else { 1 };
let v = v.parse::<u64>().map_err(|_| {
ProcError::Other("Value in `Key: Value` pair was not actually a number".into())
})?;
// This ignores the case when our Key: Value pairs are really Key Value pairs. Is this a good idea?
let k = k.trim_end_matches(':');
mm.extension.map.insert(k.into(), v * size_multiplier);
}
}
}
}
} else {
if let Some(mm) = current_memory_map.take() {
memory_maps.push(mm);
}
current_memory_map = Some(MemoryMap::from_line(&line)?);
}
}
if let Some(mm) = current_memory_map.take() {
memory_maps.push(mm);
}
Ok(MemoryMaps(memory_maps))
}
}
impl MemoryMaps {
/// Return an iterator over [MemoryMap].
pub fn iter(&self) -> std::slice::Iter<MemoryMap> {
self.0.iter()
}
pub fn len(&self) -> usize {
self.0.len()
}
}
impl<'a> IntoIterator for &'a MemoryMaps {
type IntoIter = std::slice::Iter<'a, MemoryMap>;
type Item = &'a MemoryMap;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
impl IntoIterator for MemoryMaps {
type IntoIter = std::vec::IntoIter<MemoryMap>;
type Item = MemoryMap;
fn into_iter(self) -> Self::IntoIter {
self.0.into_iter()
}
}
/// Represents an entry in a `/proc/<pid>/maps` or `/proc/<pid>/smaps` file.
#[derive(Debug, PartialEq, Eq, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct MemoryMap {
/// The address space in the process that the mapping occupies.
pub address: (u64, u64),
pub perms: MMPermissions,
/// The offset into the file/whatever
pub offset: u64,
/// The device (major, minor)
pub dev: (i32, i32),
/// The inode on that device
///
/// 0 indicates that no inode is associated with the memory region, as would be the case with
/// BSS (uninitialized data).
pub inode: u64,
pub pathname: MMapPath,
/// Memory mapping extension information, populated when parsing `/proc/<pid>/smaps`.
///
/// The members will be `Default::default()` (empty/none) when the information isn't available.
pub extension: MMapExtension,
}
impl MemoryMap {
fn from_line(line: &str) -> ProcResult<MemoryMap> {
let mut s = line.splitn(6, ' ');
let address = expect!(s.next());
let perms = expect!(s.next());
let offset = expect!(s.next());
let dev = expect!(s.next());
let inode = expect!(s.next());
let path = expect!(s.next());
Ok(MemoryMap {
address: split_into_num(address, '-', 16)?,
perms: perms.parse()?,
offset: from_str!(u64, offset, 16),
dev: split_into_num(dev, ':', 16)?,
inode: from_str!(u64, inode),
pathname: MMapPath::from(path)?,
extension: Default::default(),
})
}
}
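// A sketch of the column layout `from_line` expects (address range, permissions,
// offset, device, inode, pathname); the values below are made up.
#[test]
fn memory_map_line_sketch() {
    let line = "7f2c0a000000-7f2c0a021000 r-xp 00001000 08:01 1234567 /usr/lib/libfoo.so";
    let mm = MemoryMap::from_line(line).unwrap();
    assert_eq!(mm.address, (0x7f2c0a000000, 0x7f2c0a021000));
    assert_eq!(mm.offset, 0x1000);
    assert_eq!(mm.dev, (8, 1));
    assert_eq!(mm.inode, 1234567);
    assert_eq!(mm.pathname, MMapPath::Path(PathBuf::from("/usr/lib/libfoo.so")));
}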
/// Represents the information about a specific mapping as presented in /proc/\<pid\>/smaps
#[derive(Default, Debug, PartialEq, Eq, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct MMapExtension {
/// Key-value pairs that may represent statistics about memory usage, or other interesting things,
/// such a "ProtectionKey" (if you're on X86 and that kernel config option was specified).
///
/// Note that should a key-value pair represent a memory usage statistic, it will be in bytes.
///
/// Check the proc(5) manpage for more information.
pub map: HashMap<String, u64>,
/// Kernel flags associated with the virtual memory area
///
/// (since Linux 3.8)
pub vm_flags: VmFlags,
}
impl MMapExtension {
/// Return whether the extension information is empty.
pub fn is_empty(&self) -> bool {
self.map.is_empty() && self.vm_flags == VmFlags::NONE
}
}
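// A sketch of parsing an smaps-style excerpt: the header line becomes a
// `MemoryMap`, the `Size:` attribute lands in `extension.map` (scaled to bytes),
// and the `VmFlags:` line is folded into `extension.vm_flags`. Sample data is
// made up.
#[test]
fn memory_maps_smaps_sketch() {
    use crate::FromBufRead;
    let data = "7f2c0a000000-7f2c0a021000 r-xp 00000000 08:01 1234567 /usr/lib/libfoo.so\nSize: 132 kB\nVmFlags: rd ex mr mw me\n";
    let maps = MemoryMaps::from_buf_read(data.as_bytes()).unwrap();
    assert_eq!(maps.len(), 1);
    let mm = maps.iter().next().unwrap();
    assert_eq!(mm.extension.map.get("Size").copied(), Some(132 * 1024));
    assert!(mm.extension.vm_flags.contains(VmFlags::EX));
}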
impl crate::FromBufRead for Io {
fn from_buf_read<R: BufRead>(reader: R) -> ProcResult<Self> {
let mut map = HashMap::new();
for line in reader.lines() {
let line = line?;
if line.is_empty() || !line.contains(' ') {
continue;
}
let mut s = line.split_whitespace();
let field = expect!(s.next());
let value = expect!(s.next());
let value = from_str!(u64, value);
map.insert(field[..field.len() - 1].to_string(), value);
}
let io = Io {
rchar: expect!(map.remove("rchar")),
wchar: expect!(map.remove("wchar")),
syscr: expect!(map.remove("syscr")),
syscw: expect!(map.remove("syscw")),
read_bytes: expect!(map.remove("read_bytes")),
write_bytes: expect!(map.remove("write_bytes")),
cancelled_write_bytes: expect!(map.remove("cancelled_write_bytes")),
};
assert!(!cfg!(test) || map.is_empty(), "io map is not empty: {:#?}", map);
Ok(io)
}
}
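// A sketch of feeding `/proc/<pid>/io`-shaped data through `FromBufRead`; the
// counter values are made up.
#[test]
fn io_parse_sketch() {
    use crate::FromBufRead;
    let data: &[u8] = b"rchar: 100\nwchar: 200\nsyscr: 3\nsyscw: 4\nread_bytes: 500\nwrite_bytes: 600\ncancelled_write_bytes: 0\n";
    let io = Io::from_buf_read(data).unwrap();
    assert_eq!(io.rchar, 100);
    assert_eq!(io.write_bytes, 600);
    assert_eq!(io.cancelled_write_bytes, 0);
}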
/// Describes a file descriptor opened by a process.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub enum FDTarget {
/// A file or device
Path(PathBuf),
/// A socket type, with an inode
Socket(u64),
Net(u64),
Pipe(u64),
/// A file descriptor that has no corresponding inode.
AnonInode(String),
/// A memfd file descriptor with a name.
MemFD(String),
/// Some other file descriptor type, with an inode.
Other(String, u64),
}
impl FromStr for FDTarget {
type Err = ProcError;
fn from_str(s: &str) -> Result<FDTarget, ProcError> {
// helper function that removes the first and last character
fn strip_first_last(s: &str) -> ProcResult<&str> {
if s.len() > 2 {
let mut c = s.chars();
// remove the first and last characters
let _ = c.next();
let _ = c.next_back();
Ok(c.as_str())
} else {
Err(ProcError::Incomplete(None))
}
}
if !s.starts_with('/') && s.contains(':') {
let mut s = s.split(':');
let fd_type = expect!(s.next());
match fd_type {
"socket" => {
let inode = expect!(s.next(), "socket inode");
let inode = expect!(u64::from_str_radix(strip_first_last(inode)?, 10));
Ok(FDTarget::Socket(inode))
}
"net" => {
let inode = expect!(s.next(), "net inode");
let inode = expect!(u64::from_str_radix(strip_first_last(inode)?, 10));
Ok(FDTarget::Net(inode))
}
"pipe" => {
let inode = expect!(s.next(), "pipe inode");
let inode = expect!(u64::from_str_radix(strip_first_last(inode)?, 10));
Ok(FDTarget::Pipe(inode))
}
"anon_inode" => Ok(FDTarget::AnonInode(expect!(s.next(), "anon inode").to_string())),
"" => Err(ProcError::Incomplete(None)),
x => {
let inode = expect!(s.next(), "other inode");
let inode = expect!(u64::from_str_radix(strip_first_last(inode)?, 10));
Ok(FDTarget::Other(x.to_string(), inode))
}
}
} else if let Some(s) = s.strip_prefix("/memfd:") {
Ok(FDTarget::MemFD(s.to_string()))
} else {
Ok(FDTarget::Path(PathBuf::from(s)))
}
}
}
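// A sketch of how the symlink targets under `/proc/<pid>/fd/` are classified;
// the inode numbers and names are made up.
#[test]
fn fd_target_sketch() {
    assert!(matches!("socket:[1234]".parse::<FDTarget>(), Ok(FDTarget::Socket(1234))));
    assert!(matches!("pipe:[5678]".parse::<FDTarget>(), Ok(FDTarget::Pipe(5678))));
    assert!(matches!(
        "/memfd:wayland (deleted)".parse::<FDTarget>(),
        Ok(FDTarget::MemFD(name)) if name == "wayland (deleted)"
    ));
    assert!(matches!("/dev/null".parse::<FDTarget>(), Ok(FDTarget::Path(_))));
}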
/// Provides information about memory usage, measured in pages.
#[derive(Debug, Clone, Copy)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct StatM {
/// Total program size, measured in pages
///
/// (same as VmSize in /proc/\<pid\>/status)
pub size: u64,
/// Resident set size, measured in pages
///
/// (same as VmRSS in /proc/\<pid\>/status)
pub resident: u64,
/// number of resident shared pages (i.e., backed by a file)
///
/// (same as RssFile+RssShmem in /proc/\<pid\>/status)
pub shared: u64,
/// Text (code)
pub text: u64,
/// library (unused since Linux 2.6; always 0)
pub lib: u64,
/// data + stack
pub data: u64,
/// dirty pages (unused since Linux 2.6; always 0)
pub dt: u64,
}
impl crate::FromRead for StatM {
fn from_read<R: Read>(mut r: R) -> ProcResult<Self> {
let mut line = String::new();
r.read_to_string(&mut line)?;
let mut s = line.split_whitespace();
let size = expect!(from_iter(&mut s));
let resident = expect!(from_iter(&mut s));
let shared = expect!(from_iter(&mut s));
let text = expect!(from_iter(&mut s));
let lib = expect!(from_iter(&mut s));
let data = expect!(from_iter(&mut s));
let dt = expect!(from_iter(&mut s));
if cfg!(test) {
assert!(s.next().is_none());
}
Ok(StatM {
size,
resident,
shared,
text,
lib,
data,
dt,
})
}
}
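// A sketch of parsing the seven space-separated page counts of
// `/proc/<pid>/statm`; the numbers are made up.
#[test]
fn statm_sketch() {
    use crate::FromRead;
    let statm = StatM::from_read("1000 200 100 50 0 300 0".as_bytes()).unwrap();
    assert_eq!(statm.size, 1000);
    assert_eq!(statm.resident, 200);
    assert_eq!(statm.data, 300);
    assert_eq!(statm.dt, 0);
}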
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn parse_memory_map_permissions() {
use MMPermissions as P;
assert_eq!("rw-p".parse(), Ok(P::READ | P::WRITE | P::PRIVATE));
assert_eq!("r-xs".parse(), Ok(P::READ | P::EXECUTE | P::SHARED));
assert_eq!("----".parse(), Ok(P::NONE));
assert_eq!((P::READ | P::WRITE | P::PRIVATE).as_str(), "rw-p");
assert_eq!((P::READ | P::EXECUTE | P::SHARED).as_str(), "r-xs");
assert_eq!(P::NONE.as_str(), "----");
}
}

View File

@ -0,0 +1,648 @@
use bitflags::bitflags;
use crate::{from_iter, ProcResult};
use std::collections::HashMap;
use std::io::{BufRead, Lines};
use std::path::PathBuf;
use std::time::Duration;
#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
bitflags! {
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, PartialOrd, Ord)]
pub struct NFSServerCaps: u32 {
const NFS_CAP_READDIRPLUS = 1;
const NFS_CAP_HARDLINKS = (1 << 1);
const NFS_CAP_SYMLINKS = (1 << 2);
const NFS_CAP_ACLS = (1 << 3);
const NFS_CAP_ATOMIC_OPEN = (1 << 4);
const NFS_CAP_LGOPEN = (1 << 5);
const NFS_CAP_FILEID = (1 << 6);
const NFS_CAP_MODE = (1 << 7);
const NFS_CAP_NLINK = (1 << 8);
const NFS_CAP_OWNER = (1 << 9);
const NFS_CAP_OWNER_GROUP = (1 << 10);
const NFS_CAP_ATIME = (1 << 11);
const NFS_CAP_CTIME = (1 << 12);
const NFS_CAP_MTIME = (1 << 13);
const NFS_CAP_POSIX_LOCK = (1 << 14);
const NFS_CAP_UIDGID_NOMAP = (1 << 15);
const NFS_CAP_STATEID_NFSV41 = (1 << 16);
const NFS_CAP_ATOMIC_OPEN_V1 = (1 << 17);
const NFS_CAP_SECURITY_LABEL = (1 << 18);
const NFS_CAP_SEEK = (1 << 19);
const NFS_CAP_ALLOCATE = (1 << 20);
const NFS_CAP_DEALLOCATE = (1 << 21);
const NFS_CAP_LAYOUTSTATS = (1 << 22);
const NFS_CAP_CLONE = (1 << 23);
const NFS_CAP_COPY = (1 << 24);
const NFS_CAP_OFFLOAD_CANCEL = (1 << 25);
}
}
/// Information about all mounts in a process's mount namespace.
///
/// This data is taken from the `/proc/[pid]/mountinfo` file.
pub struct MountInfos(pub Vec<MountInfo>);
impl crate::FromBufRead for MountInfos {
fn from_buf_read<R: BufRead>(r: R) -> ProcResult<Self> {
let lines = r.lines();
let mut vec = Vec::new();
for line in lines {
vec.push(MountInfo::from_line(&line?)?);
}
Ok(MountInfos(vec))
}
}
impl IntoIterator for MountInfos {
type IntoIter = std::vec::IntoIter<MountInfo>;
type Item = MountInfo;
fn into_iter(self) -> Self::IntoIter {
self.0.into_iter()
}
}
impl<'a> IntoIterator for &'a MountInfos {
type IntoIter = std::slice::Iter<'a, MountInfo>;
type Item = &'a MountInfo;
fn into_iter(self) -> Self::IntoIter {
self.0.iter()
}
}
/// Information about a specific mount in a process's mount namespace.
///
/// This data is taken from the `/proc/[pid]/mountinfo` file.
///
/// For an example, see the
/// [mountinfo.rs](https://github.com/eminence/procfs/tree/master/procfs/examples) example in the
/// source repo.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct MountInfo {
/// Mount ID. A unique ID for the mount (but may be reused after `unmount`)
pub mnt_id: i32,
/// Parent mount ID. The ID of the parent mount (or of self for the root of the mount
/// namespace's mount tree).
///
/// If the parent mount point lies outside the process's root directory, the ID shown here
/// won't have a corresponding record in mountinfo whose mount ID matches this parent mount
/// ID (because mount points that lie outside the process's root directory are not shown in
/// mountinfo). As a special case of this point, the process's root mount point may have a
/// parent mount (for the initramfs filesystem) that lies outside the process's root
/// directory, and an entry for that mount point will not appear in mountinfo.
pub pid: i32,
/// The value of `st_dev` for files on this filesystem
pub majmin: String,
/// The pathname of the directory in the filesystem which forms the root of this mount.
pub root: String,
/// The pathname of the mount point relative to the process's root directory.
pub mount_point: PathBuf,
/// Per-mount options
pub mount_options: HashMap<String, Option<String>>,
/// Optional fields
pub opt_fields: Vec<MountOptFields>,
/// Filesystem type
pub fs_type: String,
/// Mount source
pub mount_source: Option<String>,
/// Per-superblock options.
pub super_options: HashMap<String, Option<String>>,
}
impl MountInfo {
pub fn from_line(line: &str) -> ProcResult<MountInfo> {
let mut split = line.split_whitespace();
let mnt_id = expect!(from_iter(&mut split));
let pid = expect!(from_iter(&mut split));
let majmin: String = expect!(from_iter(&mut split));
let root = expect!(from_iter(&mut split));
let mount_point = expect!(from_iter(&mut split));
let mount_options = {
let mut map = HashMap::new();
let all_opts = expect!(split.next());
for opt in all_opts.split(',') {
let mut s = opt.splitn(2, '=');
let opt_name = expect!(s.next());
map.insert(opt_name.to_owned(), s.next().map(|s| s.to_owned()));
}
map
};
let mut opt_fields = Vec::new();
loop {
let f = expect!(split.next());
if f == "-" {
break;
}
let mut s = f.split(':');
let opt = match expect!(s.next()) {
"shared" => {
let val = expect!(from_iter(&mut s));
MountOptFields::Shared(val)
}
"master" => {
let val = expect!(from_iter(&mut s));
MountOptFields::Master(val)
}
"propagate_from" => {
let val = expect!(from_iter(&mut s));
MountOptFields::PropagateFrom(val)
}
"unbindable" => MountOptFields::Unbindable,
_ => continue,
};
opt_fields.push(opt);
}
let fs_type: String = expect!(from_iter(&mut split));
let mount_source = match expect!(split.next()) {
"none" => None,
x => Some(x.to_owned()),
};
let super_options = {
let mut map = HashMap::new();
let all_opts = expect!(split.next());
for opt in all_opts.split(',') {
let mut s = opt.splitn(2, '=');
let opt_name = expect!(s.next());
map.insert(opt_name.to_owned(), s.next().map(|s| s.to_owned()));
}
map
};
Ok(MountInfo {
mnt_id,
pid,
majmin,
root,
mount_point,
mount_options,
opt_fields,
fs_type,
mount_source,
super_options,
})
}
}
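// A sketch of the fields `from_line` extracts from one mountinfo line; the line
// reuses the same shape exercised in the tests at the bottom of this file.
#[test]
fn mount_info_fields_sketch() {
    let line = "25 0 8:1 / / rw,relatime shared:1 - ext4 /dev/sda1 rw,errors=remount-ro";
    let mi = MountInfo::from_line(line).unwrap();
    assert_eq!(mi.mnt_id, 25);
    assert_eq!(mi.fs_type, "ext4");
    assert_eq!(mi.mount_source.as_deref(), Some("/dev/sda1"));
    assert!(mi.mount_options.contains_key("relatime"));
    assert!(matches!(mi.opt_fields.as_slice(), [MountOptFields::Shared(1)]));
}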
/// Optional fields used in [MountInfo]
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub enum MountOptFields {
/// This mount point is shared in a peer group. Each peer group has a unique ID that is
/// automatically generated by the kernel, and all mount points in the same peer group will
/// show the same ID.
Shared(u32),
/// This mount is a slave to the specified shared peer group.
Master(u32),
/// This mount is a slave and receives propagation from the shared peer group
PropagateFrom(u32),
/// This is an unbindable mount
Unbindable,
}
/// A single entry in [MountStats].
#[derive(Debug, Clone)]
#[cfg_attr(test, derive(PartialEq))]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct MountStat {
/// The name of the mounted device
pub device: Option<String>,
/// The mountpoint within the filesystem tree
pub mount_point: PathBuf,
/// The filesystem type
pub fs: String,
/// If the mount is NFS, this will contain various NFS statistics
pub statistics: Option<MountNFSStatistics>,
}
/// Mount information from `/proc/<pid>/mountstats`.
#[derive(Debug, Clone)]
#[cfg_attr(test, derive(PartialEq))]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct MountStats(pub Vec<MountStat>);
impl crate::FromBufRead for MountStats {
/// This should correspond to data in `/proc/<pid>/mountstats`.
fn from_buf_read<R: BufRead>(r: R) -> ProcResult<Self> {
let mut v = Vec::new();
let mut lines = r.lines();
while let Some(Ok(line)) = lines.next() {
if line.starts_with("device ") {
// line will be of the format:
// device proc mounted on /proc with fstype proc
let mut s = line.split_whitespace();
let device = Some(expect!(s.nth(1)).to_owned());
let mount_point = PathBuf::from(expect!(s.nth(2)));
let fs = expect!(s.nth(2)).to_owned();
let statistics = match s.next() {
Some(stats) if stats.starts_with("statvers=") => {
Some(MountNFSStatistics::from_lines(&mut lines, &stats[9..])?)
}
_ => None,
};
v.push(MountStat {
device,
mount_point,
fs,
statistics,
});
}
}
Ok(MountStats(v))
}
}
impl IntoIterator for MountStats {
type IntoIter = std::vec::IntoIter<MountStat>;
type Item = MountStat;
fn into_iter(self) -> Self::IntoIter {
self.0.into_iter()
}
}
/// Only NFS mounts provide additional statistics in `MountStat` entries.
//
// Thank you to Chris Siebenmann for their helpful work in documenting these structures:
// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex
#[derive(Debug, Clone)]
#[cfg_attr(test, derive(PartialEq))]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct MountNFSStatistics {
/// The version of the NFS statistics block. Either "1.0" or "1.1".
pub version: String,
/// The mount options.
///
/// The meaning of these can be found in the manual pages for mount(5) and nfs(5)
pub opts: Vec<String>,
/// Duration the NFS mount has been in existence.
pub age: Duration,
// * fsc (?)
// * impl_id (NFSv4): Option<HashMap<String, Some(String)>>
/// NFS Capabilities.
///
/// See `include/linux/nfs_fs_sb.h`
///
/// Some known values:
/// * caps: server capabilities. See [NFSServerCaps].
/// * wtmult: server disk block size
/// * dtsize: readdir size
/// * bsize: server block size
pub caps: Vec<String>,
// * nfsv4 (NFSv4): Option<HashMap<String, Some(String)>>
pub sec: Vec<String>,
pub events: NFSEventCounter,
pub bytes: NFSByteCounter,
// * RPC iostats version:
// * xprt
// * per-op statistics
pub per_op_stats: NFSPerOpStats,
}
impl MountNFSStatistics {
// Keep reading lines until we get to a blank line
fn from_lines<B: BufRead>(r: &mut Lines<B>, statsver: &str) -> ProcResult<MountNFSStatistics> {
let mut parsing_per_op = false;
let mut opts: Option<Vec<String>> = None;
let mut age = None;
let mut caps = None;
let mut sec = None;
let mut bytes = None;
let mut events = None;
let mut per_op = HashMap::new();
while let Some(Ok(line)) = r.next() {
let line = line.trim();
if line.trim() == "" {
break;
}
if !parsing_per_op {
if let Some(stripped) = line.strip_prefix("opts:") {
opts = Some(stripped.trim().split(',').map(|s| s.to_string()).collect());
} else if let Some(stripped) = line.strip_prefix("age:") {
age = Some(Duration::from_secs(from_str!(u64, stripped.trim())));
} else if let Some(stripped) = line.strip_prefix("caps:") {
caps = Some(stripped.trim().split(',').map(|s| s.to_string()).collect());
} else if let Some(stripped) = line.strip_prefix("sec:") {
sec = Some(stripped.trim().split(',').map(|s| s.to_string()).collect());
} else if let Some(stripped) = line.strip_prefix("bytes:") {
bytes = Some(NFSByteCounter::from_str(stripped.trim())?);
} else if let Some(stripped) = line.strip_prefix("events:") {
events = Some(NFSEventCounter::from_str(stripped.trim())?);
}
if line == "per-op statistics" {
parsing_per_op = true;
}
} else {
let mut split = line.split(':');
let name = expect!(split.next()).to_string();
let stats = NFSOperationStat::from_str(expect!(split.next()))?;
per_op.insert(name, stats);
}
}
Ok(MountNFSStatistics {
version: statsver.to_string(),
opts: expect!(opts, "Failed to find opts field in nfs stats"),
age: expect!(age, "Failed to find age field in nfs stats"),
caps: expect!(caps, "Failed to find caps field in nfs stats"),
sec: expect!(sec, "Failed to find sec field in nfs stats"),
events: expect!(events, "Failed to find events section in nfs stats"),
bytes: expect!(bytes, "Failed to find bytes section in nfs stats"),
per_op_stats: per_op,
})
}
/// Attempts to parse the caps= value from the [caps](struct.MountNFSStatistics.html#structfield.caps) field.
pub fn server_caps(&self) -> ProcResult<Option<NFSServerCaps>> {
for data in &self.caps {
if let Some(stripped) = data.strip_prefix("caps=0x") {
let val = from_str!(u32, stripped, 16);
return Ok(NFSServerCaps::from_bits(val));
}
}
Ok(None)
}
}
/// Represents NFS data from `/proc/<pid>/mountstats` under the section `events`.
///
/// The underlying data structure in the kernel can be found under *fs/nfs/iostat.h* `nfs_iostat`.
/// The fields are documented in the kernel source only under *include/linux/nfs_iostat.h* `enum
/// nfs_stat_eventcounters`.
#[derive(Debug, Copy, Clone)]
#[cfg_attr(test, derive(PartialEq))]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct NFSEventCounter {
pub inode_revalidate: u64,
pub deny_try_revalidate: u64,
pub data_invalidate: u64,
pub attr_invalidate: u64,
pub vfs_open: u64,
pub vfs_lookup: u64,
pub vfs_access: u64,
pub vfs_update_page: u64,
pub vfs_read_page: u64,
pub vfs_read_pages: u64,
pub vfs_write_page: u64,
pub vfs_write_pages: u64,
pub vfs_get_dents: u64,
pub vfs_set_attr: u64,
pub vfs_flush: u64,
pub vfs_fs_sync: u64,
pub vfs_lock: u64,
pub vfs_release: u64,
pub congestion_wait: u64,
pub set_attr_trunc: u64,
pub extend_write: u64,
pub silly_rename: u64,
pub short_read: u64,
pub short_write: u64,
pub delay: u64,
pub pnfs_read: u64,
pub pnfs_write: u64,
}
impl NFSEventCounter {
fn from_str(s: &str) -> ProcResult<NFSEventCounter> {
let mut s = s.split_whitespace();
Ok(NFSEventCounter {
inode_revalidate: from_str!(u64, expect!(s.next())),
deny_try_revalidate: from_str!(u64, expect!(s.next())),
data_invalidate: from_str!(u64, expect!(s.next())),
attr_invalidate: from_str!(u64, expect!(s.next())),
vfs_open: from_str!(u64, expect!(s.next())),
vfs_lookup: from_str!(u64, expect!(s.next())),
vfs_access: from_str!(u64, expect!(s.next())),
vfs_update_page: from_str!(u64, expect!(s.next())),
vfs_read_page: from_str!(u64, expect!(s.next())),
vfs_read_pages: from_str!(u64, expect!(s.next())),
vfs_write_page: from_str!(u64, expect!(s.next())),
vfs_write_pages: from_str!(u64, expect!(s.next())),
vfs_get_dents: from_str!(u64, expect!(s.next())),
vfs_set_attr: from_str!(u64, expect!(s.next())),
vfs_flush: from_str!(u64, expect!(s.next())),
vfs_fs_sync: from_str!(u64, expect!(s.next())),
vfs_lock: from_str!(u64, expect!(s.next())),
vfs_release: from_str!(u64, expect!(s.next())),
congestion_wait: from_str!(u64, expect!(s.next())),
set_attr_trunc: from_str!(u64, expect!(s.next())),
extend_write: from_str!(u64, expect!(s.next())),
silly_rename: from_str!(u64, expect!(s.next())),
short_read: from_str!(u64, expect!(s.next())),
short_write: from_str!(u64, expect!(s.next())),
delay: from_str!(u64, expect!(s.next())),
pnfs_read: from_str!(u64, expect!(s.next())),
pnfs_write: from_str!(u64, expect!(s.next())),
})
}
}
/// Represents NFS data from `/proc/<pid>/mountstats` under the section `bytes`.
///
/// The underlying data structure in the kernel can be found under *fs/nfs/iostat.h* `nfs_iostat`.
/// The fields are documented in the kernel source only under *include/linux/nfs_iostat.h* `enum
/// nfs_stat_bytecounters`
#[derive(Debug, Copy, Clone)]
#[cfg_attr(test, derive(PartialEq))]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct NFSByteCounter {
pub normal_read: u64,
pub normal_write: u64,
pub direct_read: u64,
pub direct_write: u64,
pub server_read: u64,
pub server_write: u64,
pub pages_read: u64,
pub pages_write: u64,
}
impl NFSByteCounter {
fn from_str(s: &str) -> ProcResult<NFSByteCounter> {
let mut s = s.split_whitespace();
Ok(NFSByteCounter {
normal_read: from_str!(u64, expect!(s.next())),
normal_write: from_str!(u64, expect!(s.next())),
direct_read: from_str!(u64, expect!(s.next())),
direct_write: from_str!(u64, expect!(s.next())),
server_read: from_str!(u64, expect!(s.next())),
server_write: from_str!(u64, expect!(s.next())),
pages_read: from_str!(u64, expect!(s.next())),
pages_write: from_str!(u64, expect!(s.next())),
})
}
}
/// Represents NFS data from `/proc/<pid>/mountstats` under the section of `per-op statistics`.
///
/// Here is what the Kernel says about the attributes:
///
/// Regarding `operations`, `transmissions` and `major_timeouts`:
///
/// > These counters give an idea about how many request
/// > transmissions are required, on average, to complete that
/// > particular procedure. Some procedures may require more
/// > than one transmission because the server is unresponsive,
/// > the client is retransmitting too aggressively, or the
/// > requests are large and the network is congested.
///
/// Regarding `bytes_sent` and `bytes_recv`:
///
/// > These count how many bytes are sent and received for a
/// > given RPC procedure type. This indicates how much load a
/// > particular procedure is putting on the network. These
/// > counts include the RPC and ULP headers, and the request
/// > payload.
///
/// Regarding `cum_queue_time`, `cum_resp_time` and `cum_total_req_time`:
///
/// > The length of time an RPC request waits in queue before
/// > transmission, the network + server latency of the request,
/// > and the total time the request spent from init to release
/// > are measured.
///
/// (source: *include/linux/sunrpc/metrics.h* `struct rpc_iostats`)
#[derive(Debug, Clone)]
#[cfg_attr(test, derive(PartialEq))]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct NFSOperationStat {
/// Count of rpc operations.
pub operations: u64,
/// Count of rpc transmissions
pub transmissions: u64,
/// Count of rpc major timeouts
pub major_timeouts: u64,
/// Count of bytes sent. This includes not only the RPC payload but also the RPC headers.
pub bytes_sent: u64,
/// Count of bytes received. The same caveats apply as for `bytes_sent`.
pub bytes_recv: u64,
/// How long all requests have spent in the queue before being sent.
pub cum_queue_time: Duration,
/// How long it took to get a response back.
pub cum_resp_time: Duration,
/// How long all requests have taken from being queued to the point they were completely
/// handled.
pub cum_total_req_time: Duration,
}
impl NFSOperationStat {
fn from_str(s: &str) -> ProcResult<NFSOperationStat> {
let mut s = s.split_whitespace();
let operations = from_str!(u64, expect!(s.next()));
let transmissions = from_str!(u64, expect!(s.next()));
let major_timeouts = from_str!(u64, expect!(s.next()));
let bytes_sent = from_str!(u64, expect!(s.next()));
let bytes_recv = from_str!(u64, expect!(s.next()));
let cum_queue_time_ms = from_str!(u64, expect!(s.next()));
let cum_resp_time_ms = from_str!(u64, expect!(s.next()));
let cum_total_req_time_ms = from_str!(u64, expect!(s.next()));
Ok(NFSOperationStat {
operations,
transmissions,
major_timeouts,
bytes_sent,
bytes_recv,
cum_queue_time: Duration::from_millis(cum_queue_time_ms),
cum_resp_time: Duration::from_millis(cum_resp_time_ms),
cum_total_req_time: Duration::from_millis(cum_total_req_time_ms),
})
}
}
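// A sketch of the per-op statistics format: eight whitespace-separated counters
// (operations, transmissions, major timeouts, bytes sent/received, then three
// cumulative times in milliseconds). Values are made up.
#[test]
fn nfs_operation_stat_sketch() {
    let stat = NFSOperationStat::from_str("10 11 0 2048 4096 5 70 75").unwrap();
    assert_eq!(stat.operations, 10);
    assert_eq!(stat.bytes_recv, 4096);
    assert_eq!(stat.cum_queue_time, Duration::from_millis(5));
    assert_eq!(stat.cum_total_req_time, Duration::from_millis(75));
}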
pub type NFSPerOpStats = HashMap<String, NFSOperationStat>;
#[cfg(test)]
mod tests {
use super::*;
use crate::FromRead;
use std::time::Duration;
#[test]
fn test_mountinfo() {
let s = "25 0 8:1 / / rw,relatime shared:1 - ext4 /dev/sda1 rw,errors=remount-ro";
let stat = MountInfo::from_line(s).unwrap();
println!("{:?}", stat);
}
#[test]
fn test_proc_mountstats() {
let MountStats(simple) = FromRead::from_read(
"device /dev/md127 mounted on /boot with fstype ext2
device /dev/md124 mounted on /home with fstype ext4
device tmpfs mounted on /run/user/0 with fstype tmpfs
"
.as_bytes(),
)
.unwrap();
let simple_parsed = vec![
MountStat {
device: Some("/dev/md127".to_string()),
mount_point: PathBuf::from("/boot"),
fs: "ext2".to_string(),
statistics: None,
},
MountStat {
device: Some("/dev/md124".to_string()),
mount_point: PathBuf::from("/home"),
fs: "ext4".to_string(),
statistics: None,
},
MountStat {
device: Some("tmpfs".to_string()),
mount_point: PathBuf::from("/run/user/0"),
fs: "tmpfs".to_string(),
statistics: None,
},
];
assert_eq!(simple, simple_parsed);
let MountStats(mountstats) = FromRead::from_read("device elwe:/space mounted on /srv/elwe/space with fstype nfs4 statvers=1.1
opts: rw,vers=4.1,rsize=131072,wsize=131072,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=krb5,clientaddr=10.0.1.77,local_lock=none
age: 3542
impl_id: name='',domain='',date='0,0'
caps: caps=0x3ffdf,wtmult=512,dtsize=32768,bsize=0,namlen=255
nfsv4: bm0=0xfdffbfff,bm1=0x40f9be3e,bm2=0x803,acl=0x3,sessions,pnfs=not configured
sec: flavor=6,pseudoflavor=390003
events: 114 1579 5 3 132 20 3019 1 2 3 4 5 115 1 4 1 2 4 3 4 5 6 7 8 9 0 1
bytes: 1 2 3 4 5 6 7 8
RPC iostats version: 1.0 p/v: 100003/4 (nfs)
xprt: tcp 909 0 1 0 2 294 294 0 294 0 2 0 0
per-op statistics
NULL: 0 0 0 0 0 0 0 0
READ: 1 2 3 4 5 6 7 8
WRITE: 0 0 0 0 0 0 0 0
COMMIT: 0 0 0 0 0 0 0 0
OPEN: 1 1 0 320 420 0 124 124
".as_bytes()).unwrap();
let nfs_v4 = &mountstats[0];
match &nfs_v4.statistics {
Some(stats) => {
assert_eq!("1.1".to_string(), stats.version, "mountstats version wrongly parsed.");
assert_eq!(Duration::from_secs(3542), stats.age);
assert_eq!(1, stats.bytes.normal_read);
assert_eq!(114, stats.events.inode_revalidate);
assert!(stats.server_caps().unwrap().is_some());
}
None => {
panic!("Failed to retrieve nfs statistics");
}
}
}
}

View File

@ -0,0 +1,32 @@
use std::collections::HashMap;
use std::ffi::OsString;
use std::path::PathBuf;
#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
/// Information about a namespace
#[derive(Debug, Clone, Eq)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct Namespace {
/// Namespace type
pub ns_type: OsString,
/// Handle to the namespace
pub path: PathBuf,
/// Namespace identifier (inode number)
pub identifier: u64,
/// Device id of the namespace
pub device_id: u64,
}
impl PartialEq for Namespace {
fn eq(&self, other: &Self) -> bool {
// see https://lore.kernel.org/lkml/87poky5ca9.fsf@xmission.com/
self.identifier == other.identifier && self.device_id == other.device_id
}
}
/// All namespaces of a process.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct Namespaces(pub HashMap<OsString, Namespace>);
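// A sketch of the equality semantics above: two `Namespace` handles that share
// an inode and device id compare equal even if they were read from different
// `/proc/<pid>/ns/` paths. The numbers are made up.
#[test]
fn namespace_identity_sketch() {
    let a = Namespace {
        ns_type: OsString::from("net"),
        path: PathBuf::from("/proc/100/ns/net"),
        identifier: 4026531993,
        device_id: 4,
    };
    let b = Namespace {
        path: PathBuf::from("/proc/200/ns/net"),
        ..a.clone()
    };
    assert_eq!(a, b);
}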

View File

@ -0,0 +1,163 @@
use bitflags::bitflags;
use std::{fmt, mem::size_of};
#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
const fn genmask(high: usize, low: usize) -> u64 {
let mask_bits = size_of::<u64>() * 8;
(!0 - (1 << low) + 1) & (!0 >> (mask_bits - 1 - high))
}
// source: include/linux/swap.h
const MAX_SWAPFILES_SHIFT: usize = 5;
// source: fs/proc/task_mmu.c
bitflags! {
/// Represents the fields and flags in a page table entry for a swapped page.
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, PartialOrd, Ord)]
pub struct SwapPageFlags: u64 {
/// Swap type if swapped
#[doc(hidden)]
const SWAP_TYPE = genmask(MAX_SWAPFILES_SHIFT - 1, 0);
/// Swap offset if swapped
#[doc(hidden)]
const SWAP_OFFSET = genmask(54, MAX_SWAPFILES_SHIFT);
/// PTE is soft-dirty
const SOFT_DIRTY = 1 << 55;
/// Page is exclusively mapped
const MMAP_EXCLUSIVE = 1 << 56;
/// Page is file-page or shared-anon
const FILE = 1 << 61;
/// Page is swapped
#[doc(hidden)]
const SWAP = 1 << 62;
/// Page is present
const PRESENT = 1 << 63;
}
}
impl SwapPageFlags {
/// Returns the swap type recorded in this entry.
pub fn get_swap_type(&self) -> u64 {
(*self & Self::SWAP_TYPE).bits()
}
/// Returns the swap offset recorded in this entry.
pub fn get_swap_offset(&self) -> u64 {
(*self & Self::SWAP_OFFSET).bits() >> MAX_SWAPFILES_SHIFT
}
}
bitflags! {
/// Represents the fields and flags in a page table entry for a memory page.
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, PartialOrd, Ord)]
pub struct MemoryPageFlags: u64 {
/// Page frame number if present
#[doc(hidden)]
const PFN = genmask(54, 0);
/// PTE is soft-dirty
const SOFT_DIRTY = 1 << 55;
/// Page is exclusively mapped
const MMAP_EXCLUSIVE = 1 << 56;
/// Page is file-page or shared-anon
const FILE = 1 << 61;
/// Page is swapped
#[doc(hidden)]
const SWAP = 1 << 62;
/// Page is present
const PRESENT = 1 << 63;
}
}
impl MemoryPageFlags {
/// Returns the page frame number recorded in this entry.
pub fn get_page_frame_number(&self) -> Pfn {
Pfn((*self & Self::PFN).bits())
}
}
/// A Page Frame Number, representing a 4 kiB physical memory page
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Pfn(pub u64);
impl fmt::UpperHex for Pfn {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let val = self.0;
fmt::UpperHex::fmt(&val, f)
}
}
impl fmt::LowerHex for Pfn {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let val = self.0;
fmt::LowerHex::fmt(&val, f)
}
}
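// A sketch of the hex formatting these impls provide for page frame numbers.
#[test]
fn pfn_hex_format_sketch() {
    let pfn = Pfn(0x1a2b);
    assert_eq!(format!("{:x}", pfn), "1a2b");
    assert_eq!(format!("{:#X}", pfn), "0x1A2B");
}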
/// Represents a page table entry in `/proc/<pid>/pagemap`.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub enum PageInfo {
/// Entry referring to a memory page
MemoryPage(MemoryPageFlags),
/// Entry referring to a swapped page
SwapPage(SwapPageFlags),
}
impl PageInfo {
pub fn parse_info(info: u64) -> Self {
let flags = MemoryPageFlags::from_bits_retain(info);
if flags.contains(MemoryPageFlags::SWAP) {
Self::SwapPage(SwapPageFlags::from_bits_retain(info))
} else {
Self::MemoryPage(flags)
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_genmask() {
let mask = genmask(3, 1);
assert_eq!(mask, 0b1110);
let mask = genmask(3, 0);
assert_eq!(mask, 0b1111);
let mask = genmask(63, 62);
assert_eq!(mask, 0b11 << 62);
}
#[test]
fn test_page_info() {
let pagemap_entry: u64 = 0b1000000110000000000000000000000000000000000000000000000000000011;
let info = PageInfo::parse_info(pagemap_entry);
if let PageInfo::MemoryPage(memory_flags) = info {
assert!(memory_flags
.contains(MemoryPageFlags::PRESENT | MemoryPageFlags::MMAP_EXCLUSIVE | MemoryPageFlags::SOFT_DIRTY));
assert_eq!(memory_flags.get_page_frame_number(), Pfn(0b11));
} else {
panic!("Wrong SWAP decoding");
}
let pagemap_entry: u64 = 0b1100000110000000000000000000000000000000000000000000000001100010;
let info = PageInfo::parse_info(pagemap_entry);
if let PageInfo::SwapPage(swap_flags) = info {
assert!(
swap_flags.contains(SwapPageFlags::PRESENT | SwapPageFlags::MMAP_EXCLUSIVE | SwapPageFlags::SOFT_DIRTY)
);
assert_eq!(swap_flags.get_swap_type(), 0b10);
assert_eq!(swap_flags.get_swap_offset(), 0b11);
} else {
panic!("Wrong SWAP decoding");
}
}
}

View File

@ -0,0 +1,42 @@
use crate::from_iter;
use crate::ProcResult;
use std::io::Read;
#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
/// Provides scheduler statistics of the process, based on the `/proc/<pid>/schedstat` file.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct Schedstat {
/// Time spent on the cpu.
///
/// Measured in nanoseconds.
pub sum_exec_runtime: u64,
/// Time spent waiting on a runqueue.
///
/// Measured in nanoseconds.
pub run_delay: u64,
/// \# of timeslices run on this cpu.
pub pcount: u64,
}
impl crate::FromRead for Schedstat {
fn from_read<R: Read>(mut r: R) -> ProcResult<Self> {
let mut line = String::new();
r.read_to_string(&mut line)?;
let mut s = line.split_whitespace();
let schedstat = Schedstat {
sum_exec_runtime: expect!(from_iter(&mut s)),
run_delay: expect!(from_iter(&mut s)),
pcount: expect!(from_iter(&mut s)),
};
if cfg!(test) {
assert!(s.next().is_none());
}
Ok(schedstat)
}
}
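// A sketch of parsing the three counters in `/proc/<pid>/schedstat`
// (runtime in ns, runqueue wait in ns, timeslice count). Values are made up.
#[test]
fn schedstat_sketch() {
    use crate::FromRead;
    let s = Schedstat::from_read("2000000 1000000 42".as_bytes()).unwrap();
    assert_eq!(s.sum_exec_runtime, 2_000_000);
    assert_eq!(s.run_delay, 1_000_000);
    assert_eq!(s.pcount, 42);
}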

View File

@ -0,0 +1,13 @@
use super::MemoryMaps;
use crate::ProcResult;
#[derive(Debug)]
pub struct SmapsRollup {
pub memory_map_rollup: MemoryMaps,
}
impl crate::FromBufRead for SmapsRollup {
fn from_buf_read<R: std::io::BufRead>(r: R) -> ProcResult<Self> {
MemoryMaps::from_buf_read(r).map(|m| SmapsRollup { memory_map_rollup: m })
}
}

View File

@ -0,0 +1,411 @@
use super::ProcState;
use super::StatFlags;
use crate::{from_iter, from_iter_optional, ProcResult};
#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
use std::io::Read;
use std::str::FromStr;
/// Status information about the process, based on the `/proc/<pid>/stat` file.
///
/// Not all fields are available in every kernel. These fields have `Option<T>` types.
///
/// New fields to this struct may be added at any time (even without a major or minor semver bump).
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
#[non_exhaustive]
pub struct Stat {
/// The process ID.
pub pid: i32,
/// The filename of the executable, without the parentheses.
///
/// This is visible whether or not the executable is swapped out.
///
/// Note that if the actual comm field contains invalid UTF-8 characters, they will be replaced
/// here by the U+FFFD replacement character.
pub comm: String,
/// Process State.
///
/// See [state()](#method.state) to get the process state as an enum.
pub state: char,
/// The PID of the parent of this process.
pub ppid: i32,
/// The process group ID of the process.
pub pgrp: i32,
/// The session ID of the process.
pub session: i32,
/// The controlling terminal of the process.
///
/// The minor device number is contained in the combination of bits 31 to 20 and 7 to 0;
/// the major device number is in bits 15 to 8.
///
/// See [tty_nr()](#method.tty_nr) to get this value decoded into a (major, minor) tuple
pub tty_nr: i32,
/// The ID of the foreground process group of the controlling terminal of the process.
pub tpgid: i32,
/// The kernel flags word of the process.
///
/// For bit meanings, see the PF_* defines in the Linux kernel source file
/// [`include/linux/sched.h`](https://github.com/torvalds/linux/blob/master/include/linux/sched.h).
///
/// See [flags()](#method.flags) to get a [`StatFlags`](struct.StatFlags.html) bitfield object.
pub flags: u32,
/// The number of minor faults the process has made which have not required loading a memory
/// page from disk.
pub minflt: u64,
/// The number of minor faults that the process's waited-for children have made.
pub cminflt: u64,
/// The number of major faults the process has made which have required loading a memory page
/// from disk.
pub majflt: u64,
/// The number of major faults that the process's waited-for children have made.
pub cmajflt: u64,
/// Amount of time that this process has been scheduled in user mode, measured in clock ticks
/// (divide by `ticks_per_second()`).
///
/// This includes guest time, guest_time (time spent running a virtual CPU, see below), so that
/// applications that are not aware of the guest time field do not lose that time from their
/// calculations.
pub utime: u64,
/// Amount of time that this process has been scheduled in kernel mode, measured in clock ticks
/// (divide by `ticks_per_second()`).
pub stime: u64,
/// Amount of time that this process's waited-for children have been scheduled in
/// user mode, measured in clock ticks (divide by `ticks_per_second()`).
///
/// This includes guest time, cguest_time (time spent running a virtual CPU, see below).
pub cutime: i64,
/// Amount of time that this process's waited-for children have been scheduled in kernel
/// mode, measured in clock ticks (divide by `ticks_per_second()`).
pub cstime: i64,
/// For processes running a real-time scheduling policy (policy below; see sched_setscheduler(2)),
/// this is the negated scheduling priority, minus one;
///
/// That is, a number in the range -2 to -100,
/// corresponding to real-time priority 1 to 99. For processes running under a non-real-time
/// scheduling policy, this is the raw nice value (setpriority(2)) as represented in the kernel.
/// The kernel stores nice values as numbers in the range 0 (high) to 39 (low), corresponding
/// to the user-visible nice range of -20 to 19.
/// (This explanation is for Linux 2.6)
///
/// Before Linux 2.6, this was a scaled value based on the scheduler weighting given to this process.
pub priority: i64,
/// The nice value (see `setpriority(2)`), a value in the range 19 (low priority) to -20 (high priority).
pub nice: i64,
/// Number of threads in this process (since Linux 2.6). Before kernel 2.6, this field was
/// hard coded to 0 as a placeholder for an earlier removed field.
pub num_threads: i64,
/// The time in jiffies before the next SIGALRM is sent to the process due to an interval
/// timer.
///
/// Since kernel 2.6.17, this field is no longer maintained, and is hard coded as 0.
pub itrealvalue: i64,
/// The time the process started after system boot.
///
/// In kernels before Linux 2.6, this value was expressed in jiffies. Since Linux 2.6, the
/// value is expressed in clock ticks (divide by `sysconf(_SC_CLK_TCK)`).
///
#[cfg_attr(
feature = "chrono",
doc = "See also the [Stat::starttime()] method to get the starttime as a `DateTime` object"
)]
#[cfg_attr(
not(feature = "chrono"),
doc = "If you compile with the optional `chrono` feature, you can use the `starttime()` method to get the starttime as a `DateTime` object"
)]
pub starttime: u64,
/// Virtual memory size in bytes.
pub vsize: u64,
/// Resident Set Size: number of pages the process has in real memory.
///
/// This is just the pages which count toward text, data, or stack space.
/// This does not include pages which have not been demand-loaded in, or which are swapped out.
pub rss: u64,
/// Current soft limit in bytes on the rss of the process; see the description of RLIMIT_RSS in
/// getrlimit(2).
pub rsslim: u64,
/// The address above which program text can run.
pub startcode: u64,
/// The address below which program text can run.
pub endcode: u64,
/// The address of the start (i.e., bottom) of the stack.
pub startstack: u64,
/// The current value of ESP (stack pointer), as found in the kernel stack page for the
/// process.
pub kstkesp: u64,
/// The current EIP (instruction pointer).
pub kstkeip: u64,
/// The bitmap of pending signals, displayed as a decimal number. Obsolete, because it does
/// not provide information on real-time signals; use `/proc/<pid>/status` instead.
pub signal: u64,
/// The bitmap of blocked signals, displayed as a decimal number. Obsolete, because it does
/// not provide information on real-time signals; use `/proc/<pid>/status` instead.
pub blocked: u64,
/// The bitmap of ignored signals, displayed as a decimal number. Obsolete, because it does
/// not provide information on real-time signals; use `/proc/<pid>/status` instead.
pub sigignore: u64,
/// The bitmap of caught signals, displayed as a decimal number. Obsolete, because it does not
/// provide information on real-time signals; use `/proc/<pid>/status` instead.
pub sigcatch: u64,
/// This is the "channel" in which the process is waiting. It is the address of a location
/// in the kernel where the process is sleeping. The corresponding symbolic name can be found in
/// `/proc/<pid>/wchan`.
pub wchan: u64,
/// Number of pages swapped **(not maintained)**.
pub nswap: u64,
/// Cumulative nswap for child processes **(not maintained)**.
pub cnswap: u64,
/// Signal to be sent to parent when we die.
///
/// (since Linux 2.1.22)
pub exit_signal: Option<i32>,
/// CPU number last executed on.
///
/// (since Linux 2.2.8)
pub processor: Option<i32>,
/// Real-time scheduling priority
///
/// Real-time scheduling priority, a number in the range 1 to 99 for processes scheduled under a real-time policy, or 0, for non-real-time processes
///
/// (since Linux 2.5.19)
pub rt_priority: Option<u32>,
/// Scheduling policy (see sched_setscheduler(2)).
///
/// Decode using the `SCHED_*` constants in `linux/sched.h`.
///
/// (since Linux 2.5.19)
pub policy: Option<u32>,
/// Aggregated block I/O delays, measured in clock ticks (centiseconds).
///
/// (since Linux 2.6.18)
pub delayacct_blkio_ticks: Option<u64>,
/// Guest time of the process (time spent running a virtual CPU for a guest operating system),
/// measured in clock ticks (divide by `ticks_per_second()`)
///
/// (since Linux 2.6.24)
pub guest_time: Option<u64>,
/// Guest time of the process's children, measured in clock ticks (divide by
/// `ticks_per_second()`).
///
/// (since Linux 2.6.24)
pub cguest_time: Option<i64>,
/// Address above which program initialized and uninitialized (BSS) data are placed.
///
/// (since Linux 3.3)
pub start_data: Option<u64>,
/// Address below which program initialized and uninitialized (BSS) data are placed.
///
/// (since Linux 3.3)
pub end_data: Option<u64>,
/// Address above which program heap can be expanded with brk(2).
///
/// (since Linux 3.3)
pub start_brk: Option<u64>,
/// Address above which program command-line arguments (argv) are placed.
///
/// (since Linux 3.5)
pub arg_start: Option<u64>,
/// Address below which program command-line arguments (argv) are placed.
///
/// (since Linux 3.5)
pub arg_end: Option<u64>,
/// Address above which program environment is placed.
///
/// (since Linux 3.5)
pub env_start: Option<u64>,
/// Address below which program environment is placed.
///
/// (since Linux 3.5)
pub env_end: Option<u64>,
/// The thread's exit status in the form reported by waitpid(2).
///
/// (since Linux 3.5)
pub exit_code: Option<i32>,
}
impl crate::FromRead for Stat {
#[allow(clippy::cognitive_complexity)]
fn from_read<R: Read>(mut r: R) -> ProcResult<Self> {
// read in entire thing, this is only going to be 1 line
let mut buf = Vec::with_capacity(512);
r.read_to_end(&mut buf)?;
let line = String::from_utf8_lossy(&buf);
let buf = line.trim();
// find the first opening paren, and split off the first part (pid)
let start_paren = expect!(buf.find('('));
let end_paren = expect!(buf.rfind(')'));
let pid_s = &buf[..start_paren - 1];
let comm = buf[start_paren + 1..end_paren].to_string();
let rest = &buf[end_paren + 2..];
let pid = expect!(FromStr::from_str(pid_s));
let mut rest = rest.split(' ');
let state = expect!(expect!(rest.next()).chars().next());
let ppid = expect!(from_iter(&mut rest));
let pgrp = expect!(from_iter(&mut rest));
let session = expect!(from_iter(&mut rest));
let tty_nr = expect!(from_iter(&mut rest));
let tpgid = expect!(from_iter(&mut rest));
let flags = expect!(from_iter(&mut rest));
let minflt = expect!(from_iter(&mut rest));
let cminflt = expect!(from_iter(&mut rest));
let majflt = expect!(from_iter(&mut rest));
let cmajflt = expect!(from_iter(&mut rest));
let utime = expect!(from_iter(&mut rest));
let stime = expect!(from_iter(&mut rest));
let cutime = expect!(from_iter(&mut rest));
let cstime = expect!(from_iter(&mut rest));
let priority = expect!(from_iter(&mut rest));
let nice = expect!(from_iter(&mut rest));
let num_threads = expect!(from_iter(&mut rest));
let itrealvalue = expect!(from_iter(&mut rest));
let starttime = expect!(from_iter(&mut rest));
let vsize = expect!(from_iter(&mut rest));
let rss = expect!(from_iter(&mut rest));
let rsslim = expect!(from_iter(&mut rest));
let startcode = expect!(from_iter(&mut rest));
let endcode = expect!(from_iter(&mut rest));
let startstack = expect!(from_iter(&mut rest));
let kstkesp = expect!(from_iter(&mut rest));
let kstkeip = expect!(from_iter(&mut rest));
let signal = expect!(from_iter(&mut rest));
let blocked = expect!(from_iter(&mut rest));
let sigignore = expect!(from_iter(&mut rest));
let sigcatch = expect!(from_iter(&mut rest));
let wchan = expect!(from_iter(&mut rest));
let nswap = expect!(from_iter(&mut rest));
let cnswap = expect!(from_iter(&mut rest));
// Since 2.1.22
let exit_signal = expect!(from_iter_optional(&mut rest));
// Since 2.2.8
let processor = expect!(from_iter_optional(&mut rest));
// Since 2.5.19
let rt_priority = expect!(from_iter_optional(&mut rest));
let policy = expect!(from_iter_optional(&mut rest));
// Since 2.6.18
let delayacct_blkio_ticks = expect!(from_iter_optional(&mut rest));
// Since 2.6.24
let guest_time = expect!(from_iter_optional(&mut rest));
let cguest_time = expect!(from_iter_optional(&mut rest));
// Since 3.3.0
let start_data = expect!(from_iter_optional(&mut rest));
let end_data = expect!(from_iter_optional(&mut rest));
let start_brk = expect!(from_iter_optional(&mut rest));
// Since 3.5.0
let arg_start = expect!(from_iter_optional(&mut rest));
let arg_end = expect!(from_iter_optional(&mut rest));
let env_start = expect!(from_iter_optional(&mut rest));
let env_end = expect!(from_iter_optional(&mut rest));
let exit_code = expect!(from_iter_optional(&mut rest));
Ok(Stat {
pid,
comm,
state,
ppid,
pgrp,
session,
tty_nr,
tpgid,
flags,
minflt,
cminflt,
majflt,
cmajflt,
utime,
stime,
cutime,
cstime,
priority,
nice,
num_threads,
itrealvalue,
starttime,
vsize,
rss,
rsslim,
startcode,
endcode,
startstack,
kstkesp,
kstkeip,
signal,
blocked,
sigignore,
sigcatch,
wchan,
nswap,
cnswap,
exit_signal,
processor,
rt_priority,
policy,
delayacct_blkio_ticks,
guest_time,
cguest_time,
start_data,
end_data,
start_brk,
arg_start,
arg_end,
env_start,
env_end,
exit_code,
})
}
}
impl Stat {
pub fn state(&self) -> ProcResult<ProcState> {
ProcState::from_char(self.state)
.ok_or_else(|| build_internal_error!(format!("{:?} is not a recognized process state", self.state)))
}
pub fn tty_nr(&self) -> (i32, i32) {
// minor is bits 31-20 and 7-0
// major is 15-8
// mmmmmmmmmmmm____MMMMMMMMmmmmmmmm
// 11111111111100000000000000000000
let major = (self.tty_nr & 0xfff00) >> 8;
let minor = (self.tty_nr & 0x000ff) | ((self.tty_nr >> 12) & 0xfff00);
(major, minor)
}
/// The kernel flags word of the process, as a bitfield
///
/// See also the [Stat::flags](struct.Stat.html#structfield.flags) field.
pub fn flags(&self) -> ProcResult<StatFlags> {
StatFlags::from_bits(self.flags)
.ok_or_else(|| build_internal_error!(format!("Can't construct flags bitfield from {:?}", self.flags)))
}
/// Get the starttime of the process as a `DateTime` object.
///
/// See also the [`starttime`](struct.Stat.html#structfield.starttime) field.
///
/// This function requires the "chrono" features to be enabled (which it is by default).
#[cfg(feature = "chrono")]
pub fn starttime(&self) -> impl crate::WithSystemInfo<Output = ProcResult<chrono::DateTime<chrono::Local>>> {
move |si: &crate::SystemInfo| {
let seconds_since_boot = self.starttime as f32 / si.ticks_per_second() as f32;
Ok(si.boot_time()? + chrono::Duration::milliseconds((seconds_since_boot * 1000.0) as i64))
}
}
/// Gets the Resident Set Size (in bytes)
///
/// The `rss` field will return the same value in pages
pub fn rss_bytes(&self) -> impl crate::WithSystemInfo<Output = u64> {
move |si: &crate::SystemInfo| self.rss * si.page_size()
}
}
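// A sketch of parsing a minimal, made-up `/proc/<pid>/stat` line (only the
// always-present fields; the optional trailing fields are simply absent and come
// back as `None`), then using the `state()` helper above.
#[test]
fn stat_parse_sketch() {
    use crate::FromRead;
    let line = "1 (init) S 0 1 1 0 -1 4194560 100 200 1 0 10 20 5 5 20 0 1 0 50 1000000 100 18446744073709551615 1 1 0 0 0 0 0 0 0 0 0 0";
    let stat = Stat::from_read(line.as_bytes()).unwrap();
    assert_eq!(stat.comm, "init");
    assert_eq!(stat.state().unwrap(), ProcState::Sleeping);
    assert_eq!(stat.ppid, 0);
    assert!(stat.exit_signal.is_none());
}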

View File

@ -0,0 +1,340 @@
use crate::{FromStrRadix, ProcResult};
use std::collections::HashMap;
use std::io::BufRead;
#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
/// Status information about the process, based on the `/proc/<pid>/status` file.
///
/// Not all fields are available in every kernel. These fields have `Option<T>` types.
/// In general, the current kernel version will tell you what fields you can expect, but this
/// isn't totally reliable, since some kernels might backport certain fields, or fields might
/// only be present if certain kernel configuration options are enabled. Be prepared to
/// handle `None` values.
///
/// New fields to this struct may be added at any time (even without a major or minor semver bump).
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
#[non_exhaustive]
pub struct Status {
/// Command run by this process.
pub name: String,
/// Process umask, expressed in octal with a leading zero; see umask(2). (Since Linux 4.7.)
pub umask: Option<u32>,
/// Current state of the process.
pub state: String,
/// Thread group ID (i.e., Process ID).
pub tgid: i32,
/// NUMA group ID (0 if none; since Linux 3.13).
pub ngid: Option<i32>,
/// Thread ID (see gettid(2)).
pub pid: i32,
/// PID of parent process.
pub ppid: i32,
/// PID of process tracing this process (0 if not being traced).
pub tracerpid: i32,
/// Real UID.
pub ruid: u32,
/// Effective UID.
pub euid: u32,
/// Saved set UID.
pub suid: u32,
/// Filesystem UID.
pub fuid: u32,
/// Real GID.
pub rgid: u32,
/// Effective GID.
pub egid: u32,
/// Saved set GID.
pub sgid: u32,
/// Filesystem GID.
pub fgid: u32,
/// Number of file descriptor slots currently allocated.
pub fdsize: u32,
/// Supplementary group list.
pub groups: Vec<i32>,
/// Thread group ID (i.e., PID) in each of the PID
/// namespaces of which [pid](struct.Status.html#structfield.pid) is a member. The leftmost entry
/// shows the value with respect to the PID namespace of the
/// reading process, followed by the value in successively
/// nested inner namespaces. (Since Linux 4.1.)
pub nstgid: Option<Vec<i32>>,
/// Thread ID in each of the PID namespaces of which
/// [pid](struct.Status.html#structfield.pid) is a member. The fields are ordered as for NStgid.
/// (Since Linux 4.1.)
pub nspid: Option<Vec<i32>>,
/// Process group ID in each of the PID namespaces of
/// which [pid](struct.Status.html#structfield.pid) is a member. The fields are ordered as for NStgid. (Since Linux 4.1.)
pub nspgid: Option<Vec<i32>>,
/// Session ID in each of the PID namespaces of which
/// [pid](struct.Status.html#structfield.pid) is a member.
/// The fields are ordered as for NStgid. (Since Linux 4.1.)
pub nssid: Option<Vec<i32>>,
/// Peak virtual memory size in kibibytes.
pub vmpeak: Option<u64>,
/// Virtual memory size in kibibytes.
pub vmsize: Option<u64>,
/// Locked memory size in kibibytes (see mlock(3)).
pub vmlck: Option<u64>,
/// Pinned memory size in kibibytes (since Linux 3.2). These are
/// pages that can't be moved because something needs to
/// directly access physical memory.
pub vmpin: Option<u64>,
/// Peak resident set size in kibibytes ("high water mark").
pub vmhwm: Option<u64>,
/// Resident set size in kibibytes. Note that the value here is the
/// sum of RssAnon, RssFile, and RssShmem.
pub vmrss: Option<u64>,
/// Size of resident anonymous memory in kibibytes. (since Linux 4.5).
pub rssanon: Option<u64>,
/// Size of resident file mappings in kibibytes. (since Linux 4.5).
pub rssfile: Option<u64>,
/// Size of resident shared memory in kibibytes (includes System V
/// shared memory, mappings from tmpfs(5), and shared anonymous
/// mappings). (since Linux 4.5).
pub rssshmem: Option<u64>,
/// Size of data in kibibytes.
pub vmdata: Option<u64>,
/// Size of stack in kibibytes.
pub vmstk: Option<u64>,
/// Size of text segments in kibibytes.
pub vmexe: Option<u64>,
/// Shared library code size in kibibytes.
pub vmlib: Option<u64>,
/// Page table entries size in kibibytes (since Linux 2.6.10).
pub vmpte: Option<u64>,
/// Swapped-out virtual memory size of anonymous private
/// pages, in kibibytes; shmem swap usage is not included (since Linux 2.6.34).
pub vmswap: Option<u64>,
/// Size of hugetlb memory portions in kB. (since Linux 4.4).
pub hugetlbpages: Option<u64>,
/// Number of threads in process containing this thread.
pub threads: u64,
/// This field contains two slash-separated numbers that
/// relate to queued signals for the real user ID of this
/// process. The first of these is the number of currently
/// queued signals for this real user ID, and the second is the
/// resource limit on the number of queued signals for this
/// process (see the description of RLIMIT_SIGPENDING in
/// getrlimit(2)).
pub sigq: (u64, u64),
/// Number of signals pending for thread (see pthreads(7) and signal(7)).
pub sigpnd: u64,
/// Number of signals pending for process as a whole (see pthreads(7) and signal(7)).
pub shdpnd: u64,
/// Masks indicating signals being blocked (see signal(7)).
pub sigblk: u64,
/// Masks indicating signals being ignored (see signal(7)).
pub sigign: u64,
/// Masks indicating signals being caught (see signal(7)).
pub sigcgt: u64,
/// Masks of capabilities enabled in inheritable sets (see capabilities(7)).
pub capinh: u64,
/// Masks of capabilities enabled in permitted sets (see capabilities(7)).
pub capprm: u64,
/// Masks of capabilities enabled in effective sets (see capabilities(7)).
pub capeff: u64,
/// Capability Bounding set (since Linux 2.6.26, see capabilities(7)).
pub capbnd: Option<u64>,
/// Ambient capability set (since Linux 4.3, see capabilities(7)).
pub capamb: Option<u64>,
/// Value of the no_new_privs bit (since Linux 4.10, see prctl(2)).
pub nonewprivs: Option<u64>,
/// Seccomp mode of the process (since Linux 3.8, see
/// seccomp(2)). 0 means SECCOMP_MODE_DISABLED; 1 means
/// SECCOMP_MODE_STRICT; 2 means SECCOMP_MODE_FILTER. This field
/// is provided only if the kernel was built with the
/// CONFIG_SECCOMP kernel configuration option enabled.
pub seccomp: Option<u32>,
/// Speculative store bypass mitigation status.
pub speculation_store_bypass: Option<String>,
/// Mask of CPUs on which this process may run (since Linux 2.6.24, see cpuset(7)).
pub cpus_allowed: Option<Vec<u32>>,
/// Same as previous, but in "list format" (since Linux 2.6.26, see cpuset(7)).
pub cpus_allowed_list: Option<Vec<(u32, u32)>>,
/// Mask of memory nodes allowed to this process (since Linux 2.6.24, see cpuset(7)).
pub mems_allowed: Option<Vec<u32>>,
/// Same as previous, but in "list format" (since Linux 2.6.26, see cpuset(7)).
pub mems_allowed_list: Option<Vec<(u32, u32)>>,
/// Number of voluntary context switches (since Linux 2.6.23).
pub voluntary_ctxt_switches: Option<u64>,
/// Number of involuntary context switches (since Linux 2.6.23).
pub nonvoluntary_ctxt_switches: Option<u64>,
/// Contains true if the process is currently dumping core.
///
/// This information can be used by a monitoring process to avoid killing a process that is
/// currently dumping core, which could result in a corrupted core dump file.
///
/// (Since Linux 4.15)
pub core_dumping: Option<bool>,
/// Contains true if the process is allowed to use transparent huge pages (THP).
///
/// (Since Linux 5.0)
pub thp_enabled: Option<bool>,
}
impl crate::FromBufRead for Status {
fn from_buf_read<R: BufRead>(reader: R) -> ProcResult<Self> {
let mut map = HashMap::new();
for line in reader.lines() {
let line = line?;
if line.is_empty() {
continue;
}
let mut s = line.split(':');
let field = expect!(s.next());
let value = expect!(s.next()).trim();
map.insert(field.to_string(), value.to_string());
}
let status = Status {
name: expect!(map.remove("Name")),
umask: map.remove("Umask").map(|x| Ok(from_str!(u32, &x, 8))).transpose()?,
state: expect!(map.remove("State")),
tgid: from_str!(i32, &expect!(map.remove("Tgid"))),
ngid: map.remove("Ngid").map(|x| Ok(from_str!(i32, &x))).transpose()?,
pid: from_str!(i32, &expect!(map.remove("Pid"))),
ppid: from_str!(i32, &expect!(map.remove("PPid"))),
tracerpid: from_str!(i32, &expect!(map.remove("TracerPid"))),
ruid: expect!(Status::parse_uid_gid(expect!(map.get("Uid")), 0)),
euid: expect!(Status::parse_uid_gid(expect!(map.get("Uid")), 1)),
suid: expect!(Status::parse_uid_gid(expect!(map.get("Uid")), 2)),
fuid: expect!(Status::parse_uid_gid(&expect!(map.remove("Uid")), 3)),
rgid: expect!(Status::parse_uid_gid(expect!(map.get("Gid")), 0)),
egid: expect!(Status::parse_uid_gid(expect!(map.get("Gid")), 1)),
sgid: expect!(Status::parse_uid_gid(expect!(map.get("Gid")), 2)),
fgid: expect!(Status::parse_uid_gid(&expect!(map.remove("Gid")), 3)),
fdsize: from_str!(u32, &expect!(map.remove("FDSize"))),
groups: Status::parse_list(&expect!(map.remove("Groups")))?,
nstgid: map.remove("NStgid").map(|x| Status::parse_list(&x)).transpose()?,
nspid: map.remove("NSpid").map(|x| Status::parse_list(&x)).transpose()?,
nspgid: map.remove("NSpgid").map(|x| Status::parse_list(&x)).transpose()?,
nssid: map.remove("NSsid").map(|x| Status::parse_list(&x)).transpose()?,
vmpeak: Status::parse_with_kb(map.remove("VmPeak"))?,
vmsize: Status::parse_with_kb(map.remove("VmSize"))?,
vmlck: Status::parse_with_kb(map.remove("VmLck"))?,
vmpin: Status::parse_with_kb(map.remove("VmPin"))?,
vmhwm: Status::parse_with_kb(map.remove("VmHWM"))?,
vmrss: Status::parse_with_kb(map.remove("VmRSS"))?,
rssanon: Status::parse_with_kb(map.remove("RssAnon"))?,
rssfile: Status::parse_with_kb(map.remove("RssFile"))?,
rssshmem: Status::parse_with_kb(map.remove("RssShmem"))?,
vmdata: Status::parse_with_kb(map.remove("VmData"))?,
vmstk: Status::parse_with_kb(map.remove("VmStk"))?,
vmexe: Status::parse_with_kb(map.remove("VmExe"))?,
vmlib: Status::parse_with_kb(map.remove("VmLib"))?,
vmpte: Status::parse_with_kb(map.remove("VmPTE"))?,
vmswap: Status::parse_with_kb(map.remove("VmSwap"))?,
hugetlbpages: Status::parse_with_kb(map.remove("HugetlbPages"))?,
threads: from_str!(u64, &expect!(map.remove("Threads"))),
sigq: expect!(Status::parse_sigq(&expect!(map.remove("SigQ")))),
sigpnd: from_str!(u64, &expect!(map.remove("SigPnd")), 16),
shdpnd: from_str!(u64, &expect!(map.remove("ShdPnd")), 16),
sigblk: from_str!(u64, &expect!(map.remove("SigBlk")), 16),
sigign: from_str!(u64, &expect!(map.remove("SigIgn")), 16),
sigcgt: from_str!(u64, &expect!(map.remove("SigCgt")), 16),
capinh: from_str!(u64, &expect!(map.remove("CapInh")), 16),
capprm: from_str!(u64, &expect!(map.remove("CapPrm")), 16),
capeff: from_str!(u64, &expect!(map.remove("CapEff")), 16),
capbnd: map.remove("CapBnd").map(|x| Ok(from_str!(u64, &x, 16))).transpose()?,
capamb: map.remove("CapAmb").map(|x| Ok(from_str!(u64, &x, 16))).transpose()?,
nonewprivs: map.remove("NoNewPrivs").map(|x| Ok(from_str!(u64, &x))).transpose()?,
seccomp: map.remove("Seccomp").map(|x| Ok(from_str!(u32, &x))).transpose()?,
speculation_store_bypass: map.remove("Speculation_Store_Bypass"),
cpus_allowed: map
.remove("Cpus_allowed")
.map(|x| Status::parse_allowed(&x))
.transpose()?,
cpus_allowed_list: map
.remove("Cpus_allowed_list")
.and_then(|x| Status::parse_allowed_list(&x).ok()),
mems_allowed: map
.remove("Mems_allowed")
.map(|x| Status::parse_allowed(&x))
.transpose()?,
mems_allowed_list: map
.remove("Mems_allowed_list")
.and_then(|x| Status::parse_allowed_list(&x).ok()),
voluntary_ctxt_switches: map
.remove("voluntary_ctxt_switches")
.map(|x| Ok(from_str!(u64, &x)))
.transpose()?,
nonvoluntary_ctxt_switches: map
.remove("nonvoluntary_ctxt_switches")
.map(|x| Ok(from_str!(u64, &x)))
.transpose()?,
core_dumping: map.remove("CoreDumping").map(|x| x == "1"),
thp_enabled: map.remove("THP_enabled").map(|x| x == "1"),
};
if cfg!(test) && !map.is_empty() {
// This isn't an error because different kernels may put different data here, and distros
// may backport these changes into older kernels; it's too hard to keep track of them all.
eprintln!("Warning: status map is not empty: {:#?}", map);
}
Ok(status)
}
}
impl Status {
fn parse_with_kb<T: FromStrRadix>(s: Option<String>) -> ProcResult<Option<T>> {
if let Some(s) = s {
Ok(Some(from_str!(T, &s.replace(" kB", ""))))
} else {
Ok(None)
}
}
#[doc(hidden)]
pub fn parse_uid_gid(s: &str, i: usize) -> ProcResult<u32> {
Ok(from_str!(u32, expect!(s.split_whitespace().nth(i))))
}
fn parse_sigq(s: &str) -> ProcResult<(u64, u64)> {
let mut iter = s.split('/');
let first = from_str!(u64, expect!(iter.next()));
let second = from_str!(u64, expect!(iter.next()));
Ok((first, second))
}
fn parse_list<T: FromStrRadix>(s: &str) -> ProcResult<Vec<T>> {
let mut ret = Vec::new();
for i in s.split_whitespace() {
ret.push(from_str!(T, i));
}
Ok(ret)
}
fn parse_allowed(s: &str) -> ProcResult<Vec<u32>> {
let mut ret = Vec::new();
for i in s.split(',') {
ret.push(from_str!(u32, i, 16));
}
Ok(ret)
}
fn parse_allowed_list(s: &str) -> ProcResult<Vec<(u32, u32)>> {
let mut ret = Vec::new();
for s in s.split(',') {
if s.contains('-') {
let mut s = s.split('-');
let beg = from_str!(u32, expect!(s.next()));
if let Some(x) = s.next() {
let end = from_str!(u32, x);
ret.push((beg, end));
}
} else {
let beg = from_str!(u32, s);
let end = from_str!(u32, s);
ret.push((beg, end));
}
}
Ok(ret)
}
}
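The parser above reduces each line of `/proc/<pid>/status` to a key/value pair by splitting at the first ':' and trimming the value. A minimal standalone sketch of that same split using only the standard library (the `read_status_map` helper is hypothetical, for illustration only):

use std::collections::HashMap;
use std::fs::File;
use std::io::{BufRead, BufReader};

// Hypothetical helper mirroring the key/value split performed by from_buf_read above.
fn read_status_map(pid: i32) -> std::io::Result<HashMap<String, String>> {
    let reader = BufReader::new(File::open(format!("/proc/{}/status", pid))?);
    let mut map = HashMap::new();
    for line in reader.lines() {
        let line = line?;
        // Lines look like "VmRSS:   123456 kB"; keep the trimmed raw value.
        if let Some((field, value)) = line.split_once(':') {
            map.insert(field.to_string(), value.trim().to_string());
        }
    }
    Ok(map)
}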

View File

@ -0,0 +1,428 @@
//! Global kernel info / tuning miscellaneous stuff
//!
//! The files in this directory can be used to tune and monitor miscellaneous
//! and general things in the operation of the Linux kernel.
use std::cmp;
use std::collections::HashSet;
use std::str::FromStr;
use bitflags::bitflags;
use crate::{ProcError, ProcResult};
/// Represents a kernel version, in major.minor.release form.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub struct Version {
pub major: u8,
pub minor: u8,
pub patch: u16,
}
impl Version {
pub fn new(major: u8, minor: u8, patch: u16) -> Version {
Version { major, minor, patch }
}
/// Parses a kernel version string, in major.minor.release syntax.
///
/// Note that any extra information (stuff after a dash) is ignored.
///
/// # Example
///
/// ```
/// # use procfs_core::KernelVersion;
/// let a = KernelVersion::from_str("3.16.0-6-amd64").unwrap();
/// let b = KernelVersion::new(3, 16, 0);
/// assert_eq!(a, b);
///
/// ```
#[allow(clippy::should_implement_trait)]
pub fn from_str(s: &str) -> Result<Self, &'static str> {
let pos = s.find(|c: char| c != '.' && !c.is_ascii_digit());
let kernel = if let Some(pos) = pos {
let (s, _) = s.split_at(pos);
s
} else {
s
};
let mut kernel_split = kernel.split('.');
let major = kernel_split.next().ok_or("Missing major version component")?;
let minor = kernel_split.next().ok_or("Missing minor version component")?;
let patch = kernel_split.next().ok_or("Missing patch version component")?;
let major = major.parse().map_err(|_| "Failed to parse major version")?;
let minor = minor.parse().map_err(|_| "Failed to parse minor version")?;
let patch = patch.parse().map_err(|_| "Failed to parse patch version")?;
Ok(Version { major, minor, patch })
}
}
impl FromStr for Version {
type Err = &'static str;
/// Parses a kernel version string, in major.minor.release syntax.
///
/// Note that any extra information (stuff after a dash) is ignored.
///
/// # Example
///
/// ```
/// # use procfs_core::KernelVersion;
/// let a: KernelVersion = "3.16.0-6-amd64".parse().unwrap();
/// let b = KernelVersion::new(3, 16, 0);
/// assert_eq!(a, b);
///
/// ```
fn from_str(s: &str) -> Result<Self, Self::Err> {
Version::from_str(s)
}
}
impl cmp::Ord for Version {
fn cmp(&self, other: &Self) -> cmp::Ordering {
match self.major.cmp(&other.major) {
cmp::Ordering::Equal => match self.minor.cmp(&other.minor) {
cmp::Ordering::Equal => self.patch.cmp(&other.patch),
x => x,
},
x => x,
}
}
}
impl cmp::PartialOrd for Version {
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
Some(self.cmp(other))
}
}
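Since the ordering compares major, then minor, then patch (the same result as comparing `(major, minor, patch)` tuples), version gates read naturally. A small usage sketch assuming the `Version` type above; the function name and the 4.5 cutoff are illustrative only:

// Hypothetical check: does this kernel release string satisfy a 4.5 minimum?
fn kernel_at_least_4_5(release: &str) -> bool {
    // e.g. release = "5.15.0-91-generic"; anything after the dash is ignored by from_str.
    Version::from_str(release)
        .map(|v| v >= Version::new(4, 5, 0))
        .unwrap_or(false)
}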
/// Represents a kernel type
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Type {
pub sysname: String,
}
impl Type {
pub fn new(sysname: String) -> Type {
Type { sysname }
}
}
impl FromStr for Type {
type Err = &'static str;
/// Parse a kernel type string
///
/// Notice that in Linux source code, it is defined as a single string.
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(Type::new(s.to_string()))
}
}
/// Represents kernel build information
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct BuildInfo {
pub version: String,
pub flags: HashSet<String>,
/// This field contains any extra data from the /proc/sys/kernel/version file. It generally contains the build date of the kernel, but the format of the date can vary.
///
/// The `extra_date` method tries to parse some common date formats and returns an error when the format is not supported; it requires the `chrono` feature.
pub extra: String,
}
impl BuildInfo {
pub fn new(version: &str, flags: HashSet<String>, extra: String) -> BuildInfo {
BuildInfo {
version: version.to_string(),
flags,
extra,
}
}
/// Check if SMP is ON
pub fn smp(&self) -> bool {
self.flags.contains("SMP")
}
/// Check if PREEMPT is ON
pub fn preempt(&self) -> bool {
self.flags.contains("PREEMPT")
}
/// Check if PREEMPTRT is ON
pub fn preemptrt(&self) -> bool {
self.flags.contains("PREEMPTRT")
}
/// Returns the version number.
///
/// This parses the number from the leading digits of the version string; for example, `#21~1` yields 21.
pub fn version_number(&self) -> ProcResult<u32> {
let mut version_str = String::new();
for c in self.version.chars() {
if c.is_ascii_digit() {
version_str.push(c);
} else {
break;
}
}
let version_number: u32 = version_str.parse().map_err(|_| "Failed to parse version number")?;
Ok(version_number)
}
/// Parses the extra field into a `DateTime` object.
///
/// This function may fail, as the timestamp can come in various formats.
#[cfg(feature = "chrono")]
pub fn extra_date(&self) -> ProcResult<chrono::DateTime<chrono::Local>> {
if let Ok(dt) =
chrono::DateTime::parse_from_str(&format!("{} +0000", &self.extra), "%a %b %d %H:%M:%S UTC %Y %z")
{
return Ok(dt.with_timezone(&chrono::Local));
}
if let Ok(dt) = chrono::DateTime::parse_from_str(&self.extra, "%a, %d %b %Y %H:%M:%S %z") {
return Ok(dt.with_timezone(&chrono::Local));
}
Err(ProcError::Other("Failed to parse extra field to date".to_string()))
}
}
impl FromStr for BuildInfo {
type Err = &'static str;
/// Parse a kernel build information string
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut version = String::new();
let mut flags: HashSet<String> = HashSet::new();
let mut extra: String = String::new();
let mut parts = s.split(' ');
let version_str = splited.next();
if let Some(version_str) = version_str {
if let Some(stripped) = version_str.strip_prefix('#') {
version.push_str(stripped);
} else {
return Err("Failed to parse kernel build version");
}
} else {
return Err("Failed to parse kernel build version");
}
for s in &mut parts {
if s.chars().all(char::is_uppercase) {
flags.insert(s.to_string());
} else {
extra.push_str(s);
extra.push(' ');
break;
}
}
let remains: Vec<&str> = parts.collect();
extra.push_str(&remains.join(" "));
Ok(BuildInfo { version, flags, extra })
}
}
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
/// Represents the data from `/proc/sys/kernel/sem`
pub struct SemaphoreLimits {
/// The maximum number of semaphores per semaphore set
pub semmsl: u64,
/// A system-wide limit on the number of semaphores in all semaphore sets
pub semmns: u64,
/// The maximum number of operations that may be specified in a semop(2) call
pub semopm: u64,
/// A system-wide limit on the maximum number of semaphore identifiers
pub semmni: u64,
}
impl SemaphoreLimits {
fn from_str(s: &str) -> Result<Self, &'static str> {
let mut s = s.split_ascii_whitespace();
let semmsl = s.next().ok_or("Missing SEMMSL")?;
let semmns = s.next().ok_or("Missing SEMMNS")?;
let semopm = s.next().ok_or("Missing SEMOPM")?;
let semmni = s.next().ok_or("Missing SEMMNI")?;
let semmsl = semmsl.parse().map_err(|_| "Failed to parse SEMMSL")?;
let semmns = semmns.parse().map_err(|_| "Failed to parse SEMMNS")?;
let semopm = semopm.parse().map_err(|_| "Failed to parse SEMOPM")?;
let semmni = semmni.parse().map_err(|_| "Failed to parse SEMMNI")?;
Ok(SemaphoreLimits {
semmsl,
semmns,
semopm,
semmni,
})
}
}
impl FromStr for SemaphoreLimits {
type Err = &'static str;
fn from_str(s: &str) -> Result<Self, Self::Err> {
SemaphoreLimits::from_str(s)
}
}
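With `FromStr` in place, reading the limits is a file read plus `parse()`. A short usage sketch (the `read_semaphore_limits` helper is hypothetical; error handling is illustrative):

// Hypothetical helper: read and parse /proc/sys/kernel/sem.
fn read_semaphore_limits() -> Result<SemaphoreLimits, Box<dyn std::error::Error>> {
    // The file holds four whitespace-separated values: SEMMSL SEMMNS SEMOPM SEMMNI.
    let raw = std::fs::read_to_string("/proc/sys/kernel/sem")?;
    Ok(raw.trim().parse()?)
}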
bitflags! {
/// Flags representing allowed sysrq functions
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, PartialOrd, Ord)]
pub struct AllowedFunctions : u16 {
/// Enable control of console log level
const ENABLE_CONTROL_LOG_LEVEL = 2;
/// Enable control of keyboard (SAK, unraw)
const ENABLE_CONTROL_KEYBOARD = 4;
/// Enable debugging dumps of processes etc
const ENABLE_DEBUGGING_DUMPS = 8;
/// Enable sync command
const ENABLE_SYNC_COMMAND = 16;
/// Enable remount read-only
const ENABLE_REMOUNT_READ_ONLY = 32;
/// Enable signaling of processes (term, kill, oom-kill)
const ENABLE_SIGNALING_PROCESSES = 64;
/// Allow reboot/poweroff
const ALLOW_REBOOT_POWEROFF = 128;
/// Allow nicing of all real-time tasks
const ALLOW_NICING_REAL_TIME_TASKS = 256;
}
}
/// Values controlling functions allowed to be invoked by the SysRq key
#[derive(Copy, Clone, Debug)]
pub enum SysRq {
/// Disable sysrq completely
Disable,
/// Enable all functions of sysrq
Enable,
/// Bitmask of allowed sysrq functions
AllowedFunctions(AllowedFunctions),
}
impl SysRq {
pub fn to_number(self) -> u16 {
match self {
SysRq::Disable => 0,
SysRq::Enable => 1,
SysRq::AllowedFunctions(allowed) => allowed.bits(),
}
}
fn from_str(s: &str) -> ProcResult<Self> {
match s.parse::<u16>()? {
0 => Ok(SysRq::Disable),
1 => Ok(SysRq::Enable),
x => match AllowedFunctions::from_bits(x) {
Some(allowed) => Ok(SysRq::AllowedFunctions(allowed)),
None => Err("Invalid value".into()),
},
}
}
}
impl FromStr for SysRq {
type Err = ProcError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
SysRq::from_str(s)
}
}
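For values other than 0 and 1, `/proc/sys/kernel/sysrq` holds a bitmask of the flags above. A short decoding sketch assuming the types above (the `describe_sysrq` helper and the example value 176 = 128 + 32 + 16 are illustrative):

// Hypothetical helper: turn the raw sysrq value into a human-readable description.
fn describe_sysrq(raw: &str) -> ProcResult<String> {
    Ok(match raw.trim().parse::<SysRq>()? {
        SysRq::Disable => "sysrq disabled".to_string(),
        SysRq::Enable => "all sysrq functions enabled".to_string(),
        // e.g. "176" decodes to ALLOW_REBOOT_POWEROFF | ENABLE_REMOUNT_READ_ONLY | ENABLE_SYNC_COMMAND.
        SysRq::AllowedFunctions(f) => format!("allowed sysrq functions: {:?}", f),
    })
}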
/// The minimum value that can be written to `/proc/sys/kernel/threads-max` on Linux 4.1 or later
pub const THREADS_MIN: u32 = 20;
/// The maximum value that can be written to `/proc/sys/kernel/threads-max` on Linux 4.1 or later
pub const THREADS_MAX: u32 = 0x3fff_ffff;
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_version() {
let a = Version::from_str("3.16.0-6-amd64").unwrap();
let b = Version::new(3, 16, 0);
assert_eq!(a, b);
let a = Version::from_str("3.16.0").unwrap();
let b = Version::new(3, 16, 0);
assert_eq!(a, b);
let a = Version::from_str("3.16.0_1").unwrap();
let b = Version::new(3, 16, 0);
assert_eq!(a, b);
}
#[test]
fn test_type() {
let a = Type::from_str("Linux").unwrap();
assert_eq!(a.sysname, "Linux");
}
#[test]
fn test_build_info() {
// For Ubuntu, Manjaro, CentOS and others:
let a = BuildInfo::from_str("#1 SMP PREEMPT Thu Sep 30 15:29:01 UTC 2021").unwrap();
let mut flags: HashSet<String> = HashSet::new();
flags.insert("SMP".to_string());
flags.insert("PREEMPT".to_string());
assert_eq!(a.version, "1");
assert_eq!(a.version_number().unwrap(), 1);
assert_eq!(a.flags, flags);
assert!(a.smp());
assert!(a.preempt());
assert!(!a.preemptrt());
assert_eq!(a.extra, "Thu Sep 30 15:29:01 UTC 2021");
#[cfg(feature = "chrono")]
let _ = a.extra_date().unwrap();
// For Arch and others:
let b = BuildInfo::from_str("#1 SMP PREEMPT Fri, 12 Nov 2021 19:22:10 +0000").unwrap();
assert_eq!(b.version, "1");
assert_eq!(b.version_number().unwrap(), 1);
assert_eq!(b.flags, flags);
assert_eq!(b.extra, "Fri, 12 Nov 2021 19:22:10 +0000");
assert!(b.smp());
assert!(b.preempt());
assert!(!b.preemptrt());
#[cfg(feature = "chrono")]
let _ = b.extra_date().unwrap();
// For Debian and others:
let c = BuildInfo::from_str("#1 SMP Debian 5.10.46-4 (2021-08-03)").unwrap();
let mut flags: HashSet<String> = HashSet::new();
flags.insert("SMP".to_string());
assert_eq!(c.version, "1");
assert_eq!(c.version_number().unwrap(), 1);
assert_eq!(c.flags, flags);
assert_eq!(c.extra, "Debian 5.10.46-4 (2021-08-03)");
assert!(c.smp());
assert!(!c.preempt());
assert!(!c.preemptrt());
// Skip the date parsing for now
}
#[test]
fn test_semaphore_limits() {
// Note that the below string has tab characters in it. Make sure to not remove them.
let a = SemaphoreLimits::from_str("32000	1024000000	500	32000").unwrap();
let b = SemaphoreLimits {
semmsl: 32_000,
semmns: 1_024_000_000,
semopm: 500,
semmni: 32_000,
};
assert_eq!(a, b);
let a = SemaphoreLimits::from_str("1");
assert!(a.is_err() && a.err().unwrap() == "Missing SEMMNS");
let a = SemaphoreLimits::from_str("1 string 500 3200");
assert!(a.is_err() && a.err().unwrap() == "Failed to parse SEMMNS");
}
}

View File

@ -0,0 +1,9 @@
//! Sysctl is a means of configuring certain aspects of the kernel at run-time,
//! and the `/proc/sys/` directory is there so that you don't even need special tools to do it!
//!
//! This directory (present since 1.3.57) contains a number of files
//! and subdirectories corresponding to kernel variables.
//! These variables can be read and sometimes modified using the `/proc` filesystem,
//! and the (deprecated) sysctl(2) system call.
pub mod kernel;

View File

@ -0,0 +1,105 @@
use std::io;
use super::{expect, ProcResult};
use std::str::FromStr;
#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};
/// A shared memory segment, parsed from `/proc/sysvipc/shm`.
///
/// See also [crate::process::MMapPath::Vsys].
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
#[allow(non_snake_case)]
pub struct Shm {
/// Segment key
pub key: i32,
/// Segment ID, unique
pub shmid: u64,
/// Access permissions, as octal
pub perms: u16,
/// Size in bytes
pub size: u64,
/// Creator PID
pub cpid: i32,
/// Last operator PID
pub lpid: i32,
/// Number of attached processes
pub nattch: u32,
/// User ID
pub uid: u16,
/// Group ID
pub gid: u16,
/// Creator UID
pub cuid: u16,
/// Creator GID
pub cgid: u16,
/// Time of last `shmat` (attach), epoch
pub atime: u64,
/// Time of last `shmdt` (detach), epoch
pub dtime: u64,
/// Time of last permission change, epoch
pub ctime: u64,
/// Part of the shared memory segment currently resident in memory
pub rss: u64,
/// Part of the shared memory segment currently in swap
pub swap: u64,
}
/// A set of shared memory segments parsed from `/proc/sysvipc/shm`
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct SharedMemorySegments(pub Vec<Shm>);
impl super::FromBufRead for SharedMemorySegments {
fn from_buf_read<R: io::BufRead>(r: R) -> ProcResult<Self> {
let mut vec = Vec::new();
// See printing code here:
// https://elixir.bootlin.com/linux/latest/source/ipc/shm.c#L1737
for line in r.lines().skip(1) {
let line = expect!(line);
let mut s = line.split_whitespace();
let key = expect!(i32::from_str(expect!(s.next())));
let shmid = expect!(u64::from_str(expect!(s.next())));
let perms = expect!(u16::from_str(expect!(s.next())));
let size = expect!(u64::from_str(expect!(s.next())));
let cpid = expect!(i32::from_str(expect!(s.next())));
let lpid = expect!(i32::from_str(expect!(s.next())));
let nattch = expect!(u32::from_str(expect!(s.next())));
let uid = expect!(u16::from_str(expect!(s.next())));
let gid = expect!(u16::from_str(expect!(s.next())));
let cuid = expect!(u16::from_str(expect!(s.next())));
let cgid = expect!(u16::from_str(expect!(s.next())));
let atime = expect!(u64::from_str(expect!(s.next())));
let dtime = expect!(u64::from_str(expect!(s.next())));
let ctime = expect!(u64::from_str(expect!(s.next())));
let rss = expect!(u64::from_str(expect!(s.next())));
let swap = expect!(u64::from_str(expect!(s.next())));
let shm = Shm {
key,
shmid,
perms,
size,
cpid,
lpid,
nattch,
uid,
gid,
cuid,
cgid,
atime,
dtime,
ctime,
rss,
swap,
};
vec.push(shm);
}
Ok(SharedMemorySegments(vec))
}
}
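A test-style sketch of feeding the parser a canned `/proc/sysvipc/shm` snippet, mirroring the other tests in this crate (the header line is skipped by the code above, the row values are made up for illustration, and the `crate::FromBufRead` import path assumes the trait lives at the crate root as the sibling modules suggest):

#[cfg(test)]
mod tests {
    use super::*;
    use crate::FromBufRead;
    use std::io::Cursor;

    #[test]
    fn test_shm_parse() {
        // One header line (skipped by the parser) followed by a single made-up segment row.
        let data = "key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n\
                    0 65536 600 1048576 1234 1234 2 1000 1000 1000 1000 1650000000 1650000001 1650000002 4096 0\n";
        let segments = SharedMemorySegments::from_buf_read(Cursor::new(data)).unwrap();
        assert_eq!(segments.0.len(), 1);
        assert_eq!(segments.0[0].size, 1_048_576);
        assert_eq!(segments.0[0].nattch, 2);
    }
}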

View File

@ -0,0 +1,68 @@
use crate::{expect, ProcResult};
use std::io::Read;
use std::str::FromStr;
use std::time::Duration;
/// The uptime of the system, based on the `/proc/uptime` file.
#[derive(Debug, Clone)]
#[non_exhaustive]
pub struct Uptime {
/// The uptime of the system (including time spent in suspend).
pub uptime: f64,
/// The sum of how much time each core has spent idle.
pub idle: f64,
}
impl super::FromRead for Uptime {
fn from_read<R: Read>(mut r: R) -> ProcResult<Self> {
let mut buf = Vec::with_capacity(128);
r.read_to_end(&mut buf)?;
let line = String::from_utf8_lossy(&buf);
let buf = line.trim();
let mut s = buf.split(' ');
let uptime = expect!(f64::from_str(expect!(s.next())));
let idle = expect!(f64::from_str(expect!(s.next())));
Ok(Uptime { uptime, idle })
}
}
impl Uptime {
/// The uptime of the system (including time spent in suspend).
pub fn uptime_duration(&self) -> Duration {
let secs = self.uptime.trunc() as u64;
let csecs = (self.uptime.fract() * 100.0).round() as u32;
let nsecs = csecs * 10_000_000;
Duration::new(secs, nsecs)
}
/// The sum of how much time each core has spent idle.
pub fn idle_duration(&self) -> Duration {
let secs = self.idle.trunc() as u64;
let csecs = (self.idle.fract() * 100.0).round() as u32;
let nsecs = csecs * 10_000_000;
Duration::new(secs, nsecs)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::FromRead;
use std::io::Cursor;
#[test]
fn test_uptime() {
let reader = Cursor::new(b"2578790.61 1999230.98\n");
let uptime = Uptime::from_read(reader).unwrap();
assert_eq!(uptime.uptime_duration(), Duration::new(2578790, 610_000_000));
assert_eq!(uptime.idle_duration(), Duration::new(1999230, 980_000_000));
}
}

View File

@ -297,7 +297,12 @@ CrashGenerationServer::ClientEvent(short revents)
// error. So we'll report that as well via the callback-functions.
bool res = write_minidump_linux_with_context(
minidump_filename.c_str(), crashing_pid, &breakpad_cc->context,
&breakpad_cc->float_state, &signalfd_si, breakpad_cc->tid, &error_msg);
# ifndef __arm__
reinterpret_cast<const fpregset_t *>(&breakpad_cc->float_state),
# else
nullptr,
# endif // __arm__
&signalfd_si, breakpad_cc->tid, &error_msg);
#else
if (!google_breakpad::WriteMinidump(minidump_filename.c_str(),
crashing_pid, crash_context,

View File

@ -8,7 +8,7 @@ license = "MPL-2.0"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
goblin = { version = "0.6", features = ["elf32", "elf64", "pe32", "pe64"] }
goblin = { version = "0.7", features = ["elf32", "elf64", "pe32", "pe64"] }
memoffset = "0.8"
mozannotation_client = { path = "../mozannotation_client/" }
nsstring = { path = "../../../xpcom/rust/nsstring/" }

View File

@ -3894,3 +3894,39 @@ void SetNotificationPipeForChild(int childCrashFd) {
#endif
} // namespace CrashReporter
#if ANDROID_NDK_MAJOR_VERSION && (ANDROID_NDK_MAJOR_VERSION < 24)
// Bionic introduced support for getgrgid_r() and getgrnam_r() only in version
// 24 (that is Android Nougat / 7.1.2). Since we build with NDK version 23c we
// can't link against those functions, but nix needs them and minidump-writer
// relies on nix. These functions should never be called in practice, hence we
// implement them only to satisfy nix linking requirements, but we crash if we
// accidentally enter them.
extern "C" {
int getgrgid_r(gid_t gid, struct group* grp, char* buf, size_t buflen,
struct group** result) {
MOZ_CRASH("getgrgid_r() is not available");
return EPERM;
}
int getgrnam_r(const char* name, struct group* grp, char* buf, size_t buflen,
struct group** result) {
MOZ_CRASH("getgrnam_r() is not available");
return EPERM;
}
int mlockall(int flags) {
MOZ_CRASH("mlockall() is not available");
return EPERM;
}
int munlockall(void) {
MOZ_CRASH("munlockall() is not available");
return EPERM;
}
}
#endif // ANDROID_NDK_MAJOR_VERSION && (ANDROID_NDK_MAJOR_VERSION < 24)

Some files were not shown because too many files have changed in this diff.